#!/usr/bin/python
# -*- coding: utf-8 -*-
#Copyright (C) Fiz Vazquez vud1@sindominio.net
# Modified by dgranda
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import gobject
import os
import sys
import logging
import datetime
import matplotlib
import dateutil.parser
from dateutil.tz import * # for tzutc()
from SimpleGladeApp import *
from popupmenu import PopupMenu
from aboutdialog import About
import pytrainer.record
import pytrainer.lib.webUtils  # used by the AboutDialog URL hook in __init__
from pytrainer.lib.date import Date, second2time
from pytrainer.lib.xmlUtils import XMLParser
#from pytrainer.lib.gpx import Gpx
from pytrainer.extensions.googlemaps import Googlemaps
from pytrainer.extensions.osm import Osm
from pytrainer.lib.unitsconversor import *
from pytrainer.recordgraph import RecordGraph
from pytrainer.daygraph import DayGraph
from pytrainer.weekgraph import WeekGraph
from pytrainer.monthgraph import MonthGraph
from pytrainer.yeargraph import YearGraph
from pytrainer.totalgraph import TotalGraph
from pytrainer.heartrategraph import HeartRateGraph
from pytrainer.extensions.mapviewer import MapViewer
from pytrainer.extensions.waypointeditor import WaypointEditor
from pytrainer.core.equipment import EquipmentService
from pytrainer.gui.drawGraph import DrawGraph
from pytrainer.gui.windowcalendar import WindowCalendar
from pytrainer.lib.listview import ListSearch
from pytrainer.lib.uc import UC
class Main(SimpleGladeApp):
    def __init__(self, sport_service, data_path=None, parent=None, version=None, gpxDir=None):
self._sport_service = sport_service
def url_hook(dialog, url):
pytrainer.lib.webUtils.open_url_in_browser(url)
# Available in PyGTK 2.6 and above
gtk.about_dialog_set_url_hook(url_hook)
self.version = version
self.parent = parent
self.pytrainer_main = parent
self.data_path = data_path
glade_path="glade/pytrainer.glade"
root = "window1"
domain = None
SimpleGladeApp.__init__(self, self.data_path+glade_path, root, domain)
self.uc = UC()
self.popup = PopupMenu(data_path,self)
self.block = False
self.activeSport = None
self.gpxDir = gpxDir
self.record_list = None
self.laps = None
#Setup graph
self.grapher = DrawGraph(self, self.pytrainer_main)
self.y1_limits = None
self.y1_color = None
self.y1_linewidth = 1
# setup Search ListView
self.listsearch = ListSearch(sport_service, self, self.pytrainer_main)
self.aboutwindow = None
def new(self):
self.menublocking = 0
self.selected_view="day"
self.window1.set_title ("pytrainer %s" % self.version)
        try:
            width, height = self.pytrainer_main.profile.getValue("pytraining", "window_size").split(',')
            self.window1.resize(int(width), int(height))
        except Exception:
            logging.debug("No stored window size found, keeping the default")
self.record_list = []
#create the columns for the listdayrecord
if self.pytrainer_main.profile.prf_us_system:
distance_unit = _("Miles")
else:
distance_unit = _("Km")
columns = [{'name':_("id"), 'visible':False},{'name':_("Start"), }, {'name':_("Sport")},{'name':distance_unit}]
self.create_treeview(self.recordTreeView,columns)
#create the columns for the listarea
        # \u2300 (diameter sign) is used below to label mean values; for its different encodings see e.g. http://de.wikipedia.org/wiki/%C3%98#Kodierung
columns=[ {'name':_("id"), 'visible':False},
{'name':_("Title")},
{'name':_("Date")},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity': 'distance'},
{'name':_("Sport")},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
{'name':_(u"\u2300 HR"), 'xalign':1.0},
{'name':_(u"\u2300 Speed"), 'xalign':1.0, 'format_float':'%.1f', 'quantity': 'speed'},
{'name':_("Calories"), 'xalign':1.0}
]
self.create_treeview(self.allRecordTreeView,columns)
self.create_menulist(columns)
#create the columns for the waypoints treeview
columns=[{'name':_("id"), 'visible':False},{'name':_("Waypoint")}]
self.create_treeview(self.waypointTreeView,columns)
#create the columns for the athlete history treeview
columns=[ {'name':_("id"), 'visible':False},
{'name':_("Date")},
{'name':_("Weight"), 'xalign':1.0},
{'name':_("Body Fat %"), 'xalign':1.0},
{'name':_("Resting HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0}
]
self.create_treeview(self.athleteTreeView,columns)
#create the columns for the stats treeview
columns=[ {'name':_("id"), 'visible':False},
{'name':_("Sport")},
{'name':_("Records"), 'xalign':1.0},
{'name':_("Total duration"), 'xalign':1.0, 'format_duration':True},
{'name':_("Total distance"), 'xalign':1.0, 'format_float':'%.1f', 'quantity':'distance'},
{'name':_("Avg speed"), 'format_float':'%.2f', 'quantity':'maxspeed', 'xalign':1.0},
{'name':_("Max speed"), 'format_float':'%.2f', 'quantity':'maxspeed', 'xalign':1.0},
{'name':_("Avg HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0},
{'name':_("Max duration"), 'xalign':1.0, 'format_duration':True},
{'name':_("Max distance"), 'xalign':1.0, 'format_float':'%.1f', 'quantity':'distance'},
]
self.create_treeview(self.statsTreeView,columns)
#create the columns for the laps treeview
columns=[
{'name':_("Lap")},
{'name':_("Trigger"), 'xalign':0, 'pixbuf':True},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity':'distance'},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
{'name':_("Avg speed"), 'format_float':'%.2f', 'quantity':'speed'},
{'name':_("Max speed"), 'format_float':'%.2f', 'quantity':'speed'},
{'name':_("Avg pace"), 'xalign':1.0, 'quantity':'pace'},
{'name':_("Max pace"), 'xalign':1.0, 'quantity':'pace'},
{'name':_("Avg HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0},
{'name':_("Calories"), 'xalign':1.0},
{'name':_("Intensity"), 'visible':False},
{'name':_("Comments"), 'xalign':0.0},
]
self.create_treeview(self.lapsTreeView,columns)
#create the columns for the projected times treeview
columns=[
{'name':_("id"), 'visible':False},
{'name':_("Race"), 'xalign':1.0},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity':'distance'},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
]
self.create_treeview(self.analyticsTreeView,columns,sortable=False)
#create the columns for the rank treeview
columns=[
{'name':_("id"), 'visible':False},
{'name':_("Rank"), 'visible':True},
{'name':_("Date"), 'xalign':1.0},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity':'distance'},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
{'name':_("Speed"), 'format_float':'%.2f', 'quantity':'speed'},
{'name':_("Pace"), 'format_float':'%.2f', 'quantity':'pace'},
{'name':_("Color"), 'visible':False},
]
self.create_treeview(self.rankingTreeView,columns,sortable=False)
self.fileconf = self.pytrainer_main.profile.confdir+"/listviewmenu.xml"
if not os.path.isfile(self.fileconf):
self._createXmlListView(self.fileconf)
self.showAllRecordTreeViewColumns()
self.allRecordTreeView.set_search_column(1)
self.notebook.set_current_page(1)
#Set correct map viewer
if self.pytrainer_main.profile.getValue("pytraining","default_viewer") == "1":
self.radiobuttonOSM.set_active(1)
else:
self.radiobuttonGMap.set_active(1)
self.comboMapLineType.set_active(0)
def _float_or(self, value, default):
'''Function to parse and return a float, or the default if the parsing fails'''
        try:
            result = float(value)
        except (ValueError, TypeError):
            result = default
return result
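    # Behaviour of _float_or, with illustrative values:
    #   self._float_or("3.5", 0.0)  -> 3.5
    #   self._float_or("", 10.0)    -> 10.0  (unparseable text falls back to the default)
    #   self._float_or("abc", None) -> None  (used by on_setlimits to fall back to the stored limit)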
def setup(self):
logging.debug(">>")
self.createGraphs()
self.createMap(MapViewer,self.pytrainer_main.waypoint)
self.createWaypointEditor(WaypointEditor,self.pytrainer_main.waypoint, parent=self.pytrainer_main)
page = self.notebook.get_current_page()
self.on_page_change(None,None,page)
logging.debug("<<")
def _createXmlListView(self,file):
menufile = XMLParser(file)
savedOptions = []
savedOptions.append(("date","True"))
savedOptions.append(("distance","True"))
savedOptions.append(("average","False"))
savedOptions.append(("title","True"))
savedOptions.append(("sport","True"))
savedOptions.append(("id_record","False"))
savedOptions.append(("time","False"))
savedOptions.append(("beats","False"))
savedOptions.append(("calories","False"))
menufile.createXMLFile("listviewmenu",savedOptions)
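    # A sketch of the preferences this writes (the exact markup is up to
    # XMLParser.createXMLFile, so treat this as an assumed illustration): a
    # single "listviewmenu" node whose attributes mirror the pairs above,
    # e.g. date="True" distance="True" average="False" ...; these values are
    # read back by showAllRecordTreeViewColumns via menufile.getValue().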
def removeImportPlugin(self, plugin):
for widget in self.menuitem1_menu:
if widget.get_name() == plugin[1]:
self.menuitem1_menu.remove(widget)
def removeExtension(self, extension):
for widget in self.recordbuttons_hbox:
if widget.get_name() == extension[1]:
logging.debug("Removing extension: %s " % extension[0])
self.recordbuttons_hbox.remove(widget)
def addImportPlugin(self,plugin):
button = gtk.MenuItem(plugin[0])
button.set_name(plugin[1])
button.connect("activate", self.parent.runPlugin, plugin[1])
self.menuitem1_menu.insert(button,3)
self.menuitem1_menu.show_all()
def addExtension(self,extension):
#txtbutton,extensioncode,extensiontype = extension
button = gtk.Button(extension[0])
button.set_name(extension[1])
button.connect("button_press_event", self.runExtension, extension)
self.recordbuttons_hbox.pack_start(button,False,False,0)
self.recordbuttons_hbox.show_all()
def runExtension(self,widget,widget2,extension):
#print extension
txtbutton,extensioncode,extensiontype = extension
id = None
if extensiontype=="record":
selected,iter = self.recordTreeView.get_selection().get_selected()
id = selected.get_value(iter,0)
self.parent.runExtension(extension,id)
def createGraphs(self):
logging.debug(">>")
self.drawarearecord = RecordGraph(self.record_graph_vbox, self.window1, self.record_combovalue, self.record_combovalue2, self.btnShowLaps, self.tableConfigY1, pytrainer_main=self.pytrainer_main)
self.drawareaheartrate = HeartRateGraph(self.heartrate_vbox, self.window1, self.heartrate_vbox2, pytrainer_main=self.pytrainer_main)
self.day_vbox.hide()
sports = self._sport_service.get_all_sports()
self.drawareaweek = WeekGraph(sports, self.weekview, self.window1, self.week_combovalue, self.week_combovalue2, self.pytrainer_main)
self.drawareamonth = MonthGraph(sports, self.month_vbox, self.window1, self.month_combovalue,self.month_combovalue2, self.pytrainer_main)
self.drawareayear = YearGraph(sports, self.year_vbox, self.window1, self.year_combovalue,self.year_combovalue2, self.pytrainer_main)
self.drawareatotal = TotalGraph(sports, self.total_vbox, self.window1, self.total_combovalue,self.total_combovalue2, self.pytrainer_main)
logging.debug("<<")
def createMap(self,MapViewer,waypoint):
logging.debug(">>")
self.waypoint = waypoint
if not getattr(self, 'mapviewer', None):
self.mapviewer = MapViewer(self.data_path, pytrainer_main=self.parent, box=self.map_vbox)
self.mapviewer_fs = MapViewer(self.data_path, pytrainer_main=self.parent, box=self.map_vbox_old)
logging.debug("<<")
def updateSportList(self,listSport):
logging.debug(">>")
liststore = self.sportlist.get_model()
        if self.sportlist.get_active() != 0:
            self.sportlist.set_active(0) #Set first item active if it isn't
firstEntry = self.sportlist.get_active_text()
liststore.clear() #Delete all items
#Re-add "All Sports"
liststore.append([firstEntry])
#Re-add all sports in listSport
for sport in listSport:
liststore.append([sport.name])
self.sportlist.set_active(0)
logging.debug("<<")
def render_duration(self, column, cell, model, iter):
orig = cell.get_property('text')
        if ':' not in orig:
h,m,s = second2time(int(orig))
new = '%d:%02d:%02d' % (h,m,s)
else:
new = orig
if orig[:4] == ' 0:0':
new = orig[4:]
elif orig[:3] == ' 0:':
new = orig[3:]
if len(new)>5:
hours = int(new[:-6])
days = _("d")
if hours>23:
new = "%d %s %02d:%s" % (hours / 24, days, hours%24 ,new[-5:])
cell.set_property('text', new)
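    # Examples of what render_duration produces (inputs assumed for illustration):
    #   "3725"      -> "1:02:05"        (raw seconds are expanded via second2time)
    #   " 0:05:30"  -> "5:30"           (a zero hour field is trimmed)
    #   "30:15:00"  -> "1 d 06:15:00"   (hours beyond 23 roll over into days)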
def render_float(self, column, cell, model, iter, data):
_format, _quantity, _idx = data
_val = model.get_value(iter, _idx)
_val = self.uc.sys2usr(_quantity, _val)
_val_str = _format % float(_val)
cell.set_property('text', _val_str)
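    # render_float stores values in metric and converts only for display: the
    # 'quantity' name is passed to UC.sys2usr, so e.g. a distance cell holding
    # 10.0 (km) renders as "6.21" under the imperial profile (conversion
    # factor assumed here for illustration).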
def create_treeview(self,treeview,columns,sortable=True):
for column_index, column_dict in enumerate(columns):
if 'pixbuf' in column_dict:
renderer = gtk.CellRendererPixbuf()
else:
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn(column_dict['name'])
column.pack_start(renderer, expand=False)
if 'pixbuf' in column_dict:
column.add_attribute(renderer, 'pixbuf', column_index)
else:
column.add_attribute(renderer, 'text', column_index)
column.set_resizable(True)
column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
if 'xalign' in column_dict:
renderer.set_property('xalign', column_dict['xalign'])
if 'visible' in column_dict:
column.set_visible(column_dict['visible'])
if 'format_float' in column_dict:
column.set_cell_data_func(renderer, self.render_float, [column_dict['format_float'], column_dict['quantity'], column_index])
if 'format_duration' in column_dict and column_dict['format_duration']:
column.set_cell_data_func(renderer, self.render_duration)
if sortable:
column.set_sort_column_id(column_index)
treeview.append_column(column)
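    # The column dictionaries above form a small convention; every key except
    # 'name' is optional. A minimal sketch of a caller:
    #
    #   columns = [
    #       {'name': _("id"), 'visible': False},                # hidden id column
    #       {'name': _("Distance"), 'xalign': 1.0,
    #        'format_float': '%.2f', 'quantity': 'distance'},   # unit-converted float
    #       {'name': _("Time"), 'xalign': 1.0, 'format_duration': True},
    #   ]
    #   self.create_treeview(someTreeView, columns, sortable=False)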
def actualize_recordview(self,activity):
logging.debug(">>")
if activity.id is None:
#Blank out fields
self.record_distance.set_text("")
self.record_upositive.set_text("")
self.record_unegative.set_text("")
self.record_average.set_text("")
self.record_maxspeed.set_text("")
self.record_pace.set_text("")
self.record_maxpace.set_text("")
self.record_sport.set_text("")
self.record_date.set_text("")
self.record_time.set_text("")
self.record_duration.set_text("")
#self.record_minute.set_text("")
#self.record_second.set_text("")
self.record_calories.set_text("")
self.record_title.set_text("")
self.label_record_equipment.set_text("")
self.frame_laps.hide()
com_buffer = self.record_comments.get_buffer()
start,end = com_buffer.get_bounds()
com_buffer.set_text("")
#Move to main record page and grey out
self.recordview.set_current_page(0)
self.recordview.set_sensitive(0)
logging.debug("<<")
return
#Set the units for the activity results, e.g. km, km/h etc
self.r_distance_unit.set_text(activity.distance_unit)
self.r_speed_unit.set_text(activity.speed_unit)
self.r_maxspeed_unit.set_text(activity.speed_unit)
self.r_pace_unit.set_text(activity.pace_unit)
self.r_maxpace_unit.set_text(activity.pace_unit)
self.r_ascent_unit.set_text(activity.height_unit)
self.r_descent_unit.set_text(activity.height_unit)
if activity.has_data:
self.recordview.set_sensitive(1)
dateTime = activity.date_time
recordDateTime = dateTime.strftime("%Y-%m-%d %H:%M:%S")
recordDate = dateTime.strftime("%x")
recordTime = dateTime.strftime("%X")
recordDateTimeOffset = dateTime.strftime("%z")
self.record_distance.set_text(activity.get_value_f('distance', "%0.2f"))
self.record_upositive.set_text(activity.get_value_f('upositive', "%0.2f"))
self.record_unegative.set_text(activity.get_value_f('unegative', "%0.2f"))
self.record_average.set_text(activity.get_value_f('average', "%0.2f"))
self.record_maxspeed.set_text(activity.get_value_f('maxspeed', "%0.2f"))
self.record_pace.set_text(activity.get_value_f('pace', "%s"))
self.record_maxpace.set_text(activity.get_value_f('maxpace', "%s"))
self.record_sport.set_text(activity.sport_name)
self.record_date.set_text(recordDate)
self.record_time.set_text(recordTime)
self.record_duration.set_text(activity.get_value_f('time', '%s'))
self.record_calories.set_text(activity.get_value_f('calories', "%0.0f"))
self.record_title.set_text(activity.title)
hrun,mrun,srun = second2time(activity.time)
hpause,mpause,spause = second2time(activity.time_pause)
self.record_runrest.set_text("%02d:%02d:%02d / %02d:%02d:%02d" %(hrun,mrun,srun,hpause,mpause,spause))
buffer = self.record_comments.get_buffer()
start,end = buffer.get_bounds()
buffer.set_text(activity.comments)
equipment = self.parent.record.get_record_equipment(activity.id)
if len(equipment) > 0:
equipment_text = ", ".join(map(lambda(item): item.description, equipment))
self.label_record_equipment.set_text(equipment_text)
else:
self.label_record_equipment.set_markup("<i>None</i>")
if len(activity.laps)>1:
store = gtk.ListStore(
gobject.TYPE_INT,
gtk.gdk.Pixbuf,
gobject.TYPE_FLOAT,
gobject.TYPE_STRING,
gobject.TYPE_FLOAT,
gobject.TYPE_FLOAT,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_INT,
gobject.TYPE_INT,
gobject.TYPE_INT,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
)
for lap in activity.laps:
t = lap['elapsed_time']
m = lap['distance']
if self.pytrainer_main.profile.getValue("pytraining","prf_us_system") == "True":
m = km2miles(m)
s = m / float(t) * 3.6
max_speed = lap['max_speed'] * 3.6
if s > 0:
pace = "%d:%02d" %((3600/s)/60,(3600/s)%60)
if max_speed >0:
max_pace = "%d:%02d" %((3600/max_speed)/60,(3600/max_speed)%60)
else:
max_pace = "0:00"
else:
pace = "0:00"
max_pace = "0:00"
color = {
'active' : '#000000',
'rest' : '#808080',
'resting' : '#808080',
}
pic = gtk.gdk.pixbuf_new_from_file(self.data_path+"glade/trigger_%s.png" % lap['laptrigger'])
iter = store.append()
store.set(iter,
0, lap['lap_number']+1,
1, pic,
2, m/1000,
3, str(int(float(t))),
4, s,
5, max_speed,
6, pace,
7, max_pace,
8, lap['avg_hr'] if lap['avg_hr'] else 0,
9, lap['max_hr'] if lap['max_hr'] else 0,
10, lap['calories'],
11, color[lap['intensity']],
12, '' if not lap['comments'] else (lap['comments'] if len(lap['comments'])<40 else "%s..." % lap['comments'][:40]),
)
self.lapsTreeView.set_model(store)
self.lapsTreeView.set_rules_hint(True)
# Use grey color for "rest" laps
for c in self.lapsTreeView.get_columns():
for cr in c.get_cell_renderers():
if type(cr)==gtk.CellRendererText:
c.add_attribute(cr, 'foreground', 11)
def edited_cb(cell, path, new_text, (liststore, activity)):
liststore[path][12] = new_text
activity.laps[int(path)]['comments'] = new_text
self.pytrainer_main.ddbb.update("laps", "comments", [new_text,], "record=%s and lap_number=%s" % (activity.id, path))
def show_tooltip(widget, x, y, keyboard_mode, tooltip, user_param1):
path = self.lapsTreeView.get_path_at_pos(x,y-20)
if not path: return False
if path[1] != self.lapsTreeView.get_columns()[12]: return False
comments = user_param1[1].laps[path[0][0]]['comments']
if comments and len(comments)>40:
tooltip.set_text(comments)
return True
return False
if getattr(self.lapsTreeView, 'tooltip_handler_id', None):
self.lapsTreeView.disconnect(self.lapsTreeView.tooltip_handler_id)
self.lapsTreeView.tooltip_handler_id = self.lapsTreeView.connect('query-tooltip', show_tooltip, (store, activity))
i = 0
for cr in self.lapsTreeView.get_columns()[12].get_cell_renderers():
cr.set_property('editable', True)
if getattr(self, 'lapview_handler_id', None):
cr.disconnect(self.lapview_handler_id)
self.lapview_handler_id = cr.connect('edited', edited_cb, (store, activity))
tooltip = gtk.Tooltip()
tooltip.set_text(activity.laps[i]['comments'])
self.lapsTreeView.set_tooltip_cell(tooltip, i, self.lapsTreeView.get_columns()[12], cr)
i += 1
self.frame_laps.show()
else:
self.frame_laps.hide()
else:
self.recordview.set_current_page(0)
self.recordview.set_sensitive(0)
logging.debug("<<")
def actualize_recordgraph(self,activity):
logging.debug(">>")
self.record_list = activity.tracks
self.laps = activity.laps
if activity.gpx_file is not None:
if not self.pytrainer_main.startup_options.newgraph:
logging.debug("Using the original graphing")
logging.debug("Activity has GPX data")
#Show drop down boxes
self.hbox30.show()
#Hide new graph details
self.graph_data_hbox.hide()
self.hboxGraphOptions.hide()
#Enable graph
self.record_vbox.set_sensitive(1)
self.drawarearecord.drawgraph(self.record_list,self.laps)
else:
#Still just test code....
logging.debug("Using the new TEST graphing approach")
#Hide current drop down boxes
self.hbox30.hide()
self.graph_data_hbox.hide()
#Enable graph
self.record_vbox.set_sensitive(1)
#Create a frame showing data available for graphing
#Remove existing frames
for child in self.graph_data_hbox.get_children():
if isinstance(child, gtk.Frame):
self.graph_data_hbox.remove(child)
#Build frames and vboxs to hold checkbuttons
xFrame = gtk.Frame(label=_("Show on X Axis"))
y1Frame = gtk.Frame(label=_("Show on Y1 Axis"))
y2Frame = gtk.Frame(label=_("Show on Y2 Axis"))
limitsFrame = gtk.Frame(label=_("Axis Limits"))
xvbox = gtk.VBox()
y1box = gtk.Table()
y2box = gtk.Table()
limitsbox = gtk.Table()
#Populate X axis data
#Create x axis items
xdistancebutton = gtk.RadioButton(label=_("Distance"))
xtimebutton = gtk.RadioButton(group=xdistancebutton, label=_("Time"))
xlapsbutton = gtk.CheckButton(label=_("Laps"))
y1gridbutton = gtk.CheckButton(label=_("Left Axis Grid"))
y2gridbutton = gtk.CheckButton(label=_("Right Axis Grid"))
xgridbutton = gtk.CheckButton(label=_("X Axis Grid"))
#Set state of buttons
if activity.x_axis == "distance":
xdistancebutton.set_active(True)
elif activity.x_axis == "time":
xtimebutton.set_active(True)
xlapsbutton.set_active(activity.show_laps)
y1gridbutton.set_active(activity.y1_grid)
y2gridbutton.set_active(activity.y2_grid)
xgridbutton.set_active(activity.x_grid)
#Connect handlers to buttons
xdistancebutton.connect("toggled", self.on_xaxischange, "distance", activity)
xtimebutton.connect("toggled", self.on_xaxischange, "time", activity)
xlapsbutton.connect("toggled", self.on_xlapschange, activity)
y1gridbutton.connect("toggled", self.on_gridchange, "y1", activity)
y2gridbutton.connect("toggled", self.on_gridchange, "y2", activity)
xgridbutton.connect("toggled", self.on_gridchange, "x", activity)
#Add buttons to frame
xvbox.pack_start(xdistancebutton, expand=False)
xvbox.pack_start(xtimebutton, expand=False)
xvbox.pack_start(xlapsbutton, expand=False)
xvbox.pack_start(y1gridbutton, expand=False)
xvbox.pack_start(y2gridbutton, expand=False)
xvbox.pack_start(xgridbutton, expand=False)
xFrame.add(xvbox)
#Populate axis limits frame
#TODO Need to change these to editable objects and redraw graphs if changed....
#Create labels etc
minlabel = gtk.Label("<small>Min</small>")
minlabel.set_use_markup(True)
maxlabel = gtk.Label("<small>Max</small>")
maxlabel.set_use_markup(True)
xlimlabel = gtk.Label("X")
limits = {}
xminlabel = gtk.Entry(max=10)
xmaxlabel = gtk.Entry(max=10)
limits['xminlabel'] = xminlabel
limits['xmaxlabel'] = xmaxlabel
xminlabel.set_width_chars(5)
xminlabel.set_alignment(1.0)
xmaxlabel.set_width_chars(5)
xmaxlabel.set_alignment(1.0)
y1limlabel = gtk.Label("Y1")
y1minlabel = gtk.Entry(max=10)
y1maxlabel = gtk.Entry(max=10)
limits['y1minlabel'] = y1minlabel
limits['y1maxlabel'] = y1maxlabel
y1minlabel.set_width_chars(5)
y1minlabel.set_alignment(1.0)
y1maxlabel.set_width_chars(5)
y1maxlabel.set_alignment(1.0)
y2limlabel = gtk.Label("Y2")
y2minlabel = gtk.Entry(max=10)
y2maxlabel = gtk.Entry(max=10)
limits['y2minlabel'] = y2minlabel
limits['y2maxlabel'] = y2maxlabel
y2minlabel.set_width_chars(5)
y2minlabel.set_alignment(1.0)
y2maxlabel.set_width_chars(5)
y2maxlabel.set_alignment(1.0)
resetbutton = gtk.Button(_('Reset Limits'))
resetbutton.connect("clicked", self.on_setlimits, activity, True, None)
setbutton = gtk.Button(_('Set Limits'))
setbutton.connect("clicked", self.on_setlimits, activity, False, limits)
#Add labels etc to table
limitsbox.attach(minlabel, 1, 2, 0, 1, yoptions=gtk.SHRINK)
limitsbox.attach(maxlabel, 2, 3, 0, 1, yoptions=gtk.SHRINK)
limitsbox.attach(xlimlabel, 0, 1, 1, 2, yoptions=gtk.SHRINK)
limitsbox.attach(xminlabel, 1, 2, 1, 2, yoptions=gtk.SHRINK, xpadding=5)
limitsbox.attach(xmaxlabel, 2, 3, 1, 2, yoptions=gtk.SHRINK, xpadding=5)
limitsbox.attach(y1limlabel, 0, 1, 2, 3, yoptions=gtk.SHRINK)
limitsbox.attach(y1minlabel, 1, 2, 2, 3, yoptions=gtk.SHRINK, xpadding=5)
limitsbox.attach(y1maxlabel, 2, 3, 2, 3, yoptions=gtk.SHRINK, xpadding=5)
limitsbox.attach(y2limlabel, 0, 1, 3, 4, yoptions=gtk.SHRINK)
limitsbox.attach(y2minlabel, 1, 2, 3, 4, yoptions=gtk.SHRINK, xpadding=5)
limitsbox.attach(y2maxlabel, 2, 3, 3, 4, yoptions=gtk.SHRINK, xpadding=5)
limitsbox.attach(setbutton, 0, 3, 4, 5, yoptions=gtk.SHRINK)
limitsbox.attach(resetbutton, 0, 3, 5, 6, yoptions=gtk.SHRINK)
limitsFrame.add(limitsbox)
row = 0
if activity.x_axis == "distance":
data = activity.distance_data
elif activity.x_axis == "time":
data = activity.time_data
                else:
                    data = {}
                    logging.error("Unknown x axis: %s" % activity.x_axis)
                #Sort data keys so the check buttons are created in a stable order
                keys = sorted(data.keys())
#Populate Y axis data
for graphdata in keys:
#First Y axis...
#Create button
y1button = gtk.CheckButton(label=data[graphdata].title)
#Make button active if this data is to be displayed...
y1button.set_active(data[graphdata].show_on_y1)
#Connect handler for toggle state changes
y1button.connect("toggled", self.on_y1change, y1box, graphdata, activity)
#Attach button to container
y1box.attach(y1button, 0, 1, row, row+1, xoptions=gtk.EXPAND|gtk.FILL)
if data[graphdata].linecolor is not None:
#Create a color choser
y1color = gtk.ColorButton()
#Set color to current activity color
_color = gtk.gdk.color_parse(data[graphdata].linecolor)
y1color.set_color(_color)
#Connect handler for color state changes
y1color.connect("color-set", self.on_y1colorchange, y1box, graphdata, activity)
#Attach to container
y1box.attach(y1color, 1, 2, row, row+1)
else:
blanklabel = gtk.Label("")
y1box.attach(blanklabel, 1, 2, row, row+1)
#Second Y axis
y2button = gtk.CheckButton(label=data[graphdata].title)
y2button.set_active(data[graphdata].show_on_y2)
y2button.connect("toggled", self.on_y2change, y2box, graphdata, activity)
y2box.attach(y2button, 0, 1, row, row+1, xoptions=gtk.EXPAND|gtk.FILL)
if data[graphdata].y2linecolor is not None:
y2color = gtk.ColorButton()
_color = gtk.gdk.color_parse(data[graphdata].y2linecolor)
y2color.set_color(_color)
y2color.connect("color-set", self.on_y2colorchange, y2box, graphdata, activity)
#Attach to container
y2box.attach(y2color, 1, 2, row, row+1)
else:
blanklabel = gtk.Label("")
y2box.attach(blanklabel, 1, 2, row, row+1)
row += 1
y1Frame.add(y1box)
y2Frame.add(y2box)
self.graph_data_hbox.pack_start(xFrame, expand=False, fill=False, padding=5)
self.graph_data_hbox.pack_start(y1Frame, expand=False, fill=False, padding=5)
self.graph_data_hbox.pack_start(y2Frame, expand=False, fill=False, padding=5)
self.graph_data_hbox.pack_start(limitsFrame, expand=False, fill=True, padding=5)
#self.graph_data_hbox.show_all()
self.hboxGraphOptions.show_all()
act = self.grapher.drawActivityGraph(activity=activity, box=self.record_graph_vbox)
if act.x_limits_u[0] is not None:
xmin, xmax = act.x_limits_u
else:
xmin, xmax = act.x_limits
if act.y1_limits_u[0] is not None:
y1min, y1max = act.y1_limits_u
else:
y1min, y1max = act.y1_limits
if act.y2_limits_u[0] is not None:
y2min, y2max = act.y2_limits_u
else:
y2min, y2max = act.y2_limits
#print y1min, y1max, y2min, y2max
if xmin is not None and xmax is not None:
xminlabel.set_text(str(xmin))
xmaxlabel.set_text(str(xmax))
if y1min is not None and y1max is not None:
y1minlabel.set_text(str(y1min))
y1maxlabel.set_text(str(y1max))
if y2min is not None and y2max is not None:
y2minlabel.set_text(str(y2min))
y2maxlabel.set_text(str(y2max))
#Default to showing options
self.buttonGraphShowOptions.hide()
self.scrolledwindowGraphOptions.show()
self.buttonGraphHideOptions.show()
else:
logging.debug("Activity has no GPX data")
#Show drop down boxes
self.hbox30.show()
#Hide new graph details
self.graph_data_hbox.hide()
self.hboxGraphOptions.hide()
#Remove graph
vboxChildren = self.record_graph_vbox.get_children()
logging.debug('Vbox has %d children %s' % (len(vboxChildren), str(vboxChildren) ))
# ToDo: check why vertical container is shared
for child in vboxChildren:
#Remove all FigureCanvasGTK and NavigationToolbar2GTKAgg to stop double ups of graphs
if isinstance(child, matplotlib.backends.backend_gtkagg.FigureCanvasGTK) or isinstance(child, matplotlib.backends.backend_gtkagg.NavigationToolbar2GTKAgg):
logging.debug('Removing child: '+str(child))
self.record_graph_vbox.remove(child)
self.record_vbox.set_sensitive(0)
logging.debug("<<")
def actualize_heartrategraph(self,activity):
logging.debug(">>")
if activity.tracks is not None and len(activity.tracks)>0:
self.heartrate_vbox_.set_sensitive(1)
self.drawareaheartrate.drawgraph(activity.tracks)
else:
self.heartrate_vbox_.set_sensitive(0)
logging.debug("<<")
def actualize_hrview(self,activity):
logging.debug(">>")
zones = self.pytrainer_main.profile.getZones()
record_list = activity.tracks
is_karvonen_method = self.pytrainer_main.profile.getValue("pytraining","prf_hrzones_karvonen")
if record_list is not None and len(record_list)>0:
record_list=record_list[0]
self.record_zone1.set_text("%s-%s" %(zones[4][0],zones[4][1]))
self.record_zone2.set_text("%s-%s" %(zones[3][0],zones[3][1]))
self.record_zone3.set_text("%s-%s" %(zones[2][0],zones[2][1]))
self.record_zone4.set_text("%s-%s" %(zones[1][0],zones[1][1]))
self.record_zone5.set_text("%s-%s" %(zones[0][0],zones[0][1]))
beats = activity.beats
maxbeats = activity.maxbeats
self.record_beats.set_text("%0.0f" %beats)
self.record_maxbeats.set_text("%0.0f" %maxbeats)
self.record_calories2.set_text("%0.0f" %activity.calories)
if is_karvonen_method=="True":
self.record_zonesmethod.set_text(_("Karvonen method"))
else:
self.record_zonesmethod.set_text(_("Percentages method"))
#else:
# self.recordview.set_sensitive(0)
logging.debug("<<")
def actualize_analytics(self,activity):
logging.debug(">>")
record_list = activity.tracks
def project(d,a):
return int(a.time * (d / a.distance)**1.06)
DISTANCES = {
.8 : _("800 m"),
1.5 : _("1500 m"),
5 : _("5K"),
7 : _("7K"),
10 : _("10K"),
21.1 : _("Half marathon"),
42.195 : _("Marathon"),
100 : _("100K"),
}
projected_store = gtk.ListStore(
gobject.TYPE_STRING, #id
gobject.TYPE_STRING, #name
gobject.TYPE_STRING, #distance
gobject.TYPE_STRING, #time
)
        ds = sorted(DISTANCES.keys())
for d in ds:
v = DISTANCES[d]
iter = projected_store.append()
projected_store.set (
iter,
0, str(d),
1, v,
2, str(d),
3, str(project(d, activity)),
)
self.analyticsTreeView.set_model(projected_store)
self.analytics_activity = activity
self.on_change_rank_percentage()
logging.debug("<<")
def on_change_rank_percentage(self, widget=None):
activity = self.analytics_activity
if widget:
percentage = widget.get_value() / 100
else:
percentage = .05
records = self.pytrainer_main.ddbb.select_dict("records", ["distance","time","id_record","date","average","pace"], "distance > %f AND distance < %f AND sport=%d order by average desc" % (activity.distance * (1-percentage), activity.distance * (1+percentage), activity.sport_id))
count = 1
for r in records:
if r['average'] > activity.average:
count += 1
import numpy
speeds = [r['average'] for r in records]
if self.pytrainer_main.profile.prf_us_system:
self.label_ranking_range.set_text("%.2f - %.2f %s" % (km2miles(activity.distance * (1-percentage)), km2miles(activity.distance * (1+percentage)), activity.distance_unit))
else:
self.label_ranking_range.set_text("%.2f - %.2f %s" % (activity.distance * (1-percentage), activity.distance * (1+percentage), activity.distance_unit))
self.label_ranking_rank.set_text("%s/%s" % (count, len(records)))
self.label_ranking_avg.set_text("%.2f %s" % (km2miles(numpy.average(speeds)) if self.pytrainer_main.profile.prf_us_system else numpy.average(speeds), activity.speed_unit))
self.label_ranking_speed.set_text("%.2f %s" % (km2miles(activity.average) if self.pytrainer_main.profile.prf_us_system else activity.average, activity.speed_unit))
self.label_ranking_stddev.set_text("%.4f" % (km2miles(numpy.std(speeds)) if self.pytrainer_main.profile.prf_us_system else numpy.std(speeds)))
self.label_ranking_dev.set_text("%+.2fσ" % ((activity.average - numpy.average(speeds)) / numpy.std(speeds)))
rank_store = gtk.ListStore(
gobject.TYPE_INT, #id
gobject.TYPE_INT, #rank
gobject.TYPE_STRING, #date
gobject.TYPE_STRING, #distance
gobject.TYPE_STRING, #time
gobject.TYPE_STRING, #speed
gobject.TYPE_STRING, #pace
gobject.TYPE_STRING, #color
)
length = len(records)
rec_set = [0,]
for r in xrange(max(count-3, 1) if count>1 else count, min(count+3, length-2) if count < length else count):
rec_set.append(r)
if length>1 and count!=length:
rec_set.append(-1)
for i in rec_set:
r = records[i]
iter = rank_store.append()
rank = length if i==-1 else i+1
rank_store.set (
iter,
0, i,
1, rank,
2, r['date'],
3, km2miles(r['distance']) if self.pytrainer_main.profile.prf_us_system else r['distance'],
4, str(r['time']),
5, r['average'],
6, r['pace'],
7, '#3AA142' if rank==count else '#000000',
)
for c in self.rankingTreeView.get_columns()[:-1]:
for cr in c.get_cell_renderers():
if type(cr)==gtk.CellRendererText:
c.add_attribute(cr, 'foreground', 7)
self.rankingTreeView.set_model(rank_store)
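    # The ranking compares this activity against all records of the same sport
    # whose distance is within the +/- percentage window (5% by default),
    # ordered by average speed; label_ranking_dev is the activity's z-score,
    # (average - mean) / stddev, over that set of speeds.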
def actualize_dayview(self,record_list=None, activity_list=None):
logging.debug(">>")
if self.pytrainer_main.profile.getValue("pytraining","prf_us_system") == "True":
self.d_distance_unit.set_text(_("miles"))
self.d_speed_unit.set_text(_("miles/h"))
self.d_maxspeed_unit.set_text(_("miles/h"))
self.d_pace_unit.set_text(_("min/mile"))
self.d_maxpace_unit.set_text(_("min/mile"))
else:
self.d_distance_unit.set_text(_("km"))
self.d_speed_unit.set_text(_("km/h"))
self.d_maxspeed_unit.set_text(_("km/h"))
self.d_pace_unit.set_text(_("min/km"))
self.d_maxpace_unit.set_text(_("min/km"))
        if record_list:
tbeats = 0
distance = 0
calories = 0
timeinseconds = 0
beats = 0
maxbeats = 0
maxspeed = 0
average = 0
maxpace = "0:00"
pace = "0:00"
totalascent = 0
totaldescent = 0
for record in record_list:
distance += self.parseFloat(record[2])
calories += self.parseFloat(record[7])
timeinseconds += self.parseFloat(record[3])
beats = self.parseFloat(record[4])
totalascent += self.parseFloat(record[13])
totaldescent += self.parseFloat(record[14])
if float(beats)>0:
tbeats += beats*(self.parseFloat(record[3])/60/60)
if record[9] > maxspeed:
maxspeed = self.parseFloat(record[9])
if record[10] > maxbeats:
maxbeats = self.parseFloat(record[10])
if self.pytrainer_main.profile.getValue("pytraining","prf_us_system") == "True":
distance = km2miles(distance)
maxspeed = km2miles(maxspeed)
if tbeats > 0 and timeinseconds > 0:
tbeats = tbeats/(timeinseconds/60/60)
if distance > 0 and timeinseconds > 0:
average = distance/(timeinseconds/60/60)
if maxspeed > 0:
maxpace = "%d:%02d" %((3600/maxspeed)/60,(3600/maxspeed)%60)
if average > 0:
pace = "%d:%02d" %((3600/average)/60,(3600/average)%60)
self.dayview.set_sensitive(1)
self.day_distance.set_text("%0.2f" %distance)
hour,min,sec = second2time(timeinseconds)
self.day_hour.set_text("%d" %hour)
self.day_minute.set_text("%02d" %min)
self.day_second.set_text("%02d" %sec)
if tbeats:
self.day_beats.set_text("%0.0f" %tbeats)
else:
self.day_beats.set_text("")
self.day_maxbeats.set_text("%0.0f" %maxbeats)
if average:
self.day_average.set_text("%0.2f" %average)
else:
self.day_average.set_text("")
self.day_maxspeed.set_text("%0.2f" %maxspeed)
self.day_pace.set_text("%s" %pace)
self.day_maxpace.set_text("%s" %maxpace)
self.day_ascdesc.set_text("%d/%d" %(int(totalascent),int(totaldescent)))
self.day_calories.set_text("%0.0f" %calories)
self.day_topic.set_text(str(record[1]))
else:
self.dayview.set_sensitive(0)
logging.debug("<<")
def actualize_daygraph(self,record_list):
logging.debug(">>")
if len(record_list)>0:
self.day_vbox.set_sensitive(1)
else:
self.day_vbox.set_sensitive(0)
self.drawareaday.drawgraph(record_list)
logging.debug("<<")
def actualize_map(self,activity, full_screen=False):
logging.debug(">>")
#Check which type of map viewer to use
if self.radiobuttonOSM.get_active():
#Use OSM to draw map
logging.debug("Using OSM to draw map....")
htmlfile = Osm(data_path=self.data_path, waypoint=self.waypoint, pytrainer_main=self.parent).drawMap(activity, self.comboMapLineType.get_active())
elif self.radiobuttonGMap.get_active():
#Use Google to draw map
logging.debug("Using Google to draw map")
htmlfile = Googlemaps(data_path=self.data_path, waypoint=self.waypoint, pytrainer_main=self.parent).drawMap(activity, self.comboMapLineType.get_active())
else:
#Unknown map type...
logging.error("Unknown map viewer requested")
htmlfile = self.mapviewer.createErrorHtml()
logging.debug("Displaying htmlfile: %s" % htmlfile)
if full_screen:
logging.debug("Displaying in full screen mode")
self.mapviewer_fs.display_map(htmlfile=htmlfile)
else:
logging.debug("Displaying in embedded mode")
self.mapviewer.display_map(htmlfile=htmlfile)
logging.debug("<<")
def actualize_weekview(self, record_list, date_range):
logging.debug(">>")
self.week_date.set_text("%s - %s (%d)" % (date_range.start_date.strftime("%a %d %b"), date_range.end_date.strftime("%a %d %b"), int(date_range.end_date.strftime("%V"))) )
km = calories = time = average = beats = 0
num_records = len(record_list)
logging.info("Number of records selected week: "+str(num_records))
time_in_min = 0
tbeats = 0
maxspeed = 0
pace = "0:00"
maxpace = "0:00"
maxbeats = 0
totalascent = 0
totaldescent = 0
if self.pytrainer_main.profile.getValue("pytraining","prf_us_system") == "True":
self.w_distance_unit.set_text(_("miles"))
self.w_speed_unit.set_text(_("miles/h"))
self.w_maxspeed_unit.set_text(_("miles/h"))
self.w_pace_unit.set_text(_("min/mile"))
self.w_maxpace_unit.set_text(_("min/mile"))
else:
self.w_distance_unit.set_text(_("km"))
self.w_speed_unit.set_text(_("km/h"))
self.w_maxspeed_unit.set_text(_("km/h"))
self.w_pace_unit.set_text(_("min/km"))
self.w_maxpace_unit.set_text(_("min/km"))
if num_records>0:
for record in record_list:
km += self.parseFloat(record[1])
time += self.parseFloat(record[2])
average += self.parseFloat(record[5])
calories += self.parseFloat(record[6])
beats = self.parseFloat(record[3])
totalascent += self.parseFloat(record[10])
totaldescent += self.parseFloat(record[11])
if float(beats) > 0:
time_in_min += time/60
tbeats += beats*(time/60)
if record[7] > maxspeed:
maxspeed = self.parseFloat(record[7])
if record[8] > maxbeats:
maxbeats = self.parseFloat(record[8])
if self.pytrainer_main.profile.getValue("pytraining","prf_us_system") == "True":
km = km2miles(km)
maxspeed = km2miles(maxspeed)
if time_in_min > 0:
tbeats = tbeats/time_in_min
else:
tbeats = 0
if km > 0:
average = (km/(time/3600))
else:
average = 0
if maxspeed > 0:
#maxpace = 60/maxspeed
maxpace = "%d:%02d" %((3600/maxspeed)/60,(3600/maxspeed)%60)
if average > 0:
#pace = 60/average
pace = "%d:%02d" %((3600/average)/60,(3600/average)%60)
self.weeka_distance.set_text("%0.2f" %km)
hour,min,sec = second2time(time)
self.weeka_hour.set_text("%d" %hour)
self.weeka_minute.set_text("%02d" %min)
self.weeka_second.set_text("%02d" %sec)
self.weeka_maxbeats.set_text("%0.0f" %(maxbeats))
self.weeka_beats.set_text("%0.0f" %(tbeats))
self.weeka_average.set_text("%0.2f" %average)
self.weeka_maxspeed.set_text("%0.2f" %maxspeed)
self.weeka_pace.set_text(pace)
self.weeka_maxpace.set_text(maxpace)
self.weeka_ascdesc.set_text("%d/%d" %(int(totalascent),int(totaldescent)))
self.weeka_calories.set_text("%0.0f" %calories)
self.weekview.set_sensitive(1)
else:
self.weekview.set_sensitive(0)
self.drawareaweek.drawgraph(record_list, date_range.start_date)
logging.debug("<<")
def actualize_monthview(self,record_list, nameMonth):
logging.debug(">>")
self.month_date.set_text(nameMonth)
km = calories = time = average = beats = 0
num_records = len(record_list)
time_in_min = 0
tbeats = 0
maxspeed = 0
pace = "0:00"
maxpace = "0:00"
maxbeats = 0
totalascent = 0
totaldescent = 0
if self.pytrainer_main.profile.getValue("pytraining","prf_us_system") == "True":
self.m_distance_unit.set_text(_("miles"))
self.m_speed_unit.set_text(_("miles/h"))
self.m_maxspeed_unit.set_text(_("miles/h"))
self.m_pace_unit.set_text(_("min/mile"))
self.m_maxpace_unit.set_text(_("min/mile"))
else:
self.m_distance_unit.set_text(_("km"))
self.m_speed_unit.set_text(_("km/h"))
self.m_maxspeed_unit.set_text(_("km/h"))
self.m_pace_unit.set_text(_("min/km"))
self.m_maxpace_unit.set_text(_("min/km"))
if num_records>0:
for record in record_list:
km += self.parseFloat(record[1])
time += self.parseFloat(record[2])
average += self.parseFloat(record[5])
calories += self.parseFloat(record[6])
beats = self.parseFloat(record[3])
totalascent += self.parseFloat(record[10])
totaldescent += self.parseFloat(record[11])
if float(beats) > 0:
time_in_min += time/60
tbeats += beats*(time/60)
if record[7] > maxspeed:
maxspeed = self.parseFloat(record[7])
if record[8] > maxbeats:
maxbeats = self.parseFloat(record[8])
if self.pytrainer_main.profile.getValue("pytraining","prf_us_system") == "True":
km = km2miles(km)
maxspeed = km2miles(maxspeed)
if time_in_min > 0:
tbeats = tbeats/time_in_min
else:
tbeats = 0
if km > 0 and time > 0: # time can be zero when a new year starts
average = (km/(time/3600))
else:
average = 0
if maxspeed > 0:
#maxpace = 60/maxspeed
maxpace = "%d:%02d" %((3600/maxspeed)/60,float(3600/maxspeed)%60)
if average > 0:
#pace = 60/average
pace = "%d:%02d" %((3600/average)/60,float(3600/average)%60)
self.montha_distance.set_text("%0.2f" %km)
hour,min,sec = second2time(time)
self.montha_hour.set_text("%d" %hour)
self.montha_minute.set_text("%02d" %min)
self.montha_second.set_text("%02d" %sec)
self.montha_maxbeats.set_text("%0.0f" %(maxbeats))
self.montha_beats.set_text("%0.0f" %(tbeats))
self.montha_average.set_text("%0.2f" %average)
self.montha_maxspeed.set_text("%0.2f" %maxspeed)
self.montha_pace.set_text(pace)
self.montha_maxpace.set_text(maxpace)
self.montha_ascdesc.set_text("%d/%d" %(int(totalascent),int(totaldescent)))
self.montha_calories.set_text("%0.0f" %calories)
self.monthview.set_sensitive(1)
else:
self.monthview.set_sensitive(0)
logging.debug("<<")
def actualize_monthgraph(self,record_list, daysInMonth):
logging.debug(">>")
self.drawareamonth.drawgraph(record_list, daysInMonth)
logging.debug("<<")
def actualize_yearview(self,record_list, year):
logging.debug(">>")
self.year_date.set_text("%d" %int(year))
km = calories = time = average = beats = 0
num_records = len(record_list)
time_in_min = 0
tbeats = 0
maxspeed = 0
pace = "0:00"
maxpace = "0:00"
maxbeats = 0
totalascent = 0
totaldescent = 0
if self.pytrainer_main.profile.getValue("pytraining","prf_us_system") == "True":
self.y_distance_unit.set_text(_("miles"))
self.y_speed_unit.set_text(_("miles/h"))
self.y_maxspeed_unit.set_text(_("miles/h"))
self.y_pace_unit.set_text(_("min/mile"))
self.y_maxpace_unit.set_text(_("min/mile"))
else:
self.y_distance_unit.set_text(_("km"))
self.y_speed_unit.set_text(_("km/h"))
self.y_maxspeed_unit.set_text(_("km/h"))
self.y_pace_unit.set_text(_("min/km"))
self.y_maxpace_unit.set_text(_("min/km"))
if num_records>0:
for record in record_list:
km += self.parseFloat(record[1])
time += self.parseFloat(record[2])
average += self.parseFloat(record[5])
calories += self.parseFloat(record[6])
beats = self.parseFloat(record[3])
totalascent += self.parseFloat(record[10])
totaldescent += self.parseFloat(record[11])
if float(beats) > 0:
time_in_min += time/60
tbeats += beats*(time/60)
if record[7] > maxspeed:
maxspeed = self.parseFloat(record[7])
if record[8] > maxbeats:
maxbeats = self.parseFloat(record[8])
if self.pytrainer_main.profile.getValue("pytraining","prf_us_system") == "True":
km = km2miles(km)
maxspeed = km2miles(maxspeed)
if time_in_min > 0:
tbeats = tbeats/time_in_min
else:
tbeats = 0
if km > 0:
average = (km/(time/3600))
else:
average = 0
if maxspeed > 0:
#maxpace = 60/maxspeed
maxpace = "%d:%02d" %((3600/maxspeed)/60,(3600/maxspeed)%60)
if average > 0:
#pace = 60/average
pace = "%d:%02d" %((3600/average)/60,(3600/average)%60)
self.yeara_distance.set_text("%0.2f" %km)
hour,min,sec = second2time(time)
self.yeara_hour.set_text("%d" %hour)
self.yeara_minute.set_text("%02d" %min)
self.yeara_second.set_text("%02d" %sec)
self.yeara_beats.set_text("%0.0f" %tbeats)
self.yeara_maxbeats.set_text("%0.0f" %(maxbeats))
self.yeara_average.set_text("%0.2f" %average)
self.yeara_maxspeed.set_text("%0.2f" %maxspeed)
self.yeara_pace.set_text(pace)
self.yeara_maxpace.set_text(maxpace)
            self.yeara_ascdesc.set_text("%d/%d" %(int(totalascent),int(totaldescent)))
self.yeara_calories.set_text("%0.0f" %calories)
self.yearview.set_sensitive(1)
else:
self.yearview.set_sensitive(0)
self.drawareayear.drawgraph([])
logging.debug("<<")
def actualize_yeargraph(self,record_list):
logging.debug(">>")
self.drawareayear.drawgraph(record_list)
logging.debug("<<")
def actualize_athleteview(self, athlete):
logging.debug(">>")
self.labelName.set_text(athlete.name)
self.labelDOB.set_text(athlete.age)
self.labelHeight.set_text(athlete.height+" cm")
#Create history treeview
history_store = gtk.ListStore(
gobject.TYPE_STRING, #id
gobject.TYPE_STRING, #date
gobject.TYPE_STRING, #weight
gobject.TYPE_STRING, #body fat %
gobject.TYPE_STRING, #resting HR
gobject.TYPE_STRING #max HR
)
for data in athlete.data:
weight = data['weight']
date = data['date']
iter = history_store.append()
history_store.set (
iter,
0, (data['id_athletestat']),
1, date,
2, weight,
3, (data['bodyfat']),
4, (data['restinghr']),
5, (data['maxhr']),
)
self.athleteTreeView.set_model(history_store)
self.grapher.drawAthleteGraph(athlete=athlete, box=self.boxAthleteGraph)
logging.debug("<<")
def actualize_statsview(self, stats, record_list):
logging.debug(">>")
self.labelTotalDistance.set_text(str(stats.data['total_distance']) + " km")
self.labelTotalDuration.set_text(str(stats.data['total_duration'] / 3600) + " hours")
self.labelStartDate.set_text(stats.data['start_date'].strftime('%Y-%m-%d'))
self.labelEndDate.set_text(stats.data['end_date'].strftime('%Y-%m-%d'))
data = self.parent.stats.data
store = gtk.ListStore(
gobject.TYPE_INT,
gobject.TYPE_STRING,
gobject.TYPE_INT,
gobject.TYPE_INT,
gobject.TYPE_FLOAT,
gobject.TYPE_FLOAT,
gobject.TYPE_FLOAT,
gobject.TYPE_INT,
gobject.TYPE_INT,
gobject.TYPE_INT,
gobject.TYPE_FLOAT
)
for s in data['sports'].values():
iter = store.append()
c = 0
store.set (iter, c, c)
c += 1
store.set (iter, c, s['name'])
c += 1
store.set (iter, c, s['count'])
for f in data['fields'][3:]:
c += 1
store.set (iter, c, s['total_'+f])
c += 1
if s['total_duration']!=0: # Avoid division by zero if 0 length sport activity exists in DB
store.set (iter, c, s['total_distance'] / s['total_duration'] * 3600.)
for f in data['fields']:
c += 1
store.set (iter, c, s[f])
self.statsTreeView.set_model(store)
self.statsTreeView.set_rules_hint(True)
store.set_sort_column_id(3, gtk.SORT_DESCENDING)
self.drawareatotal.drawgraph(record_list)
logging.debug("<<")
def actualize_listview(self,record_list):
logging.debug(">>")
        #record_list contains:
        #date,distance,average,title,sports.name,id_record,time,beats,calories
        #The columns are:
        #column_names=[_("id"),_("Title"),_("Date"),_("Distance"),_("Sport"),_("Time"),_("Beats"),_("Average"),_("Calories")]
store = gtk.ListStore(
gobject.TYPE_INT,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_FLOAT,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_INT,
gobject.TYPE_FLOAT,
gobject.TYPE_INT,
object)
for i in record_list:
try:
hour,min,sec = second2time(int(i[6]))
except (ValueError, TypeError):
hour,min,sec = (0,0,0)
_time = "%2d:%02d:%02d" %(hour,min,sec)
try:
_id = int(i[5])
except (ValueError, TypeError) as e:
logging.debug("Unable to determine id for record: %s" % str(i))
logging.debug(str(e))
continue
_title = str(i[3])
_date = str(i[0])
try:
_distance = float(i[1])
except (ValueError, TypeError):
_distance = 0
_sport = str(i[4])
try:
_average = float(i[2])
except (ValueError, TypeError):
_average = 0
try:
_calories = int(i[8])
except (ValueError, TypeError):
_calories = 0
try:
_beats = round(float(i[7]))
except (ValueError, TypeError) as e:
logging.debug("Unable to parse beats for %s" % str(i[7]) )
logging.debug(str(e))
_beats = 0.0
iter = store.append()
store.set (
iter,
0, _id,
1, _title,
2, _date,
3, _distance,
4, _sport,
5, _time,
6, _beats,
7, _average,
8, _calories
)
#self.allRecordTreeView.set_headers_clickable(True)
self.allRecordTreeView.set_model(store)
self.allRecordTreeView.set_rules_hint(True)
logging.debug("<<")
def actualize_waypointview(self,record_list,default_waypoint,redrawmap = 1):
logging.debug(">>")
        #redrawmap: whether the map must be refreshed as well. 1 = yes, 0 = no
        #waypoint list contains:
        #id_waypoint,lat,lon,ele,comment,time,name,sym
        #The columns are:
        #column_names=[_("id"),_("Waypoint")]
store = gtk.ListStore(
gobject.TYPE_INT,
gobject.TYPE_STRING,
object)
iterOne = False
iterDefault = False
counter = 0
default_id = 0
for i in record_list:
iter = store.append()
if not iterOne:
iterOne = iter
if int(i[0])==default_waypoint:
iterDefault = iter
default_id = counter
store.set (
iter,
0, int(i[0]),
1, str(i[6])
)
counter+=1
self.waypointTreeView.set_model(store)
if iterDefault:
self.waypointTreeView.get_selection().select_iter(iterDefault)
elif iterOne:
self.waypointTreeView.get_selection().select_iter(iterOne)
if len(record_list) > 0:
self.waypoint_latitude.set_text(str(record_list[default_id][1]))
self.waypoint_longitude.set_text(str(record_list[default_id][2]))
self.waypoint_name.set_text(str(record_list[default_id][6]))
self.waypoint_description.set_text(str(record_list[default_id][4]))
self.set_waypoint_type(str(record_list[default_id][7]))
if redrawmap == 1:
self.waypointeditor.createHtml(default_waypoint)
self.waypointeditor.drawMap()
logging.debug("<<")
def set_waypoint_type(self, type):
x = 0
tree_model = self.waypoint_type.get_model()
if tree_model is not None:
#iter = tree_model.get_iter_root()
for item in tree_model:
#if isinstance(item, gtk.TreeModelRow):
if item[0] == type:
self.waypoint_type.set_active(x)
return
x += 1
self.waypoint_type.insert_text(0, type)
self.waypoint_type.set_active(0)
return
def on_waypointTreeView_button_press(self, treeview, event):
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
if event.button == 1:
selected,iter = treeview.get_selection().get_selected()
id_waypoint=selected.get_value(iter,0)
self.parent.refreshWaypointView(id_waypoint)
return False
def on_listareasearch_clicked(self, widget):
lisOpt = {
_("Title"):"title",
_("Date"):"date",
_("Distance"):"distance",
_("Sport"):"sport",
_("Time"):"time",
_("Beats"):"beats",
_("Average"):"average",
_("Calories"):"calories"
}
self.listsearch.title = self.lsa_searchvalue.get_text()
self.listsearch.sport = self.lsa_sport.get_active()
self.listsearch.past = self.lsa_past.get_active()
self.listsearch.duration = self.lsa_duration.get_active()
self.listsearch.distance = self.lsa_distance.get_active()
self.parent.refreshListView(self.listsearch.condition)
def on_listareareset_clicked(self, widget):
self.listsearch.reset_lsa()
self.parent.refreshListView(self.listsearch.condition)
def create_menulist(self,columns):
for i, column_dict in enumerate(columns):
if 'visible' in column_dict and not column_dict['visible']:
pass
else:
item = gtk.CheckMenuItem(column_dict['name'])
#self.lsa_searchoption.append_text(name)
item.connect("button_press_event", self.on_menulistview_activate, i)
self.menulistviewOptions.append(item)
self.menulistviewOptions.show_all()
def on_menulistview_activate(self,widget,widget2,widget_position):
listMenus = {
0:"title",
1:"date",
2:"distance",
3:"sport",
4:"time",
5:"beats",
6:"average",
7:"calories" }
items = self.menulistviewOptions.get_children()
if items[widget_position-1].get_active():
newValue = "False"
else:
newValue = "True"
menufile = XMLParser(self.fileconf)
menufile.setValue("listviewmenu",listMenus[widget_position-1],newValue)
self.showAllRecordTreeViewColumns()
def showAllRecordTreeViewColumns(self):
menufile = XMLParser(self.fileconf)
listMenus = {
"id_record":0,
"title":1,
"date":2,
"distance":3,
"sport":4,
"time":5,
"beats":6,
"average":7,
"calories":8 }
columns = self.allRecordTreeView.get_columns()
menuItems = self.menulistviewOptions.get_children()
for column in listMenus:
visible = menufile.getValue("listviewmenu",column)
if visible == "True":
visible = True
else:
visible = False
numcolumn = listMenus[column]
#show the selected columns
columns[numcolumn].set_visible(visible)
#select the choice in the menu
if numcolumn != 0 and self.menublocking != 1:
menuItems[numcolumn-1].set_active(visible)
self.menublocking = 1
def createWaypointEditor(self,WaypointEditor,waypoint, parent=None):
self.waypointeditor = WaypointEditor(self.data_path, self.waypointvbox,waypoint,parent)
def zoom_graph(self, y1limits=None, y1color=None, y1_linewidth=1):
logging.debug(">>")
logging.debug("Reseting graph Y axis with ylimits: %s" % str(y1limits) )
self.drawarearecord.drawgraph(self.record_list,self.laps, y1limits=y1limits, y1color=y1color, y1_linewidth=y1_linewidth)
logging.debug("<<")
def update_athlete_item(self, idx, date, weight, bf, restingHR, maxHR):
logging.debug(">>")
#Prepare vars
idx = str(idx)
date = str(date)
weight = str(weight)
bf = str(bf)
restingHR = str(restingHR)
maxHR = str(maxHR)
#Set vars
self.labelAthleteIdx.set_text(idx)
self.entryAthleteDate.set_text(date)
self.entryAthleteWeight.set_text(weight)
self.entryAthleteBF.set_text(bf)
self.entryAthleteRestingHR.set_text(restingHR)
self.entryAthleteMaxHR.set_text(maxHR)
logging.debug("<<")
    ####################
    ## Event handlers ##
    ####################
def on_xaxischange(self, widget, data=None, activity=None):
'''Handler for record graph axis selection changes'''
if widget.get_active():
activity.x_axis = data
self.actualize_recordgraph(activity)
def on_xlapschange(self, widget, activity=None):
if widget.get_active():
activity.show_laps = True
else:
activity.show_laps = False
self.actualize_recordgraph(activity)
def on_gridchange(self, widget, axis=None, activity=None):
'''Handler for record graph grid selection changes'''
if axis == 'y1':
activity.y1_grid = not activity.y1_grid
elif axis == 'y2':
activity.y2_grid = not activity.y2_grid
elif axis == 'x':
activity.x_grid = not activity.x_grid
self.actualize_recordgraph(activity)
def on_y1colorchange(self, widget, box, graphdata, activity):
        '''Handler for changes to y1 color selection'''
logging.debug("Setting %s to color %s" % (graphdata, widget.get_color() ) )
if activity.x_axis == "distance":
activity.distance_data[graphdata].set_color(str(widget.get_color()))
elif activity.x_axis == "time":
activity.time_data[graphdata].set_color(str(widget.get_color()))
#Replot the activity
self.actualize_recordgraph(activity)
def on_y2colorchange(self, widget, box, graphdata, activity):
        '''Handler for changes to y2 color selection'''
logging.debug("Setting %s to color %s" % (graphdata, widget.get_color() ) )
if activity.x_axis == "distance":
activity.distance_data[graphdata].set_color(None, str(widget.get_color()))
elif activity.x_axis == "time":
activity.time_data[graphdata].set_color(None, str(widget.get_color()))
#Replot the activity
self.actualize_recordgraph(activity)
def on_y1change(self, widget, box, graphdata, activity):
        '''Handler for changes to y1 selection'''
        logging.debug("Y1 selection toggled: %s" % graphdata)
        #Loop through all options and set data correctly
for child in box.get_children():
if activity.x_axis == "distance":
for item in activity.distance_data:
if activity.distance_data[item].title == child.get_label():
logging.debug( "Setting %s to %s" % (item, str(child.get_active()) ) )
activity.distance_data[item].show_on_y1 = child.get_active()
elif activity.x_axis == "time":
for item in activity.time_data:
if activity.time_data[item].title == child.get_label():
logging.debug( "Setting %s to %s" % (item, str(child.get_active()) ) )
activity.time_data[item].show_on_y1 = child.get_active()
#Replot the activity
self.actualize_recordgraph(activity)
def on_y2change(self, widget, box, graphdata, activity):
        '''Handler for changes to y2 selection'''
        logging.debug("Y2 selection toggled: %s" % graphdata)
        #Loop through all options and set data correctly
for child in box.get_children():
if activity.x_axis == "distance":
for item in activity.distance_data:
if activity.distance_data[item].title == child.get_label():
logging.debug( "Setting %s to %s" % (item, str(child.get_active()) ) )
activity.distance_data[item].show_on_y2 = child.get_active()
elif activity.x_axis == "time":
for item in activity.time_data:
if activity.time_data[item].title == child.get_label():
logging.debug( "Setting %s to %s" % (item, str(child.get_active()) ) )
activity.time_data[item].show_on_y2 = child.get_active()
#Replot the activity
self.actualize_recordgraph(activity)
def on_setlimits(self, widget, activity, reset, data):
'''Handler for setting graph limits buttons'''
if data is None:
logging.debug("Resetting graph limits...")
activity.x_limits_u = (None, None)
activity.y1_limits_u = (None, None)
activity.y2_limits_u = (None, None)
#Replot the activity
self.actualize_recordgraph(activity)
else:
#Setting to limits in boxes
logging.debug("Setting graph limits...")
#Determine contents of boxes...
xmin = self._float_or(data['xminlabel'].get_text(), activity.x_limits[0])
xmax = self._float_or(data['xmaxlabel'].get_text(), activity.x_limits[1])
y1min = self._float_or(data['y1minlabel'].get_text(), activity.y1_limits[0])
y1max = self._float_or(data['y1maxlabel'].get_text(), activity.y1_limits[1])
y2min = self._float_or(data['y2minlabel'].get_text(), activity.y2_limits[0])
y2max = self._float_or(data['y2maxlabel'].get_text(), activity.y2_limits[1])
logging.debug("Setting graph limits x: (%s,%s), y1: (%s,%s), y2: (%s,%s)" % (str(xmin), str(xmax), str(y1min), str(y1max), str(y2min), str(y2max)) )
activity.x_limits_u = (xmin, xmax)
activity.y1_limits_u = (y1min, y1max)
activity.y2_limits_u = (y2min, y2max)
#Replot the activity
self.actualize_recordgraph(activity)
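    # The limit handlers above call self._float_or(), which is defined elsewhere
    # in this class. A minimal sketch of the assumed behaviour (parse the text
    # box contents, falling back to the supplied default on failure):
    #
    #     def _float_or(self, value, default):
    #         try:
    #             return float(value)
    #         except (TypeError, ValueError):
    #             return default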
def on_window1_configure_event(self, widget, event):
#print widget #window widget
#print event # resize event
self.size = self.window1.get_size()
def on_buttonShowOptions_clicked(self, widget):
position_set = self.hpaned1.get_property('position-set')
if position_set:
#Currently not showing options - show them
self.hpaned1.set_property('position-set', False)
self.buttonShowOptions.set_tooltip_text(_('Hide graph display options') )
else:
#Hide options
self.hpaned1.set_position(0)
self.buttonShowOptions.set_tooltip_text(_('Show graph display options') )
#logging.debug('Position: %d' % self.hpaned1.get_position() )
logging.debug('Position set: %s' % self.hpaned1.get_property('position-set') )
def on_buttonGraphHideOptions_clicked(self, widget):
logging.debug('on_buttonGraphHideOptions_clicked')
self.buttonGraphHideOptions.hide()
self.scrolledwindowGraphOptions.hide()
#for child in self.graph_data_hbox.get_children():
# if isinstance(child, gtk.Frame):
# child.hide()
self.buttonGraphShowOptions.show()
def on_buttonGraphShowOptions_clicked(self, widget):
logging.debug('on_buttonGraphShowOptions_clicked')
self.buttonGraphShowOptions.hide()
#for child in self.graph_data_hbox.get_children():
# if isinstance(child, gtk.Frame):
# child.show()
self.scrolledwindowGraphOptions.show()
self.buttonGraphHideOptions.show()
def on_buttonRedrawMap_clicked(self, widget):
logging.debug('on_buttonRedrawMap_clicked')
self.parent.refreshMapView()
def on_radiobuttonMap_toggled(self, widget):
#Ignore the deselected toggle event
if widget.get_active() == False:
return
logging.debug( 'on_radiobuttonMap_toggled '+ widget.get_name()+ ' activated')
self.parent.refreshMapView()
def on_comboMapLineType_changed(self, widget):
        logging.debug( 'on_comboMapLineType_changed '+ widget.get_name()+ ' = ' + str(widget.get_active()))
self.parent.refreshMapView()
def on_hpaned1_move_handle(self, widget):
print "Handler"
print widget
def on_spinbuttonY1_value_changed(self, widget):
y1min = self.spinbuttonY1Min.get_value()
y1max = self.spinbuttonY1Max.get_value()
#Check to see if the min and max have the same...
if y1min == y1max:
if widget.get_name() == "spinbuttonY1Min": #User was changing the min spinbutton, so move max up
y1max += 1
else: #Move min down
y1min -= 1
self.y1_limits=(y1min, y1max)
self.zoom_graph(y1limits=self.y1_limits, y1color=self.y1_color, y1_linewidth=self.y1_linewidth)
def on_buttonResetGraph_clicked(self, widget):
#self.zoom_graph()
#Reset stored values
self.y1_limits = None
self.y1_color = None
self.y1_linewidth = 1
self.zoom_graph()
def on_colorbuttonY1LineColor_color_set(self, widget):
y1color = widget.get_color()
cs = y1color.to_string()
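        #gtk.gdk.Color.to_string() returns 16-bit-per-channel hex ("#rrrrggggbbbb");
        #keep the high byte of each channel to build an 8-bit "#rrggbb" value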
self.y1_color = cs[0:3] + cs[5:7] + cs[9:11]
self.drawarearecord.drawgraph(self.record_list,self.laps, y1limits=self.y1_limits, y1color=self.y1_color, y1_linewidth=self.y1_linewidth)
def on_spinbuttonY1LineWeight_value_changed(self, widget):
self.y1_linewidth = self.spinbuttonY1LineWeight.get_value_as_int()
self.drawarearecord.drawgraph(self.record_list,self.laps, y1limits=self.y1_limits, y1color=self.y1_color, y1_linewidth=self.y1_linewidth)
def on_edit_clicked(self,widget):
selected,iter = self.recordTreeView.get_selection().get_selected()
id_record = selected.get_value(iter,0)
self.parent.editRecord(id_record, self.selected_view)
def on_remove_clicked(self,widget):
selected,iter = self.recordTreeView.get_selection().get_selected()
id_record = selected.get_value(iter,0)
self.parent.removeRecord(id_record)
def on_export_csv_activate(self,widget):
self.parent.exportCsv()
def on_newrecord_clicked(self,widget):
if self.selected_view == 'athlete':
#print 'New athlete'
self.on_athleteTreeView_edit( None, None)
else:
self.parent.newRecord(view=self.selected_view)
def on_edituser_activate(self,widget):
self.parent.editProfile()
def on_calendar_doubleclick(self,widget):
self.parent.newRecord()
def on_sportlist_changed(self,widget):
logging.debug("--")
if self.sportlist.get_active_text() != self.activeSport:
self.activeSport = self.sportlist.get_active_text()
self.parent.refreshListRecords()
self.parent.refreshGraphView(self.selected_view)
else:
logging.debug("on_sportlist_changed called with no change")
def on_page_change(self,widget,gpointer,page):
logging.debug("--")
if page == 0:
self.selected_view="record"
elif page == 1:
self.selected_view="day"
elif page == 2:
self.selected_view="week"
elif page == 3:
self.selected_view="month"
elif page == 4:
self.selected_view="year"
elif page == 5:
self.selected_view="athlete"
elif page == 6:
self.selected_view="stats"
else:
self.selected_view="record"
self.parent.refreshGraphView(self.selected_view)
def on_recordpage_change(self,widget,gpointer,page):
if page == 0:
selected_view="info"
elif page == 1:
selected_view="graphs"
elif page == 2:
selected_view="map"
elif page == 3:
selected_view="heartrate"
elif page == 4:
selected_view="analytics"
self.parent.refreshRecordGraphView(selected_view)
def on_showmap_clicked(self,widget):
self.infoarea.hide()
self.maparea.show()
self.parent.refreshMapView(full_screen=True)
def on_hidemap_clicked(self,widget):
self.maparea.hide()
self.infoarea.show()
def on_btnShowLaps_toggled(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_day_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_week_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_month_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_year_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_total_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_calendar_selected(self, widget):
logging.debug(">>")
logging.debug("Block (%s) | Selected view: %s" % (self.block, self.selected_view))
if self.block:
self.block = False
else:
if self.selected_view == "record":
self.recordview.set_current_page(0)
self.parent.refreshRecordGraphView("info")
self.parent.refreshListRecords()
self.parent.refreshGraphView(self.selected_view)
logging.debug("<<")
def on_calendar_changemonth(self,widget):
logging.debug("--")
self.block = True
self.notebook.set_current_page(3)
self.selected_view="month"
self.parent.refreshListRecords()
self.parent.refreshGraphView(self.selected_view)
def on_calendar_next_year(self,widget):
logging.debug("--")
self.block = True
self.notebook.set_current_page(4)
self.selected_view="year"
self.parent.refreshListRecords()
self.parent.refreshGraphView(self.selected_view)
def on_classicview_activate(self,widget):
self.waypointarea.hide()
self.listarea.hide()
#self.athletearea.hide()
self.selected_view = "record"
self.classicarea.show()
def on_listview_activate(self,widget):
self.waypointarea.hide()
self.classicarea.hide()
#self.athletearea.hide()
self.selected_view = "listview"
#self.parent.refreshListView()
self.parent.refreshListView(self.listsearch.condition)
self.listarea.show()
def on_athleteview_activate(self,widget=None):
#self.waypointarea.hide()
#self.classicarea.hide()
#self.listarea.hide()
self.parent.refreshAthleteView()
#self.athletearea.show()
def on_statsview_activate(self,widget=None):
self.parent.refreshStatsView()
def on_waypointsview_activate(self,widget):
self.listarea.hide()
self.classicarea.hide()
#self.athletearea.hide()
self.parent.refreshWaypointView()
self.waypointarea.show()
def on_menu_importdata_activate(self,widget):
self.parent.importData()
def on_extensions_activate(self,widget):
self.parent.editExtensions()
def on_gpsplugins_activate(self,widget):
self.parent.editGpsPlugins()
    #reviewed up to this point
def on_recordTreeView_button_press_event(self, treeview, event):
''' Handler for clicks on recordTreeview list (all records for a day)
event.button = mouse button pressed (i.e. 1 = left, 3 = right)
'''
logging.debug(">>")
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
if event.button == 3:
selected,iter = treeview.get_selection().get_selected()
                #In case there is a (bad) record without a date, so it can still be deleted
try:
date = self.parent.date.getDate()
except:
date = None
self.popup.show(selected.get_value(iter,0), event.button, time, date)
elif event.button == 1:
self.notebook.set_current_page(0)
self.parent.refreshGraphView("record")
logging.debug("<<")
return False
def on_allRecordTreeView_button_press(self, treeview, event):
''' Handler for clicks on listview list
event.button = mouse button pressed (i.e. 1 = left, 3 = right)
'''
logging.debug(">>")
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
if event.button == 3:
selected,iter = treeview.get_selection().get_selected()
                #In case there is a (bad) record without a date, so it can still be deleted
try:
date = self.parent.date.getDate()
except:
pass
self.popup.show(selected.get_value(iter,0), event.button, time, selected.get_value(iter,2))
elif event.button == 1:
self.notebook.set_current_page(0)
self.parent.refreshGraphView("record")
logging.debug("<<")
return False
def actualize_recordTreeView(self, record_list):
logging.debug(">>")
iterOne = False
store = gtk.TreeStore(
gobject.TYPE_INT, #record_id
gobject.TYPE_STRING, #Time
gobject.TYPE_STRING, #Sport
gobject.TYPE_STRING, #Distance
object)
for i in record_list:
#Get lap info
            #Could get an activity from the pool here, but that may be slow
id_record = i[8]
laps = self.parent.record.getLaps(id_record)
iter = store.append(None)
if not iterOne:
iterOne = iter
dateTime = i[12]
if dateTime is not None:
localTime = dateutil.parser.parse(dateTime).strftime("%H:%M")
else:
localTime = ""
if self.pytrainer_main.profile.prf_us_system:
dist = km2miles(i[2])
else:
dist = i[2]
distance = "%0.2f" % (float(dist) )
store.set (
iter,
0, int(i[8]),
1, str(localTime),
2, str(i[0]),
3, str(distance) #Needs to be US pref aware....
)
if laps is not None:
for lap in laps:
#"id_lap, record, elapsed_time, distance, start_lat, start_lon, end_lat, end_lon, calories, lap_number",
lapNumber = "%s %02d" % ( _("lap"), int(lap[9])+1 )
if self.pytrainer_main.profile.prf_us_system:
dist = km2miles(lap[3])
else:
dist = lap[3]
distance = "%0.2f" % (float(dist) / 1000.0)
timeHours = int(float(lap[2]) / 3600)
timeMin = int((float(lap[2]) / 3600.0 - timeHours) * 60)
timeSec = float(lap[2]) - (timeHours * 3600) - (timeMin * 60)
if timeHours > 0:
duration = "%d%s%02d%s%02d%s" % (timeHours, _("h"), timeMin, _("m"), timeSec, _("s"))
else:
duration = "%2d%s%02d%s" % (timeMin, _("m"), timeSec, _("s"))
child_iter = store.append(iter)
store.set (
child_iter,
0, int(i[8]),
1, lapNumber,
2, duration,
3, distance
)
store.set_sort_column_id(1, gtk.SORT_ASCENDING)
self.recordTreeView.set_model(store)
if iterOne:
self.recordTreeView.get_selection().select_iter(iterOne)
logging.debug("<<")
    def parseFloat(self,string):
        try:
            return float(string)
        except (TypeError, ValueError):
            return 0.0
def actualize_calendar(self,record_list):
logging.debug(">>")
self.calendar.clear_marks()
#Mark each day that has activity
for i in record_list:
self.calendar.mark_day(int(i))
#Turn on displaying of week numbers
display_options = self.calendar.get_display_options()
self.calendar.set_display_options(display_options|gtk.CALENDAR_SHOW_WEEK_NUMBERS)
logging.debug("<<")
def on_about_activate(self,widget):
if self.aboutwindow is None:
self.aboutwindow = About(self.data_path, self.version)
self.aboutwindow.run()
else:
self.aboutwindow.present()
def getSportSelected(self):
sport = self.sportlist.get_active()
if (sport > 0):
return self.sportlist.get_active_text()
else:
return None
def quit(self, *args):
window_size = "%d, %d" % self.size
self.pytrainer_main.profile.setValue("pytraining","window_size", window_size)
self.parent.quit()
#sys.exit("Exit!")
#self.parent.webservice.stop()
#self.gtk_main_quit()
def on_yearview_clicked(self,widget):
self.notebook.set_current_page(2)
self.selected_view="year"
self.actualize_yearview()
def on_recordTree_clicked(self,widget,num,num2):
selected,iter = widget.get_selection().get_selected()
self.parent.editRecord(selected.get_value(iter,0), self.selected_view)
### athleteview events ###
def on_athleteTreeView_button_press_event(self, treeview, event):
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
selected,iter = treeview.get_selection().get_selected()
if event.button == 3:
#Right mouse button...
idx = selected.get_value(iter,0)
date = selected.get_value(iter,1)
weight = selected.get_value(iter,2)
bf = selected.get_value(iter,3)
restingHR = selected.get_value(iter,4)
maxHR = selected.get_value(iter,5)
#print "show popup etc (clicked on idx %s, date %s)" % (idx, date)
#Show popup menu...
popup = gtk.Menu()
#Edit Entry Item
menuitem = gtk.MenuItem(label=_("Edit Entry"))
menuitem.connect("activate", self.on_athleteTreeView_edit, {'id':idx, 'date':date, 'weight':weight, 'bf':bf, 'restingHR':restingHR, 'maxHR':maxHR})
popup.attach(menuitem, 0, 1, 0, 1)
#New Entry Item
menuitem = gtk.MenuItem(label=_("New Entry"))
menuitem.connect("activate", self.on_athleteTreeView_edit, None)
popup.attach(menuitem, 0, 1, 1, 2)
#Separator
menuitem = gtk.SeparatorMenuItem()
popup.attach(menuitem, 0, 1, 2, 3)
#Delete Entry Item
menuitem = gtk.MenuItem(label=_("Delete Entry"))
menuitem.connect("activate", self.on_athleteTreeView_delete, idx)
popup.attach(menuitem, 0, 1, 3, 4)
popup.show_all()
popup.popup( None, None, None, event.button, time)
#self.popup.show(selected.get_value(iter,0), event.button, time)
#self.popup.popup( None, None, None, event_button, time)
else:
#Left mouse - so display this row
pass
'''
idx = selected.get_value(iter,0)
date = selected.get_value(iter,1)
weight = selected.get_value(iter,2)
bf = selected.get_value(iter,3)
restingHR = selected.get_value(iter,4)
maxHR = selected.get_value(iter,5)
self.update_athlete_item(idx, date, weight, bf, restingHR, maxHR)'''
def on_athleteTreeView_edit(self, widget, data):
logging.debug('>>')
if data is None:
#New entry...
logging.debug('New athlete entry')
title = _('Create Athlete Entry')
data = {'id':None, 'date': Date().getDate().strftime("%Y-%m-%d"), 'weight':"", 'bf':"", 'restingHR':"", 'maxHR':""}
else:
logging.debug('Edit existing athlete entry: %s', str(data))
title = _('Edit Athlete Entry')
dialog = gtk.Dialog(title=title, parent=self.pytrainer_main.windowmain.window1, flags= gtk.DIALOG_DESTROY_WITH_PARENT,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_SAVE, gtk.RESPONSE_ACCEPT))
dialog.set_has_separator(True)
dialog.set_modal(False)
#Get Content area of dialog
vbox = dialog.get_content_area()
#Build data display
table = gtk.Table(1,2)
self.entryList = []
#Add date
label = gtk.Label(_("<b>Date</b>"))
label.set_use_markup(True)
entry = gtk.Entry()
entry.set_text(data['date'])
self.entryList.append(entry)
        #Date calendar widget
cal = gtk.Image()
cal.set_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_BUTTON)
calbut = gtk.Button()
calbut.add(cal)
calbut.connect("clicked", self.on_athletecalendar_clicked)
table.attach(label,0,1,0,1)
table.attach(entry,1,2,0,1)
#table.attach(calbut,2,3,0,1) #TODO
#Add weight
label = gtk.Label(_("<b>Weight</b>"))
label.set_use_markup(True)
entry = gtk.Entry()
entry.set_text(data['weight'])
self.entryList.append(entry)
table.attach(label,0,1,1,2)
table.attach(entry,1,2,1,2)
#Add Body fat
label = gtk.Label(_("<b>Body Fat</b>"))
label.set_use_markup(True)
entry = gtk.Entry()
entry.set_text(data['bf'])
self.entryList.append(entry)
table.attach(label,0,1,2,3)
table.attach(entry,1,2,2,3)
#Add Resting HR
label = gtk.Label(_("<b>Resting Heart Rate</b>"))
label.set_use_markup(True)
entry = gtk.Entry()
entry.set_text(data['restingHR'])
self.entryList.append(entry)
table.attach(label,0,1,3,4)
table.attach(entry,1,2,3,4)
#Add Max HR
label = gtk.Label(_("<b>Max Heart Rate</b>"))
label.set_use_markup(True)
entry = gtk.Entry()
entry.set_text(data['maxHR'])
self.entryList.append(entry)
table.attach(label,0,1,4,5)
table.attach(entry,1,2,4,5)
vbox.add(table)
vbox.show_all()
response = dialog.run()
#dialog.destroy()
if response == gtk.RESPONSE_ACCEPT:
#print "on_athleteTreeView_edit save called", data
data['date'] = self.entryList[0].get_text()
data['weight'] = self.entryList[1].get_text()
data['bf'] = self.entryList[2].get_text()
data['restingHR'] = self.entryList[3].get_text()
data['maxHR'] = self.entryList[4].get_text()
self.on_athleteSave(data)
logging.debug('Athlete data saved: %s' % str(data))
dialog.destroy()
logging.debug('<<')
def on_athleteTreeView_delete(self, widget, data):
'''User has opted to delete entry'''
logging.debug(">>")
msg = _("Delete this database entry?")
md = gtk.MessageDialog(self.pytrainer_main.windowmain.window1, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, msg)
md.set_title(_("Are you sure?"))
response = md.run()
md.destroy()
if response == gtk.RESPONSE_OK:
logging.debug("User confirmed deletion of athlete entry with id: %s" % data)
self.pytrainer_main.athlete.delete_record(data)
self.parent.refreshAthleteView()
else:
logging.debug("User canceled athlete record deletion for id %s" % data)
logging.debug("<<")
def on_athleteSave(self, data):
#Get data in fields
id_athletestat = data['id']
date = data['date']
#Check if valid date supplied
try:
_date = dateutil.parser.parse(date).date()
except (ValueError) as e:
#TODO generate error message
print type(e)
print e
return
weight = data['weight']
bodyfat = data['bf']
restinghr = data['restingHR']
maxhr = data['maxHR']
#TODO - are any other fields required?
#Check if an entry has been edited or is a new one
if id_athletestat is None or id_athletestat == "":
#New entry
logging.debug('Creating new entry with values: date %s, weight %s, bodyfat %s, restinghr %s, maxhr %s' % (date, weight, bodyfat, restinghr, maxhr) )
self.parent.athlete.insert_athlete_stats(date, weight, bodyfat, restinghr, maxhr)
else:
#Edited existing entry
logging.debug('Updating id_athletestat:%s with values: date %s, weight %s, bodyfat %s, restinghr %s, maxhr %s' % (id_athletestat, date, weight, bodyfat, restinghr, maxhr) )
self.parent.athlete.update_athlete_stats(id_athletestat, date, weight, bodyfat, restinghr, maxhr)
self.parent.refreshAthleteView()
def on_athletecalendar_clicked(self,widget):
logging.debug(">>")
calendardialog = WindowCalendar(self.data_path,self)
calendardialog.run()
logging.debug("<<")
def setDate(self,date):
print date
#self.entryAthleteDate.set_text(date)
######## waypoints events ##########
def on_savewaypoint_clicked(self,widget):
selected,iter = self.waypointTreeView.get_selection().get_selected()
id_waypoint = selected.get_value(iter,0)
lat = self.waypoint_latitude.get_text()
lon = self.waypoint_longitude.get_text()
name = self.waypoint_name.get_text()
desc = self.waypoint_description.get_text()
sym = self.waypoint_type.get_active_text()
self.parent.updateWaypoint(id_waypoint,lat,lon,name,desc,sym)
def on_removewaypoint_clicked(self,widget):
selected,iter = self.waypointTreeView.get_selection().get_selected()
id_waypoint = selected.get_value(iter,0)
self.parent.removeWaypoint(id_waypoint)
def on_hrpiebutton_clicked(self,widget):
self.heartrate_vbox2.show()
self.heartrate_vbox.hide()
def on_hrplotbutton_clicked(self,widget):
self.heartrate_vbox.show()
self.heartrate_vbox2.hide()
# --- viiru-/pytrainer | pytrainer/gui/windowmain.py | Python | gpl-2.0 ---
import logging
from ftmstore import get_dataset
log = logging.getLogger(__name__)
MODEL_ORIGIN = "model"
def get_aggregator_name(collection):
return "collection_%s" % collection.id
def get_aggregator(collection, origin="aleph"):
"""Connect to a followthemoney dataset."""
dataset = get_aggregator_name(collection)
return get_dataset(dataset, origin=origin)
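# Illustrative usage (a sketch, not part of the original module): any object
# exposing an ``id`` attribute will do, so a hypothetical stand-in suffices.
#
#     class FakeCollection(object):
#         id = 42
#
#     aggregator = get_aggregator(FakeCollection())
#     # -> ftmstore dataset named "collection_42", created with origin "aleph"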
# --- pudo/aleph | aleph/logic/aggregator.py | Python | mit ---
import RPi.GPIO as GPIO
import time
import sys
#pin to which the control wire of the upper (up-down) servo motor is connected
servo_pin = 12
#read the movement value to send to the servo
duty_cycle = float(sys.argv[1])
GPIO.setmode(GPIO.BOARD)
GPIO.setup(servo_pin, GPIO.OUT)
# Create the PWM channel on the servo pin with a 50 Hz frequency
pwm_servo = GPIO.PWM(servo_pin, 50)
pwm_servo.start(duty_cycle)
try:
    while True:
        pwm_servo.ChangeDutyCycle(duty_cycle) #the servo rotates to the value given as input
        time.sleep(0.01) # wait a short moment for the servo to finish its move
        GPIO.cleanup() # release the GPIO pins cleanly, then leave the script with exit()
        exit()
except KeyboardInterrupt:
    print("CTRL-C: Terminating program.") # when the script is run standalone, this allows an emergency stop
# --- MarionPiEnsg/RaspiModel | Application/Raspberry_Pi/scripts_python/1-activeRobotHaut.py | Python | gpl-3.0 ---
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
def configuration(parent_name='special', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('_precompute', parent_name, top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration().todict())
# --- DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/special/_precompute/setup.py | Python | mit ---
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple generator and discriminator models.
Based on the convolutional and "deconvolutional" models presented in
"Unsupervised Representation Learning with Deep Convolutional Generative
Adversarial Networks" by A. Radford et. al.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def _leaky_relu(x):
return tf.nn.leaky_relu(x, alpha=0.2)
def _batch_norm(x, is_training, name):
return tf.layers.batch_normalization(
x, momentum=0.9, epsilon=1e-5, training=is_training, name=name)
def _dense(x, channels, name):
return tf.layers.dense(
x, channels,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def _conv2d(x, filters, kernel_size, stride, name):
return tf.layers.conv2d(
x, filters, [kernel_size, kernel_size],
strides=[stride, stride], padding='same',
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def _deconv2d(x, filters, kernel_size, stride, name):
return tf.layers.conv2d_transpose(
x, filters, [kernel_size, kernel_size],
strides=[stride, stride], padding='same',
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def discriminator(x, is_training=True, scope='Discriminator'):
# conv64-lrelu + conv128-bn-lrelu + fc1024-bn-lrelu + fc1
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
x = _conv2d(x, 64, 4, 2, name='d_conv1')
x = _leaky_relu(x)
x = _conv2d(x, 128, 4, 2, name='d_conv2')
x = _leaky_relu(_batch_norm(x, is_training, name='d_bn2'))
x = tf.reshape(x, [-1, 7 * 7 * 128])
x = _dense(x, 1024, name='d_fc3')
x = _leaky_relu(_batch_norm(x, is_training, name='d_bn3'))
x = _dense(x, 1, name='d_fc4')
return x
def generator(x, is_training=True, scope='Generator'):
# fc1024-bn-relu + fc6272-bn-relu + deconv64-bn-relu + deconv1-tanh
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
x = _dense(x, 1024, name='g_fc1')
x = tf.nn.relu(_batch_norm(x, is_training, name='g_bn1'))
x = _dense(x, 7 * 7 * 128, name='g_fc2')
x = tf.nn.relu(_batch_norm(x, is_training, name='g_bn2'))
x = tf.reshape(x, [-1, 7, 7, 128])
x = _deconv2d(x, 64, 4, 2, name='g_dconv3')
x = tf.nn.relu(_batch_norm(x, is_training, name='g_bn3'))
x = _deconv2d(x, 1, 4, 2, name='g_dconv4')
x = tf.tanh(x)
return x
# TODO(chrisying): objective score (e.g. MNIST score)
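# Illustrative wiring (a sketch, not part of the original file), assuming a
# latent size of 100 and `real_images` shaped [batch, 28, 28, 1]:
#
#     z = tf.random_normal([batch_size, 100])
#     fake_images = generator(z, is_training=True)   # -> [batch, 28, 28, 1]
#     logits_fake = discriminator(fake_images)
#     logits_real = discriminator(real_images)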
# --- tensorflow/tpu | models/experimental/dcgan/mnist_model.py | Python | apache-2.0 ---
# -*- coding: utf-8 -*-
__author__ = 'k'
import re
import scrapy
from bs4 import BeautifulSoup
import logging
from thepaper.items import NewsItem
import json
logger = logging.getLogger("NbdSpider")
from thepaper.settings import *
from thepaper.util import judge_news_crawl
import time
class DonewsSpider(scrapy.spiders.Spider):
domain = "http://www.donews.com/net/"
name = "donews"
allowed_domains = ["donews.com",]
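    # Maps a topic URL to the page index at which crawling stopped; 0 (falsy)
    # means the spider should keep following that topic's pagination.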
flag = {}
start_urls = [
"http://www.donews.com/net/",
"http://www.donews.com/original/",
]
def parse(self,response):
origin_url = response.url
topic_url = origin_url[:-1]
self.flag.setdefault(topic_url,0)
yield scrapy.Request(origin_url,callback=self.parse_topic)
def parse_topic(self,response):
origin_url = response.url
temp = origin_url.rsplit("/",1)
topic_url = temp[0]
if temp[1] == "":
pageindex = 1
else:
pageindex = temp[1].split("_",1)[-1].split(".",1)[0]
soup = BeautifulSoup(response.body,"lxml")
catalogue = soup.find("div",class_ ="arttitle").text.strip()
news_list = soup.find("ul",class_ = "art_list mt11").find_all("li")
for news in news_list:
title_info = news.find("h5",class_= "title")
text_info = news.find("div",class_ = "text")
news_date = text_info.find("span",class_ = "time").text
news_date = "%s-%s-%s %s:00" % (time.strftime("%Y"),int(news_date[0:2]),int(news_date[3:5]),news_date[7:])
author = text_info.find("span",class_ = "place").text.strip()
if author == "":
author = None
abstract = text_info.find("p",class_ = "info").text.strip()
pic = text_info.find("img").get("src") if text_info.find("img") else None
title = title_info.find("a").text.strip()
news_url = title_info.find("a").get("href")
temp = news_url.split("/")
news_no = temp[-2] + "_" + temp[-1].split(".")[0]
item = NewsItem(
news_url =news_url,
news_date = news_date,
title = title,
abstract = abstract,
author = author,
news_no = news_no,
catalogue = catalogue,
pic = pic,
)
item = judge_news_crawl(item)
if item:
yield scrapy.Request(item["news_url"],callback=self.parse_news,meta={'item':item})
else:
self.flag[topic_url] = pageindex
if not self.flag[topic_url]:
next_url = "%s/index_%s.html" % (topic_url,int(pageindex) + 1)
yield scrapy.Request(next_url,callback=self.parse_topic)
def parse_news(self,response):
item = response.meta.get("item",NewsItem())
soup = BeautifulSoup(response.body,"lxml")
referer_web = soup.find("span", id= "source_baidu").text if soup.find("span", id= "source_baidu") else None
temp = soup.find("div",id = "arttext")
if item["pic"] == None:
item["pic"] = temp.find("img").get("src") if temp.find("img") else None
content = "\n\n".join([ t.text.strip() for t in temp.find_all("p")])
item['referer_web'] = referer_web
item['content'] = content
item['crawl_date'] = NOW
yield item
# --- yinzishao/NewsScrapy | thepaper/thepaper/spiders/donews_spider.py | Python | lgpl-3.0 ---
# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import TestCase, run_module_suite, assert_raises
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal,
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,
empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
class TestMaskedArray(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
self.assertTrue(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertTrue((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
# Tests how masks are initialized at the creation of Maskedarrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
def test_creation_with_list_of_maskedarrays(self):
        # Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
self.assertTrue(data.mask is nomask)
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_fix_invalid(self):
# Checks fix_invalid.
with np.errstate(invalid='ignore'):
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
def test_maskedelement(self):
# Test of masked element
x = arange(6)
x[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
# don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
#self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
#self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_set_element_as_object(self):
# Tests setting elements with object
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
self.assertTrue(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
self.assertTrue(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
# tests of indexing
assert_(type(x2[1, 0]) is type(x1[1, 0]))
assert_(x1[1, 0] == x2[1, 0])
assert_(x2[1, 1] is masked)
assert_equal(x1[0, 2], x2[0, 2])
assert_equal(x1[0, 1:], x2[0, 1:])
assert_equal(x1[:, 2], x2[:, 2])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[0, 2] = 9
x2[0, 2] = 9
assert_equal(x1, x2)
x1[0, 1:] = 99
x2[0, 1:] = 99
assert_equal(x1, x2)
x2[0, 1] = masked
assert_equal(x1, x2)
x2[0, 1:] = masked
assert_equal(x1, x2)
x2[0, :] = x1[0, :]
x2[0, 1] = masked
assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
assert_(allequal(x4[1], array([1, 2, 3])))
x1 = np.matrix(np.arange(5) * 1.0)
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
def test_copy(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
#self.assertTrue( y1._data is x1)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
self.assertTrue(allequal(x1, y1.data))
#self.assertTrue( y1.mask is m)
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
self.assertTrue(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m)
self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
#self.assertTrue( y2.mask is m)
self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
#self.assertTrue( y2.mask is not m)
self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
' mask = [False True False],\n'
' fill_value = 999999)\n')
def test_pickling(self):
# Tests pickling
a = arange(10)
a[::3] = masked
a.fill_value = 999
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
assert_equal(a_pickled.fill_value, 999)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
mc_pickled = pickle.loads(mc.dumps())
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
test = pickle.loads(pickle.dumps(b))
assert_equal(test, b)
def test_single_element_subscript(self):
# Tests single element subscripts of Maskedarrays.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
# Tests some communication issues with Python.
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
self.assertRaises(TypeError, lambda:float(a))
assert_equal(float(a[-1]), 3.)
self.assertTrue(np.isnan(float(a[0])))
self.assertRaises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
self.assertRaises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_equal(x, z)
def test_oddfeatures_2(self):
# Tests some more features.
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_oddfeatures_3(self):
# Tests some generic features
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_w_object_dtype(self):
a = np.ma.masked_all(1, dtype='O')
assert_equal(a.filled('x')[0], 'x')
def test_filled_w_flexible_dtype(self):
# Test filled w/ flexible dtype
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_w_mvoid(self):
# Test filled w/ mvoid
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
def test_filled_w_nested_dtype(self):
# Test filled w/ nested dtype
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
def test_filled_w_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
self.assertTrue(a.flags['F_CONTIGUOUS'])
self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
# Test creating a mvoid object
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert_(isinstance(f, mvoid))
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert_(isinstance(a, mvoid))
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert_(isinstance(f, mvoid))
def test_mvoid_getitem(self):
# Test mvoid.__getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
f = a[0]
self.assertTrue(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
self.assertTrue(isinstance(f, mvoid))
self.assertTrue(f[0] is masked)
self.assertTrue(f['a'] is masked)
assert_equal(f[1], 4)
def test_mvoid_iter(self):
# Test iteration on __getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
# Test printing a mvoid
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
def test_mvoid_multidim_print(self):
# regression test for gh-6019
t_ma = masked_array(data = [([1, 2, 3],)],
mask = [([False, True, False],)],
fill_value = ([999999, 999999, 999999],),
dtype = [('a', '<i8', (3,))])
assert str(t_ma[0]) == "([1, --, 3],)"
assert repr(t_ma[0]) == "([1, --, 3],)"
        # additional tests with structured arrays
t_2d = masked_array(data = [([[1, 2], [3,4]],)],
mask = [([[False, True], [True, False]],)],
dtype = [('a', '<i8', (2,2))])
assert str(t_2d[0]) == "([[1, --], [--, 4]],)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]],)"
t_0d = masked_array(data = [(1,2)],
mask = [(True,False)],
dtype = [('a', '<i8'), ('b', '<i8')])
assert str(t_0d[0]) == "(--, 2)"
assert repr(t_0d[0]) == "(--, 2)"
t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
mask = [([[False, True], [True, False]], False)],
dtype = [('a', '<i8', (2,2)), ('b', float)])
assert str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
dtype = [('a', '<i8'), ('b', 'i4,i4')])
assert str(t_ne[0]) == "(--, (--, 1))"
assert repr(t_ne[0]) == "(--, (--, 1))"
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
assert mx[0] is mx1
assert mx[1] is not mx2
assert np.all(mx[1].data == mx2.data)
assert np.all(mx[1].mask)
# check that we return a view.
mx[1].data[0] = 0.
assert mx2[0] == 0.
class TestMaskedArrayArithmetic(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
z = x / y[None,:]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
        # Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
assert_equal(getmaskarray(a / 2), [0, 0, 0])
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
        # Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
self.assertTrue((1 / array(0)).mask)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue(maximum(xm, xm).mask)
self.assertTrue(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
        # Tests (in)equality on the masked singleton
a = array([1, 2, 3], mask=[1, 1, 0])
assert_((a[0] == 0) is masked)
assert_((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
# Checks that there's no collapsing to masked
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert_(y is masked)
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
# Check that we're not losing the shape of a singleton
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func(self):
# Tests count
assert_equal(1, count(1))
assert_equal(0, count(array(1, mask=[1])))
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
self.assertTrue(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_equal([1, 2], res)
assert_(getmask(res) is nomask)
ott = array([0., 1., 2., 3.])
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_(res.dtype.type is np.intp)
assert_raises(IndexError, ott.count, 1)
def test_minmax_func(self):
# Tests minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# max doesn't work if shaped
xr = np.ravel(x)
xmr = ravel(xm)
# following are true because of careful selection of data
assert_equal(max(xr), maximum(xmr))
assert_equal(min(xr), minimum(xmr))
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum(x), 2)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
# Test np.maximum.reduce on an array w/ a full False mask
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
# Tests the min/max functions with explicit outputs
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
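# Added note: the MaskError branch above reflects a real asymmetry -- the
# plain np.min cannot write a potentially masked result into an integer
# ``out`` array, while the numpy.ma version fills the supplied output and
# returns it, which is what the ``result is nout`` checks exercise.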
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
self.assertTrue(xm[0].max() is masked)
self.assertTrue(xm[0].max(0) is masked)
self.assertTrue(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
self.assertTrue(xm[0].min() is masked)
self.assertTrue(xm[0].min(0) is masked)
self.assertTrue(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
self.assertTrue(xm[0].ptp() is masked)
self.assertTrue(xm[0].ptp(0) is masked)
self.assertTrue(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
self.assertTrue(x.min() is masked)
self.assertTrue(x.max() is masked)
self.assertTrue(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
assert_equal(np.product(x, axis=0), product(x, axis=0))
assert_equal(np.product(x, 0), product(x, 0))
assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
assert_equal(np.product(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
# Test domained binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
# Check that we don't shrink a mask when not wanted
# Binary operations
a = masked_array([1., 2., 3.], mask=[False, False, False],
shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
def test_noshrink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
assert_equal(a.mask, [0, 0, 0])
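# For contrast (added note): with the default shrink=True, masked_values
# collapses an all-False mask to the nomask singleton:
#     np.ma.masked_values([1., 2.5, 3.1], 1.5).mask is nomask  # -> True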
def test_mod(self):
# Tests mod
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
def test_TakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_imag_real(self):
# Check complex
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
assert_(output[0] is masked)
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test, [True, True])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [False, True])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [True, False])
assert_equal(test.mask, [False, False])
def test_ne_on_structured(self):
# Test the inequality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test, [False, False])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [True, False])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [False, True])
assert_equal(test.mask, [False, False])
def test_eq_w_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
# With partial mask
a = array([1, 2], mask=[0, 1])
assert_equal(a == None, False)
assert_equal(a.data == None, False)
assert_equal(a.mask == None, False)
assert_equal(a != None, True)
# With nomask
a = array([1, 2], mask=False)
assert_equal(a == None, False)
assert_equal(a != None, True)
# With complete mask
a = array([1, 2], mask=True)
assert_equal(a == None, False)
assert_equal(a != None, True)
# Fully masked, even comparison to None should return "masked"
a = masked
assert_equal(a == None, masked)
def test_eq_w_scalar(self):
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
def test_numpyarithmetics(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
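# Added note: the point of the checks above is that log masks its invalid
# inputs (here -1 and 0) only in the result; the operand's own mask is
# left untouched, so repeated calls never accumulate spurious mask entries
# on ``a`` itself.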
class TestMaskedArrayAttributes(TestCase):
def test_keepmask(self):
# Tests the keep_mask flag
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
# keep_mask defaults to True
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
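# Added summary: with keep_mask=True the new mask is OR-ed with the
# existing one ([1, 0, 0] | [0, 1, 0] -> [1, 1, 0]), while keep_mask=False
# simply replaces it.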
def test_hardmask(self):
# Test hard_mask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
# We need to copy, to avoid updating d through xh!
xs = array(d, mask=m, hard_mask=False, copy=True)
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, [0, 0, 0, 1, 0])
self.assertTrue(xh._hardmask)
self.assertTrue(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
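# Added summary: under a hard mask, assignments to masked entries are
# silently ignored and a masked entry can never be unmasked by assignment;
# a soft mask lets assignment update the data and clear the mask. Sketch:
#     h = array([1, 2], mask=[1, 0], hard_mask=True)
#     h[0] = 9   # ignored: h.data stays [1, 2], h.mask stays [1, 0]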
def test_hardmask_again(self):
# Another test of hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
# OK, yet another test of hardmask
# Make sure that harden_mask/soften_mask/unshare_mask return self
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
# Checks the behaviour of _smallmask
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
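# Added note: _smallmask is the per-array switch behind this behavior --
# when True (the default), a mask that becomes all-False after assignment
# collapses back to nomask; when False, the full boolean array is kept.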
def test_shrink_mask(self):
# Tests .shrink_mask()
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
# test simple access
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
# Test flat on masked_matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
assert_equal(test, control)
# Test setting
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
testflat = test.flat
testflat[:] = testflat[[2, 1, 0]]
assert_equal(test, control)
testflat[0] = 9
assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
[(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
dtype=[('a', int), ('b', float), ('c', '|S8')])
x['a'][0, 1] = masked
x['b'][1, 0] = masked
x['c'][0, 2] = masked
x[-1, -1] = masked
xflat = x.flat
assert_equal(xflat[0], x[0, 0])
assert_equal(xflat[1], x[0, 1])
assert_equal(xflat[2], x[0, 2])
assert_equal(xflat[:3], x[0])
assert_equal(xflat[3], x[1, 0])
assert_equal(xflat[4], x[1, 1])
assert_equal(xflat[5], x[1, 2])
assert_equal(xflat[3:], x[1])
assert_equal(xflat[-1], x[-1, -1])
i = 0
j = 0
for xf in xflat:
assert_equal(xf, x[j, i])
i += 1
if i >= x.shape[-1]:
i = 0
j += 1
# test that matrices keep the correct shape (#4615)
a = masked_array(np.matrix(np.eye(2)), mask=0)
b = a.flat
b01 = b[:2]
assert_equal(b01.data, array([[1., 0.]]))
assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
a = np.zeros(4, dtype='f4,i4')
m = np.ma.array(a)
m.dtype = np.dtype('f4')
repr(m) # raises?
assert_equal(m.dtype, np.dtype('f4'))
# check that dtype changes that change shape of mask too much
# are not allowed
def assign():
m = np.ma.array(a)
m.dtype = np.dtype('f8')
assert_raises(ValueError, assign)
b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
assert_equal(b.dtype, np.dtype('f4'))
# check that nomask is preserved
a = np.zeros(4, dtype='f4')
m = np.ma.array(a)
m.dtype = np.dtype('f4,i4')
assert_equal(m.dtype, np.dtype('f4,i4'))
assert_equal(m._mask, np.ma.nomask)
class TestFillingValues(TestCase):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
_check_fill_value = np.ma.core._check_fill_value
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
fval = _check_fill_value(0, "|S3")
assert_equal(fval, asbytes("0"))
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using a flexible type w/ a different type shouldn't matter
# BEHAVIOR in 1.5 and earlier: match structured types by position
#fill_val = np.array((-999, -12345678.9, "???"),
# dtype=[("A", int), ("B", float), ("C", "|S3")])
# BEHAVIOR in 1.6 and later: match structured types by name
fill_val = np.array(("???", -999, -12345678.9),
dtype=[("c", "|S3"), ("a", int), ("b", float), ])
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, asbytes("???"))
fval = _check_fill_value(fill_val, object)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
#self.assertTrue(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
# Tests the behavior of fill_value during conversion
# A tailored comment is attached via _optinfo to make sure special
# attributes are properly carried through the conversion
a = array(asbytes_nested(['3', '4', '5']))
a._optinfo.update({'comment':"updated!"})
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
assert_equal(b._optinfo['comment'], "updated!")
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., asbytes('999')])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, asbytes('999'))
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, asbytes('???')))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, asbytes('???'))
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
def test_fillvalue_datetime_timedelta(self):
# Test default fillvalue for datetime64 and timedelta64 types.
# See issue #4476, this would return '?' which would cause errors
# elsewhere
for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
assert_equal(test, control)
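# Added note: NaT ("not a time") is the natural analogue of NaN for
# datetime64/timedelta64, so using it as the default fill value avoids
# the bogus '?' fill reported in gh-4476.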
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
def test_fillvalue_implicit_structured_array(self):
# Check that fill_value is always defined for structured arrays
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert_(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
# Test adding a fill_value parameter to empty/ones/zeros
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
def test_shape_argument(self):
# Test that shape can be provided as an argument
# GH issue 6106
a = empty(shape=(3, ))
assert_equal(a.shape, (3, ))
a = ones(shape=(3, ), dtype=float)
assert_equal(a.shape, (3, ))
a = zeros(shape=(3, ), dtype=complex)
assert_equal(a.shape, (3, ))
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
# Create initial masked array
x = array([1, 2, 3], fill_value=1, dtype=np.int64)
# Check that fill_value is preserved by default
y = x.view()
assert_(y.fill_value == 1)
# Check that fill_value is preserved if dtype is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute
y = x.view(MaskedArray)
assert_(y.fill_value == 1)
# Check that fill_value is preserved if type is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute (by
# default, the first argument is dtype, not type)
y = x.view(type=MaskedArray)
assert_(y.fill_value == 1)
# Check that code does not crash if passed an ndarray sub-class that
# does not have a _fill_value attribute
y = x.view(np.ndarray)
y = x.view(type=np.ndarray)
# Check that fill_value can be overridden with view
y = x.view(MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value can be overridden with view (using type=)
y = x.view(type=MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value gets reset if passed a dtype but not a
# fill_value. This is because even though in some cases one can safely
# cast the fill_value, e.g. if taking an int64 view of an int32 array,
# in other cases, this cannot be done (e.g. int32 view of an int64
# array with a large fill_value).
y = x.view(dtype=np.int32)
assert_(y.fill_value == 999999)
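# Added note: 999999 is numpy.ma's default fill value for integer dtypes
# (see default_fill_value), which is what a dtype-changing view falls
# back to when no explicit fill_value is supplied.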
class TestUfuncs(TestCase):
# Test class for the application of ufuncs on MaskedArrays.
def setUp(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
# 'nonzero', 'around',
'floor', 'ceil',
# 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
self.assertTrue(not alltrue(a, axis=0))
self.assertTrue(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
# Tests extrema on MaskedArrays.
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
self.assertTrue(amask.max(1)[0].mask)
self.assertTrue(amask.min(1)[0].mask)
def test_ndarray_mask(self):
# Check that the mask of the result is an ndarray (not a MaskedArray)
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
self.assertTrue(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
a = masked_array([1., 2.], mask=[1, 0])
self.assertRaises(TypeError, operator.mul, a, "abc")
self.assertRaises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
return "My mul"
def __rmul__(self, other):
return "My rmul"
me = MyClass()
assert_(me * a == "My mul")
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
class MyClass2(object):
__array_priority__ = 100
def __mul__(self, other):
return "Me2mul"
def __rmul__(self, other):
return "Me2rmul"
def __rdiv__(self, other):
return "Me2rdiv"
__rtruediv__ = __rdiv__
me_too = MyClass2()
assert_(a.__mul__(me_too) is NotImplemented)
assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
assert_(a.__truediv__(me_too) is NotImplemented)
assert_(me_too * a == "Me2mul")
assert_(a * me_too == "Me2rmul")
assert_(a / me_too == "Me2rdiv")
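# Added note: returning NotImplemented from MaskedArray.__mul__ is what
# lets Python fall back to the other operand's reflected method, and
# __array_priority__ decides who wins when both operands are array-like --
# hence "Me2rmul" rather than a MaskedArray above.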
class TestMaskedArrayInPlaceArithmetics(TestCase):
# Test MaskedArray in-place arithmetic
def setUp(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
self.othertypes = [np.dtype(_).type for _ in self.othertypes]
self.uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
def test_inplace_addition_scalar(self):
# Test of inplace additions
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
id1 = x.data.ctypes._data
x += 1.
assert_(id1 == x.data.ctypes._data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
(x, y, xm) = self.intdata
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(xm._data,
# [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_addition_array_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Either case is handled the same way below.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Either case is handled the same way below.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
assert_equal(len(w), 0, "Failed on type=%s." % t)
class TestMaskedArrayMethods(TestCase):
# Test class for miscellaneous MaskedArray methods.
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
# Tests some MaskedArray methods.
a = array([1, 3, 2])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
self.assertTrue(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
self.assertTrue(not allclose(a, b))
b[0] = np.inf
self.assertTrue(allclose(a, b))
# Test allclose w/ masked
a = masked_array(a)
a[-1] = masked
self.assertTrue(allclose(a, b, masked_equal=True))
self.assertTrue(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
self.assertTrue(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
self.assertTrue(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
self.assertFalse(mxbig.all())
self.assertTrue(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
self.assertFalse(mxsmall.all())
self.assertTrue(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
self.assertTrue(full.all() is masked)
full.all(out=store)
self.assertTrue(store)
self.assertTrue(store._mask, True)
self.assertTrue(store is not masked)
store = empty((), dtype=bool)
self.assertTrue(full.any() is masked)
full.any(out=store)
self.assertTrue(not store)
self.assertTrue(store._mask, True)
self.assertTrue(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
# Tests clip on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
a = masked_array([[10, 20, 30], [40, 50, 60]],
mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_empty(self):
# Tests empty and empty_like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check empty_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = empty_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
def test_put(self):
# Tests put.
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
#self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_nomask(self):
# GitHub issue 6425
x = zeros(10)
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
self.assertTrue(x[0] is not masked)
assert_equal(x[0], 0)
self.assertTrue(x[1] is not masked)
assert_equal(x[1], 3)
self.assertTrue(x[2] is masked)
self.assertTrue(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
# Tests put on hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
# Checks that an unshrunk mask (shrink=False) is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_reshape(self):
# Tests reshape
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
# Test sort
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
x = [1, 4, 2, 3]
sortedx = sort(x)
self.assertTrue(not isinstance(sortedx, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
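# Added summary: endwith=True (the default) sorts masked entries to the
# end of the array and endwith=False to the front; the unmasked data is
# ordered normally either way, as the _data/_mask pairs above show.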
def test_sort_2d(self):
# Check sort of 2D array.
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
def test_sort_flexible(self):
# Test sort on flexible dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
test = sort(a)
b = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
test = sort(a, endwith=False)
b = array(
data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
def test_argsort(self):
# Test argsort
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
# Check squeeze
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
data = masked_array([[1]], mask=True)
self.assertTrue(data.squeeze() is masked)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
# Tests take
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
        x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
# Test take w/ masked indices
a = np.array((40, 18, 37, 9, 22))
indices = np.arange(3)[None,:] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 40],
[22, 40, 40]])
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_tolist(self):
        # Tests tolist
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
self.assertTrue(xlist[1] is None)
self.assertTrue(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, asbytes('one')),
(2, 2.2, asbytes('two')),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
def test_tolist_specialcase(self):
# Test mvoid.tolist: make sure we return a standard Python object
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be
# standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
def test_toflex(self):
# Test the conversion to records
data = arange(10)
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
# Test the reconstruction of a masked_array from a record
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
def test_arraymethod(self):
# Test a _arraymethod w/ n argument
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
class TestMaskedArrayMathMethods(TestCase):
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
# Tests cumsum & cumprod on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
# Tests cumsum/cumprod w/ output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
self.assertTrue(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float)
cols = np.zeros(m, np.float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_add_object(self):
x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
y = x + 'x'
assert_equal(y[1], 'bx')
assert_(y.mask[0])
def test_sum_object(self):
# Test sum on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
a = masked_array([1, 2, 3], dtype=np.object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
def test_trace(self):
# Tests trace on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
fx = mx.filled(0)
r = mx.dot(mx)
assert_almost_equal(r.filled(0), fx.dot(fx))
assert_(r.mask is nomask)
fX = mX.filled(0)
r = mX.dot(mX)
assert_almost_equal(r.filled(0), fX.dot(fX))
assert_(r.mask[1,3])
r1 = empty_like(r)
mX.dot(mX, r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
fXX, fYY = mXX.filled(0), mYY.filled(0)
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
mXX.dot(mYY, r1)
assert_almost_equal(r, r1)
def test_dot_shape_mismatch(self):
# regression test
x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
z = masked_array([[0,1],[3,3]])
x.dot(y, out=z)
assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
assert_almost_equal(z.mask, [[0, 1], [0, 0]])
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
def test_varstd_specialcases(self):
# Test a special case for var
nout = np.array(-1, dtype=float)
mout = array(-1, dtype=float)
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method() is masked)
self.assertTrue(method(0) is masked)
self.assertTrue(method(-1) is masked)
# Using a masked array as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=mout)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=nout)
self.assertTrue(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method(ddof=1) is masked)
self.assertTrue(method(0, ddof=1) is masked)
self.assertTrue(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
self.assertTrue(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
        # Only two values per column are unmasked, so n - ddof == 0 and
        # every column comes back masked.
        test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
def test_diag(self):
# Test diag
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
def test_axis_methods_nomask(self):
# Test the combination nomask & methods w/ axis
a = array([[1, 2, 3], [4, 5, 6]])
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
class TestMaskedArrayMathMethodsComplex(TestCase):
# Test class for miscellaneous MaskedArrays methods.
def setUp(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
class TestMaskedArrayFunctions(TestCase):
# Test class for miscellaneous functions.
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
# Tests masking functions.
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x),
masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5])
def test_masked_where_oddities(self):
# Tests some generic features.
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
try:
test = masked_equal(1, a)
except IndexError:
pass
else:
raise AssertionError("Should have failed...")
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_where_structured(self):
# test that masked_where on a structured array sets a structured
# mask (see issue #2972)
a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
am = np.ma.masked_where(a["A"] < 5, a)
assert_equal(am.mask.dtype.names, am.dtype.names)
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
def test_masked_otherfunctions(self):
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0])
assert_equal(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1])
assert_equal(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_round_with_output(self):
# Testing round with an explicit output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
self.assertTrue(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
# GH issue 2244
a = array(1.1, mask=[False])
assert_equal(a.round(), 1)
a = array(1.1, mask=[True])
assert_(a.round() is masked)
a = array(1.1, mask=[False])
output = np.empty(1, dtype=float)
output.fill(-9999)
a.round(out=output)
assert_equal(output, 1)
a = array(1.1, mask=[False])
output = array(-9999., mask=[True])
a.round(out=output)
assert_equal(output[()], 1)
a = array(1.1, mask=[True])
output = array(-9999., mask=[False])
a.round(out=output)
assert_(output[()] is masked)
def test_identity(self):
a = identity(5)
self.assertTrue(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
self.assertTrue(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
def test_power_w_broadcasting(self):
# Test power w/ broadcasting
a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
b1 = np.array([2, 4, 3])
b2 = np.array([b1, b1])
b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
mask=[[1, 1, 0], [0, 1, 1]])
# No broadcasting, base & exp w/ mask
test = a2m ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# No broadcasting, base w/ mask, exp w/o mask
test = a2m ** b2
assert_equal(test, ctrl)
assert_equal(test.mask, a2m.mask)
# No broadcasting, base w/o mask, exp w/ mask
test = a2 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, b2m.mask)
ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
mask=[[0, 1, 0], [0, 1, 0]])
test = b1 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
test = b2m ** b1
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_where(self):
# Test the where function
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9.,
-9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_object(self):
a = np.array(None)
b = masked_array(None)
r = b.copy()
assert_equal(np.ma.where(True, a, a), r)
assert_equal(np.ma.where(True, b, b), r)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_equal(x, z)
# Set True to masked
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert_(getmask(zm) is nomask)
assert_equal(zm, [1, 2, 3, 40, 50])
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
# Test the type conservation with where
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_choose(self):
# Test choose
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
def test_choose_with_out(self):
# Test choose with an explicit out keyword
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
self.assertTrue(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
        # Check with some masked choices + out in a ndarray
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
c = np.reshape(a, (2, 5))
self.assertTrue(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
self.assertTrue(c[0, 0] is masked)
self.assertTrue(c.flags['C'])
def test_make_mask_descr(self):
# Test make_mask_descr
# Flexible
ntype = [('a', np.float), ('b', np.float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', np.bool), ('b', np.bool)])
# Standard w/ shape
ntype = (np.float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (np.bool, 2))
        # Standard w/o shape
ntype = np.float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(np.bool))
# Nested
ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
        # Named + shape
ntype = [('a', (np.float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (np.bool, 2))]))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
def test_make_mask(self):
# Test make_mask
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=np.bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.float), ('b', np.float)]
bdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
def test_mask_or(self):
# Initialize
mtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using True as input. Won't work, but keep it for the kicks
# test = mask_or(mask, True)
# control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype)
# assert_equal(test, control)
# Using another array w / the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w / a different dtype
othertype = [('A', np.bool), ('B', np.bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
try:
test = mask_or(mask, other)
except ValueError:
pass
# Using nested arrays
dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
# Tests flatten mask
        # Standard dtype
mask = np.array([0, 0, 1], dtype=np.bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
# Test functions on ndarrays
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
def test_compress(self):
# Test compress function on ndarray and masked array
# Address Github #2495.
arr = np.arange(8)
arr.shape = 4, 2
cond = np.array([True, False, True, True])
control = arr[[0, 2, 3]]
test = np.ma.compress(cond, arr, axis=0)
assert_equal(test, control)
marr = np.ma.array(arr)
test = np.ma.compress(cond, marr, axis=0)
assert_equal(test, control)
def test_compressed(self):
# Test ma.compressed function.
# Address gh-4026
a = np.ma.array([1, 2])
test = np.ma.compressed(a)
assert_(type(test) is np.ndarray)
# Test case when input data is ndarray subclass
class A(np.ndarray):
pass
a = np.ma.array(A(shape=0))
test = np.ma.compressed(a)
assert_(type(test) is A)
# Test that compress flattens
test = np.ma.compressed([[1],[2]])
assert_equal(test.ndim, 1)
test = np.ma.compressed([[[[[1]]]]])
assert_equal(test.ndim, 1)
# Test case when input is MaskedArray subclass
class M(MaskedArray):
pass
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test.ndim, 1)
        # with .compressed() overridden
class M(MaskedArray):
def compressed(self):
return 42
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test, 42)
class TestMaskedFields(TestCase):
def setUp(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
        # Check setting an element of a record
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'two', 'three', 'four', 'five']))
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'pi', 'pi', 'four', 'five']))
def test_mask_element(self):
"Check record access"
base = self.data['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to a dtype with a subarray
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
# No mask
self.assertTrue(isinstance(a[1], MaskedArray))
# One element masked
self.assertTrue(isinstance(a[0], MaskedArray))
assert_equal_records(a[0]._data, a._data[0])
assert_equal_records(a[0]._mask, a._mask[0])
# All element masked
self.assertTrue(isinstance(a[-2], MaskedArray))
assert_equal_records(a[-2]._data, a._data[-2])
assert_equal_records(a[-2]._mask, a._mask[-2])
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
def test_element_len(self):
# check that len() works for mvoid (Github issue #576)
for rec in self.data['base']:
assert_equal(len(rec), len(self.data['ddtype']))
class TestMaskedView(TestCase):
def setUp(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
self.assertTrue(not isinstance(test, MaskedArray))
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_append_masked_array():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_equal([4,3,2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2,2))
b = np.ma.ones((3,1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
    assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
    assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis,:], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3,3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert default_fill_value(1 + 1j) == 1.e20 + 0.0j
###############################################################################
if __name__ == "__main__":
run_module_suite()
|
pyparallel/numpy
|
numpy/ma/tests/test_core.py
|
Python
|
bsd-3-clause
| 161,025 | 0.000689 |
from typing import cast, List, TypeVar, Any, Type, Optional
from uuid import UUID
from graphscale import check
from graphscale.pent import (
create_pent,
delete_pent,
update_pent,
Pent,
PentContext,
PentMutationData,
PentMutationPayload,
)
T = TypeVar('T')
def typed_or_none(obj: Any, cls: Type[T]) -> Optional[T]:
return obj if isinstance(obj, cls) else None
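# typed_or_none narrows an Any-typed value without raising; for example
# (illustrative values, not from the original module):
#     >>> typed_or_none(3, int)
#     3
#     >>> typed_or_none('3', int) is None
#     True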
async def gen_pent_dynamic(context: PentContext, out_cls_name: str, obj_id: UUID) -> Pent:
out_cls = context.cls_from_name(out_cls_name)
pent = await out_cls.gen(context, obj_id)
return cast(Pent, pent)
async def gen_delete_pent_dynamic(
context: PentContext, pent_cls_name: str, payload_cls_name: str, obj_id: UUID
) -> PentMutationPayload:
pent_cls = context.cls_from_name(pent_cls_name)
payload_cls = context.cls_from_name(payload_cls_name)
deleted_id = await delete_pent(context, pent_cls, obj_id)
return cast(PentMutationPayload, payload_cls(deleted_id))
async def gen_create_pent_dynamic(
context: PentContext,
pent_cls_name: str,
data_cls_name: str,
payload_cls_name: str,
data: PentMutationData
) -> PentMutationPayload:
data_cls = context.cls_from_name(data_cls_name)
check.isinst(data, data_cls)
pent_cls = context.cls_from_name(pent_cls_name)
payload_cls = context.cls_from_name(payload_cls_name)
out_pent = await create_pent(context, pent_cls, data)
return cast(PentMutationPayload, payload_cls(out_pent))
async def gen_update_pent_dynamic(
context: PentContext,
obj_id: UUID,
pent_cls_name: str,
data_cls_name: str,
payload_cls_name: str,
data: PentMutationData
) -> PentMutationPayload:
data_cls = context.cls_from_name(data_cls_name)
check.isinst(data, data_cls)
pent_cls = context.cls_from_name(pent_cls_name)
payload_cls = context.cls_from_name(payload_cls_name)
pent = await update_pent(context, pent_cls, obj_id, data)
return cast(PentMutationPayload, payload_cls(pent))
async def gen_browse_pents_dynamic(
context: PentContext, after: UUID, first: int, out_cls_name: str
) -> List[Pent]:
out_cls = context.cls_from_name(out_cls_name)
pents = await out_cls.gen_browse(context, after, first)
return cast(List[Pent], pents)
|
schrockn/graphscale
|
graphscale/grapple/graphql_impl.py
|
Python
|
mit
| 2,294 | 0.000872 |
import pytz
from datetime import datetime, timedelta
def is_dst(zonename, date):
local_tz = pytz.timezone(zonename)
localized_time = local_tz.localize(date)
return localized_time.dst() != timedelta(0)
def get_offset(zonename, date):
local_tz = pytz.timezone(zonename)
if zonename == 'UTC':
return 0
elif is_dst(zonename, date):
return local_tz.utcoffset(date, is_dst=True).total_seconds() / 60
else:
return local_tz.utcoffset(date, is_dst=False).total_seconds() / 60
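# Example usage (offsets are standard pytz values; US/Pacific observes
# UTC-7 during daylight saving time, hence -420 minutes):
#     >>> get_offset('UTC', datetime(2015, 1, 1))
#     0
#     >>> get_offset('US/Pacific', datetime(2015, 7, 1))
#     -420.0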
def convert_to_mmol(iterable):
conversion_factor = 18.01559
if isinstance(iterable, float) or isinstance(iterable, int):
return iterable / conversion_factor
return [reading / conversion_factor for reading in iterable]
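# The factor 18.01559 is the molar mass of glucose (180.1559 g/mol) divided
# by ten, i.e. the number of mg/dL in one mmol/L, so a blood-glucose reading
# of 180 mg/dL is roughly 10 mmol/L:
#     convert_to_mmol(180.0)          # -> ~9.99
#     convert_to_mmol([90.0, 180.0])  # -> [~5.0, ~10.0]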
def round_to(n, precision=0.005):
""" The round function can take positive or negative values
and round them to a certain precision.
In the fake data generator, only positive values are being passed into it
"""
if n >= 0:
correction = 0.5
else:
correction = -0.5
result = int(n / precision + correction) * precision
return round(result, 3)
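# Worked example with the default precision of 0.005:
# 0.1234 / 0.005 = 24.68, plus the 0.5 correction gives 25.18, which int()
# truncates to 25, and 25 * 0.005 = 0.125:
#     >>> round_to(0.1234)
#     0.125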
def make_timesteps(start_time, offset, timelist):
""" Convert list of floats representing time into epoch time
start_time -- a timezone naive datetime object
offset -- offset in minutes
timelist -- a list of incrementing floats representing time increments
"""
timesteps = []
epoch_ts = convert_ISO_to_epoch(str(start_time), '%Y-%m-%d %H:%M:%S')
local_timestamp = epoch_ts - offset*60
for time_item in timelist:
new_time = int(local_timestamp) + int(time_item * 60)
timesteps.append(new_time)
return timesteps
def convert_ISO_to_epoch(datetime_string, date_format):
""" Takes a datetime string and returns an epoch time in seconds
Only works when datetime_string is in UTC
"""
datetime_object = datetime.strptime(datetime_string, date_format)
epoch = datetime.utcfromtimestamp(0)
delta = datetime_object - epoch
return int(delta.total_seconds())
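# For example, one minute past the Unix epoch:
#     >>> convert_ISO_to_epoch('1970-01-01 00:01:00', '%Y-%m-%d %H:%M:%S')
#     60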
def get_rate_from_settings(schedule, time, name):
"""Obtains a rate or amount from settings based on time of day
If name is basalSchedules, returns rate as well as start and stop times
Otherwise, if name is carbRatio or insulinSensitivity, returns just amount
Returned results are in mmol/L.
"""
t = datetime.strptime(time, '%Y-%m-%dT%H:%M:%S')
if name == "basalSchedules": #account for variation in naming
value_name = "rate" #set initial rate
else:
value_name = "amount"
ms_since_midnight = t.hour*60*60*1000 + t.minute*60*1000 + t.second*1000
    last_segment = schedule[-1]
full_day = 86400000 #24 hours in ms
rate = schedule[0][value_name] #set initial rate
initial_start = ms_since_midnight #set initial start time
for segment in schedule:
end = segment["start"]
if ms_since_midnight < segment["start"]:
break
elif ms_since_midnight >= last_segment["start"]:
start = last_segment["start"]
end = full_day
rate = last_segment[value_name]
break
start = segment["start"]
rate = segment[value_name] #update rate to next segment rate
if name == "basalSchedules":
return rate, start, initial_start, end
return rate #only rate needed for insulin sensitivity/carb ratio events
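# A sketch of the expected schedule structure (field names come from the code
# above; the concrete numbers are illustrative only). "start" values are
# milliseconds since midnight:
#     schedule = [{"start": 0, "rate": 0.8},
#                 {"start": 43200000, "rate": 1.0}]  # switch at noon
#     get_rate_from_settings(schedule, '2015-01-01T13:00:00', 'basalSchedules')
# returns (1.0, 43200000, 46800000, 86400000): the 13:00 query falls in the
# noon segment, so the noon rate comes back with its start/stop boundaries.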
|
tidepool-org/dfaker
|
dfaker/tools.py
|
Python
|
bsd-2-clause
| 3,495 | 0.009442 |
import time
from unittest import TestCase
from unittest import mock
from elasticsearch_raven import utils
class RetryLoopTest(TestCase):
@mock.patch('time.sleep')
def test_delay(self, sleep):
retry_generator = utils.retry_loop(1)
for i in range(4):
retry = next(retry_generator)
retry(Exception('test'))
self.assertEqual([mock.call(1), mock.call(1), mock.call(1)],
sleep.mock_calls)
@mock.patch('time.sleep')
def test_back_off(self, sleep):
retry_generator = utils.retry_loop(1, max_delay=4, back_off=2)
for i in range(5):
retry = next(retry_generator)
retry(Exception('test'))
self.assertEqual([mock.call(1), mock.call(2), mock.call(4), mock.call(4)],
sleep.mock_calls)
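# The tests above pin down the contract of utils.retry_loop without showing
# its body. A minimal implementation consistent with them (an assumption; the
# real elasticsearch_raven.utils code may differ) would sleep on the *next*
# iteration rather than inside retry(), which is why n retries produce
# n - 1 sleeps:
#
#     def retry_loop(delay, max_delay=None, back_off=1):
#         failures = []
#
#         def retry(exception):
#             failures.append(exception)
#
#         while True:
#             yield retry
#             if not failures:
#                 break  # last round succeeded; stop retrying
#             del failures[:]
#             time.sleep(delay)
#             delay *= back_off
#             if max_delay is not None:
#                 delay = min(delay, max_delay)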
|
serathius/elasticsearch-raven
|
tests/test_utils.py
|
Python
|
mit
| 838 | 0.001193 |
# encoding: utf-8
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class ExceptionDialog(QMessageBox):
def __init__(self,parent,exc,t1=None,t2=None):
QMessageBox.__init__(self,parent)
        if t1 is None: t1 = (exc.args[0] if len(exc.args) > 0 else None)
self.setText(u'<b>'+exc.__class__.__name__+':</b><br>\n'+unicode(t1))
#QMessageBox.setTitle(self,xc.__class__.__name__)
import traceback
tbRaw=traceback.format_exc()
# newlines are already <br> after Qt.convertFromPlainText, discard to avoid empty lines
tb='<small><pre>'+Qt.convertFromPlainText(tbRaw).replace('\n','')+'</pre></small>'
self.setInformativeText(t2 if t2 else tb)
self.setDetailedText(tbRaw)
self.setIcon(QMessageBox.Critical)
self.setStandardButtons(QMessageBox.Ok)
self.setDefaultButton(QMessageBox.Ok)
self.setEscapeButton(QMessageBox.Ok)
def showExceptionDialog(parent,exc,t1=None,t2=None):
    # event loop broken, modal dialogs won't work
# just show and don't care anymore
ExceptionDialog(parent,exc).show()
# import traceback
# QMessageBox.critical(parent,exc.__class__.__name__,'<b>'+exc.__class__.__name__+':</b><br>'+exc.args[0]+'+<br><small><pre>'+Qt.convertFromPlainText((traceback.format_exc()))+'</pre></small>')
if __name__=='__main__':
import sys
qapp=QApplication(sys.argv)
e=ValueError('123, 234, 345','asdsd')
showExceptionDialog(None,e)
|
sjl767/woo
|
gui/qt4/ExceptionDialog.py
|
Python
|
gpl-2.0
| 1,473 | 0.017651 |
from __future__ import absolute_import
from django.contrib import admin
from django.contrib.admin.models import DELETION
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.utils.html import escape
from admin.common_auth.logs import OSFLogEntry
from admin.common_auth.forms import UserRegistrationForm
from osf.models.user import OSFUser
class PermissionAdmin(admin.ModelAdmin):
search_fields = ['name', 'codename']
class CustomUserAdmin(UserAdmin):
add_form = UserRegistrationForm
list_display = ['username', 'given_name', 'is_active']
admin.site.register(OSFUser, CustomUserAdmin)
admin.site.register(Permission, PermissionAdmin)
class LogEntryAdmin(admin.ModelAdmin):
date_hierarchy = 'action_time'
readonly_fields = [f.name for f in OSFLogEntry._meta.get_fields()]
list_filter = [
'user',
'action_flag'
]
search_fields = [
'object_repr',
'change_message'
]
list_display = [
'action_time',
'user',
'object_link',
'object_id',
'message',
]
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return request.user.is_superuser and request.method != 'POST'
def has_delete_permission(self, request, obj=None):
return False
def object_link(self, obj):
if obj.action_flag == DELETION:
link = escape(obj.object_repr)
elif obj.content_type is None:
link = escape(obj.object_repr)
else:
ct = obj.content_type
link = u'<a href="%s">%s</a>' % (
reverse('admin:%s_%s_change' % (ct.app_label, ct.model), args=[obj.object_id]),
escape(obj.object_repr),
)
return link
object_link.allow_tags = True
object_link.admin_order_field = 'object_repr'
object_link.short_description = u'object'
def queryset(self, request):
return super(LogEntryAdmin, self).queryset(request) \
.prefetch_related('content_type')
admin.site.register(OSFLogEntry, LogEntryAdmin)
|
mluo613/osf.io
|
admin/common_auth/admin.py
|
Python
|
apache-2.0
| 2,230 | 0.000897 |
# -*- coding: utf-8 -*-
import oauth2 # XXX pumazi: factor this out
from webob.multidict import MultiDict, NestedMultiDict
from webob.request import Request as WebObRequest
__all__ = ['Request']
class Request(WebObRequest):
"""The OAuth version of the WebOb Request.
Provides an easier way to obtain OAuth request parameters
(e.g. oauth_token) from the WSGI environment."""
def _checks_positive_for_oauth(self, params_var):
"""Simple check for the presence of OAuth parameters."""
checks = [ p.find('oauth_') >= 0 for p in params_var ]
return True in checks
@property
def str_oauth_header(self):
extracted = {}
# Check for OAuth in the Header
if 'authorization' in self.headers:
auth_header = self.headers['authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
                # Slice off the 'OAuth ' scheme prefix; str.lstrip would
                # strip any of those characters from the start of the
                # parameter string itself.
                auth_header = auth_header[6:]
                try:
                    # Extract the parameters from the header.
                    extracted = oauth2.Request._split_header(auth_header)
                except Exception:
                    raise ValueError('Unable to parse OAuth parameters from '
                                     'the Authorization header.')
return extracted
@property
def str_oauth_POST(self):
extracted = {}
if self._checks_positive_for_oauth(self.str_POST):
extracted = dict([ (k, v,) for k, v in self.str_POST.iteritems()
if (k.find('oauth_') >= 0) ])
return extracted
@property
def str_oauth_GET(self):
extracted = {}
if self._checks_positive_for_oauth(self.str_GET):
extracted = dict([ (k, v,) for k, v in self.str_GET.iteritems()
if (k.find('oauth_') >= 0) ])
return extracted
def params(self):
params = WebObRequest.params.fget(self)
return NestedMultiDict(params, self.str_oauth_header)
params = property(params, doc=WebObRequest.params.__doc__)
@property
def oauth_params(self):
"""Simple way to get the OAuth parameters without sifting through
the entire stack of parameters.
We check the header first, because it is low hanging fruit.
However, it would be more efficient to check for the POSTed
parameters, because the specification defines the POST method as the
recommended request type before using GET or the Authorization
header."""
extracted = {}
# OAuth in the Header
extracted.update(self.str_oauth_header)
# OAuth in a GET or POST method
extracted.update(self.str_oauth_GET)
extracted.update(self.str_oauth_POST)
# Return the extracted oauth variables
return MultiDict(extracted)
@property
def nonoauth_params(self):
"""Simple way to get the non-OAuth parameters from the request."""
oauth_param_keys = self.oauth_params.keys()
return dict([(k, v) for k, v in self.params.iteritems() if k not in oauth_param_keys])
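# A usage sketch (the environ below is invented): wrap a WSGI environ and
# read the merged OAuth parameters without caring whether they arrived in
# the Authorization header, the query string, or the request body.
#
#     environ = {
#         'REQUEST_METHOD': 'GET',
#         'PATH_INFO': '/resource',
#         'QUERY_STRING': 'oauth_token=abc&page=2',
#         'SERVER_NAME': 'example.org',
#         'SERVER_PORT': '80',
#         'wsgi.url_scheme': 'http',
#     }
#     request = Request(environ)
#     request.oauth_params     # MultiDict([('oauth_token', 'abc')])
#     request.nonoauth_params  # {'page': '2'}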
|
karacos/karacos-wsgi
|
lib/wsgioauth/request.py
|
Python
|
lgpl-3.0
| 3,175 | 0.004409 |
"""Geometry functions and utilities."""
from enum import Enum
from typing import Sequence, Union
import numpy as np # type: ignore
from pybotics.errors import PyboticsError
class OrientationConvention(Enum):
"""Orientation of a body with respect to a fixed coordinate system."""
EULER_XYX = "xyx"
EULER_XYZ = "xyz"
EULER_XZX = "xzx"
EULER_XZY = "xzy"
EULER_YXY = "yxy"
EULER_YXZ = "yxz"
EULER_YZX = "yzx"
EULER_YZY = "yzy"
EULER_ZXY = "zxy"
EULER_ZXZ = "zxz"
EULER_ZYX = "zyx"
EULER_ZYZ = "zyz"
FIXED_XYX = "xyx"
FIXED_XYZ = "zyx"
FIXED_XZX = "xzx"
FIXED_XZY = "yzx"
FIXED_YXY = "yxy"
FIXED_YXZ = "zxy"
FIXED_YZX = "xzy"
FIXED_YZY = "yzy"
FIXED_ZXY = "yxz"
FIXED_ZXZ = "zxz"
FIXED_ZYX = "xyz"
FIXED_ZYZ = "zyz"
def vector_2_matrix(
vector: Sequence[float],
convention: Union[OrientationConvention, str] = OrientationConvention.EULER_ZYX,
) -> np.ndarray:
"""
Calculate the pose from the position and euler angles.
:param convention:
:param vector: transform vector
:return: 4x4 transform matrix
"""
# get individual variables
translation_component = vector[:3]
rotation_component = vector[-3:]
# validate and extract orientation info
if isinstance(convention, OrientationConvention):
convention = convention.value
try:
OrientationConvention(convention)
except ValueError as e:
raise PyboticsError(str(e))
# iterate through rotation order
# build rotation matrix
transform_matrix = np.eye(4)
for axis, value in zip(convention, rotation_component): # type: ignore
current_rotation = globals()[f"rotation_matrix_{axis}"](value)
transform_matrix = np.dot(transform_matrix, current_rotation)
# add translation component
transform_matrix[:-1, -1] = translation_component
return transform_matrix
def position_from_matrix(matrix: np.ndarray) -> np.ndarray:
"""Get the position values from a 4x4 transform matrix."""
return matrix[:-1, -1]
def matrix_2_vector(
matrix: np.ndarray,
convention: OrientationConvention = OrientationConvention.EULER_ZYX,
) -> np.ndarray:
"""Convert 4x4 matrix to a vector."""
# call function
try:
return globals()[f"_matrix_2_{convention.name.lower()}"](matrix)
except KeyError: # pragma: no cover
raise NotImplementedError
def _matrix_2_euler_zyx(matrix: np.ndarray) -> np.ndarray:
"""
Calculate the equivalent position and euler angles of the given pose.
From: Craig, John J. Introduction to robotics: mechanics and control, 2005
:param matrix: 4x4 transform matrix
:return: transform vector
"""
# solution degenerates near ry = +/- 90deg
sb = -matrix[2, 0]
cb = np.sqrt(matrix[0, 0] ** 2 + matrix[1, 0] ** 2)
if np.isclose(cb, 0):
a = 0.0
b = np.sign(sb) * np.pi / 2
sc = matrix[0, 1]
cc = matrix[1, 1]
c = np.sign(sb) * np.arctan2(sc, cc)
else:
b = np.arctan2(sb, cb)
sa = matrix[1, 0] / cb
ca = matrix[0, 0] / cb
a = np.arctan2(sa, ca)
sc = matrix[2, 1] / cb
cc = matrix[2, 2] / cb
c = np.arctan2(sc, cc)
vector = np.hstack((matrix[:-1, -1], [a, b, c]))
return vector
def wrap_2_pi(angle: float) -> float:
"""
Wrap given angle to +/- PI.
:param angle: angle to wrap
:return: wrapped angle
"""
# FIXME: remove float() cast when numpy is supported in mypy
result = float((angle + np.pi) % (2 * np.pi) - np.pi)
return result
def rotation_matrix_x(angle: float) -> np.ndarray:
"""Generate a basic 4x4 rotation matrix about the X axis."""
s = np.sin(angle)
c = np.cos(angle)
matrix = np.array([1, 0, 0, 0, 0, c, -s, 0, 0, s, c, 0, 0, 0, 0, 1]).reshape((4, 4))
return matrix
def rotation_matrix_y(angle: float) -> np.ndarray:
"""Generate a basic 4x4 rotation matrix about the Y axis."""
s = np.sin(angle)
c = np.cos(angle)
matrix = np.array([c, 0, s, 0, 0, 1, 0, 0, -s, 0, c, 0, 0, 0, 0, 1]).reshape((4, 4))
return matrix
def rotation_matrix_z(angle: float) -> np.ndarray:
"""Generate a basic 4x4 rotation matrix about the Z axis."""
s = np.sin(angle)
c = np.cos(angle)
matrix = np.array([c, -s, 0, 0, s, c, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape((4, 4))
return matrix
def translation_matrix(xyz: Sequence[float]) -> np.ndarray:
"""Generate a basic 4x4 translation matrix."""
# validate
if len(xyz) != 3:
raise PyboticsError("len(xyz) must be 3")
matrix = np.eye(4)
matrix[:-1, -1] = xyz
return matrix
|
nnadeau/pybotics
|
pybotics/geometry.py
|
Python
|
mit
| 4,716 | 0.000848 |
"Change Manager for literal values (supporting ==)"
from __future__ import annotations
from .bitmap import bitmap
from .index_update import IndexUpdate
from .changemanager_base import BaseChangeManager
from typing import (
Any,
TYPE_CHECKING,
)
if TYPE_CHECKING:
from .slot import Slot
class LiteralChangeManager(BaseChangeManager):
"""
    Manage changes that occurred in a literal value between runs.
"""
VALUE = bitmap([0])
def __init__(
self,
slot: Slot,
buffer_created: bool = True,
buffer_updated: bool = False,
buffer_deleted: bool = True,
buffer_exposed: bool = False,
buffer_masked: bool = False,
) -> None:
super(LiteralChangeManager, self).__init__(
slot,
buffer_created,
buffer_updated,
buffer_deleted,
buffer_exposed,
buffer_masked,
)
self._last_value: Any = None
def reset(self, mid: str) -> None:
super(LiteralChangeManager, self).reset(mid)
self._last_value = None
def compute_updates(self, data: Any) -> IndexUpdate:
last_value = self._last_value
changes = IndexUpdate()
if last_value == data:
return changes
if last_value is None:
if self.created.buffer:
changes.created.update(self.VALUE)
elif data is None:
if self.deleted.buffer:
changes.deleted.update(self.VALUE)
elif self.updated.buffer:
changes.updated.update(self.VALUE)
self._last_value = data
return changes
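    # Hedged semantics sketch: from a fresh manager, compute_updates reports
    # the first non-None value as "created", a later change back to None as
    # "deleted", and any other change as "updated" (each only when the
    # corresponding buffer flag is enabled); an unchanged value yields an
    # empty IndexUpdate.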
def update(self, run_number: int, data: Any, mid: str) -> None:
# pylint: disable=unused-argument
if run_number != 0 and run_number <= self._last_update:
return
changes = self.compute_updates(data)
self._last_update = run_number
self._row_changes.combine(
changes, self.created.buffer, self.updated.buffer, self.deleted.buffer
)
|
jdfekete/progressivis
|
progressivis/core/changemanager_literal.py
|
Python
|
bsd-2-clause
| 2,053 | 0.000487 |
from guizero import App
app = App()
app.info("Info", "This is a guizero app")
app.error("Error", "Try and keep these out your code...")
app.warn("Warning", "These are helpful to alert users")
app.display()
|
lawsie/guizero
|
examples/alert.py
|
Python
|
bsd-3-clause
| 205 | 0.004878 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Softbank Robotics Europe
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
**strong_typing** is a Python package containing some classes to create strongly
typed structures in Python
``strong_typing`` in a few words
--------------------------------
In Python, all variables are dynamically typed, which means that a variable
can take values of any type. The Python interpreter then determines at
runtime which operations a variable supports, depending on what it currently
contains. This is often described as "duck typing".
This can be a problem in several situations:
- A function that does not receive the expected type as input
- A variable or a class attribute whose type is changed through assignment
To avoid functions being called with bad arguments, you can use Python's
`typing module <https://docs.python.org/3/library/typing.html>`_ (however only
with Python 3). To check that a variable is not incorrectly used, you can
install and run the `mypy module <http://mypy.readthedocs.io/en/latest/>`_.
But while the latter is great for static checks (without running the code), it
does not help with code you don't own.
If, for instance, you design a class expecting certain types of attributes,
``mypy`` can very easily detect if you mistakenly override these
attributes with wrongly typed data.
But if you put this class in a Python package and someone else uses it,
there is no way to be sure they will respect your attributes' types.
To make sure they do, you would need to define a descriptor class for each
attribute and define a setter function protecting your value against abusive
sets. That's what we did :)
In the end, your class could look like this:
::
class MyTypedStruct(Struct):
__ATTRIBUTES__ = [IntegerParameter(name="my_int"),
FloatParameter(name="my_float")]
__DESCRIPTION__ = "A sample of class with typed attributes"
"""
def load_version():
import os
CONTAINING_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
return open(os.path.join(CONTAINING_DIRECTORY,"VERSION")).read().split()[0]
__VERSION__ = load_version()
from . import typed_parameters
from . import typed_containers
from ._struct import *
from ._versioned_struct import *
from ._display_widget import *
# Remove symbols that must not be exported
del load_version
#––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––#
|
aldebaran/strong_typing
|
strong_typing/__init__.py
|
Python
|
bsd-3-clause
| 4,043 | 0.004116 |
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd20_api_config(fit_common.unittest.TestCase):
def test_api_20_config(self):
api_data = fit_common.rackhdapi('/api/2.0/config')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
# check required fields
self.assertIn('PATH', api_data['json'], 'PATH field error')
self.assertIn('amqp', api_data['json'], 'amqp field error')
self.assertIn('apiServerAddress', api_data['json'], 'apiServerAddress field error')
self.assertIn('apiServerPort', api_data['json'], 'apiServerPort field error')
self.assertIn('broadcastaddr', api_data['json'], 'broadcastaddr field error')
self.assertIn('subnetmask', api_data['json'], 'subnetmask field error')
self.assertIn('mongo', api_data['json'], 'mongo field error')
def test_api_20_config_httpendpoints(self):
api_data = fit_common.rackhdapi('/api/2.0/config')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
self.assertIn('httpEndpoints', api_data['json'], 'httpEndpoints field list error')
# verify both northbound and southbound endpoints are configured (as a minimum)
for endpoint in api_data['json']['httpEndpoints']:
self.assertIn('address', endpoint, 'missing httpEndpoints address field')
self.assertIn('authEnabled', endpoint, 'missing httpEndpoints authEnabled field')
self.assertIn('httpsEnabled', endpoint, 'missing httpEndpoints httpsEnabled field')
self.assertIn('proxiesEnabled', endpoint, 'missing httpEndpoints proxiesEnabled field')
self.assertIn('routers', endpoint, 'missing httpEndpoints routers field')
self.assertIn(endpoint['routers'], ['northbound-api-router', 'southbound-api-router'], 'unexpected httpEndpoints routers field')
def test_api_20_config_patch(self):
api_data_save = fit_common.rackhdapi('/api/2.0/config')['json']
if ("logColorEnable" not in api_data_save):
api_data_save['logColorEnable'] = False
if (api_data_save['logColorEnable'] is True):
data_payload = {"logColorEnable": False}
else:
data_payload = {"logColorEnable": True}
api_data = fit_common.rackhdapi("/api/2.0/config", action="patch", payload=data_payload)
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
for item in api_data['json']:
self.assertNotEqual(item, '', 'Empty JSON Field:' + item)
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
if ("logColorEnable" in api_data_save and api_data_save['logColorEnable'] is True):
self.assertEqual(api_data['json']['logColorEnable'], False, "Incorrect value for 'logColorEnable', should be False")
else:
self.assertEqual(api_data['json']['logColorEnable'], True, "Incorrect value 'logColorEnable', should be True")
api_data = fit_common.rackhdapi("/api/2.0/config", action="patch", payload=api_data_save)
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
api_data = fit_common.rackhdapi('/api/2.0/config')
self.assertEqual(api_data['json'], api_data_save, "Patch failure, config not returned to default.")
if __name__ == '__main__':
fit_common.unittest.main()
|
johren/RackHD
|
test/tests/rackhd20/test_rackhd20_api_config.py
|
Python
|
apache-2.0
| 3,739 | 0.006419 |
import numpy as np
import matplotlib.pyplot as plt
N=10000
np.random.seed(34)
lognormal_values = np.random.lognormal(size=N)
_, bins, _ = plt.hist(lognormal_values, int(np.sqrt(N)), normed=True, lw=1, label="Histogram")
sigma = 1
mu = 0
x = np.linspace(min(bins), max(bins), len(bins))
pdf = np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))/ (x * sigma * np.sqrt(2 * np.pi))
plt.xlim([0, 15])
plt.plot(x, pdf,'--', lw=3, label="PDF")
plt.title('Lognormal distribution')
plt.xlabel('Value')
plt.ylabel('Normalized frequency')
plt.grid()
plt.legend(loc='best')
plt.show()
|
moonbury/notebooks
|
github/Numpy/Chapter6/lognormaldist.py
|
Python
|
gpl-3.0
| 563 | 0.008881 |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import Series, Timestamp
from pandas.compat import range, lmap
import pandas.core.common as com
import pandas.util.testing as tm
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assert_raises_regex(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_get_callable_name():
from functools import partial
getname = com._get_callable_name
def fn(x):
return x
lambda_ = lambda x: x
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2), (2, 3), (3, 4)]
result = list(com.iterpairs(data))
assert (result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert (result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert ((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert ([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert (a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_random_state():
import numpy.random as npr
# Check with seed
state = com._random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert (com._random_state(state2).uniform() ==
npr.RandomState(10).uniform())
# check with no arg random state
assert com._random_state() is np.random
# Error for floats or strings
with pytest.raises(ValueError):
com._random_state('test')
with pytest.raises(ValueError):
com._random_state(5.5)
def test_maybe_match_name():
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='x'))
assert (matched == 'x')
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='y'))
assert (matched is None)
matched = com._maybe_match_name(Series([1]), Series([2], name='x'))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), Series([2]))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), [2])
assert (matched == 'x')
matched = com._maybe_match_name([1], Series([2], name='y'))
assert (matched == 'y')
def test_dict_compat():
data_datetime64 = {np.datetime64('1990-03-15'): 1,
np.datetime64('2015-03-15'): 2}
data_unchanged = {1: 2, 3: 4, 5: 6}
expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
assert (com._dict_compat(data_datetime64) == expected)
assert (com._dict_compat(expected) == expected)
assert (com._dict_compat(data_unchanged) == data_unchanged)
|
lmallin/coverage_test
|
python_venv/lib/python2.7/site-packages/pandas/tests/test_common.py
|
Python
|
mit
| 4,870 | 0.000205 |
#!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import unittest
import zipfile
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
unicode
except NameError:
unicode = str
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
class TestFirefoxProfile:
def setup_method(self, method):
self.driver = webdriver.Firefox()
self.webserver = SimpleWebServer()
self.webserver.start()
def test_that_we_can_accept_a_profile(self):
profile1 = webdriver.FirefoxProfile()
profile1.set_preference("startup.homepage_welcome_url",
self.webserver.where_is('simpleTest.html'))
profile1.update_preferences()
profile2 = webdriver.FirefoxProfile(profile1.path)
driver = webdriver.Firefox(firefox_profile=profile2)
title = driver.title
driver.quit()
assert "Hello WebDriver" == title
def test_that_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.preference", "hi there")
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith("user.js"):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference",'):
assert True == line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_unicode_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference('sample.preference.2', unicode('hi there'))
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference.2"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith('user.js'):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference.2",'):
assert True == line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_integer_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.int.preference", 12345)
profile.update_preferences()
assert 12345 == profile.default_preferences["sample.int.preference"]
def test_that_boolean_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.bool.preference", True)
profile.update_preferences()
assert True == profile.default_preferences["sample.bool.preference"]
def test_that_we_delete_the_profile(self):
path = self.driver.firefox_profile.path
self.driver.quit()
assert not os.path.exists(path)
def test_profiles_do_not_share_preferences(self):
self.profile1 = webdriver.FirefoxProfile()
self.profile1.accept_untrusted_certs = False
self.profile2 = webdriver.FirefoxProfile()
# Default is true. Should remain so.
assert self.profile2.default_preferences["webdriver_accept_untrusted_certs"] == True
def test_none_proxy_is_set(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = None
try:
self.profile.set_proxy(proxy)
assert False, "exception after passing empty proxy is expected"
except ValueError as e:
pass
assert "network.proxy.type" not in self.profile.default_preferences
def test_unspecified_proxy_is_set(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
self.profile.set_proxy(proxy)
assert "network.proxy.type" not in self.profile.default_preferences
def test_manual_proxy_is_set_in_profile(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.no_proxy = 'localhost, foo.localhost'
proxy.http_proxy = 'some.url:1234'
proxy.ftp_proxy = None
proxy.sslProxy = 'some2.url'
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.MANUAL['ff_value']
assert self.profile.default_preferences["network.proxy.no_proxies_on"] == 'localhost, foo.localhost'
assert self.profile.default_preferences["network.proxy.http"] == 'some.url'
assert self.profile.default_preferences["network.proxy.http_port"] == 1234
assert self.profile.default_preferences["network.proxy.ssl"] == 'some2.url'
assert "network.proxy.ssl_port" not in self.profile.default_preferences
assert "network.proxy.ftp" not in self.profile.default_preferences
def test_pac_proxy_is_set_in_profile(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.proxy_autoconfig_url = 'http://some.url:12345/path'
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.PAC['ff_value']
assert self.profile.default_preferences["network.proxy.autoconfig_url"] == 'http://some.url:12345/path'
def test_autodetect_proxy_is_set_in_profile(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.auto_detect = True
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.AUTODETECT['ff_value']
def teardown_method(self, method):
try:
self.driver.quit()
except:
pass #don't care since we may have killed the browser above
self.webserver.stop()
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
def teardown_module(module):
try:
TestFirefoxProfile.driver.quit()
except:
pass #Don't Care since we may have killed the browser above
|
onedox/selenium
|
py/test/selenium/webdriver/firefox/ff_profile_tests.py
|
Python
|
apache-2.0
| 8,146 | 0.002701 |
#!/usr/bin/env python
import argparse
import gzip
import logging
import os
import shutil
import subprocess
browser_specific_args = {
"firefox": ["--install-browser"]
}
def tests_affected(commit_range):
output = subprocess.check_output([
"python", "./wpt", "tests-affected", "--null", commit_range
], stderr=open(os.devnull, "w"))
tests = output.split("\0")
# Account for trailing null byte
if tests and not tests[-1]:
tests.pop()
return tests
def find_wptreport(args):
parser = argparse.ArgumentParser()
parser.add_argument('--log-wptreport', action='store')
return parser.parse_known_args(args)[0].log_wptreport
def gzip_file(filename, delete_original=True):
with open(filename, 'rb') as f_in:
with gzip.open('%s.gz' % filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
if delete_original:
os.unlink(filename)
def main(product, commit_range, wpt_args):
"""Invoke the `wpt run` command according to the needs of the TaskCluster
continuous integration service."""
logger = logging.getLogger("tc-run")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
logger.addHandler(handler)
child = subprocess.Popen(['python', './wpt', 'manifest-download'])
child.wait()
if commit_range:
logger.info(
"Identifying tests affected in range '%s'..." % commit_range
)
tests = tests_affected(commit_range)
logger.info("Identified %s affected tests" % len(tests))
if not tests:
logger.info("Quitting because no tests were affected.")
return
else:
tests = []
logger.info("Running all tests")
wpt_args += [
"--log-tbpl-level=info",
"--log-tbpl=-",
"-y",
"--no-pause",
"--no-restart-on-unexpected",
"--install-fonts",
"--no-headless"
]
wpt_args += browser_specific_args.get(product, [])
command = ["python", "./wpt", "run"] + wpt_args + [product] + tests
logger.info("Executing command: %s" % " ".join(command))
subprocess.check_call(command)
wptreport = find_wptreport(wpt_args)
if wptreport:
gzip_file(wptreport)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument("--commit-range", action="store",
help="""Git commit range. If specified, this will be
supplied to the `wpt tests-affected` command to
determine the list of test to execute""")
parser.add_argument("product", action="store",
help="Browser to run tests in")
parser.add_argument("wpt_args", nargs="*",
help="Arguments to forward to `wpt run` command")
main(**vars(parser.parse_args()))
|
jimberlage/servo
|
tests/wpt/web-platform-tests/tools/ci/taskcluster-run.py
|
Python
|
mpl-2.0
| 3,009 | 0.000665 |
#!/usr/bin/env python
from subprocess import call
call(["bickle", "builds", "stpettersens/Packager", "-n", "5"])
|
stpettersens/Packager
|
travis.py
|
Python
|
mit
| 114 | 0 |
"""Module provides provides a convinient class :class:`Attachment` to access (Create,
Read, Delete) document attachments."""
import base64, logging
from os.path import basename
from copy import deepcopy
from mimetypes import guess_type
from httperror import *
from httpc import HttpSession, ResourceNotFound, OK, CREATED
from couchpy import CouchPyError
# TODO :
# 1. URL-encoding for attachment file-names
log = logging.getLogger( __name__ )
def _readattach( conn, paths=[], hthdrs={} ) :
"""
GET /<db>/<doc>/<attachment>
GET /<db>/_design/<design-doc>/<attachment>
"""
s, h, d = conn.get( paths, hthdrs, None )
if s == OK :
return s, h, d
else :
return (None, None, None)
def _writeattach( conn, paths=[], body='', hthdrs={}, **query ) :
"""
PUT /<db>/<doc>/<attachment>
PUT /<db>/_design/<design-doc>/<attachment>
query,
rev=<_rev>
"""
if 'Content-Length' not in hthdrs :
raise CouchPyError( '`Content-Length` header field not supplied' )
if 'Content-Type' not in hthdrs :
raise CouchPyError( '`Content-Type` header field not supplied' )
s, h, d = conn.put( paths, hthdrs, body, _query=query.items() )
    if s in (OK, CREATED) and d['ok'] == True :
return s, h, d
else :
return (None, None, None)
def _deleteattach( conn, paths=[], hthdrs={}, **query ) :
"""
DELETE /<db>/<doc>/<attachment>
DELETE /<db>/_design/<design-doc>/<attachment>
query,
rev=<_rev>
"""
s, h, d = conn.delete( paths, hthdrs, None, _query=query.items() )
if s == OK and d['ok'] == True :
return s, h, d
else :
return (None, None, None)
class Attachment( object ) :
    def __init__( self, doc, filename, hthdrs={} ) :
"""Class instance object represents a single attachment in a document,
use the :class:`Document` object and attachment `filename` to create
the instance.
"""
self.doc = doc
self.db = doc.db
self.filename = filename
self.conn = doc.conn
self.hthdrs = self.conn.mixinhdrs( self.doc.hthdrs, hthdrs )
def __eq__( self, other ) :
"""Compare whether the attachment info and data are same"""
        cond = self.doc._id == other.doc._id and self.doc._rev == other.doc._rev
cond = cond and self.attachinfo() == other.attachinfo()
return cond
def attachinfo( self, field=None ) :
"""Information from attachment stub in the document. If `field`
key-word argument is provided, value of that particular field is
returned, otherwise, entire dictionary of information is returned
"""
a = self.doc.doc.get( '_attachments', {} ).get( self.filename, {} )
val = a if field == None else a.get( field, None )
return val
def data( self, hthdrs={} ) :
"""Returns the content of the file attached to the document. Can
optionally take a dictionary of http headers.
"""
hthdrs = self.conn.mixinhdrs( self.hthdrs, hthdrs )
data, content_type = self.getattachment(
self.db, self.doc, self.filename, hthdrs=hthdrs
)
return data, content_type
content_type = property( lambda self : self.attachinfo('content_type') )
length = property( lambda self : self.attachinfo('length') )
revpos = property( lambda self : self.attachinfo('revpos') )
stub = property( lambda self : self.attachinfo('stub') )
content = property( lambda self : self.data() )
@classmethod
def getattachment( cls, db, doc, filename, hthdrs={} ) :
"""Returns a tuple of, ( <filedata>, <content_type> )
for attachment `filename` in `doc` stored in database `db`
"""
id_ = doc if isinstance(doc, basestring) else doc._id
paths = db.paths + [ id_, filename ]
        hthdrs = db.conn.mixinhdrs( {}, hthdrs )
s, h, d = _readattach( db.conn, paths, hthdrs=hthdrs )
content_type = h.get( 'Content-Type', None )
return (d.getvalue(), content_type)
@classmethod
def putattachment( cls, db, doc, filepath, data, content_type=None,
hthdrs={}, **query ) :
"""Upload the supplied content (data) as attachment to the specified
document (doc). `filepath` provided must be a URL encoded string.
If `doc` is document-id, then `rev` keyword parameter should be
present in query.
"""
from couchpy.doc import Document
from couchpy.designdoc import DesignDocument
filename = basename( filepath )
id_ = doc if isinstance(doc, basestring) else doc._id
rev = query['rev'] if 'rev' in query else doc._rev
paths = db.paths + [ id_, filename ]
        hthdrs = db.conn.mixinhdrs( {}, hthdrs )
(ctype, enc) = guess_type(filepath)
hthdrs.update(
{ 'Content-Type' : content_type
} if content_type != None else { 'Content-Type' : ctype }
)
hthdrs.update( {'Content-Length' : len(data)} if data else {} )
s, h, d = _writeattach( db.conn, paths, data, hthdrs=hthdrs, rev=rev )
if isinstance( doc, (Document,DesignDocument) ) and d != None :
doc.update({ '_rev' : d['rev'] })
return d
@classmethod
def delattachment( cls, db, doc, filename, hthdrs={}, **query ) :
"""Deletes the attachment form the specified doc. You must
supply the rev argument with the current revision to delete the
attachment."""
        from couchpy.doc import Document
        id_ = doc if isinstance(doc, basestring) else doc._id
rev = query['rev'] if 'rev' in query else doc._rev
paths = db.paths + [ id_, filename ]
        hthdrs = db.conn.mixinhdrs( {}, hthdrs )
s, h, d = _deleteattach( db.conn, paths, hthdrs=hthdrs, rev=rev )
if isinstance(doc, Document) and d != None :
doc.update({ '_rev' : d['rev'] })
return d
@classmethod
def files2attach( cls, fnames=[] ) :
"""Helper method that will convert specified files `fnames` into
attachment structures in document format (key, value) pairs that is
suitable for writing into CouchDB.
"""
fnames = ( isinstance(fnames, basestring) and [fnames] ) or fnames
attachs = {}
for f in fnames :
if isinstance(f, (list,tuple)) :
ctype, fname = f
fdata = base64.encodestring( open(fname).read() )
attachs.setdefault(
                    basename(fname), { 'content_type' : ctype, 'data' : fdata }
)
elif isinstance(f, basestring) :
(ctype, enc) = guess_type(f)
fname, data = f, base64.encodestring( open(f).read() )
attachs.setdefault(
basename(fname), { 'content_type' : ctype, 'data' : data }
)
return attachs
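    # Hedged example (assumes a file 'logo.png' exists on disk):
    # Attachment.files2attach(['logo.png']) would return
    # {'logo.png': {'content_type': 'image/png', 'data': <base64 text>}},
    # since guess_type() maps the '.png' extension to 'image/png'.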
|
prataprc/CouchPy
|
couchpy/.Attic/attachment.py
|
Python
|
gpl-3.0
| 7,105 | 0.024208 |
from __future__ import absolute_import, print_function, division
from six.moves import xrange
def render_string(string, sub):
"""
string: a string, containing formatting instructions
sub: a dictionary containing keys and values to substitute for
them.
returns: string % sub
The only difference between this function and the % operator
is that it raises an exception with a more informative error
message than the % operator does.
"""
try:
finalCode = string % sub
except Exception as E:
# If unable to render the string, render longer and longer
# initial substrings until we find the minimal initial substring
# that causes an error
i = 0
while i <= len(string):
try:
finalCode = string[0:i] % sub
except Exception as F:
if str(F) == str(E):
raise Exception(
string[0:i] + "<<<< caused exception " + str(F))
i += 1
assert False
return finalCode
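# Hedged example: render_string("x = %(value)s", {"value": 3}) returns
# "x = 3"; if "value" were missing from sub, the raised exception embeds the
# shortest failing prefix followed by "<<<< caused exception ...".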
def pretty_format(string):
lines = string.split('\n')
lines = [strip_leading_white_space(line) for line in lines]
indent = 0
for i in xrange(len(lines)):
indent -= lines[i].count('}')
if indent < 0:
indent = 0
#
lines[i] = (' ' * indent) + lines[i]
indent += lines[i].count('{')
#
rval = '\n'.join(lines)
return rval
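# Hedged example: pretty_format("a {\n   b\n   }") returns "a {\n b\n}";
# leading whitespace is stripped and one space of indent is added per
# currently open '{'.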
def strip_leading_white_space(line):
while len(line) > 0 and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
return line
|
JazzeYoung/VeryDeepAutoEncoder
|
theano/misc/strutil.py
|
Python
|
bsd-3-clause
| 1,620 | 0 |
# -*- coding: utf-8 -*-
# Copyright © 2006 Steven J. Bethard <steven.bethard@gmail.com>.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the 3-clause BSD
# license. No warranty expressed or implied.
# For details, see the accompanying file LICENSE.txt.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
* handles both optional and positional arguments
* produces highly informative usage messages
* supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file:
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
HelpFormatter, RawDescriptionHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default, while
RawDescriptionHelpFormatter tells the parser not to perform any
line-wrapping on description text.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '0.9.0'
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = '==PARSER=='
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format:
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
            item_help = join(func(*args) for func, args in self.items)
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max(len(s) for s in invocations)
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help() % dict(prog=self._prog)
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join(part
for part in part_strings
if part and part is not SUPPRESS)
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if no optionals or positionals are available, usage is just prog
if usage is None and not actions:
usage = '%(prog)s'
# if optionals and positionals are available, calculate usage
elif usage is None:
usage = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# determine width of "usage: PROG" and width of text
prefix_width = len(prefix) + len(usage) + 1
prefix_indent = self._current_indent + prefix_width
text_width = self._width - self._current_indent
# put them on one line if they're short enough
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
if prefix_width + len(action_usage) + 1 < text_width:
usage = '%s %s' % (usage, action_usage)
# if they're long, wrap optionals and positionals individually
else:
optional_usage = format(optionals, groups)
positional_usage = format(positionals, groups)
indent = ' ' * prefix_indent
# usage is made of PROG, optionals and positionals
parts = [usage, ' ']
# options always get added right after PROG
if optional_usage:
parts.append(_textwrap.fill(
optional_usage, text_width,
initial_indent=indent,
subsequent_indent=indent).lstrip())
# if there were options, put arguments on the next line
# otherwise, start them right after PROG
if positional_usage:
part = _textwrap.fill(
positional_usage, text_width,
initial_indent=indent,
subsequent_indent=indent).lstrip()
if optional_usage:
part = '\n' + indent + part
parts.append(part)
usage = ''.join(parts)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
start = actions.index(group._group_actions[0])
if start != -1:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
inserts[start] = '['
inserts[end] = ']'
else:
inserts[start] = '('
inserts[end] = ')'
for i in xrange(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join(item for item in parts if item is not None)
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
        # no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
return self._format_metavar(action, action.dest)
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _format_metavar(self, action, default_metavar):
if action.metavar is not None:
name = action.metavar
elif action.choices is not None:
choice_strs = (str(choice) for choice in action.choices)
name = '{%s}' % ','.join(choice_strs)
else:
name = default_metavar
return name
def _format_args(self, action, default_metavar):
name = self._format_metavar(action, default_metavar)
if action.nargs is None:
result = name
elif action.nargs == OPTIONAL:
result = '[%s]' % name
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % (name, name)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % (name, name)
elif action.nargs is PARSER:
result = '%s ...' % name
else:
result = ' '.join([name] * action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name, value in params.items():
if value is SUPPRESS:
del params[name]
if params.get('choices') is not None:
choices_str = ', '.join(str(c) for c in params['choices'])
params['choices'] = choices_str
return action.help % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
class RawDescriptionHelpFormatter(HelpFormatter):
def _fill_text(self, text, width, indent):
return ''.join(indent + line for line in text.splitlines(True))
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
def _split_lines(self, text, width):
return text.splitlines()
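# Hedged usage note: any of the formatter classes above can be passed as the
# formatter_class= argument of ArgumentParser, e.g.
# ArgumentParser(formatter_class=RawDescriptionHelpFormatter) keeps the
# description text as written instead of re-wrapping it.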
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""ArgumentError(message, argument)
Raised whenever there was an error creating or using an argument
(optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Action(*strings, **options)
Action objects hold the information necessary to convert a
set of command-line arguments (possibly including an initial option
string) into the desired Python object(s).
Keyword Arguments:
option_strings -- A list of command-line option strings which
should be associated with this action.
dest -- The name of the attribute to hold the created object(s)
nargs -- The number of command-line arguments that should be consumed.
By default, one argument will be consumed and a single value will
be produced. Other values include:
* N (an integer) consumes N arguments (and produces a list)
* '?' consumes zero or one arguments
* '*' consumes zero or more arguments (and produces a list)
* '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
default -- The value to be produced if the option is not specified.
type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
required -- True if the action must always be specified at the command
line. This is only meaningful for optional command-line arguments.
help -- The help string describing the argument.
metavar -- The name to be used for the option's argument with the help
string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar'
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs must be > 0')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs must be > 0')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
_ensure_value(namespace, self.dest, []).append(values)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
_ensure_value(namespace, self.dest, []).append(self.const)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_version()
parser.exit()
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, help):
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=name, help=help)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
            msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
parser.parse_args(arg_strings, namespace)
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r' % self._mode)
raise ValueError(msg)
# all other arguments are used as file names
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join(repr(arg) for arg in args if arg is not None)
return '%s(%s)' % (type(self).__name__, args_str)
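# A hedged usage sketch for FileType (illustrative; 'data.txt' is a
# hypothetical file name):
#
#     parser = ArgumentParser()
#     parser.add_argument('infile', type=FileType('r'))
#     args = parser.parse_args(['data.txt'])   # opens data.txt for reading
#     args = parser.parse_args(['-'])          # '-' yields sys.stdin instead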
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
def __init__(self, **kwargs):
for name, value in kwargs.iteritems():
setattr(self, name, value)
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default settings methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
action = action_class(**kwargs)
return self._add_action(action)
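# A hedged sketch of the two calling conventions documented above
# (illustrative only):
#
#     parser.add_argument('filename')                               # positional
#     parser.add_argument('-v', '--verbose', action='store_true')   # optional
#
# The first form infers dest='filename' from the positional name; the
# second infers dest='verbose' from the first long option string.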
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on one-or-fewer-character option strings
if len(option_string) < 2:
msg = _('invalid option string %r: '
'must be at least two characters long')
raise ValueError(msg % option_string)
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
msg = _('invalid option string %r: '
'must start with a character %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# error on strings that are all prefix characters
if not (set(option_string) - set(self.prefix_chars)):
msg = _('invalid option string %r: '
'must contain characters other than %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_handler value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = _('conflicting option string(s): %s')
conflict_string = ', '.join(option_string
for option_string, action
in conflicting_actions)
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
argument_default=None,
conflict_handler='error',
add_help=True):
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.add_help = add_help
self._has_subparsers = False
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
if self.add_help:
self.add_argument(
'-h', '--help', action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
'-v', '--version', action='version', default=SUPPRESS,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._has_subparsers:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._positionals._add_action(action)
self._has_subparsers = True
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, basestring):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest, value in self._defaults.iteritems():
if not hasattr(namespace, dest):
setattr(namespace, dest, value)
# parse the arguments and exit if there are any errors
try:
return self._parse_args(args, namespace)
except ArgumentError, err:
self.error(str(err))
def _parse_args(self, arg_strings, namespace):
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
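# Illustrative example (comment only): assuming '-x' is a known option
# taking one argument, arg_strings like ['-x', '1', 'foo', '--', 'bar']
# yield the pattern 'OAA-A' -- an option, two plain arguments, the '--'
# separator, then everything after it forced to 'A'.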
# convert arg strings to the appropriate type, then take the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, raise an error
if action is None:
self.error(_('no such option: %s') % option_string)
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
for char in self.prefix_chars:
option_string = char + explicit_arg[0]
explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
break
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min(
index
for index in option_string_indices
if index >= start_index)
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were unparseable
# arguments
if start_index not in option_string_indices:
msg = _('extra arguments found: %s')
extras = arg_strings[start_index:next_option_string_index]
self.error(msg % ' '.join(extras))
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were too
# many supplied
if stop_index != len(arg_strings):
extras = arg_strings[stop_index:]
self.error(_('extra arguments found: %s') % ' '.join(extras))
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace
return namespace
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None:_('expected one argument'),
OPTIONAL:_('expected at most one argument'),
ONE_OR_MORE:_('expected at least one argument')
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in xrange(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join(self._get_nargs_pattern(action)
for action in actions_slice)
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend(len(string) for string in match.groups())
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if it's just dashes, it was meant to be positional
if not arg_string.strip('-'):
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join(opt_str for _, opt_str, _ in option_tuples)
tup = arg_string, options
self.error(_('ambiguous option: %s could match %s') % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow one argument followed by any number of options or arguments
elif nargs is PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
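# Illustrative example (comment only): for an integer nargs of 2 on a
# positional, the integer branch above yields '(-*A-*A-*)'; for an
# optional, the '-*' parts are stripped, leaving '(AA)'.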
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs is not PARSER:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, basestring):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# PARSER arguments convert all values, but check only the first
elif action.nargs is PARSER:
value = list(self._get_value(action, v) for v in arg_strings)
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = list(self._get_value(action, v) for v in arg_strings)
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# TypeErrors or ValueErrors indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
self._print_message(self.format_help(), file)
def print_version(self, file=None):
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
_sys.stderr.write(message)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, message))
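# A hedged sketch of overriding error() as its docstring suggests
# (illustrative only; ArgumentParserError is a hypothetical name):
#
#     class ArgumentParserError(Exception):
#         pass
#
#     class RaisingArgumentParser(ArgumentParser):
#         def error(self, message):
#             # raise instead of exiting, per the override contract above
#             raise ArgumentParserError(message)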
|
emsrc/pycornetto
|
lib/cornetto/argparse.py
|
Python
|
gpl-3.0
| 77,238 | 0.000531 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_diax.iff"
result.attribute_template_id = 9
result.stfName("npc_name","diax")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_diax.py
|
Python
|
mit
| 426 | 0.049296 |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 10:21:20 2016
@author: suraj
"""
import pickle
import numpy as np
X = pickle.load(open('x_att.p', 'rb'))
y = pickle.load(open('y_att.p', 'rb'))
batchX = []
batchy = []
def convertPointsToBatch(day_of_week, data1, data2):
# Collect the samples for one weekday across five weeks: with 15-minute
# data there are 96 samples per day and 7*96 = 672 per week, so week i,
# weekday day_of_week starts at index i*672 + day_of_week*96.
for i in range(5):
start = (i * 672) + (day_of_week * 96)
batchX.extend(data1[start:start + 96])
batchy.extend(data2[start:start + 96])
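# Illustrative example (assuming 96 fifteen-minute samples per day):
# day_of_week=1, i=2 gives start = 2*672 + 1*96 = 1440, so the slice
# data1[1440:1536] covers that weekday in the third week of data.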
for i in range(7):
convertPointsToBatch(i,X,y)
batchX = np.array(batchX)
batchy = np.array(batchy)
print batchX.shape
print batchy.shape
print batchX[0]
print batchy[0]
pickle.dump(batchX,open('batch_x_att.p','wb'))
pickle.dump(batchy,open('batch_y_att.p','wb'))
|
suraj-jayakumar/lstm-rnn-ad
|
src/testdata/random_data_15min_ts/point_to_batch_data_conversion.py
|
Python
|
apache-2.0
| 762 | 0.018373 |
import webapp2
import models
class PrefsPage(webapp2.RequestHandler):
def post(self):
userprefs = models.get_userprefs()
try:
tz_offset = int(self.request.get('tz_offset'))
userprefs.tz_offset = tz_offset
userprefs.put()
except ValueError:
# User entered a value that wasn't an integer. Ignore for now.
pass
self.redirect('/')
application = webapp2.WSGIApplication([('/prefs', PrefsPage)],
debug=True)
|
jscontreras/learning-gae
|
pgae-examples-master/2e/python/clock/clock4/prefs.py
|
Python
|
lgpl-3.0
| 541 | 0.003697 |
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-15 Jim Easterbrook jim@jim-easterbrook.me.uk
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Low level USB interface to weather station, using PyUSB v0.4.
Introduction
============
This module handles low level communication with the weather station
via the `PyUSB <http://sourceforge.net/apps/trac/pyusb/>`_ library. It
is one of several USB device modules, each of which uses a different
USB library interface. See :ref:`Installation - USB
library<dependencies-usb>` for details.
Testing
=======
Run :py:mod:`pywws.testweatherstation` with increased verbosity so it
reports which USB device access module is being used::
python -m pywws.testweatherstation -vv
18:28:09:pywws.weatherstation.CUSBDrive:using pywws.device_pyusb
0000 55 aa ff ff ff ff ff ff ff ff ff ff ff ff ff ff 05 20 01 41 11 00 00 00 81 00 00 0f 05 00 e0 51
0020 03 27 ce 27 00 00 00 00 00 00 00 12 02 14 18 27 41 23 c8 00 00 00 46 2d 2c 01 64 80 c8 00 00 00
0040 64 00 64 80 a0 28 80 25 a0 28 80 25 03 36 00 05 6b 00 00 0a 00 f4 01 12 00 00 00 00 00 00 00 00
0060 00 00 49 0a 63 12 05 01 7f 00 36 01 60 80 36 01 60 80 bc 00 7b 80 95 28 12 26 6c 28 25 26 c8 01
0080 1d 02 d8 00 de 00 ff 00 ff 00 ff 00 00 11 10 06 01 29 12 02 01 19 32 11 09 09 05 18 12 01 22 13
00a0 14 11 11 04 15 04 11 12 17 05 12 11 09 02 15 26 12 02 11 07 05 11 09 02 15 26 12 02 11 07 05 11
00c0 09 10 09 12 12 02 02 12 38 12 02 07 19 00 11 12 16 03 27 12 02 03 11 00 11 12 16 03 27 11 12 26
00e0 21 32 11 12 26 21 32 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57
API
===
"""
__docformat__ = "restructuredtext en"
import platform
import usb
class USBDevice(object):
def __init__(self, idVendor, idProduct):
"""Low level USB device access via PyUSB library.
:param idVendor: the USB "vendor ID" number, for example 0x1941.
:type idVendor: int
:param idProduct: the USB "product ID" number, for example 0x8021.
:type idProduct: int
"""
dev = self._find_device(idVendor, idProduct)
if not dev:
raise IOError("Weather station device not found")
self.devh = dev.open()
if not self.devh:
raise IOError("Open device failed")
self.devh.reset()
## if platform.system() is 'Windows':
## self.devh.setConfiguration(1)
try:
self.devh.claimInterface(0)
except usb.USBError:
# claim interface failed, try detaching kernel driver first
if not hasattr(self.devh, 'detachKernelDriver'):
raise RuntimeError(
"Please upgrade pyusb (or python-usb) to 0.4 or higher")
try:
self.devh.detachKernelDriver(0)
self.devh.claimInterface(0)
except usb.USBError:
raise IOError("Claim interface failed")
# device may have data left over from an incomplete read
for i in range(4):
try:
self.devh.interruptRead(0x81, 8, 1200)
except usb.USBError:
break
def __del__(self):
if self.devh:
try:
self.devh.releaseInterface()
except usb.USBError:
# interface was not claimed. No problem
pass
def _find_device(self, idVendor, idProduct):
"""Find a USB device by product and vendor id."""
for bus in usb.busses():
for device in bus.devices:
if (device.idVendor == idVendor and
device.idProduct == idProduct):
return device
return None
def read_data(self, size):
"""Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
"""
result = self.devh.interruptRead(0x81, size, 1200)
if result is None or len(result) < size:
raise IOError('pywws.device_libusb.USBDevice.read_data failed')
return list(result)
def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
result = self.devh.controlMsg(
usb.ENDPOINT_OUT + usb.TYPE_CLASS + usb.RECIP_INTERFACE,
usb.REQ_SET_CONFIGURATION, buf, value=0x200, timeout=50)
if result != len(buf):
raise IOError('pywws.device_libusb.USBDevice.write_data failed')
return True
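# A hedged usage sketch (illustrative; 0x1941/0x8021 are the example
# vendor/product ids from the docstring above):
#
#     dev = USBDevice(0x1941, 0x8021)
#     data = dev.read_data(8)   # 8 bytes from interrupt endpoint 0x81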
|
3v1n0/pywws
|
src/pywws/device_pyusb.py
|
Python
|
gpl-2.0
| 5,584 | 0.002328 |
""" Generates a list of [b, N, n] where N is the amount of b-bit primes
and n is the amount of b-bit safe primes. """
import gmpy
import json
for b in xrange(1,33):
N = 0
n = 0
p = gmpy.mpz(2**b)
while True:
p = gmpy.next_prime(p)
if p > 2**(b+1):
break
if gmpy.is_prime(2*p + 1):
n += 1
N += 1
# fraction of b-bit primes that are safe primes (computed for reference,
# not written to the output)
d = n / float(N)
print json.dumps([b, N, n])
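# Illustrative check (hand-computed, not produced by running this script):
# for b=4 the primes in (16, 32] are 17, 19, 23, 29, 31, of which 23 and
# 29 are safe (2*23+1=47 and 2*29+1=59 are prime), so the line printed
# is [4, 5, 2].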
|
bwesterb/germain
|
exact.py
|
Python
|
gpl-3.0
| 437 | 0.004577 |
# pylint: skip-file
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
''' ansible module for gcloud iam service-account keys'''
module = AnsibleModule(
argument_spec=dict(
# credentials
state=dict(default='present', type='str', choices=['present', 'absent', 'list']),
service_account_name=dict(required=True, type='str'),
key_format=dict(type='str', choices=['p12', 'json']),
key_id=dict(default=None, type='str'),
display_name=dict(default=None, type='str'),
),
supports_check_mode=True,
)
gcloud = GcloudIAMServiceAccountKeys(module.params['service_account_name'],
key_format=module.params['key_format'])
state = module.params['state']
#####
# List
#####
if state == 'list':
api_rval = gcloud.list_service_account_keys()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval, state="list")
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = gcloud.delete_service_account_key(module.params['key_id'])
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="absent")
if state == 'present':
########
# Create
########
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
# Create it here
outputfile = '/tmp/glcoud_iam_sa_keys'
api_rval = gcloud.create_service_account_key(outputfile)
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
|
appuio/ansible-role-openshift-zabbix-monitoring
|
vendor/openshift-tools/ansible/roles/lib_gcloud/build/ansible/gcloud_iam_sa_keys.py
|
Python
|
apache-2.0
| 2,395 | 0.00334 |
#!/usr/bin/env python2
# encoding=utf-8
from __future__ import division, print_function
from math import ceil, floor, log10, pi
from sys import argv, stdout
from xml.dom import minidom
import bz2
import csv
# local imports
from my_helper_functions_bare import *
def pretty_mean_std(data):
return uncertain_number_string(my_mean(data), my_means_std(data))
varying_parameters = ["pressures_virial", "pressures_collision", "msds_val",
"msds_diffusion", "times"]
data = { i:[] for i in varying_parameters }
data = dict(data.items() + {"packings": [], "collisions": [], "n_atoms": []}.items())
for input_file in argv[1:]:
xmldoc = minidom.parse(bz2.BZ2File(input_file))
packing = float(xmldoc.getElementsByTagName('PackingFraction')[0].attributes['val'].value)
n_atoms = int(xmldoc.getElementsByTagName('ParticleCount')[0].attributes['val'].value)
if len(data["packings"]) == 0 or packing != data["packings"][-1] \
or n_atoms != data["n_atoms"][-1]:
data["packings"].append(packing)
data["n_atoms"].append(n_atoms)
data["collisions"].append(int(xmldoc.getElementsByTagName(
'Duration')[0].attributes['TwoParticleEvents'].value))
for parameter in varying_parameters:
data[parameter].append([])
data["times"][-1].append(float(
xmldoc.getElementsByTagName('Duration')[0].attributes['Time'].value))
data["pressures_virial"][-1].append(float(
xmldoc.getElementsByTagName('Pressure')[0].attributes['Avg'].value))
data["pressures_collision"][-1].append(my_pressure(data["n_atoms"][-1],
data["collisions"][-1], data["times"][-1][-1]))
try:
data["msds_val"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['val'].value))
data["msds_diffusion"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['diffusionCoeff'].value))
except:
data["msds_val"][-1].append(None)
data["msds_diffusion"][-1].append(None)
stdout_writer = csv.writer(stdout, delimiter='\t')
"""
stdout.write("### Data format: packings\tdensities\tcollisions\tn_atoms\t"
"pressures_virial\tpressures_collision\tmsds_val\tmsds_diffusion\t"
"times\n")
"""
stdout.write("\multicolumn{1}{c}{$\zeta$}\t\multicolumn{1}{c}{$Z_{MD}$}\t"
"\multicolumn{1}{c}{$\Delta Z_{MD}$}\n")
for i in xrange(len(data["packings"])):
if data["msds_diffusion"][i][0] is None:
continue
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(data["packings"][i]*6.0/pi),
data["collisions"][i],
data["n_atoms"][i],
pretty_mean_std(data["pressures_virial"][i]),
pretty_mean_std(data["pressures_collision"][i]),
pretty_mean_std(data["msds_val"][i]),
pretty_mean_std(data["msds_diffusion"][i]),
pretty_mean_std(data["times"][i])
])
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(my_mean(data["pressures_collision"][i])),
"{:.9f}".format(my_means_std(data["pressures_collision"][i]))
])
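# Illustrative output sketch (values made up): each emitted row pairs a
# packing fraction with the mean collisional pressure and the uncertainty
# computed by my_means_std, tab-separated to nine decimals, e.g.
#     0.450000000    5.123456789    0.001234567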
|
macioosch/dynamo-hard-spheres-sim
|
to_csv_pretty.py
|
Python
|
gpl-3.0
| 3,184 | 0.005653 |
# Copyright (C) 2016 The OpenTimestamps developers
#
# This file is part of python-opentimestamps.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-opentimestamps including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
"""Timestamp signature verification"""
import opentimestamps.core.serialize
class VerificationError(Exception):
"""Attestation verification errors"""
class TimeAttestation:
"""Time-attesting signature"""
TAG = None
TAG_SIZE = 8
# FIXME: What should this be?
MAX_PAYLOAD_SIZE = 8192
"""Maximum size of a attestation payload"""
def _serialize_payload(self, ctx):
raise NotImplementedError
def serialize(self, ctx):
ctx.write_bytes(self.TAG)
payload_ctx = opentimestamps.core.serialize.BytesSerializationContext()
self._serialize_payload(payload_ctx)
ctx.write_varbytes(payload_ctx.getbytes())
def __eq__(self, other):
"""Implementation of equality operator
WARNING: The exact behavior of this isn't yet well-defined enough to be
used for consensus-critical applications.
"""
if isinstance(other, TimeAttestation):
assert self.__class__ is not other.__class__ # should be implemented by subclass
return False
else:
return NotImplemented
def __lt__(self, other):
"""Implementation of less than operator
WARNING: The exact behavior of this isn't yet well-defined enough to be
used for consensus-critical applications.
"""
if isinstance(other, TimeAttestation):
assert self.__class__ is not other.__class__ # should be implemented by subclass
return self.TAG < other.TAG
else:
return NotImplemented
@classmethod
def deserialize(cls, ctx):
tag = ctx.read_bytes(cls.TAG_SIZE)
serialized_attestation = ctx.read_varbytes(cls.MAX_PAYLOAD_SIZE)
import opentimestamps.core.serialize
payload_ctx = opentimestamps.core.serialize.BytesDeserializationContext(serialized_attestation)
# FIXME: probably a better way to do this...
import opentimestamps.core.dubious.notary
if tag == PendingAttestation.TAG:
r = PendingAttestation.deserialize(payload_ctx)
elif tag == BitcoinBlockHeaderAttestation.TAG:
r = BitcoinBlockHeaderAttestation.deserialize(payload_ctx)
elif tag == opentimestamps.core.dubious.notary.EthereumBlockHeaderAttestation.TAG:
r = opentimestamps.core.dubious.notary.EthereumBlockHeaderAttestation.deserialize(payload_ctx)
else:
return UnknownAttestation(tag, serialized_attestation)
# If attestations want to have unspecified fields for future
# upgradability they should do so explicitly.
payload_ctx.assert_eof()
return r
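# Wire-format sketch (derived from serialize()/deserialize() above): an
# attestation is serialized as the 8-byte TAG followed by the payload as
# varbytes, i.e. a length-prefixed byte string capped at MAX_PAYLOAD_SIZE
# on deserialization.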
class UnknownAttestation(TimeAttestation):
"""Placeholder for attestations that don't support"""
def __init__(self, tag, payload):
if tag.__class__ != bytes:
raise TypeError("tag must be bytes instance; got %r" % tag.__class__)
elif len(tag) != self.TAG_SIZE:
raise ValueError("tag must be exactly %d bytes long; got %d" % (self.TAG_SIZE, len(tag)))
if payload.__class__ != bytes:
raise TypeError("payload must be bytes instance; got %r" % tag.__class__)
elif len(payload) > self.MAX_PAYLOAD_SIZE:
raise ValueError("payload must be <= %d bytes long; got %d" % (self.MAX_PAYLOAD_SIZE, len(payload)))
# FIXME: we should check that tag != one of the tags that we do know
# about; if it does the operators < and =, and hash() will likely act
# strangely
self.TAG = tag
self.payload = payload
def __repr__(self):
return 'UnknownAttestation(%r, %r)' % (self.TAG, self.payload)
def __eq__(self, other):
if other.__class__ is UnknownAttestation:
return self.TAG == other.TAG and self.payload == other.payload
else:
return super().__eq__(other)
def __lt__(self, other):
if other.__class__ is UnknownAttestation:
return (self.TAG, self.payload) < (other.TAG, other.payload)
else:
return super().__lt__(other)
def __hash__(self):
return hash((self.TAG, self.payload))
def _serialize_payload(self, ctx):
# Notice how this is write_bytes, not write_varbytes - the latter would
# incorrectly add a length header to the actual payload.
ctx.write_bytes(self.payload)
# Note how neither of these signatures actually has the time...
class PendingAttestation(TimeAttestation):
"""Pending attestation
Commitment has been recorded in a remote calendar for future attestation,
and we have a URI to find a more complete timestamp in the future.
Nothing other than the URI is recorded, nor is there provision made to add
extra metadata (other than the URI) in future upgrades. The rational here
is that remote calendars promise to keep commitments indefinitely, so from
the moment they are created it should be possible to find the commitment in
the calendar. Thus if you're not satisfied with the local verifiability of
a timestamp, the correct thing to do is just ask the remote calendar if
additional attestations are available and/or when they'll be available.
While we could record additional metadata like what types of attestations the
remote calendar expects to be able to provide in the future, that metadata
can easily change in the future too. Given that we don't expect timestamps
to normally have more than a small number of remote calendar attestations,
it'd be better to have verifiers get the most recent status of such
information (possibly with appropriate negative response caching).
"""
TAG = bytes.fromhex('83dfe30d2ef90c8e')
MAX_URI_LENGTH = 1000
"""Maximum legal URI length, in bytes"""
ALLOWED_URI_CHARS = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._/:"
"""Characters allowed in URI's
Note how we've left out the characters necessary for parameters, queries,
or fragments, as well as IPv6 [] notation, percent-encoding special
characters, and @ login notation. Hopefully this keeps us out of trouble!
"""
@classmethod
def check_uri(cls, uri):
"""Check URI for validity
Raises ValueError appropriately
"""
if len(uri) > cls.MAX_URI_LENGTH:
raise ValueError("URI exceeds maximum length")
for char in uri:
if char not in cls.ALLOWED_URI_CHARS:
raise ValueError("URI contains invalid character %r" % bytes([char]))
def __init__(self, uri):
if not isinstance(uri, str):
raise TypeError("URI must be a string")
self.check_uri(uri.encode())
self.uri = uri
def __repr__(self):
return 'PendingAttestation(%r)' % self.uri
def __eq__(self, other):
if other.__class__ is PendingAttestation:
return self.uri == other.uri
else:
return super().__eq__(other)
def __lt__(self, other):
if other.__class__ is PendingAttestation:
return self.uri < other.uri
else:
return super().__lt__(other)
def __hash__(self):
return hash(self.uri)
def _serialize_payload(self, ctx):
ctx.write_varbytes(self.uri.encode())
@classmethod
def deserialize(cls, ctx):
utf8_uri = ctx.read_varbytes(cls.MAX_URI_LENGTH)
try:
cls.check_uri(utf8_uri)
except ValueError as exp:
raise opentimestamps.core.serialize.DeserializationError("Invalid URI: %r" % exp)
return PendingAttestation(utf8_uri.decode())
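# A hedged round-trip sketch (illustrative; the calendar URI is made up):
#
#     ctx = opentimestamps.core.serialize.BytesSerializationContext()
#     PendingAttestation('https://a.pool.opentimestamps.org').serialize(ctx)
#     rctx = opentimestamps.core.serialize.BytesDeserializationContext(ctx.getbytes())
#     assert TimeAttestation.deserialize(rctx) == \
#         PendingAttestation('https://a.pool.opentimestamps.org')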
class BitcoinBlockHeaderAttestation(TimeAttestation):
"""Signed by the Bitcoin blockchain
The commitment digest will be the merkleroot of the blockheader.
The block height is recorded so that looking up the correct block header in
an external block header database doesn't require every header to be stored
locally (33MB and counting). (remember that a memory-constrained local
client can save an MMR that commits to all blocks, and use an external service to fill
in pruned details).
Otherwise no additional redundant data about the block header is recorded.
This is very intentional: since the attestation contains (nearly) the
absolute bare minimum amount of data, we encourage implementations to do
the correct thing and get the block header from a by-height index, check
that the merkleroots match, and then calculate the time from the header
information. Providing more data would encourage implementations to cheat.
Remember that the only thing that would invalidate the block height is a
reorg, but in the event of a reorg the merkleroot will be invalid anyway,
so there's no point to recording data in the attestation like the header
itself. At best that would just give us extra confirmation that a reorg
made the attestation invalid; reorgs deep enough to invalidate timestamps are
exceptionally rare events anyway, so better to just tell the user the timestamp
can't be verified rather than add almost-never tested code to handle that case
more gracefully.
"""
TAG = bytes.fromhex('0588960d73d71901')
def __init__(self, height):
self.height = height
def __eq__(self, other):
if other.__class__ is BitcoinBlockHeaderAttestation:
return self.height == other.height
else:
return super().__eq__(other)
def __lt__(self, other):
if other.__class__ is BitcoinBlockHeaderAttestation:
return self.height < other.height
else:
return super().__lt__(other)
def __hash__(self):
return hash(self.height)
def verify_against_blockheader(self, digest, block_header):
"""Verify attestation against a block header
Returns the block time on success; raises VerificationError on failure.
"""
if len(digest) != 32:
raise VerificationError("Expected digest with length 32 bytes; got %d bytes" % len(digest))
elif digest != block_header.hashMerkleRoot:
raise VerificationError("Digest does not match merkleroot")
return block_header.nTime
def __repr__(self):
return 'BitcoinBlockHeaderAttestation(%r)' % self.height
def _serialize_payload(self, ctx):
ctx.write_varuint(self.height)
@classmethod
def deserialize(cls, ctx):
height = ctx.read_varuint()
return BitcoinBlockHeaderAttestation(height)
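# A hedged verification sketch (illustrative; the height is made up and
# block_header stands for any object with hashMerkleRoot and nTime
# attributes, e.g. a header fetched with python-bitcoinlib):
#
#     attestation = BitcoinBlockHeaderAttestation(358391)
#     block_time = attestation.verify_against_blockheader(digest, block_header)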
|
petertodd/python-opentimestamps
|
opentimestamps/core/notary.py
|
Python
|
lgpl-3.0
| 10,936 | 0.002195 |
# encoding: utf-8
def _unicode_truncate(ustr, length, encoding="UTF-8"):
"Truncate @ustr to specific encoded byte length"
bstr = ustr.encode(encoding)[:length]
return bstr.decode(encoding, 'ignore')
def extract_title_body(text, maxtitlelen=60):
"""Prepare @text: Return a (title, body) tuple
@text: A user-submitted paragraph or otherwise snippet of text. We
try to detect an obvious title and then return the title and the
following body. Otherwise we extract a title from the first words,
and return the full text as body.
@maxtitlelen: A unitless measure of approximate length of title.
The default value yields a resulting title of approximately 60 ascii
characters, or 20 asian characters.
>>> extract_title_body("Short Text")
('Short Text', '')
>>> title, body = extract_title_body(u"執筆方針については、項目名の付け方、"
... "フォーマットや表記上の諸問題に関して多くの方針が存在している。")
>>> print(title)
執筆方針については、項目名の付け方、フォ
>>> print(body) # doctest: +ELLIPSIS
執筆方針については、項目名の付け方、フォ...して多くの方針が存在している。
"""
# if you don't make real tests, it's not worth doing it at all.
if not text.strip():
return text, ""
def split_first_line(text):
"""Take first non-empty line of text"""
lines = iter(text.splitlines())
for l in lines:
l = l.strip()
if not l:
continue
rest = "\n".join(lines)
return l, rest
# We use the UTF-8 encoding and truncate due to it:
# this is a good heuristic for ascii vs "wide characters"
# it results in taking fewer characters if they are asian, which
# is exactly what we want
def split_first_words(text, maxlen):
text = text.lstrip()
first_text = _unicode_truncate(text, maxlen)
words = first_text.split()
if len(words) > 3:
words = words[:-1]
first_words = " ".join(words[:-1])
if text.startswith(first_words):
first_text = first_words
rest_text = text[len(first_text):]
return first_text, rest_text
firstline, rest = split_first_line(text)
if len(firstline.encode("UTF-8")) > maxtitlelen:
firstline, rest = split_first_words(text, maxtitlelen)
else:
return firstline, rest
if rest.strip():
return firstline, text
else:
return text, ""
if __name__ == '__main__':
import doctest
doctest.testmod()
|
engla/kupfer
|
kupfer/textutils.py
|
Python
|
gpl-3.0
| 2,681 | 0.001617 |
#! /usr/bin/env python
import re
import math
import collections
import numpy as np
import time
import operator
from scipy.io import mmread, mmwrite
from random import randint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing as pp
from sklearn.svm import SVR
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.decomposition import ProbabilisticPCA, KernelPCA
from sklearn.decomposition import NMF
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression, Ridge, Lasso, ElasticNet
import scipy.stats as stats
from sklearn import tree
from sklearn.feature_selection import f_regression
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc, f1_score
from sklearn.gaussian_process import GaussianProcess
import features
# working directory
dir = '.'
label_index = 770
# load train data
def load_train_fs():
    # During validation the training data was randomly shuffled first; for the
    # prediction process there is no need to shuffle the dataset.
    # Owing to an out-of-memory problem, the Gaussian process uses only part of
    # the training data, so its predictions may differ slightly from a model
    # trained on the shuffled data.
train_fs = np.genfromtxt(open(dir + '/train_v2_1000.csv','rb'), delimiter=',', skip_header=1)
col_mean = stats.nanmean(train_fs, axis=0)
inds = np.where(np.isnan(train_fs))
train_fs[inds] = np.take(col_mean, inds[1])
train_fs[np.isinf(train_fs)] = 0
return train_fs
# load test data
def load_test_fs():
test_fs = np.genfromtxt(open(dir + '/test_v2_1000.csv','rb'), delimiter=',', skip_header = 1)
col_mean = stats.nanmean(test_fs, axis=0)
inds = np.where(np.isnan(test_fs))
test_fs[inds] = np.take(col_mean, inds[1])
test_fs[np.isinf(test_fs)] = 0
return test_fs
# extract features from test data
def test_type(test_fs):
x_Test = test_fs[:,range(1, label_index)]
return x_Test
# extract features from train data
def train_type(train_fs):
train_x = train_fs[:,range(1, label_index)]
train_y= train_fs[:,-1]
return train_x, train_y
# transform the loss to the binary form
def toLabels(train_y):
labels = np.zeros(len(train_y))
labels[train_y>0] = 1
return labels
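# For example (hypothetical values): toLabels(np.array([0.0, 3.5, 0.0, 12.0]))
# returns array([0., 1., 0., 1.]) -- 1 marks the rows with a positive loss.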
# generate the output file based on the predictions
def output_preds(preds):
out_file = dir + '/output_1000.csv'
fs = open(out_file,'w')
fs.write('id,loss\n')
for i in range(len(preds)):
if preds[i] > 100:
preds[i] = 100
elif preds[i] < 0:
preds[i] = 0
strs = str(i+105472) + ',' + str(np.float(preds[i]))
        fs.write(strs + '\n')
fs.close()
return
# get the top feature indexes by invoking f_regression
def getTopFeatures(train_x, train_y, n_features=100):
f_val, p_val = f_regression(train_x,train_y)
f_val_dict = {}
p_val_dict = {}
for i in range(len(f_val)):
if math.isnan(f_val[i]):
f_val[i] = 0.0
f_val_dict[i] = f_val[i]
if math.isnan(p_val[i]):
p_val[i] = 0.0
p_val_dict[i] = p_val[i]
sorted_f = sorted(f_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True)
sorted_p = sorted(p_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True)
feature_indexs = []
for i in range(0,n_features):
feature_indexs.append(sorted_f[i][0])
return feature_indexs
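# Usage sketch (shapes are illustrative): with train_x of shape (m, 769) and
# train_y of shape (m,), getTopFeatures(train_x, train_y, 5) returns the column
# indexes of the 5 features with the largest univariate F-scores.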
# build the new feature matrix: the selected base features plus derived pairwise-combination features
def get_data(train_x, feature_indexs, feature_minus_pair_list=[], feature_plus_pair_list=[],
feature_mul_pair_list=[], feature_divide_pair_list = [], feature_pair_sub_mul_list=[],
feature_pair_plus_mul_list = [],feature_pair_sub_divide_list = [], feature_minus2_pair_list = [],feature_mul2_pair_list=[],
feature_sub_square_pair_list=[], feature_square_sub_pair_list=[],feature_square_plus_pair_list=[]):
sub_train_x = train_x[:,feature_indexs]
for i in range(len(feature_minus_pair_list)):
ind_i = feature_minus_pair_list[i][0]
ind_j = feature_minus_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i]-train_x[:,ind_j]))
for i in range(len(feature_plus_pair_list)):
ind_i = feature_plus_pair_list[i][0]
ind_j = feature_plus_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] + train_x[:,ind_j]))
for i in range(len(feature_mul_pair_list)):
ind_i = feature_mul_pair_list[i][0]
ind_j = feature_mul_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] * train_x[:,ind_j]))
for i in range(len(feature_divide_pair_list)):
ind_i = feature_divide_pair_list[i][0]
ind_j = feature_divide_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] / train_x[:,ind_j]))
for i in range(len(feature_pair_sub_mul_list)):
ind_i = feature_pair_sub_mul_list[i][0]
ind_j = feature_pair_sub_mul_list[i][1]
ind_k = feature_pair_sub_mul_list[i][2]
sub_train_x = np.column_stack((sub_train_x, (train_x[:,ind_i]-train_x[:,ind_j]) * train_x[:,ind_k]))
return sub_train_x
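# For example, passing feature_minus_pair_list=[(0, 2)] appends one extra
# column, train_x[:,0] - train_x[:,2], after the selected base features; each
# pair list contributes one derived column per pair.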
# use gbm classifier to predict whether the loan defaults or not
def gbc_classify(train_x, train_y):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20],
features.feature_pair_sub_mul_list[:20])
labels = toLabels(train_y)
gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=8)
gbc.fit(sub_x_Train, labels)
return gbc
# use svm to predict the loss, based on the result of gbm classifier
def gbc_svr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
feature_pair_sub_mul_list, feature_pair_sub_list_sf, feature_pair_plus_list2):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
pred_labels = gbc.predict(sub_x_Test)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
ind_train = np.where(train_y > 0)[0]
ind_train0 = np.where(train_y == 0)[0]
preds_all = np.zeros([len(sub_x_Test)])
flag = (sub_x_Test[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Train = get_data(train_x, feature_indexs[:100], feature_pair_sub_list_sf
,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Test = get_data(test_x, feature_indexs[:100], feature_pair_sub_list_sf
,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Train[:,101] = np.log(1-sub_x_Train[:,101])
sub_x_Test[ind_tmp,101] = np.log(1-sub_x_Test[ind_tmp,101])
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
svr = SVR(C=16, kernel='rbf', gamma = 0.000122)
svr.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
preds = svr.predict(sub_x_Test[ind_test])
preds_all[ind_test] = np.power(np.e, preds)
preds_all[ind_tmp0] = 0
return preds_all
# use gbm regression to predict the loss, based on the result of gbm classifier
def gbc_gbr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
feature_pair_sub_mul_list, feature_pair_sub_list2):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20],feature_pair_sub_mul_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
pred_labels = gbc.predict(sub_x_Test)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
ind_train = np.where(train_y > 0)[0]
ind_train0 = np.where(train_y == 0)[0]
preds_all = np.zeros([len(sub_x_Test)])
flag = (sub_x_Test[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list2[:70]
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list2[:70]
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list)
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
gbr1000 = GradientBoostingRegressor(n_estimators=1300, max_depth=4, subsample=0.5, learning_rate=0.05)
gbr1000.fit(sub_x_Train[ind_train], np.log(train_y[ind_train]))
preds = gbr1000.predict(sub_x_Test[ind_test])
preds_all[ind_test] = np.power(np.e, preds)
preds_all[ind_tmp0] = 0
return preds_all
# predict the loss based on the Gaussian process regressor, which has been trained
def gp_predict(clf, x_Test):
size = len(x_Test)
part_size = 3000
cnt = (size-1) / part_size + 1
preds = []
for i in range(cnt):
if i < cnt - 1:
pred_part = clf.predict(x_Test[i*part_size: (i+1) * part_size])
else:
pred_part = clf.predict(x_Test[i*part_size: size])
preds.extend(pred_part)
return np.power(np.e,preds)
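# Chunking example: for len(x_Test) == 7000 and part_size == 3000, cnt is
# (7000-1)/3000 + 1 == 3 (Python 2 floor division), so chunks of 3000, 3000
# and 1000 rows are predicted and concatenated.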
# train the gaussian process regressor
def gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test_part):
    # Owing to memory limits, the model is trained on only part of the training data.
    # Note: this part was trained on a machine with more than 96 GB of RAM.
sub_x_Train[:,16] = np.log(1-sub_x_Train[:,16])
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
ind_train = np.where(train_y>0)[0]
part_size= int(0.7 * len(ind_train))
gp = GaussianProcess(theta0=1e-3, thetaL=1e-5, thetaU=10, corr= 'absolute_exponential')
gp.fit(sub_x_Train[ind_train[:part_size]], np.log(train_y[ind_train[:part_size]]))
flag = (sub_x_Test_part[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Test_part[ind_tmp,16] = np.log(1-sub_x_Test_part[ind_tmp,16])
sub_x_Test_part[ind_tmp] = scaler.transform(sub_x_Test_part[ind_tmp])
gp_preds_tmp = gp_predict(gp, sub_x_Test_part[ind_tmp])
gp_preds = np.zeros(len(sub_x_Test_part))
gp_preds[ind_tmp] = gp_preds_tmp
return gp_preds
# use gbm classifier to predict whether the loan defaults or not, then invoke the function gbc_gp_predict_part
def gbc_gp_predict(train_x, train_y, test_x):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20])
labels = toLabels(train_y)
gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=9)
gbc.fit(sub_x_Train, labels)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
gp_preds_part = gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test[ind_test])
gp_preds = np.zeros(len(test_x))
gp_preds[ind_test] = gp_preds_part
return gp_preds
# invoke the function gbc_svr_predict_part
def gbc_svr_predict(gbc, train_x, train_y, test_x):
svr_preds = gbc_svr_predict_part(gbc, train_x, train_y, test_x, features.feature_pair_sub_list, features.feature_pair_plus_list,
features.feature_pair_mul_list, features.feature_pair_divide_list,
features.feature_pair_sub_mul_list, features.feature_pair_sub_list_sf,
features.feature_pair_plus_list2)
return svr_preds
# invoke the function gbc_gbr_predict_part
def gbc_gbr_predict(gbc, train_x, train_y, test_x):
gbr_preds = gbc_gbr_predict_part(gbc, train_x, train_y, test_x, features.feature_pair_sub_list,
features.feature_pair_plus_list, features.feature_pair_mul_list,
features.feature_pair_divide_list, features.feature_pair_sub_mul_list,
features.feature_pair_sub_list2)
return gbr_preds
# the main function
if __name__ == '__main__':
    train_fs = load_train_fs()
    print 'training data loaded'
    test_fs = load_test_fs()
    print 'test data loaded'
    train_x, train_y = train_type(train_fs)
    test_x = test_type(test_fs)
    print 'features extracted'
    gbc = gbc_classify(train_x, train_y)
    print 'gbm classifier trained'
    svr_preds = gbc_svr_predict(gbc, train_x, train_y, test_x)
    print 'svr predictions done'
    gbr_preds = gbc_gbr_predict(gbc, train_x, train_y, test_x)
    print 'gbr predictions done'
    gp_preds = gbc_gp_predict(train_x, train_y, test_x)
    print 'gaussian process predictions done'
    # blend the three models; the weights sum to 1.0
    preds_all = svr_preds * 0.4 + gp_preds * 0.2 + gbr_preds * 0.4
output_preds(preds_all)
|
Goodideax/CS249
|
new.py
|
Python
|
bsd-3-clause
| 14,678 | 0.014784 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : lookup value selector
Description : Enables the selection of lookup values from a
lookup entity.
Date : 09/February/2017
copyright : (C) 2017 by UN-Habitat and implementing partners.
See the accompanying file CONTRIBUTORS.txt in the root
email : stdm@unhabitat.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import (
QStandardItem,
QStandardItemModel
)
from qgis.PyQt.QtWidgets import (
QApplication,
QDialog
)
from stdm.settings import current_profile
from stdm.ui.gui_utils import GuiUtils
from stdm.ui.notification import NotificationBar
WIDGET, BASE = uic.loadUiType(
GuiUtils.get_ui_file_path('ui_lookup_value_selector.ui'))
class LookupValueSelector(WIDGET, BASE):
"""
A dialog that enables to select a value and code from a lookup.
.. versionadded:: 1.5
"""
def __init__(self, parent, lookup_entity_name, profile=None):
"""
Initializes LookupValueSelector.
:param parent: The parent of the dialog.
:type parent: QWidget
:param lookup_entity_name: The lookup entity name
:type lookup_entity_name: String
:param profile: The current profile object
:type profile: Object
"""
QDialog.__init__(self, parent, Qt.WindowTitleHint |
Qt.WindowCloseButtonHint)
self.setupUi(self)
self.value_and_code = None
if profile is None:
self._profile = current_profile()
else:
self._profile = profile
self.lookup_entity = self._profile.entity_by_name(
'{}_{}'.format(self._profile.prefix, lookup_entity_name)
)
self.notice = NotificationBar(self.notice_bar)
self._view_model = QStandardItemModel()
self.value_list_box.setModel(self._view_model)
header_item = QStandardItem(lookup_entity_name)
self._view_model.setHorizontalHeaderItem(0, header_item)
self.populate_value_list_view()
self.selected_code = None
self.selected_value_code = None
self.value_list_box.clicked.connect(self.validate_selected_code)
def populate_value_list_view(self):
"""
Populates the lookup values and codes.
"""
self.value_and_code = self.lookup_entity.values
for value, code in self.value_and_code.items():
u_value = str(value)
code_value = self.lookup_entity.values[u_value]
value_code = QStandardItem('{} ({})'.format(
code_value.value, code.code
)
)
value_code.setData(code.code)
self._view_model.appendRow(value_code)
def validate_selected_code(self):
"""
        Validate that the selected lookup value has an associated code.
"""
self.notice.clear()
self.selected_code_value()
if self.selected_code == '':
notice = QApplication.tr(self, 'The selected value has no code.')
self.notice.insertWarningNotification(notice)
def selected_code_value(self):
"""
Get the selected lookup value.
"""
index = self.value_list_box.currentIndex()
item = self._view_model.itemFromIndex(index)
self.selected_code = item.data()
self.selected_value_code = item.text()
def accept(self):
"""
Overridden QDialog accept method.
"""
self.selected_code_value()
self.done(1)
def reject(self):
"""
        Overridden QDialog reject method.
"""
self.selected_code = None
self.selected_value_code = None
self.done(0)
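# Typical usage from a parent widget (a sketch; the lookup entity name shown
# is hypothetical and must exist in the active profile):
#     selector = LookupValueSelector(parent, 'check_lu_tenure')
#     if selector.exec_():
#         print(selector.selected_value_code, selector.selected_code)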
|
gltn/stdm
|
stdm/ui/lookup_value_selector.py
|
Python
|
gpl-2.0
| 4,591 | 0 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def box_cox(self, column_name, lambda_value=0.0, box_cox_column_name=None):
"""
Calculate the box-cox transformation for each row on a given column of the current frame
Parameters
----------
:param column_name: Name of the column to perform the transformation on
:param lambda_value: Lambda power parameter. Default is 0.0
:param box_cox_column_name: Optional column name for the box_cox value
:return: (Frame) returns a frame with a new column storing the box-cox transformed value
Calculate the box-cox transformation for each row in column 'column_name' of a frame using the lambda_value.
Box-cox transformation is computed by the following formula:
boxcox = log(y); if lambda=0,
boxcox = (y^lambda -1)/lambda ; else
where log is the natural log
Examples
--------
>>> data = [[7.7132064326674596],[0.207519493594015],[6.336482349262754],[7.4880388253861181],[4.9850701230259045]]
>>> schema = [("input", float)]
>>> my_frame = tc.frame.create(data, schema)
>>> my_frame.inspect()
[#] input
===================
[0] 7.71320643267
[1] 0.207519493594
[2] 6.33648234926
[3] 7.48803882539
[4] 4.98507012303
Compute the box-cox transformation on the 'input' column
>>> my_frame.box_cox('input',0.3)
A new column gets added to the frame which stores the box-cox transformation for each row
>>> my_frame.inspect()
[#] input input_lambda_0.3
=====================================
[0] 7.71320643267 2.81913279907
[1] 0.207519493594 -1.25365381375
[2] 6.33648234926 2.46673638752
[3] 7.48803882539 2.76469126003
[4] 4.98507012303 2.06401101556
"""
self._scala.boxCox(column_name, lambda_value, self._tc.jutils.convert.to_scala_option(box_cox_column_name))
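# A minimal numpy sketch of the same transformation (for illustration only;
# the real computation above runs on the Scala/Spark backend):
#     import numpy as np
#     def box_cox_np(y, lambda_value=0.0):
#         y = np.asarray(y, dtype=float)
#         if lambda_value == 0.0:
#             return np.log(y)            # natural log when lambda == 0
#         return (y ** lambda_value - 1.0) / lambda_value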
|
trustedanalytics/spark-tk
|
python/sparktk/frame/ops/box_cox.py
|
Python
|
apache-2.0
| 2,662 | 0.003096 |
def test_local_variable():
x = 1
x = 2
|
asedunov/intellij-community
|
python/testData/inspections/PyRedeclarationInspection/localVariable.py
|
Python
|
apache-2.0
| 46 | 0.021739 |
"""IETF usage guidelines plugin
See RFC 8407
"""
import optparse
import sys
import re
from pyang import plugin
from pyang import statements
from pyang import error
from pyang.error import err_add
from pyang.plugins import lint
def pyang_plugin_init():
plugin.register_plugin(IETFPlugin())
class IETFPlugin(lint.LintPlugin):
def __init__(self):
self.found_2119_keywords = False
self.found_8174 = False
self.found_tlp = False
self.mmap = {}
lint.LintPlugin.__init__(self)
self.namespace_prefixes = ['urn:ietf:params:xml:ns:yang:']
self.modulename_prefixes = ['ietf', 'iana']
def add_opts(self, optparser):
optlist = [
optparse.make_option("--ietf",
dest="ietf",
action="store_true",
help="Validate the module(s) according to " \
"IETF rules."),
optparse.make_option("--ietf-help",
dest="ietf_help",
action="store_true",
help="Print help on the IETF checks and exit"),
]
optparser.add_options(optlist)
def setup_ctx(self, ctx):
if ctx.opts.ietf_help:
print_help()
sys.exit(0)
if not ctx.opts.ietf:
return
self._setup_ctx(ctx)
statements.add_validation_fun(
'grammar', ['description'],
lambda ctx, s: self.v_chk_description(ctx, s))
# register our error codes
error.add_error_code(
'IETF_MISSING_RFC8174', 4,
'the module seems to use RFC 2119 keywords, but the required'
+ ' text from RFC 8174 is not found or is not correct'
+ ' (see pyang --ietf-help for details).')
error.add_error_code(
'IETF_MISSING_TRUST_LEGAL_PROVISIONING', 4,
'RFC 8407: 3.1: '
+ 'The IETF Trust Copyright statement seems to be'
+ ' missing or is not correct'
+ ' (see pyang --ietf-help for details).')
error.add_error_code(
'IETF_MISSING_RFC_TEXT', 4,
'RFC 8407: Appendix B: '
+ 'The text about which RFC this module is part of seems to be'
+ ' missing or is not correct'
+ ' (see pyang --ietf-help for details).')
def pre_validate_ctx(self, ctx, modules):
for mod in modules:
self.mmap[mod.arg] = {
'found_2119_keywords': False,
'found_8174': False}
def v_chk_description(self, ctx, s):
if s.i_module.arg not in self.mmap:
return
arg = re.sub(r'\s+', ' ', s.arg)
if s.parent.keyword == 'module' or s.parent.keyword == 'submodule':
m = re_rfc8174.search(arg)
if m is not None:
self.mmap[s.i_module.arg]['found_8174'] = True
arg = arg[:m.start()] + arg[m.end():]
m = re_tlp.search(arg)
if m is None:
err_add(ctx.errors, s.pos,
'IETF_MISSING_TRUST_LEGAL_PROVISIONING', ())
else:
            # The statement was changed to "Revised BSD License" in
            # September 2021. Allow both for old documents; require
            # "Revised" for new ones.
y = int(m.group(1))
if y >= 2022 and arg.find("Simplified") > 0:
err_add(ctx.errors, s.pos,
'IETF_MISSING_TRUST_LEGAL_PROVISIONING', ())
if s.parent.arg.startswith('ietf-'):
m = re_ietf_rfc.search(arg)
if m is None:
err_add(ctx.errors, s.pos,
'IETF_MISSING_RFC_TEXT', ())
if not self.mmap[s.i_module.arg]['found_2119_keywords']:
if re_2119_keywords.search(arg) is not None:
self.mmap[s.i_module.arg]['found_2119_keywords'] = True
self.mmap[s.i_module.arg]['description_pos'] = s.pos
def post_validate_ctx(self, ctx, modules):
if not ctx.opts.ietf:
return
for mod in modules:
if (self.mmap[mod.arg]['found_2119_keywords']
and not self.mmap[mod.arg]['found_8174']):
pos = self.mmap[mod.arg]['description_pos']
err_add(ctx.errors, pos, 'IETF_MISSING_RFC8174', ())
def print_help():
print("""
Validates the module or submodule according to the IETF rules found
in RFC 8407.
The module's or submodule's description statement must contain the
following text:
Copyright (c) <year> IETF Trust and the persons identified as
authors of the code. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject to
the license terms contained in, the Revised BSD License set
forth in Section 4.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
(https://trustee.ietf.org/license-info).
An IETF module (but not an IANA module) must also contain the
following text:
This version of this YANG module is part of RFC XXXX
(https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself
for full legal notices.
If any description statement in the module or submodule contains
RFC 2119 key words, the module's or submodule's description statement
must contain the following text:
The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL
NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED',
'MAY', and 'OPTIONAL' in this document are to be interpreted as
described in BCP 14 (RFC 2119) (RFC 8174) when, and only when,
they appear in all capitals, as shown here.
""")
rfc8174_str = \
r"""The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL
NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED',
'MAY', and 'OPTIONAL' in this document are to be interpreted as
described in BCP 14 \(RFC 2119\) \(RFC 8174\) when, and only when,
they appear in all capitals, as shown here."""
re_rfc8174 = re.compile(re.sub(r'\s+', ' ', rfc8174_str))
tlp_str = \
r"""Copyright \(c\) ([0-9]+) IETF Trust and the persons identified as
authors of the code\. All rights reserved\.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the (Revised|Simplified) BSD License
set forth in Section 4\.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
\(https?://trustee.ietf.org/license-info\)\."""
re_tlp = re.compile(re.sub(r'\s+', ' ', tlp_str))
ietf_rfc_str = \
r"""This version of this YANG module is part of
RFC .+(\s+\(https?://www.rfc-editor.org/info/rfc.+\))?; see
the RFC itself for full legal notices\."""
re_ietf_rfc = re.compile(re.sub(r'\s+', ' ', ietf_rfc_str))
re_2119_keywords = re.compile(
r"\b(MUST|REQUIRED|SHOULD|SHALL|RECOMMENDED|MAY|OPTIONAL)\b")
|
mbj4668/pyang
|
pyang/plugins/ietf.py
|
Python
|
isc
| 7,093 | 0.001551 |
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
archs_list = ['ARM', 'ARM64', 'MIPS', 'MIPS64', 'X86', 'X86_64']
|
android-art-intel/Nougat
|
art-extension/tools/checker/common/archs.py
|
Python
|
apache-2.0
| 663 | 0 |
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import simplejson as json
import grpc
from google.protobuf.json_format import MessageToJson
from qrl.core import config
from qrl.core.AddressState import AddressState
from qrl.crypto.xmss import XMSS
from qrl.core.txs.Transaction import Transaction
from qrl.core.txs.TransferTransaction import TransferTransaction
from pyqrllib.pyqrllib import hstr2bin, bin2hstr
from qrl.generated import qrl_pb2_grpc, qrl_pb2, qrlmining_pb2, qrlmining_pb2_grpc
from flask import Flask, Response, request
from jsonrpc.backend.flask import api
app = Flask(__name__)
def read_slaves(slaves_filename):
with open(slaves_filename, 'r') as f:
slave_data = json.load(f)
slave_data[0] = bytes(hstr2bin(slave_data[0]))
return slave_data
def get_addr_state(addr: bytes) -> AddressState:
stub = get_public_stub()
response = stub.GetAddressState(request=qrl_pb2.GetAddressStateReq(address=addr))
return AddressState(response.state)
def set_unused_ots_key(xmss, addr_state, start=0):
for i in range(start, 2 ** xmss.height):
if not addr_state.ots_key_reuse(i):
xmss.set_ots_index(i)
return True
return False
def valid_payment_permission(public_stub, master_address_state, payment_xmss, json_slave_txn):
access_type = master_address_state.get_slave_permission(payment_xmss.pk)
if access_type == -1:
tx = Transaction.from_json(json_slave_txn)
public_stub.PushTransaction(request=qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata))
return None
if access_type == 0:
return True
return False
def get_unused_payment_xmss(public_stub):
global payment_slaves
global payment_xmss
master_address = payment_slaves[0]
master_address_state = get_addr_state(master_address)
if payment_xmss:
addr_state = get_addr_state(payment_xmss.address)
if set_unused_ots_key(payment_xmss, addr_state, payment_xmss.ots_index):
if valid_payment_permission(public_stub, master_address_state, payment_xmss, payment_slaves[2]):
return payment_xmss
else:
payment_xmss = None
if not payment_xmss:
unused_ots_found = False
for slave_seed in payment_slaves[1]:
xmss = XMSS.from_extended_seed(slave_seed)
addr_state = get_addr_state(xmss.address)
if set_unused_ots_key(xmss, addr_state): # Unused ots_key_found
payment_xmss = xmss
unused_ots_found = True
break
if not unused_ots_found: # Unused ots_key_found
return None
if not valid_payment_permission(public_stub, master_address_state, payment_xmss, payment_slaves[2]):
return None
return payment_xmss
@app.route('/api/<api_method_name>')
def api_proxy(api_method_name):
"""
    Proxy JSON-RPC requests to the gRPC server and convert the gRPC
    response back to JSON.
:param api_method_name:
:return:
"""
stub = qrl_pb2_grpc.PublicAPIStub(grpc.insecure_channel('{}:{}'.format(config.user.public_api_host,
config.user.public_api_port)))
public_api = qrl_pb2.DESCRIPTOR.services_by_name['PublicAPI']
api_method = public_api.FindMethodByName(api_method_name)
api_request = getattr(qrl_pb2, api_method.input_type.name)()
for arg in request.args:
if arg not in api_method.input_type.fields_by_name:
            raise Exception('Invalid args: %s' % arg)
data_type = type(getattr(api_request, arg))
if data_type == bool and request.args[arg].lower() == 'false':
continue
value = data_type(request.args.get(arg, type=data_type))
setattr(api_request, arg, value)
resp = getattr(stub, api_method_name)(api_request, timeout=10)
return Response(response=MessageToJson(resp, sort_keys=True), status=200, mimetype='application/json')
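# Example (method and field names come from the PublicAPI protobuf definition):
# GET /api/GetAddressState?address=... is mapped to a GetAddressStateReq
# message, dispatched via the stub, and the protobuf reply is returned as JSON.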
def get_mining_stub():
global mining_stub
return mining_stub
def get_public_stub():
global public_stub
return public_stub
@api.dispatcher.add_method
def getlastblockheader(height=0):
stub = get_mining_stub()
request = qrlmining_pb2.GetLastBlockHeaderReq(height=height)
grpc_response = stub.GetLastBlockHeader(request=request, timeout=10)
block_header = {
'difficulty': grpc_response.difficulty,
'height': grpc_response.height,
'timestamp': grpc_response.timestamp,
'reward': grpc_response.reward,
'hash': grpc_response.hash,
'depth': grpc_response.depth
}
resp = {
"block_header": block_header,
"status": "OK"
}
return resp
@api.dispatcher.add_method
def getblockheaderbyheight(height):
return getlastblockheader(height)
@api.dispatcher.add_method
def getblocktemplate(reserve_size, wallet_address):
stub = get_mining_stub()
request = qrlmining_pb2.GetBlockToMineReq(wallet_address=wallet_address.encode())
grpc_response = stub.GetBlockToMine(request=request, timeout=10)
resp = {
'blocktemplate_blob': grpc_response.blocktemplate_blob,
'difficulty': grpc_response.difficulty,
'height': grpc_response.height,
'reserved_offset': grpc_response.reserved_offset,
'seed_hash': grpc_response.seed_hash,
'status': 'OK'
}
return resp
@api.dispatcher.add_method
def submitblock(blob):
stub = get_mining_stub()
request = qrlmining_pb2.SubmitMinedBlockReq(blob=bytes(hstr2bin(blob)))
response = stub.SubmitMinedBlock(request=request, timeout=10)
if response.error:
raise Exception # Mining pool expected exception when block submission fails
return MessageToJson(response, sort_keys=True)
@api.dispatcher.add_method
def getblockminingcompatible(height):
stub = get_mining_stub()
request = qrlmining_pb2.GetBlockMiningCompatibleReq(height=height)
response = stub.GetBlockMiningCompatible(request=request, timeout=10)
return MessageToJson(response, sort_keys=True)
@api.dispatcher.add_method
def transfer(destinations, fee, mixin, unlock_time):
if len(destinations) > config.dev.transaction_multi_output_limit:
        raise Exception('Payment Failed: Number of destination addresses exceeds the allowed limit')
addrs_to = []
amounts = []
for tx in destinations:
addrs_to.append(bytes(hstr2bin(tx['address'][1:]))) # Skipping 'Q'
amounts.append(tx['amount'])
stub = get_public_stub()
xmss = get_unused_payment_xmss(stub)
if not xmss:
raise Exception('Payment Failed: No Unused Payment XMSS found')
tx = TransferTransaction.create(addrs_to=addrs_to,
amounts=amounts,
message_data=None,
fee=fee,
xmss_pk=xmss.pk,
master_addr=payment_slaves[0])
tx.sign(xmss)
response = stub.PushTransaction(request=qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata))
if response.error_code != 3:
        raise Exception('Transaction Submission Failed, Response Code: %s' % response.error_code)
response = {'tx_hash': bin2hstr(tx.txhash)}
return response
app.add_url_rule('/json_rpc', 'api', api.as_view(), methods=['POST'])
def parse_arguments():
parser = argparse.ArgumentParser(description='QRL node')
parser.add_argument('--qrldir', '-d', dest='qrl_dir', default=config.user.qrl_dir,
help="Use a different directory for node data/configuration")
parser.add_argument('--network-type', dest='network_type', choices=['mainnet', 'testnet'],
default='mainnet', required=False, help="Runs QRL Testnet Node")
return parser.parse_args()
def main():
args = parse_arguments()
qrl_dir_post_fix = ''
copy_files = []
if args.network_type == 'testnet':
qrl_dir_post_fix = '-testnet'
package_directory = os.path.dirname(os.path.abspath(__file__))
copy_files.append(os.path.join(package_directory, 'network/testnet/genesis.yml'))
copy_files.append(os.path.join(package_directory, 'network/testnet/config.yml'))
config.user.qrl_dir = os.path.expanduser(os.path.normpath(args.qrl_dir) + qrl_dir_post_fix)
config.create_path(config.user.qrl_dir, copy_files)
config.user.load_yaml(config.user.config_path)
global payment_slaves, payment_xmss
global mining_stub, public_stub
mining_stub = qrlmining_pb2_grpc.MiningAPIStub(grpc.insecure_channel('{0}:{1}'.format(config.user.mining_api_host,
config.user.mining_api_port)))
public_stub = qrl_pb2_grpc.PublicAPIStub(grpc.insecure_channel('{0}:{1}'.format(config.user.public_api_host,
config.user.public_api_port)))
payment_xmss = None
payment_slaves = read_slaves(config.user.mining_pool_payment_wallet_path)
app.run(host=config.user.grpc_proxy_host, port=config.user.grpc_proxy_port)
if __name__ == '__main__':
main()
|
cyyber/QRL
|
src/qrl/grpcProxy.py
|
Python
|
mit
| 9,400 | 0.002766 |
import statsmodels.api
import statsmodels.genmod.families.family
import numpy as np
from sklearn.metrics import r2_score
class GLM(object):
'''
A scikit-learn style wrapper for statsmodels.api.GLM. The purpose of this class is to
make generalized linear models compatible with scikit-learn's Pipeline objects.
family : instance of subclass of statsmodels.genmod.families.family.Family
The family argument determines the distribution family to use for GLM fitting.
xlabels : iterable of strings, optional (empty by default)
The xlabels argument can be used to assign names to data columns. This argument is not
generally needed, as names can be captured automatically from most standard data
structures. If included, must have length n, where n is the number of features. Note
that column order is used to compute term values and make predictions, not column names.
'''
def __init__(self, family, add_constant=True):
self.family = family
self.add_constant = add_constant
def _scrub_x(self, X, offset, exposure, **kwargs):
'''
Sanitize input predictors and extract column names if appropriate.
'''
no_labels = False
if 'xlabels' not in kwargs and 'xlabels' not in self.__dict__:
#Try to get xlabels from input data (for example, if X is a pandas DataFrame)
try:
self.xlabels = list(X.columns)
except AttributeError:
try:
self.xlabels = list(X.design_info.column_names)
except AttributeError:
try:
self.xlabels = list(X.dtype.names)
except TypeError:
no_labels = True
elif 'xlabels' not in self.__dict__:
self.xlabels = kwargs['xlabels']
#Convert to internally used data type
X = np.asarray(X,dtype=np.float64)
m,n = X.shape
if offset is not None:
offset = np.asarray(offset,dtype=np.float64)
offset = offset.reshape(offset.shape[0])
if exposure is not None:
exposure = np.asarray(exposure,dtype=np.float64)
exposure = exposure.reshape(exposure.shape[0])
#Make up labels if none were found
if no_labels:
self.xlabels = ['x'+str(i) for i in range(n)]
return X, offset, exposure
def _scrub(self, X, y, offset, exposure, **kwargs):
'''
Sanitize input data.
'''
#Check whether X is the output of patsy.dmatrices
if y is None and type(X) is tuple:
y, X = X
#Handle X separately
X, offset, exposure = self._scrub_x(X, offset, exposure, **kwargs)
#Convert y to internally used data type
y = np.asarray(y,dtype=np.float64)
y = y.reshape(y.shape[0])
#Make sure dimensions match
if y.shape[0] != X.shape[0]:
raise ValueError('X and y do not have compatible dimensions.')
return X, y, offset, exposure
def fit(self, X, y = None, offset = None, exposure = None, xlabels = None):
'''
Fit a GLM model to the input data X and y.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
The training predictors. The X parameter can be a numpy array, a pandas DataFrame, a patsy
DesignMatrix, or a tuple of patsy DesignMatrix objects as output by patsy.dmatrices.
y : array-like, optional (default=None), shape = [m] where m is the number of samples
The training response. The y parameter can be a numpy array, a pandas DataFrame with one
column, a Patsy DesignMatrix, or can be left as None (default) if X was the output of a
call to patsy.dmatrices (in which case, X contains the response).
xlabels : iterable of strings, optional (default=None)
Convenient way to set the xlabels parameter while calling fit. Ignored if None (default).
See the GLM class for an explanation of the xlabels parameter.
'''
#Format and label the data
if xlabels is not None:
self.set_params(xlabels=xlabels)
X, y, offset, exposure = self._scrub(X,y,offset,exposure,**self.__dict__)
#Add a constant column
if self.add_constant:
X = statsmodels.api.add_constant(X, prepend=True)
#Do the actual work
model = statsmodels.api.GLM(y, X, self.family, offset=offset, exposure=exposure)
result = model.fit()
self.coef_ = result.params
return self
def predict(self, X, offset = None, exposure = None):
'''
Predict the response based on the input data X.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
The training predictors. The X parameter can be a numpy array, a pandas DataFrame, or a
patsy DesignMatrix.
'''
#Format the data
X, offset, exposure = self._scrub_x(X, offset, exposure)
#Linear transformation
eta = self.transform(X, offset, exposure)
#Nonlinear transformation
y_hat = self.family.fitted(eta)
return y_hat
def transform(self, X, offset = None, exposure = None):
'''
Perform a linear transformation of X.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
The training predictors. The X parameter can be a numpy array, a pandas DataFrame, or a
patsy DesignMatrix.
'''
#Format the data
X, offset, exposure = self._scrub_x(X, offset, exposure)
#Add a constant column
if self.add_constant:
X = statsmodels.api.add_constant(X, prepend=True)
#Compute linear combination
eta = np.dot(X,self.coef_)
#Apply offset and exposure
if offset is not None:
eta += offset
if exposure is not None:
eta += np.log(exposure)
return eta
def score(self, X, y = None, offset = None, exposure = None, xlabels = None):
X, y, offset, exposure = self._scrub(X,y,offset,exposure,**self.__dict__)
y_pred = self.predict(X, offset=offset, exposure=exposure)
return r2_score(y, y_pred)
def get_params(self, deep = False):
return {}
def __repr__(self):
return self.__class__.__name__ + '()'
def __str__(self):
return self.__class__.__name__ + '()'
class GLMFamily(GLM):
family = NotImplemented
def __init__(self, add_constant=True):
super(GLMFamily,self).__init__(family=self.__class__.family(), add_constant=add_constant)
class BinomialRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Binomial
class GammaRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Gamma
class GaussianRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Gaussian
class InverseGaussianRegressor(GLMFamily):
family = statsmodels.genmod.families.family.InverseGaussian
class NegativeBinomialRegressor(GLMFamily):
family = statsmodels.genmod.families.family.NegativeBinomial
class PoissonRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Poisson
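# Usage sketch (X and y are illustrative numpy arrays; any family subclass
# above works the same way):
#     model = PoissonRegressor()
#     model.fit(X, y)            # X: shape (m, n); y: count-valued, shape (m,)
#     y_hat = model.predict(X)
#     r2 = model.score(X, y)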
# def fit(self, X, y = None, exposure = None, xlabels = None):
# '''
# Fit a GLM model to the input data X and y.
#
#
# Parameters
# ----------
# X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
# The training predictors. The X parameter can be a numpy array, a pandas DataFrame, a patsy
# DesignMatrix, or a tuple of patsy DesignMatrix objects as output by patsy.dmatrices.
#
#
# y : array-like, optional (default=None), shape = [m] where m is the number of samples
# The training response. The y parameter can be a numpy array, a pandas DataFrame with one
# column, a Patsy DesignMatrix, or can be left as None (default) if X was the output of a
# call to patsy.dmatrices (in which case, X contains the response).
#
#
# xlabels : iterable of strings, optional (default=None)
# Convenient way to set the xlabels parameter while calling fit. Ignored if None (default).
# See the GLM class for an explanation of the xlabels parameter.
#
# '''
# #Format and label the data
# if xlabels is not None:
# self.set_params(xlabels=xlabels)
# X, y = self._scrub(X,y,**self.__dict__)
# if exposure is not None:
# exposure = np.asarray(exposure)
# exposure = exposure.reshape(exposure.shape[0])
# if exposure.shape != y.shape:
# raise ValueError('Shape of exposure does not match shape of y.')
#
# #Add a constant column
# if self.add_constant:
# X = statsmodels.api.add_constant(X, prepend=True)
#
# #Do the actual work
# if exposure is None:
# model = statsmodels.api.GLM(y, X, self.family)
# else:
# model = statsmodels.api.GLM(y, X, self.family, exposure=exposure)
# result = model.fit()
# self.coef_ = result.params
#
# return self
|
jcrudy/glm-sklearn
|
glmsklearn/glm.py
|
Python
|
bsd-3-clause
| 9,972 | 0.017048 |
# Test that displays the nodes connected to the network
from base_datos import db
import time
from datetime import timedelta, datetime,date
dir_base="/media/CasaL/st/Documentos/proyectoXbee/WSN_XBee/basesTest/xbee_db02.db"
d=timedelta(minutes=-10)
#now=datetime.now()
#calculo=now+d
#print(calculo.strftime("%H:%M:%S"))
#hoy=datetime.now()
#miFecha=date(hoy.year,7,13)
#miHoraFecha=datetime(2017,7,13,20,13)
#print(miFecha.strftime("%Y/%m/%d"))
#print(miHoraFecha)
conFechaHora='''SELECT fecha_hora FROM datos ORDER BY fecha_hora DESC LIMIT 1'''
base=db(dir_base)
ultimoRegistro=base.consultaSimp(conFechaHora)[0][0]
aux1=ultimoRegistro.split(" ")
horaReg=aux1[0].split(":")
fechaReg=aux1[1].split("/")
aux_ini=datetime(int(fechaReg[2]),int(fechaReg[1]),int(fechaReg[0]),int(horaReg[0]),int(horaReg[1]),int(horaReg[2]))
aux_final=aux_ini+d
hora_inicio=aux_ini.strftime("%H:%M:%S %d/%m/%Y")
hora_final=aux_final.strftime("%H:%M:%S %d/%m/%Y")
print (hora_final)
#print("Hora inicio: {} Hora final: {}".format(ref_ini,ref_ini+d))
respuesta=base.consultaDat('''SELECT nodo_id FROM datos WHERE fecha_hora
BETWEEN ? and ?''',(hora_final,hora_inicio))
lista_nodos=[]
for e in respuesta:
if e[0] not in lista_nodos:
lista_nodos.append(e[0])
nodos_conn=len(lista_nodos)
print("Existen {} nodos conectados a la red".format(nodos_conn))
|
seertha/WSN_XBee
|
Software/RPI/Display_lcd/nodos_conectados.py
|
Python
|
mit
| 1,363 | 0.02788 |
#!/bin/python
# -*- coding: utf-8 -*-
# ####################################################################
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, fpokorny@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
from threading import Lock
from service import Service
class StorageService(Service):
@classmethod
def on_startup(cls, config, system_json):
# TODO: config is not accessible when local
cls._system = None # We don't want to use system
cls._config = config
cls._lock = Lock()
cls.signal_startup(config.get(cls.get_service_name()))
if __name__ == "__main__":
sys.exit(1)
|
gofed/gofed-ng
|
common/service/storageService.py
|
Python
|
gpl-3.0
| 1,413 | 0.001415 |
# -*- coding: iso-8859-1 -*-
"""
RPython implementation of MD5 checksums.
See also the pure Python implementation in lib_pypy/md5.py, which might
or might not be faster than this one on top of CPython.
This is an implementation of the MD5 hash function,
as specified by RFC 1321. It was implemented using Bruce Schneier's
excellent book "Applied Cryptography", 2nd ed., 1996.
This module tries to follow the API of the CPython md5 module.
Long history:
By Dinu C. Gherman. BEWARE: this comes with no guarantee whatsoever
about fitness and/or other properties! Specifically, do not use this
in any production code! License is Python License! (Re-licensing
under the MIT would be great, though)
Special thanks to Aurelian Coman who fixed some nasty bugs!
Modernised by J. Hallén and L. Creighton for Pypy.
Converted to RPython by arigo.
"""
from rpython.rlib.rarithmetic import r_uint, r_ulonglong
if r_uint.BITS == 32:
def _rotateLeft(x, n):
"Rotate x (32 bit) left n bits circularly."
return (x << n) | (x >> (32-n))
else:
def _rotateLeft_emulator(x, n):
x &= 0xFFFFFFFF
return (x << n) | (x >> (32-n))
# ----- start of custom code, think about something better... -----
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
eci = ExternalCompilationInfo(post_include_bits=["""
static unsigned long pypy__rotateLeft(unsigned long x, long n) {
unsigned int x1 = x; /* arithmetic directly on int */
int n1 = n;
return (x1 << n1) | (x1 >> (32-n1));
}
"""])
_rotateLeft = rffi.llexternal(
"pypy__rotateLeft", [lltype.Unsigned, lltype.Signed], lltype.Unsigned,
_callable=_rotateLeft_emulator, compilation_info=eci,
_nowrapper=True, elidable_function=True)
# we expect the function _rotateLeft to be actually inlined
def _state2string(a, b, c, d):
return ''.join([
chr(a&0xFF), chr((a>>8)&0xFF), chr((a>>16)&0xFF), chr((a>>24)&0xFF),
chr(b&0xFF), chr((b>>8)&0xFF), chr((b>>16)&0xFF), chr((b>>24)&0xFF),
chr(c&0xFF), chr((c>>8)&0xFF), chr((c>>16)&0xFF), chr((c>>24)&0xFF),
chr(d&0xFF), chr((d>>8)&0xFF), chr((d>>16)&0xFF), chr((d>>24)&0xFF),
])
def _state2hexstring(a, b, c, d):
hx = '0123456789abcdef'
return ''.join([
hx[(a>>4)&0xF], hx[a&0xF], hx[(a>>12)&0xF], hx[(a>>8)&0xF],
hx[(a>>20)&0xF], hx[(a>>16)&0xF], hx[(a>>28)&0xF], hx[(a>>24)&0xF],
hx[(b>>4)&0xF], hx[b&0xF], hx[(b>>12)&0xF], hx[(b>>8)&0xF],
hx[(b>>20)&0xF], hx[(b>>16)&0xF], hx[(b>>28)&0xF], hx[(b>>24)&0xF],
hx[(c>>4)&0xF], hx[c&0xF], hx[(c>>12)&0xF], hx[(c>>8)&0xF],
hx[(c>>20)&0xF], hx[(c>>16)&0xF], hx[(c>>28)&0xF], hx[(c>>24)&0xF],
hx[(d>>4)&0xF], hx[d&0xF], hx[(d>>12)&0xF], hx[(d>>8)&0xF],
hx[(d>>20)&0xF], hx[(d>>16)&0xF], hx[(d>>28)&0xF], hx[(d>>24)&0xF],
])
def _string2uintlist(s, start, count, result):
"""Build a list of count r_uint's by unpacking the string
s[start:start+4*count] in little-endian order.
"""
for i in range(count):
p = start + i * 4
x = r_uint(ord(s[p]))
x |= r_uint(ord(s[p+1])) << 8
x |= r_uint(ord(s[p+2])) << 16
x |= r_uint(ord(s[p+3])) << 24
result[i] = x
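# Little-endian example: _string2uintlist('\x78\x56\x34\x12', 0, 1, buf)
# stores r_uint(0x12345678) in buf[0].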
# ======================================================================
# The real MD5 meat...
#
# Implemented after "Applied Cryptography", 2nd ed., 1996,
# pp. 436-441 by Bruce Schneier.
# ======================================================================
# F, G, H and I are basic MD5 functions.
def F(x, y, z):
return (x & y) | ((~x) & z)
def G(x, y, z):
return (x & z) | (y & (~z))
def H(x, y, z):
return x ^ y ^ z
def I(x, y, z):
return y ^ (x | (~z))
def XX(func, a, b, c, d, x, s, ac):
"""Wrapper for call distribution to functions F, G, H and I.
This replaces functions FF, GG, HH and II from "Appl. Crypto."
Rotation is separate from addition to prevent recomputation
(now summed-up in one function).
"""
res = a + func(b, c, d)
res = res + x
res = res + ac
res = _rotateLeft(res, s)
res = res + b
return res
XX._annspecialcase_ = 'specialize:arg(0)' # performance hint
class RMD5(object):
"""RPython-level MD5 object.
"""
def __init__(self, initialdata=''):
self._init()
self.update(initialdata)
def _init(self):
"""Set this object to an initial empty state.
"""
self.count = r_ulonglong(0) # total number of bytes
self.input = "" # pending unprocessed data, < 64 bytes
self.uintbuffer = [r_uint(0)] * 16
# Load magic initialization constants.
self.A = r_uint(0x67452301L)
self.B = r_uint(0xefcdab89L)
self.C = r_uint(0x98badcfeL)
self.D = r_uint(0x10325476L)
def _transform(self, inp):
"""Basic MD5 step transforming the digest based on the input.
Note that if the Mysterious Constants are arranged backwards
in little-endian order and decrypted with the DES they produce
OCCULT MESSAGES!
"""
# 'inp' is a list of 16 r_uint values.
a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D
# Round 1.
S11, S12, S13, S14 = 7, 12, 17, 22
a = XX(F, a, b, c, d, inp[ 0], S11, r_uint(0xD76AA478L)) # 1
d = XX(F, d, a, b, c, inp[ 1], S12, r_uint(0xE8C7B756L)) # 2
c = XX(F, c, d, a, b, inp[ 2], S13, r_uint(0x242070DBL)) # 3
b = XX(F, b, c, d, a, inp[ 3], S14, r_uint(0xC1BDCEEEL)) # 4
a = XX(F, a, b, c, d, inp[ 4], S11, r_uint(0xF57C0FAFL)) # 5
d = XX(F, d, a, b, c, inp[ 5], S12, r_uint(0x4787C62AL)) # 6
c = XX(F, c, d, a, b, inp[ 6], S13, r_uint(0xA8304613L)) # 7
b = XX(F, b, c, d, a, inp[ 7], S14, r_uint(0xFD469501L)) # 8
a = XX(F, a, b, c, d, inp[ 8], S11, r_uint(0x698098D8L)) # 9
d = XX(F, d, a, b, c, inp[ 9], S12, r_uint(0x8B44F7AFL)) # 10
c = XX(F, c, d, a, b, inp[10], S13, r_uint(0xFFFF5BB1L)) # 11
b = XX(F, b, c, d, a, inp[11], S14, r_uint(0x895CD7BEL)) # 12
a = XX(F, a, b, c, d, inp[12], S11, r_uint(0x6B901122L)) # 13
d = XX(F, d, a, b, c, inp[13], S12, r_uint(0xFD987193L)) # 14
c = XX(F, c, d, a, b, inp[14], S13, r_uint(0xA679438EL)) # 15
b = XX(F, b, c, d, a, inp[15], S14, r_uint(0x49B40821L)) # 16
# Round 2.
S21, S22, S23, S24 = 5, 9, 14, 20
a = XX(G, a, b, c, d, inp[ 1], S21, r_uint(0xF61E2562L)) # 17
d = XX(G, d, a, b, c, inp[ 6], S22, r_uint(0xC040B340L)) # 18
c = XX(G, c, d, a, b, inp[11], S23, r_uint(0x265E5A51L)) # 19
b = XX(G, b, c, d, a, inp[ 0], S24, r_uint(0xE9B6C7AAL)) # 20
a = XX(G, a, b, c, d, inp[ 5], S21, r_uint(0xD62F105DL)) # 21
d = XX(G, d, a, b, c, inp[10], S22, r_uint(0x02441453L)) # 22
c = XX(G, c, d, a, b, inp[15], S23, r_uint(0xD8A1E681L)) # 23
b = XX(G, b, c, d, a, inp[ 4], S24, r_uint(0xE7D3FBC8L)) # 24
a = XX(G, a, b, c, d, inp[ 9], S21, r_uint(0x21E1CDE6L)) # 25
d = XX(G, d, a, b, c, inp[14], S22, r_uint(0xC33707D6L)) # 26
c = XX(G, c, d, a, b, inp[ 3], S23, r_uint(0xF4D50D87L)) # 27
b = XX(G, b, c, d, a, inp[ 8], S24, r_uint(0x455A14EDL)) # 28
a = XX(G, a, b, c, d, inp[13], S21, r_uint(0xA9E3E905L)) # 29
d = XX(G, d, a, b, c, inp[ 2], S22, r_uint(0xFCEFA3F8L)) # 30
c = XX(G, c, d, a, b, inp[ 7], S23, r_uint(0x676F02D9L)) # 31
b = XX(G, b, c, d, a, inp[12], S24, r_uint(0x8D2A4C8AL)) # 32
# Round 3.
S31, S32, S33, S34 = 4, 11, 16, 23
a = XX(H, a, b, c, d, inp[ 5], S31, r_uint(0xFFFA3942L)) # 33
d = XX(H, d, a, b, c, inp[ 8], S32, r_uint(0x8771F681L)) # 34
c = XX(H, c, d, a, b, inp[11], S33, r_uint(0x6D9D6122L)) # 35
b = XX(H, b, c, d, a, inp[14], S34, r_uint(0xFDE5380CL)) # 36
a = XX(H, a, b, c, d, inp[ 1], S31, r_uint(0xA4BEEA44L)) # 37
d = XX(H, d, a, b, c, inp[ 4], S32, r_uint(0x4BDECFA9L)) # 38
c = XX(H, c, d, a, b, inp[ 7], S33, r_uint(0xF6BB4B60L)) # 39
b = XX(H, b, c, d, a, inp[10], S34, r_uint(0xBEBFBC70L)) # 40
a = XX(H, a, b, c, d, inp[13], S31, r_uint(0x289B7EC6L)) # 41
d = XX(H, d, a, b, c, inp[ 0], S32, r_uint(0xEAA127FAL)) # 42
c = XX(H, c, d, a, b, inp[ 3], S33, r_uint(0xD4EF3085L)) # 43
b = XX(H, b, c, d, a, inp[ 6], S34, r_uint(0x04881D05L)) # 44
a = XX(H, a, b, c, d, inp[ 9], S31, r_uint(0xD9D4D039L)) # 45
d = XX(H, d, a, b, c, inp[12], S32, r_uint(0xE6DB99E5L)) # 46
c = XX(H, c, d, a, b, inp[15], S33, r_uint(0x1FA27CF8L)) # 47
b = XX(H, b, c, d, a, inp[ 2], S34, r_uint(0xC4AC5665L)) # 48
# Round 4.
S41, S42, S43, S44 = 6, 10, 15, 21
a = XX(I, a, b, c, d, inp[ 0], S41, r_uint(0xF4292244L)) # 49
d = XX(I, d, a, b, c, inp[ 7], S42, r_uint(0x432AFF97L)) # 50
c = XX(I, c, d, a, b, inp[14], S43, r_uint(0xAB9423A7L)) # 51
b = XX(I, b, c, d, a, inp[ 5], S44, r_uint(0xFC93A039L)) # 52
a = XX(I, a, b, c, d, inp[12], S41, r_uint(0x655B59C3L)) # 53
d = XX(I, d, a, b, c, inp[ 3], S42, r_uint(0x8F0CCC92L)) # 54
c = XX(I, c, d, a, b, inp[10], S43, r_uint(0xFFEFF47DL)) # 55
b = XX(I, b, c, d, a, inp[ 1], S44, r_uint(0x85845DD1L)) # 56
a = XX(I, a, b, c, d, inp[ 8], S41, r_uint(0x6FA87E4FL)) # 57
d = XX(I, d, a, b, c, inp[15], S42, r_uint(0xFE2CE6E0L)) # 58
c = XX(I, c, d, a, b, inp[ 6], S43, r_uint(0xA3014314L)) # 59
b = XX(I, b, c, d, a, inp[13], S44, r_uint(0x4E0811A1L)) # 60
a = XX(I, a, b, c, d, inp[ 4], S41, r_uint(0xF7537E82L)) # 61
d = XX(I, d, a, b, c, inp[11], S42, r_uint(0xBD3AF235L)) # 62
c = XX(I, c, d, a, b, inp[ 2], S43, r_uint(0x2AD7D2BBL)) # 63
b = XX(I, b, c, d, a, inp[ 9], S44, r_uint(0xEB86D391L)) # 64
A += a
B += b
C += c
D += d
self.A, self.B, self.C, self.D = A, B, C, D
def _finalize(self, digestfunc):
"""Logic to add the final padding and extract the digest.
"""
# Save the state before adding the padding
count = self.count
input = self.input
A = self.A
B = self.B
C = self.C
D = self.D
index = len(input)
if index < 56:
padLen = 56 - index
else:
padLen = 120 - index
if padLen:
self.update('\200' + '\000' * (padLen-1))
# Append length (before padding).
assert len(self.input) == 56
W = self.uintbuffer
_string2uintlist(self.input, 0, 14, W)
length_in_bits = count << 3
W[14] = r_uint(length_in_bits)
W[15] = r_uint(length_in_bits >> 32)
self._transform(W)
# Store state in digest.
digest = digestfunc(self.A, self.B, self.C, self.D)
# Restore the saved state in case this instance is still used
self.count = count
self.input = input
self.A = A
self.B = B
self.C = C
self.D = D
return digest
# Down from here all methods follow the Python Standard Library
# API of the md5 module.
def update(self, inBuf):
"""Add to the current message.
Update the md5 object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments, i.e. m.update(a); m.update(b) is equivalent
to m.update(a+b).
The hash is immediately calculated for all full blocks. The final
calculation is made in digest(). This allows us to keep an
intermediate value for the hash, so that we only need to make
minimal recalculation if we call update() to add moredata to
the hashed string.
"""
leninBuf = len(inBuf)
self.count += leninBuf
index = len(self.input)
partLen = 64 - index
assert partLen > 0
if leninBuf >= partLen:
W = self.uintbuffer
self.input = self.input + inBuf[:partLen]
_string2uintlist(self.input, 0, 16, W)
self._transform(W)
i = partLen
            while i + 64 <= leninBuf:
                _string2uintlist(inBuf, i, 16, W)
                self._transform(W)
                i = i + 64
            # the loop has no break, so the remainder is always saved here
            self.input = inBuf[i:leninBuf]
else:
self.input = self.input + inBuf
def digest(self):
"""Terminate the message-digest computation and return digest.
Return the digest of the strings passed to the update()
method so far. This is a 16-byte string which may contain
non-ASCII characters, including null bytes.
"""
return self._finalize(_state2string)
def hexdigest(self):
"""Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments.
"""
return self._finalize(_state2hexstring)
def copy(self):
"""Return a clone object.
Return a copy ('clone') of the md5 object. This can be used
to efficiently compute the digests of strings that share
a common initial substring.
"""
clone = RMD5()
clone._copyfrom(self)
return clone
def _copyfrom(self, other):
"""Copy all state from 'other' into 'self'.
"""
self.count = other.count
self.input = other.input
self.A = other.A
self.B = other.B
self.C = other.C
self.D = other.D
# synonyms to build new RMD5 objects, for compatibility with the
# CPython md5 module interface.
md5 = RMD5
new = RMD5
digest_size = 16
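# Doctest-style example; the digest matches the "abc" test vector from the
# RFC 1321 test suite:
#     >>> RMD5("abc").hexdigest()
#     '900150983cd24fb0d6963f7d28e17f72'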
|
oblique-labs/pyVM
|
rpython/rlib/rmd5.py
|
Python
|
mit
| 14,169 | 0.019409 |
import sqlite3
import directORM
class Proveedor:
def __init__(self):
self.idProveedor = -1
self.nombre = ''
self.email = ''
self.tlf_fijo = ''
self.tlf_movil = ''
self.tlf_fijo2 = ''
self.tlf_movil2 = ''
self.banco = ''
self.cuenta_bancaria = ''
self.direccion = ''
self.foto_logo = ''
class TbProveedores:
INSERT = '''
insert into Proveedores
( nombre, email, tlf_fijo, tlf_movil, tlf_fijo2, tlf_movil2, banco, cuenta_bancaria, direccion, foto_logo)
values (?,?,?,?,?,?,?,?,?,?)
'''
DELETE = 'delete from Proveedores where idProveedor = ?'
SELECT = 'select * from Proveedores'
UPDATE = '''
update Proveedores set
nombre = ?,
email = ?,
tlf_fijo = ?,
tlf_movil = ?,
tlf_fijo2 = ?,
tlf_movil2 = ?,
banco = ?,
cuenta_bancaria = ?,
direccion = ?,
foto_logo = ?
where idProveedor = ?
'''
def __init__(self):
self.gestorDB = directORM.Db()
    def remove(self, proveedor):
        sql = self.DELETE
        # note the trailing comma: a one-element parameter tuple is required
        self.gestorDB.ejecutarSQL(sql, (proveedor.idProveedor,))
def get_proveedor(self, idProveedor=None):
sql = self.SELECT + " where idProveedor=" + str(idProveedor) +";"
fila = self.gestorDB.consultaUnicaSQL(sql)
if fila is None:
return None
else:
o = self.mapear_objeto(fila)
return o
def save(self, proveedor=None):
if proveedor is not None:
if self.get_proveedor(proveedor.idProveedor) is None:
sql = self.INSERT
self.gestorDB.ejecutarSQL(sql, (
proveedor.nombre,
proveedor.email,
proveedor.tlf_fijo,
proveedor.tlf_movil,
proveedor.tlf_fijo2,
proveedor.tlf_movil2,
proveedor.banco,
proveedor.cuenta_bancaria,
proveedor.direccion,
proveedor.foto_logo))
else:
sql = self.UPDATE
self.gestorDB.ejecutarSQL(sql, (
proveedor.nombre,
proveedor.email,
proveedor.tlf_fijo,
proveedor.tlf_movil,
proveedor.tlf_fijo2,
proveedor.tlf_movil2,
proveedor.banco,
proveedor.cuenta_bancaria,
proveedor.direccion,
proveedor.foto_logo,
proveedor.idProveedor))
def mapear_objeto(self, fila=None):
if fila is None:
return None
else:
o = Proveedor()
o.idProveedor = fila['idProveedor']
o.nombre = fila['nombre']
o.email = fila['email']
o.tlf_fijo = fila['tlf_fijo']
o.tlf_movil = fila['tlf_movil']
o.tlf_fijo2 = fila['tlf_fijo2']
o.tlf_movil2 = fila['tlf_movil2']
o.banco = fila['banco']
o.cuenta_bancaria = fila['cuenta_bancaria']
o.direccion = fila['direccion']
o.foto_logo = fila['foto_logo']
return o
def get_proveedores(self, filtro=None):
if filtro is None:
sql = self.SELECT
else:
sql = self.SELECT + " where " + filtro
filas = self.gestorDB.consultaSQL(sql)
objetos = list()
for fila in filas:
o = self.mapear_objeto(fila)
objetos.append(o)
return objetos
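# Usage sketch (added for illustration; assumes directORM.Db() is already
# configured with a Proveedores table matching the columns above; the sample
# data is hypothetical):
if __name__ == '__main__':
    tabla = TbProveedores()
    p = Proveedor()
    p.nombre = 'ACME S.L.'
    p.email = 'ventas@acme.example'
    tabla.save(p)                    # inserts, because idProveedor == -1 is not in the table
    for prov in tabla.get_proveedores("nombre like 'ACME%'"):
        print(prov.nombre)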
|
arkadoel/directORM
|
python/salida/directORM/forProveedores.py
|
Python
|
gpl-2.0
| 3,696 | 0.002976 |
def hsd_inc_beh(rxd, txd):
'''|
| Specify the behavior, describe data processing; there is no notion
    | of a clock. Access the in/out interfaces via get() and append()
| methods. The "hsd_inc_beh" function does not return values.
|________'''
if rxd.hasPacket():
data = rxd.get() + 1
txd.append(data)
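# Usage sketch (added for illustration; in pihdf the framework itself invokes
# this function when channel state changes, so the call below only shows the
# intended data flow, with hypothetical rxd/txd channel objects):
#
#   hsd_inc_beh(rxd, txd)   # a packet of value 5 on rxd yields 6 on txd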
|
hnikolov/pihdf
|
examples/hsd_inc/src/hsd_inc_beh.py
|
Python
|
mit
| 340 | 0.002941 |
import os
import subprocess
import sys
deltext=""
if sys.platform.startswith("linux") :
deltext="rm"
copytext="cp"
if sys.platform.startswith("darwin") :
deltext="rm"
copytext="cp"
if sys.platform.startswith("win") :
deltext="del"
copytext="copy"
def run_in_shell(cmd):
subprocess.check_call(cmd, shell=True)
def replace(namefile,oldtext,newtext):
f = open(namefile,'r')
filedata = f.read()
f.close()
newdata = filedata.replace(oldtext,newtext)
f = open(namefile,'w')
f.write(newdata)
f.close()
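# Example (added for illustration, mirroring the calls below): after
# run_in_shell(copytext+" config_big.h config_big_256_56.h"),
# replace("config_big_256_56.h", "XXX", "256_56") rewrites every XXX
# placeholder in the copied template file in place.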
def rsaset(tb,tff,nb,base,ml) :
bd=tb+"_"+base
fnameh="config_big_"+bd+".h"
run_in_shell(copytext+" config_big.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"@NB@",nb)
replace(fnameh,"@BASE@",base)
fnameh="config_ff_"+tff+".h"
run_in_shell(copytext+" config_ff.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"WWW",tff)
replace(fnameh,"@ML@",ml)
fnamec="big_"+bd+".c"
fnameh="big_"+bd+".h"
run_in_shell(copytext+" big.c "+fnamec)
run_in_shell(copytext+" big.h "+fnameh)
replace(fnamec,"XXX",bd)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="ff_"+tff+".c"
fnameh="ff_"+tff+".h"
run_in_shell(copytext+" ff.c "+fnamec)
run_in_shell(copytext+" ff.h "+fnameh)
replace(fnamec,"WWW",tff)
replace(fnamec,"XXX",bd)
replace(fnameh,"WWW",tff)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="rsa_"+tff+".c"
fnameh="rsa_"+tff+".h"
run_in_shell(copytext+" rsa.c "+fnamec)
run_in_shell(copytext+" rsa.h "+fnameh)
replace(fnamec,"WWW",tff)
replace(fnamec,"XXX",bd)
replace(fnameh,"WWW",tff)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
def curveset(tb,tf,tc,nb,base,nbt,m8,mt,ct,pf,stw,sx,ab,cs) :
bd=tb+"_"+base
fnameh="config_big_"+bd+".h"
run_in_shell(copytext+" config_big.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"@NB@",nb)
replace(fnameh,"@BASE@",base)
fnameh="config_field_"+tf+".h"
run_in_shell(copytext+" config_field.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"YYY",tf)
replace(fnameh,"@NBT@",nbt)
replace(fnameh,"@M8@",m8)
replace(fnameh,"@MT@",mt)
ib=int(base)
inb=int(nb)
inbt=int(nbt)
sh=ib*(1+((8*inb-1)//ib))-inbt
if sh > 30 :
sh=30
replace(fnameh,"@SH@",str(sh))
fnameh="config_curve_"+tc+".h"
run_in_shell(copytext+" config_curve.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"YYY",tf)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"@CT@",ct)
replace(fnameh,"@PF@",pf)
replace(fnameh,"@ST@",stw)
replace(fnameh,"@SX@",sx)
replace(fnameh,"@CS@",cs)
replace(fnameh,"@AB@",ab)
fnamec="big_"+bd+".c"
fnameh="big_"+bd+".h"
run_in_shell(copytext+" big.c "+fnamec)
run_in_shell(copytext+" big.h "+fnameh)
replace(fnamec,"XXX",bd)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="fp_"+tf+".c"
fnameh="fp_"+tf+".h"
run_in_shell(copytext+" fp.c "+fnamec)
run_in_shell(copytext+" fp.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
run_in_shell("gcc -O3 -std=c99 -c rom_field_"+tf+".c")
fnamec="ecp_"+tc+".c"
fnameh="ecp_"+tc+".h"
run_in_shell(copytext+" ecp.c "+fnamec)
run_in_shell(copytext+" ecp.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="ecdh_"+tc+".c"
fnameh="ecdh_"+tc+".h"
run_in_shell(copytext+" ecdh.c "+fnamec)
run_in_shell(copytext+" ecdh.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
run_in_shell("gcc -O3 -std=c99 -c rom_curve_"+tc+".c")
if pf != "NOT" :
fnamec="fp2_"+tf+".c"
fnameh="fp2_"+tf+".h"
run_in_shell(copytext+" fp2.c "+fnamec)
run_in_shell(copytext+" fp2.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="fp4_"+tf+".c"
fnameh="fp4_"+tf+".h"
run_in_shell(copytext+" fp4.c "+fnamec)
run_in_shell(copytext+" fp4.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
if cs == "128" :
fnamec="fp12_"+tf+".c"
fnameh="fp12_"+tf+".h"
run_in_shell(copytext+" fp12.c "+fnamec)
run_in_shell(copytext+" fp12.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="ecp2_"+tc+".c"
fnameh="ecp2_"+tc+".h"
run_in_shell(copytext+" ecp2.c "+fnamec)
run_in_shell(copytext+" ecp2.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="pair_"+tc+".c"
fnameh="pair_"+tc+".h"
run_in_shell(copytext+" pair.c "+fnamec)
run_in_shell(copytext+" pair.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="mpin_"+tc+".c"
fnameh="mpin_"+tc+".h"
run_in_shell(copytext+" mpin.c "+fnamec)
run_in_shell(copytext+" mpin.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="bls_"+tc+".c"
fnameh="bls_"+tc+".h"
run_in_shell(copytext+" bls.c "+fnamec)
run_in_shell(copytext+" bls.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
if cs == "192" :
fnamec="fp8_"+tf+".c"
fnameh="fp8_"+tf+".h"
run_in_shell(copytext+" fp8.c "+fnamec)
run_in_shell(copytext+" fp8.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="fp24_"+tf+".c"
fnameh="fp24_"+tf+".h"
run_in_shell(copytext+" fp24.c "+fnamec)
run_in_shell(copytext+" fp24.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="ecp4_"+tc+".c"
fnameh="ecp4_"+tc+".h"
run_in_shell(copytext+" ecp4.c "+fnamec)
run_in_shell(copytext+" ecp4.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="pair192_"+tc+".c"
fnameh="pair192_"+tc+".h"
run_in_shell(copytext+" pair192.c "+fnamec)
run_in_shell(copytext+" pair192.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="mpin192_"+tc+".c"
fnameh="mpin192_"+tc+".h"
run_in_shell(copytext+" mpin192.c "+fnamec)
run_in_shell(copytext+" mpin192.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="bls192_"+tc+".c"
fnameh="bls192_"+tc+".h"
run_in_shell(copytext+" bls192.c "+fnamec)
run_in_shell(copytext+" bls192.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
if cs == "256" :
fnamec="fp8_"+tf+".c"
fnameh="fp8_"+tf+".h"
run_in_shell(copytext+" fp8.c "+fnamec)
run_in_shell(copytext+" fp8.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="ecp8_"+tc+".c"
fnameh="ecp8_"+tc+".h"
run_in_shell(copytext+" ecp8.c "+fnamec)
run_in_shell(copytext+" ecp8.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="fp16_"+tf+".c"
fnameh="fp16_"+tf+".h"
run_in_shell(copytext+" fp16.c "+fnamec)
run_in_shell(copytext+" fp16.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="fp48_"+tf+".c"
fnameh="fp48_"+tf+".h"
run_in_shell(copytext+" fp48.c "+fnamec)
run_in_shell(copytext+" fp48.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="pair256_"+tc+".c"
fnameh="pair256_"+tc+".h"
run_in_shell(copytext+" pair256.c "+fnamec)
run_in_shell(copytext+" pair256.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="mpin256_"+tc+".c"
fnameh="mpin256_"+tc+".h"
run_in_shell(copytext+" mpin256.c "+fnamec)
run_in_shell(copytext+" mpin256.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="bls256_"+tc+".c"
fnameh="bls256_"+tc+".h"
run_in_shell(copytext+" bls256.c "+fnamec)
run_in_shell(copytext+" bls256.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
replace("arch.h","@WL@","64")
print("Elliptic Curves")
print("1. ED25519")
print("2. C25519")
print("3. NIST256")
print("4. BRAINPOOL")
print("5. ANSSI")
print("6. HIFIVE")
print("7. GOLDILOCKS")
print("8. NIST384")
print("9. C41417")
print("10. NIST521\n")
print("11. NUMS256W")
print("12. NUMS256E")
print("13. NUMS384W")
print("14. NUMS384E")
print("15. NUMS512W")
print("16. NUMS512E")
print("17. SECP256K1\n")
print("Pairing-Friendly Elliptic Curves")
print("18. BN254")
print("19. BN254CX")
print("20. BLS383")
print("21. BLS381")
print("22. FP256BN")
print("23. FP512BN")
print("24. BLS461\n")
print("25. BLS24")
print("26. BLS48\n")
print("RSA")
print("27. RSA2048")
print("28. RSA3072")
print("29. RSA4096")
selection=[]
ptr=0
max=30
curve_selected=False
pfcurve_selected=False
rsa_selected=False
while ptr<max:
x=int(input("Choose a Scheme to support - 0 to finish: "))
if x == 0:
break
# print("Choice= ",x)
already=False
for i in range(0,ptr):
if x==selection[i]:
already=True
break
if already:
continue
selection.append(x)
ptr=ptr+1
# curveset(big,field,curve,big_length_bytes,bits_in_base,modulus_bits,modulus_mod_8,modulus_type,curve_type,pairing_friendly,sextic twist,sign of x,ate bits,curve security)
# for each curve give names for big, field and curve. In many cases the latter two will be the same.
# Typically "big" is the size in bits, always a multiple of 8, "field" describes the modulus, and "curve" is the common name for the elliptic curve
# big_length_bytes is "big" divided by 8
# Next give the number base used for 64 bit architectures, as n where the base is 2^n (note that these must be fixed for the same "big" name, if it is ever re-used for another curve)
# modulus_bits is the bit length of the modulus, typically the same or slightly smaller than "big"
# modulus_mod_8 is the remainder when the modulus is divided by 8
# modulus_type is NOT_SPECIAL, or PSEUDO_MERSENNE, or MONTGOMERY_FRIENDLY, or GENERALISED_MERSENNE (supported for GOLDILOCKS only)
# curve_type is WEIERSTRASS, EDWARDS or MONTGOMERY
# pairing_friendly is BN, BLS or NOT (if not pairing friendly)
# if pairing friendly. M or D type twist, and sign of the family parameter x
# ate bits is number of bits in Ate parameter (from romgen program)
# curve security is AES equivalent, rounded up.
if x==1:
curveset("256","25519","ED25519","32","56","255","5","PSEUDO_MERSENNE","EDWARDS","NOT","","","","128")
curve_selected=True
if x==2:
curveset("256","25519","C25519","32","56","255","5","PSEUDO_MERSENNE","MONTGOMERY","NOT","","","","128")
curve_selected=True
if x==3:
curveset("256","NIST256","NIST256","32","56","256","7","NOT_SPECIAL","WEIERSTRASS","NOT","","","","128")
curve_selected=True
if x==4:
curveset("256","BRAINPOOL","BRAINPOOL","32","56","256","7","NOT_SPECIAL","WEIERSTRASS","NOT","","","","128")
curve_selected=True
if x==5:
curveset("256","ANSSI","ANSSI","32","56","256","7","NOT_SPECIAL","WEIERSTRASS","NOT","","","","128")
curve_selected=True
if x==6:
curveset("336","HIFIVE","HIFIVE","42","60","336","5","PSEUDO_MERSENNE","EDWARDS","NOT","","","","192")
curve_selected=True
if x==7:
curveset("448","GOLDILOCKS","GOLDILOCKS","56","58","448","7","GENERALISED_MERSENNE","EDWARDS","NOT","","","","256")
curve_selected=True
if x==8:
curveset("384","NIST384","NIST384","48","56","384","7","NOT_SPECIAL","WEIERSTRASS","NOT","","","","192")
curve_selected=True
if x==9:
curveset("416","C41417","C41417","52","60","414","7","PSEUDO_MERSENNE","EDWARDS","NOT","","","","256")
curve_selected=True
if x==10:
curveset("528","NIST521","NIST521","66","60","521","7","PSEUDO_MERSENNE","WEIERSTRASS","NOT","","","","256")
curve_selected=True
if x==11:
curveset("256","256PMW","NUMS256W","32","56","256","3","PSEUDO_MERSENNE","WEIERSTRASS","NOT","","","","128")
curve_selected=True
if x==12:
curveset("256","256PME","NUMS256E","32","56","256","3","PSEUDO_MERSENNE","EDWARDS","NOT","","","","128")
curve_selected=True
if x==13:
curveset("384","384PM","NUMS384W","48","56","384","3","PSEUDO_MERSENNE","WEIERSTRASS","NOT","","","","192")
curve_selected=True
if x==14:
curveset("384","384PM","NUMS384E","48","56","384","3","PSEUDO_MERSENNE","EDWARDS","NOT","","","","192")
curve_selected=True
if x==15:
curveset("512","512PM","NUMS512W","64","56","512","7","PSEUDO_MERSENNE","WEIERSTRASS","NOT","","","","256")
curve_selected=True
if x==16:
curveset("512","512PM","NUMS512E","64","56","512","7","PSEUDO_MERSENNE","EDWARDS","NOT","","","","256")
curve_selected=True
if x==17:
curveset("256","SECP256K1","SECP256K1","32","56","256","7","NOT_SPECIAL","WEIERSTRASS","NOT","","","","128")
curve_selected=True
if x==18:
curveset("256","BN254","BN254","32","56","254","3","NOT_SPECIAL","WEIERSTRASS","BN","D_TYPE","NEGATIVEX","66","128")
pfcurve_selected=True
if x==19:
curveset("256","BN254CX","BN254CX","32","56","254","3","NOT_SPECIAL","WEIERSTRASS","BN","D_TYPE","NEGATIVEX","66","128")
pfcurve_selected=True
if x==20:
curveset("384","BLS383","BLS383","48","58","383","3","NOT_SPECIAL","WEIERSTRASS","BLS","M_TYPE","POSITIVEX","65","128")
pfcurve_selected=True
if x==21:
curveset("384","BLS381","BLS381","48","58","381","3","NOT_SPECIAL","WEIERSTRASS","BLS","M_TYPE","NEGATIVEX","65","128")
pfcurve_selected=True
if x==22:
curveset("256","FP256BN","FP256BN","32","56","256","3","NOT_SPECIAL","WEIERSTRASS","BN","M_TYPE","NEGATIVEX","66","128")
pfcurve_selected=True
if x==23:
curveset("512","FP512BN","FP512BN","64","60","512","3","NOT_SPECIAL","WEIERSTRASS","BN","M_TYPE","POSITIVEX","130","128")
pfcurve_selected=True
# https://eprint.iacr.org/2017/334.pdf
if x==24:
curveset("464","BLS461","BLS461","58","60","461","3","NOT_SPECIAL","WEIERSTRASS","BLS","M_TYPE","NEGATIVEX","78","128")
pfcurve_selected=True
if x==25:
curveset("480","BLS24","BLS24","60","56","479","3","NOT_SPECIAL","WEIERSTRASS","BLS","M_TYPE","POSITIVEX","49","192")
pfcurve_selected=True
if x==26:
curveset("560","BLS48","BLS48","70","58","556","3","NOT_SPECIAL","WEIERSTRASS","BLS","M_TYPE","POSITIVEX","32","256")
pfcurve_selected=True
# rsaset(big,ring,big_length_bytes,bits_in_base,multiplier)
# for each choice give distinct names for "big" and "ring".
# Typically "big" is the length in bits of the underlying big number type
# "ring" is the RSA modulus size = "big" times 2^m
# big_length_bytes is "big" divided by 8
# Next give the number base used for 64 bit architecture, as n where the base is 2^n
# multiplier is 2^m (see above)
# There are choices here, different ways of getting the same result, but some faster than others
if x==27:
#256 is slower but may allow reuse of 256-bit BIGs used for elliptic curve
#512 is faster.. but best is 1024
rsaset("1024","2048","128","58","2")
#rsaset("512","2048","64","60","4")
#rsaset("256","2048","32","56","8")
rsa_selected=True
if x==28:
rsaset("384","3072","48","56","8")
rsa_selected=True
if x==29:
#rsaset("256","4096","32","56","16")
rsaset("512","4096","64","60","8")
rsa_selected=True
run_in_shell(deltext+" big.*")
run_in_shell(deltext+" fp.*")
run_in_shell(deltext+" ecp.*")
run_in_shell(deltext+" ecdh.*")
run_in_shell(deltext+" ff.*")
run_in_shell(deltext+" rsa.*")
run_in_shell(deltext+" config_big.h")
run_in_shell(deltext+" config_field.h")
run_in_shell(deltext+" config_curve.h")
run_in_shell(deltext+" config_ff.h")
run_in_shell(deltext+" fp2.*")
run_in_shell(deltext+" fp4.*")
run_in_shell(deltext+" fp8.*")
run_in_shell(deltext+" fp16.*")
run_in_shell(deltext+" fp12.*")
run_in_shell(deltext+" fp24.*")
run_in_shell(deltext+" fp48.*")
run_in_shell(deltext+" ecp2.*")
run_in_shell(deltext+" ecp4.*")
run_in_shell(deltext+" ecp8.*")
run_in_shell(deltext+" pair.*")
run_in_shell(deltext+" mpin.*")
run_in_shell(deltext+" bls.*")
run_in_shell(deltext+" pair192.*")
run_in_shell(deltext+" mpin192.*")
run_in_shell(deltext+" bls192.*")
run_in_shell(deltext+" pair256.*")
run_in_shell(deltext+" mpin256.*")
run_in_shell(deltext+" bls256.*")
# create library
run_in_shell("gcc -O3 -std=c99 -c randapi.c")
if curve_selected :
run_in_shell("gcc -O3 -std=c99 -c ecdh_support.c")
if rsa_selected :
run_in_shell("gcc -O3 -std=c99 -c rsa_support.c")
if pfcurve_selected :
run_in_shell("gcc -O3 -std=c99 -c pbc_support.c")
run_in_shell("gcc -O3 -std=c99 -c hash.c")
run_in_shell("gcc -O3 -std=c99 -c rand.c")
run_in_shell("gcc -O3 -std=c99 -c oct.c")
run_in_shell("gcc -O3 -std=c99 -c aes.c")
run_in_shell("gcc -O3 -std=c99 -c gcm.c")
run_in_shell("gcc -O3 -std=c99 -c newhope.c")
if sys.platform.startswith("win") :
run_in_shell("for %i in (*.o) do @echo %~nxi >> f.list")
run_in_shell("ar rc amcl.a @f.list")
run_in_shell(deltext+" f.list")
else :
run_in_shell("ar rc amcl.a *.o")
run_in_shell(deltext+" *.o")
#print("Your section was ")
#for i in range(0,ptr):
# print (selection[i])
|
miracl/amcl
|
version3/c/config64.py
|
Python
|
apache-2.0
| 19,998 | 0.069207 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Btc plugin for Varas
Author: Neon & A Sad Loner
Last modified: November 2016
"""
import urllib2
from plugin import Plugin
name = 'Bitcoin'
class Bitcoin(Plugin):
def __init__(self):
Plugin.__init__(self,"bitcoin","<wallet> Return current balance from a Bitcoin wallet","A Sad Loners",1.0)
def run(self,address):
        # 1 BTC = 100,000,000 satoshi
print "https://blockchain.info/it/q/addressbalance/"+address
try:
api = urllib2.urlopen("https://blockchain.info/it/q/addressbalance/"+address)
except:
return "Unknown Error"
resp = api.read()
satoshi = float(resp)
btc = satoshi/100000000
return "Balance: " + str(btc)
|
GooogIe/VarasTG
|
plugins/btc.py
|
Python
|
gpl-3.0
| 751 | 0.050599 |
from logging import getLogger
from vms.models import Dc, DummyDc
logger = getLogger(__name__)
class DcMiddleware(object):
"""
Attach dc attribute to each request.
"""
# noinspection PyMethodMayBeStatic
def process_request(self, request):
dc = getattr(request, 'dc', None)
if not dc or dc.is_dummy:
if request.path.startswith('/api/'):
return # Managed by ExpireTokenAuthentication and request_data decorator
if request.user.is_authenticated():
# Set request.dc for logged in user
request.dc = Dc.objects.get_by_id(request.user.current_dc_id)
# Whenever we set a DC we have to set request.dc_user_permissions right after request.dc is available
request.dc_user_permissions = request.dc.get_user_permissions(request.user)
# Log this request only for authenticated users
logger.debug('"%s %s" user="%s" dc="%s" permissions=%s', request.method, request.path,
request.user.username, request.dc.name, request.dc_user_permissions)
else:
try:
# This will get DC also for external views to login and registration pages according to URL
request.dc = Dc.objects.get_by_site(request.META['HTTP_HOST'])
except (KeyError, Dc.DoesNotExist):
request.dc = DummyDc()
# Whenever we set a DC we have to set request.dc_user_permissions right after request.dc is available
request.dc_user_permissions = frozenset() # External users have no permissions
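# Enabling sketch (added for illustration; this is old-style Django middleware,
# so with the pre-1.10 setting it would be hooked in roughly like this, after
# the auth middleware so request.user is already populated):
#
#   MIDDLEWARE_CLASSES = (
#       # ...,
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'vms.middleware.DcMiddleware',
#   )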
|
erigones/esdc-ce
|
vms/middleware.py
|
Python
|
apache-2.0
| 1,676 | 0.00537 |
"""
Example of using cwFitter to generate a HH model for EGL-19 Ca2+ ion channel
Based on experimental data from doi:10.1083/jcb.200203055
"""
import os.path
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../..')
from channelworm.fitter import *
if __name__ == '__main__':
userData = dict()
cwd=os.getcwd()
csv_path = os.path.dirname(cwd)+'/examples/egl-19-data/egl-19-IClamp-IV.csv'
ref = {'fig':'2B','doi':'10.1083/jcb.200203055'}
x_var = {'type':'Voltage','unit':'V','toSI':1}
y_var = {'type':'Current','unit':'A/F','toSI':75e-12}
IV = {'ref':ref,'csv_path':csv_path,'x_var':x_var,'y_var':y_var}
userData['samples'] = {'IV':IV}
# csv_path_IC_100 = os.path.dirname(cwd)+'egl-19-data//egl-19-IClamp-100pA.csv'
# csv_path_IC_200 = os.path.dirname(cwd)+'egl-19-data//egl-19-IClamp-200pA.csv'
# csv_path_IC_300 = os.path.dirname(cwd)+'egl-19-data//egl-19-IClamp-300pA.csv'
# csv_path_IC_400 = os.path.dirname(cwd)+'egl-19-data//egl-19-IClamp-400pA.csv'
# x_var_IC = {'type':'Time','unit':'s','toSI':1}
# y_var_IC = {'type':'Voltage','unit':'V','toSI':1}
# traces_IC = [{'amp':100e-12,'csv_path':csv_path_IC_100,'x_var':x_var_IC,'y_var':y_var_IC},
# {'amp':200e-12,'csv_path':csv_path_IC_200,'x_var':x_var_IC,'y_var':y_var_IC},
# {'amp':300e-12,'csv_path':csv_path_IC_300,'x_var':x_var_IC,'y_var':y_var_IC},
# {'amp':400e-12,'csv_path':csv_path_IC_400,'x_var':x_var_IC,'y_var':y_var_IC}]
# ref_IC = {'fig':'3B','doi':'10.1083/jcb.200203055'}
# IClamp = {'ref':ref_IC,'traces':traces_IC}
# userData['samples'] = {'IClamp':IClamp,'IV':IV}
myInitiator = initiators.Initiator(userData)
sampleData = myInitiator.get_sample_params()
bio_params = myInitiator.get_bio_params()
sim_params = myInitiator.get_sim_params()
myEvaluator = evaluators.Evaluator(sampleData,sim_params,bio_params)
    # bio parameters for EGL-19
bio_params['cell_type'] = 'ADAL'
bio_params['channel_type'] = 'EGL-19'
bio_params['ion_type'] = 'Ca'
bio_params['val_cell_params'][0] = 75e-12 # C_mem DOI: 10.1074/jbc.M605814200
bio_params['val_cell_params'][1] = 75e-10 # area DOI: 10.1101/pdb.top066308
bio_params['gate_params'] = {'vda': {'power': 2}}
bio_params['channel_params'] = ['g_dens','e_rev']
bio_params['unit_chan_params'] = ['S/m2','V']
bio_params['min_val_channel'] = [1, 40e-3]
bio_params['max_val_channel'] = [10, 70e-3]
bio_params['channel_params'].extend(['v_half_a','k_a','T_a'])
bio_params['unit_chan_params'].extend(['V','V','s'])
bio_params['min_val_channel'].extend([-10e-3, 4e-3, 0.0001])
bio_params['max_val_channel'].extend([ 30e-3, 20e-3, 2e-3])
# Simulation parameters for EGL-19 I/V
sim_params['v_hold'] = -70e-3
sim_params['I_init'] = 0
sim_params['pc_type'] = 'VClamp'
sim_params['deltat'] = 1e-5
sim_params['duration'] = 0.03
sim_params['start_time'] = 0.002
sim_params['end_time'] = 0.022
sim_params['protocol_start'] = -40e-3
sim_params['protocol_end'] = 80e-3
sim_params['protocol_steps'] = 10e-3
opt = '-pso'
# opt = '-ga'
# opt = None
if len(sys.argv) == 2:
opt = sys.argv[1]
if 'IV' in sampleData and opt is not None:
while True:
q = raw_input("\n\nTry fitting curves (y,n):")
if q == "n":
break # stops the loop
elif q == "y":
# Find initial guess for parameters using curve_fit, leastsq
popt = None
best_candidate = np.asarray(bio_params['min_val_channel']) + np.asarray(bio_params['max_val_channel']) / 2
best_candidate_params = dict(zip(bio_params['channel_params'],best_candidate))
cell_var = dict(zip(bio_params['cell_params'],bio_params['val_cell_params']))
mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
bestSim = mySimulator.patch_clamp()
if 'IV' in sampleData:
popt , p0 = mySimulator.optim_curve(params= bio_params['channel_params'],
best_candidate= best_candidate,
target= [sampleData['IV']['V'],sampleData['IV']['I']])
print 'Params after IV minimization:'
print p0
IV_fit_cost = myEvaluator.iv_cost(popt)
print 'IV cost:'
print IV_fit_cost
if 'VClamp' in sampleData:
VClamp_fit_cost = myEvaluator.vclamp_cost(popt)
print 'VClamp cost:'
print VClamp_fit_cost
vData = np.arange(-0.040, 0.080, 0.001)
Iopt = mySimulator.iv_act(vData,*popt)
plt.plot([x*1 for x in bestSim['V_ss']],bestSim['I_ss'], label = 'Initial parameters', color='y')
plt.plot([x*1 for x in sampleData['IV']['V']],sampleData['IV']['I'], '--ko', label = 'sample data')
plt.plot([x*1 for x in vData],Iopt, color='r', label = 'Fitted to IV curve')
plt.legend()
plt.title("IV Curve Fit")
plt.xlabel('V (mV)')
plt.ylabel('I (A)')
plt.show()
if popt is not None:
if opt == '-pso':
bio_params['min_val_channel'][0:4] = popt[0:4] - abs(popt[0:4]/2)
bio_params['max_val_channel'][0:4] = popt[0:4] + abs(popt[0:4]/2)
else:
bio_params['min_val_channel'][0:4] = popt[0:4]
bio_params['max_val_channel'][0:4] = popt[0:4]
best_candidate_params = dict(zip(bio_params['channel_params'],popt))
cell_var = dict(zip(bio_params['cell_params'],bio_params['val_cell_params']))
mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
bestSim = mySimulator.patch_clamp()
myModelator = modelators.Modelator(bio_params,sim_params)
myModelator.compare_plots(sampleData,bestSim,show=True)
myModelator.ss_plots(bestSim,show=True)
|
cheelee/ChannelWorm
|
channelworm/fitter/examples/EGL-19-2.py
|
Python
|
mit
| 6,529 | 0.013019 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest_lib import exceptions as lib_exc
from tempest.api.baremetal.admin import base
from tempest.common.utils import data_utils
from tempest import test
class TestChassis(base.BaseBaremetalTest):
"""Tests for chassis."""
@classmethod
def resource_setup(cls):
super(TestChassis, cls).resource_setup()
_, cls.chassis = cls.create_chassis()
def _assertExpected(self, expected, actual):
# Check if not expected keys/values exists in actual response body
for key, value in six.iteritems(expected):
if key not in ('created_at', 'updated_at'):
self.assertIn(key, actual)
self.assertEqual(value, actual[key])
@test.idempotent_id('7c5a2e09-699c-44be-89ed-2bc189992d42')
def test_create_chassis(self):
descr = data_utils.rand_name('test-chassis')
_, chassis = self.create_chassis(description=descr)
self.assertEqual(chassis['description'], descr)
@test.idempotent_id('cabe9c6f-dc16-41a7-b6b9-0a90c212edd5')
def test_create_chassis_unicode_description(self):
# Use a unicode string for testing:
# 'We ♡ OpenStack in Ukraine'
descr = u'В Україні ♡ OpenStack!'
_, chassis = self.create_chassis(description=descr)
self.assertEqual(chassis['description'], descr)
@test.idempotent_id('c84644df-31c4-49db-a307-8942881f41c0')
def test_show_chassis(self):
_, chassis = self.client.show_chassis(self.chassis['uuid'])
self._assertExpected(self.chassis, chassis)
@test.idempotent_id('29c9cd3f-19b5-417b-9864-99512c3b33b3')
def test_list_chassis(self):
_, body = self.client.list_chassis()
self.assertIn(self.chassis['uuid'],
[i['uuid'] for i in body['chassis']])
@test.idempotent_id('5ae649ad-22d1-4fe1-bbc6-97227d199fb3')
def test_delete_chassis(self):
_, body = self.create_chassis()
uuid = body['uuid']
self.delete_chassis(uuid)
self.assertRaises(lib_exc.NotFound, self.client.show_chassis, uuid)
@test.idempotent_id('cda8a41f-6be2-4cbf-840c-994b00a89b44')
def test_update_chassis(self):
_, body = self.create_chassis()
uuid = body['uuid']
new_description = data_utils.rand_name('new-description')
_, body = (self.client.update_chassis(uuid,
description=new_description))
_, chassis = self.client.show_chassis(uuid)
self.assertEqual(chassis['description'], new_description)
@test.idempotent_id('76305e22-a4e2-4ab3-855c-f4e2368b9335')
def test_chassis_node_list(self):
_, node = self.create_node(self.chassis['uuid'])
_, body = self.client.list_chassis_nodes(self.chassis['uuid'])
self.assertIn(node['uuid'], [n['uuid'] for n in body['nodes']])
|
hayderimran7/tempest
|
tempest/api/baremetal/admin/test_chassis.py
|
Python
|
apache-2.0
| 3,455 | 0 |
"""Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P. Hoffman, L. Masinter, J. Zwinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to de facto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The test cases in
test_urlparse.py provide a good indicator of parsing behavior.
"""
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp',
'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp','nfs','git', 'git+ssh']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache."""
_parse_cache.clear()
class ResultMixin(object):
"""Shared methods for the parsed result objects."""
@property
def username(self):
netloc = self.netloc
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
userinfo = userinfo.split(":", 1)[0]
return userinfo
return None
@property
def password(self):
netloc = self.netloc
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)[1]
return None
@property
def hostname(self):
netloc = self.netloc.split('@')[-1]
if '[' in netloc and ']' in netloc:
return netloc.split(']')[0][1:].lower()
elif ':' in netloc:
return netloc.split(':')[0].lower()
elif netloc == '':
return None
else:
return netloc.lower()
@property
def port(self):
netloc = self.netloc.split('@')[-1].split(']')[-1]
if ':' in netloc:
port = netloc.split(':')[1]
return int(port, 10)
else:
return None
from collections import namedtuple
class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):
__slots__ = ()
def geturl(self):
return urlunparse(self)
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
tuple = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = tuple
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
return ParseResult(scheme, netloc, url, params, query, fragment)
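# Example (added for illustration):
#   urlparse('http://www.example.com/a/b;type=x?q=1#frag')
#   -> ParseResult(scheme='http', netloc='www.example.com', path='/a/b',
#                  params='type=x', query='q=1', fragment='frag')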
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return cached
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
for c in url[:i]:
if c not in scheme_chars:
break
else:
try:
# make sure "url" is not actually a port number (in which case
# "scheme" is really part of the path
_testportnum = int(url[i+1:])
except ValueError:
scheme, url = url[:i].lower(), url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and scheme in uses_fragment and '#' in url:
url, fragment = url.split('#', 1)
if scheme in uses_query and '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
def urlunparse(data):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment = data
if params:
url = "%s;%s" % (url, params)
return urlunsplit((scheme, netloc, url, query, fragment))
def urlunsplit(data):
"""Combine the elements of a tuple as returned by urlsplit() into a
complete URL as a string. The data argument can be any five-item iterable.
This may result in a slightly different, but equivalent URL, if the URL that
was parsed originally had unnecessary delimiters (for example, a ? with an
empty query; the RFC states that these are equivalent)."""
scheme, netloc, url, query, fragment = data
if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
if url and url[:1] != '/': url = '/' + url
url = '//' + (netloc or '') + url
if scheme:
url = scheme + ':' + url
if query:
url = url + '?' + query
if fragment:
url = url + '#' + fragment
return url
def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
urlparse(base, '', allow_fragments)
scheme, netloc, path, params, query, fragment = \
urlparse(url, bscheme, allow_fragments)
if scheme != bscheme or scheme not in uses_relative:
return url
if scheme in uses_netloc:
if netloc:
return urlunparse((scheme, netloc, path,
params, query, fragment))
netloc = bnetloc
if path[:1] == '/':
return urlunparse((scheme, netloc, path,
params, query, fragment))
if not path and not params:
path = bpath
params = bparams
if not query:
query = bquery
return urlunparse((scheme, netloc, path,
params, query, fragment))
segments = bpath.split('/')[:-1] + path.split('/')
# XXX The stuff below is bogus in various ways...
if segments[-1] == '.':
segments[-1] = ''
while '.' in segments:
segments.remove('.')
while 1:
i = 1
n = len(segments) - 1
while i < n:
if (segments[i] == '..'
and segments[i-1] not in ('', '..')):
del segments[i-1:i+1]
break
i = i+1
else:
break
if segments == ['', '..']:
segments[-1] = ''
elif len(segments) >= 2 and segments[-1] == '..':
segments[-2:] = ['']
return urlunparse((scheme, netloc, '/'.join(segments),
params, query, fragment))
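# Examples (added for illustration):
#   urljoin('http://a/b/c/d', 'e')     -> 'http://a/b/c/e'
#   urljoin('http://a/b/c/d', '../e')  -> 'http://a/b/e'
#   urljoin('http://a/b/c/d', '/e')    -> 'http://a/e'
#   urljoin('http://a/b/c/d', '//x/y') -> 'http://x/y'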
def urldefrag(url):
"""Removes any existing fragment from URL.
Returns a tuple of the defragmented URL and the fragment. If
the URL contained no fragments, the second element is the
empty string.
"""
if '#' in url:
s, n, p, a, q, frag = urlparse(url)
defrag = urlunparse((s, n, p, a, q, ''))
return defrag, frag
else:
return url, ''
# unquote method for parse_qs and parse_qsl
# Cannot use directly from urllib as it would create a circular reference
# because urllib uses urlparse methods (urljoin). If you update this function,
# update it also in urllib. This code duplication does not exist in Python 3.
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a+b, chr(int(a+b,16)))
for a in _hexdig for b in _hexdig)
def unquote(s):
"""unquote('abc%20def') -> 'abc def'."""
res = s.split('%')
# fastpath
if len(res) == 1:
return s
s = res[0]
for item in res[1:]:
try:
s += _hextochr[item[:2]] + item[2:]
except KeyError:
s += '%' + item
except UnicodeDecodeError:
s += unichr(int(item[:2], 16)) + item[2:]
return s
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
dict = {}
for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
if name in dict:
dict[name].append(value)
else:
dict[name] = [value]
return dict
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
Returns a list, as G-d intended.
"""
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError, "bad query field: %r" % (name_value,)
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = unquote(nv[0].replace('+', ' '))
value = unquote(nv[1].replace('+', ' '))
r.append((name, value))
return r
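# Examples (added for illustration):
#   parse_qsl('a=1&a=2&b=hello+world') -> [('a', '1'), ('a', '2'), ('b', 'hello world')]
#   parse_qs('a=1&a=2&b=3')            -> {'a': ['1', '2'], 'b': ['3']}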
|
huran2014/huran.github.io
|
wot_gateway/usr/lib/python2.7/urlparse.py
|
Python
|
gpl-2.0
| 14,414 | 0.002081 |
# plugins module for amsn2
"""
Plugins with amsn2 will be a subclass of the aMSNPlugin() class.
When this module is initially imported it should load the plugins from the last session. Done in the init() proc.
Then the GUI should call plugins.loadPlugin(name) or plugins.unLoadPlugin(name) in order to deal with plugins.
"""
# init()
# Called when the plugins module is imported (only for the first time).
# Should find plugins and populate a list ready for getPlugins().
# Should also auto-update all plugins.
def init(): pass
# loadPlugin(plugin_name)
# Called (by the GUI or from init()) to load a plugin. plugin_name as set in plugin's XML (or from getPlugins()).
# This loads the module for the plugin. The module is then responsible for calling plugins.registerPlugin(instance).
def loadPlugin(plugin_name):
"""
@type plugin_name: str
"""
pass
# unLoadPlugin(plugin_name)
# Called to unload a plugin. Name is name as set in plugin's XML.
def unLoadPlugin(plugin_name):
"""
@type plugin_name: str
"""
pass
# registerPlugin(plugin_instance)
# Saves the instance of the plugin, and registers it in the loaded list.
def registerPlugin(plugin_instance):
"""
@type plugin_instance: L{amsn2.plugins.developers.aMSNPlugin}
"""
pass
# getPlugins()
# Returns a list of all available plugins, as in ['Plugin 1', 'Plugin 2']
def getPlugins(): pass
# getPluginsWithStatus()
# Returns a list with an entry for each plugin: the plugin's name plus Loaded or NotLoaded.
# IE: [['Plugin 1', 'Loaded'], ['Plugin 2', 'NotLoaded']]
def getPluginsWithStatus(): pass
# getLoadedPlugins()
# Returns a list of loaded plugins. as in ['Plugin 1', 'Plugin N']
def getLoadedPlugins(): pass
# findPlugin(plugin_name)
# Returns the running instance of the plugin with name plugin_name, or None if not found.
def findPlugin(plugin_name):
"""
@type plugin_name: str
"""
pass
# saveConfig(plugin_name, data)
def saveConfig(plugin_name, data):
"""
@type plugin_name: str
@type data: object
"""
pass
# Calls the init procedure.
# Will only be called on the first import (thanks to python).
init()
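# Intended call flow (sketch, inferred from the comments above; the plugin
# name is hypothetical):
#
#   loadPlugin('Events')       # imports the plugin's module, which in turn
#                              # calls registerPlugin(instance)
#   getLoadedPlugins()         # -> ['Events']
#   findPlugin('Events')       # -> the running instance
#   unLoadPlugin('Events')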
|
amsn/amsn2
|
amsn2/plugins/core.py
|
Python
|
gpl-2.0
| 2,184 | 0.009615 |
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen/plug/menu/__init__.py
"""
The menu package for allowing plugins to specify options in a generic way.
"""
from ._menu import Menu
from ._option import Option
from ._string import StringOption
from ._color import ColorOption
from ._number import NumberOption
from ._text import TextOption
from ._boolean import BooleanOption
from ._enumeratedlist import EnumeratedListOption
from ._filter import FilterOption
from ._person import PersonOption
from ._family import FamilyOption
from ._note import NoteOption
from ._media import MediaOption
from ._personlist import PersonListOption
from ._placelist import PlaceListOption
from ._surnamecolor import SurnameColorOption
from ._destination import DestinationOption
from ._style import StyleOption
from ._booleanlist import BooleanListOption
|
sam-m888/gprime
|
gprime/plug/menu/__init__.py
|
Python
|
gpl-2.0
| 1,579 | 0 |
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
from mmfparser.data.chunkloaders.actions.names import *
|
joaormatos/anaconda
|
mmfparser/data/chunkloaders/actions/__init__.py
|
Python
|
gpl-3.0
| 749 | 0.001335 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
def parse_file(filename):
word_dict= {}
file = open(filename)
for f in file:
words = f.split()
for word in words:
word = word.lower()
if word in word_dict:
value = word_dict.get(word)
word_dict[word] = value+1
else:
word_dict[word] = 1
file.close()
return word_dict
def print_words(filename):
word_dict = parse_file(filename)
keys = sorted(word_dict.keys())
for key in keys:
print key,word_dict[key]
def print_top(filename):
word_dict = parse_file(filename)
top_list = sorted(word_dict.items(),key=value_sort,reverse=True)
    count = 20
if len(top_list)<count:
count = len(top_list)
for word_tuple in top_list[0:count]:
print word_tuple[0],word_tuple[1]
def value_sort(word_tuple):
return word_tuple[1]
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
|
bobbyphilip/learn_python
|
google-python-exercises/basic/wordcount.py
|
Python
|
apache-2.0
| 3,007 | 0.006651 |
# -*- coding:utf-8 -*-
"""
Copyright (C) 2013 Nurilab.
Author: Kei Choi(hanul93@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
__revision__ = '$LastChangedRevision: 2 $'
__author__ = 'Kei Choi'
__version__ = '1.0.0.%d' % int( __revision__[21:-2] )
__contact__ = 'hanul93@gmail.com'
import os # imported for file deletion
import zlib
import hashlib
import struct, mmap
import kernel
import kavutil
import glob
# macro types
X95M = 1
X97M = 2
W95M = 3
W97M = 4
SIGTOOL = False
def IsPrint(char) :
c = ord(char)
if c > 0x20 and c < 0x80 :
return True
else :
return False
def ExtractMacroData_W95M(data) :
mac_data = None
data_size = len(data)
try :
if data_size < 0x200 : raise SystemError
version = struct.unpack('<H', data[2:2+2])[0]
if version > 0xc0 : raise SystemError
exist_macro = struct.unpack('<L', data[0x11C:0x11C+4])[0]
if exist_macro <= 2 : raise SystemError
mac_pos = struct.unpack('<L', data[0x118:0x118+4])[0]
if ord(data[mac_pos]) != 0xFF : raise SystemError
while ord(data[mac_pos + 1]) != 0x01 : # chHplmcd
ch = ord(data[mac_pos + 1])
val = struct.unpack('<H', data[mac_pos+2:mac_pos+4])[0]
if ch == 0x02 : mac_pos += val * 0x4 # chHplacd
elif ch == 0x03 : mac_pos += val * 0xE # chHplkme
elif ch == 0x04 : mac_pos += val * 0xE # chHplkmeBad
elif ch == 0x05 : mac_pos += val * 0xC # chHplmud
elif ch == 0x12 : mac_pos += 2 # chUnnamedToolbar
elif ch == 0x40 : raise SystemError # chTcgEnd
else : raise SystemError
mac_pos += 3
mac_num = struct.unpack('<H', data[mac_pos+2:mac_pos+4])[0]
mac_pos += 4
        # print mac_num # number of macros
        mac_info = 0 # count of macro info records
all_code = []
for i in range(mac_num) :
if ord(data[mac_pos + (mac_info * 0x18)]) == 0x55 :
pos = mac_pos + (mac_info * 0x18)
w95m_key = ord(data[pos + 1])
w95m_len = struct.unpack('<L', data[pos+0x0C:pos+0x0C+4])[0]
w95m_pos = struct.unpack('<L', data[pos+0x14:pos+0x14+4])[0]
# print hex(w95m_key), hex(w95m_len), hex(w95m_pos)
if w95m_key != 0 :
w95m_code = ''
for j in range(w95m_len) :
ch = ord(data[w95m_pos + j]) ^ w95m_key
w95m_code += chr(ch)
else :
w95m_code = data[w95m_pos:w95m_pos + w95m_len]
all_code.append(w95m_code)
mac_info += 1
mac_data = all_code
except :
pass
return mac_data
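# (Added note, for illustration: each entry in the returned list is one
#  decrypted Word 95 macro body; w95m_key == 0 means the macro was stored
#  unencrypted, otherwise each byte is XOR-decrypted with the key.)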
def ExtractMacroData_X95M(data) :
mac_data = None
data_size = len(data)
try :
if data_size < 0x200 : raise SystemError
if ord(data[0]) != 0x01 : raise SystemError
mac_pos = struct.unpack('<L', data[10:10+4])[0]
mac_pos += ( 14L + 14L )
if data_size < mac_pos : raise SystemError
t = struct.unpack('<L', data[mac_pos:mac_pos+4])[0]
mac_pos += t + 28L + 18L - 14L;
if data_size < mac_pos : raise SystemError
mac_pos = struct.unpack('<L', data[mac_pos:mac_pos+4])[0]
mac_pos += 0x3C
if data_size < mac_pos : raise SystemError
        # we have now reached the macro information block
if ord(data[mac_pos]) != 0xFE or ord(data[mac_pos+1]) != 0xCA :
raise SystemError
        # get the number of lines of macro source code
mac_lines = struct.unpack('<H', data[mac_pos+4:mac_pos+6])[0]
if mac_lines == 0 : raise SystemError
mac_pos = mac_pos + 4L + (mac_lines * 12L)
if data_size < mac_pos : raise SystemError
mac_len = struct.unpack('<L', data[mac_pos+6:mac_pos+10])[0]
mac_pos += 10
# print 'ok :', hex(mac_pos), mac_lines, mac_len
        # extract the region that holds the macro
if data_size < (mac_pos + mac_len) : raise SystemError
mac_data = data[mac_pos:mac_pos + mac_len]
except :
pass
return mac_data
def ExtractMacroData_Macro97(data) :
mac_data = None
data_size = len(data)
try :
if data_size < 0x200 : raise SystemError
        if ord(data[0]) != 0x01 : raise SystemError # not a macro
if ord(data[9]) == 0x01 and ord(data[10]) == 0x01 :
            # Excel 97 or Word 97
mac_pos = struct.unpack('<L', data[0xB:0xB+4])[0] + 0x4F
mac_pos += (struct.unpack('<H', data[mac_pos:mac_pos+2])[0] * 16) + 2
mac_pos += struct.unpack('<L', data[mac_pos:mac_pos+4])[0] + 10
mac_pos += struct.unpack('<L', data[mac_pos:mac_pos+4])[0] + 81
mac_pos = struct.unpack('<L', data[mac_pos:mac_pos+4])[0] + 60
else :
            # Excel 2000 / Word 2000 or later
mac_pos = struct.unpack('<L', data[25:25+4])[0]
mac_pos = (mac_pos - 1) + 0x3D
if ord(data[mac_pos]) != 0xFE or ord(data[mac_pos+1]) != 0xCA :
raise SystemError
mac_lines = struct.unpack('<H', data[mac_pos+4:mac_pos+6])[0]
if mac_lines == 0 : raise SystemError
mac_pos = mac_pos + 6L + (mac_lines * 12L);
Len = struct.unpack('<L', data[mac_pos+6:mac_pos+10])[0]
Off = mac_pos + 10
'''
print 'Macro off :', hex(Off)
print 'Macro len :', Len
fp = open('w97m.dmp', 'wb')
fp.write(data[Off:Off+Len])
fp.close()
'''
mac_data = data[Off:Off+Len]
except :
pass
return mac_data
def GetMD5_Macro(data, target_macro) :
global SIGTOOL
ret = None
try :
max = 0
buf = ''
for i in range(len(data)) :
c = data[i]
if IsPrint(c) :
max += 1
else :
if max > 3 :
if SIGTOOL == True :
                        print data[i-max:i] # printed for reference when building patterns (sigtool)
buf += data[i-max:i]
max = 0
md5 = hashlib.md5()
md5.update(buf)
fmd5 = md5.hexdigest().decode('hex')
if SIGTOOL == True :
str_macro = ['', 'x95m', 'x97m', 'w95m', 'w97m']
            print '[%s] %s:%s:%s:' % (str_macro[target_macro], len(buf), md5.hexdigest(), len(data)) # pattern extraction (sigtool)
ret = (len(buf), fmd5, len(data))
except :
pass
return ret
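# (Added note, for illustration: on success GetMD5_Macro returns a tuple
#  (length of the concatenated printable strings, 16-byte raw MD5 of those
#  strings, total macro size) -- the same three fields shown on the
#  'len:md5:size' line printed in sigtool mode above.)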
#---------------------------------------------------------------------
# KavMain class
# The class that marks this module as a KICOM Anti-Virus engine module.
# Without this class, the engine kernel will not load the module.
#---------------------------------------------------------------------
class KavMain :
#-----------------------------------------------------------------
# init(self, plugins)
    # Performs the initialization work of the anti-virus engine module.
#-----------------------------------------------------------------
    def init(self, plugins) : # initialize the anti-virus module
try :
self.plugins = plugins
self.x95m_ptn = []
self.x95m_iptn = {}
self.x97m_ptn = []
self.x97m_iptn = {}
self.w95m_ptn = []
self.w95m_iptn = {}
self.w97m_ptn = []
self.w97m_iptn = {}
self.__signum__ = 0
self.__date__ = 0
self.__time__ = 0
self.max_date = 0
if self.__LoadDB__(X95M) == 1 : raise SystemError
if self.__LoadDB__(X97M) == 1 : raise SystemError
if self.__LoadDB__(W95M) == 1 : raise SystemError
if self.__LoadDB__(W97M) == 1 : raise SystemError
return 0
except :
pass
return 1
    def __LoadDB__(self, target_macro) : # load the signature patterns
try :
vdb = kavutil.VDB()
if target_macro == X95M : ptn_name = 'x95m'
elif target_macro == X97M : ptn_name = 'x97m'
elif target_macro == W95M : ptn_name = 'w95m'
elif target_macro == W97M : ptn_name = 'w97m'
flist = glob.glob(self.plugins + os.sep + ptn_name + '.c*')
for i in range(len(flist)) :
fname = flist[i]
                # load the pattern file
ptn_data = vdb.Load(fname)
                if ptn_data is None : # pattern loading failed
return 1
if target_macro == X95M : self.x95m_ptn.append(ptn_data)
elif target_macro == X97M : self.x97m_ptn.append(ptn_data)
elif target_macro == W95M : self.w95m_ptn.append(ptn_data)
elif target_macro == W97M : self.w97m_ptn.append(ptn_data)
self.__signum__ += vdb.GetSigNum()
                # track the most recent pattern date
t_d = vdb.GetDate()
t_t = vdb.GetTime()
t_date = (t_d << 16) + t_t
if self.max_date < t_date :
self.__date__ = t_d
self.__time__ = t_t
self.max_date = t_date
return 0
except :
return 1
#-----------------------------------------------------------------
# uninit(self)
    # Performs the shutdown work of the anti-virus engine module.
#-----------------------------------------------------------------
    def uninit(self) : # shut down the anti-virus module
return 0
#-----------------------------------------------------------------
    # scan(self, mmhandle, scan_file_struct, format)
    # Scans a file for malware.
    # Arguments : mmhandle         - mmap handle of the file
    #           : scan_file_struct - file info structure
    #           : format           - pre-analyzed file format
    # Returns   : (malware found?, malware name, malware ID), etc.
#-----------------------------------------------------------------
def scan(self, mmhandle, scan_file_struct, format) :
global SIGTOOL
ret = None
scan_state = kernel.NOT_FOUND
ret_value = {}
        ret_value['result'] = False # whether malware was found
        ret_value['virus_name'] = '' # malware name
        ret_value['scan_state'] = kernel.NOT_FOUND # 0: none, 1: infected, 2: suspect, 3: warning
        ret_value['virus_id'] = -1 # malware ID
try :
section_name = scan_file_struct['deep_filename']
            data = mmhandle[:] # entire file content
            if scan_file_struct['signature'] == True : # signature generation mode
SIGTOOL = True
            # A stream under _VBA_PROJECT/xxxx holds an Excel 95 macro.
if section_name.find(r'_VBA_PROJECT/') != -1 :
ret = self.__ScanVirus_X95M__(data)
target = 'MSExcel'
            # A stream under _VBA_PROJECT_CUR/xxxx holds an Excel 97 macro.
elif section_name.find(r'_VBA_PROJECT_CUR/') != -1 :
ret = self.__ScanVirus_Macro97__(data, X97M)
target = 'MSExcel'
            # The WordDocument stream holds a Word 95 macro.
elif section_name.find('WordDocument') != -1 :
ret = self.__ScanVirus_W95M__(data)
target = 'MSWord'
            # A stream under Macros/xxxx holds a Word 97 macro.
elif section_name.find('Macros/') != -1 :
ret = self.__ScanVirus_Macro97__(data, W97M)
target = 'MSWord'
if ret != None :
scan_state, s, i_num, i_list = ret
                # normalize the virus name
if s[0:2] == 'V.' :
s = 'Virus.%s.%s' % (target, s[2:])
elif s[0:2] == 'J.' :
s = 'Joke.%s.%s' % (target, s[2:])
                # A malware pattern matched, so return the result.
                ret_value['result'] = True # whether malware was found
                ret_value['virus_name'] = s # malware name
                ret_value['scan_state'] = scan_state # 0: none, 1: infected, 2: suspect, 3: warning
                ret_value['virus_id'] = 0 # malware ID
return ret_value
except :
pass
        # Report that no malware was found.
return ret_value
def __ScanVirus_W95M__(self, data) :
ret = None
try :
mac_data = ExtractMacroData_W95M(data)
if mac_data == None : raise SystemError
for data in mac_data :
hash_data = GetMD5_Macro(data, W95M)
ret = self.__ScanVirus_Macro_ExpendDB__(hash_data, W95M)
if ret != None : return ret
except :
pass
return ret
def __ScanVirus_X95M__(self, data) :
ret = None
try :
mac_data = ExtractMacroData_X95M(data)
if mac_data == None : raise SystemError
hash_data = GetMD5_Macro(mac_data, X95M)
ret = self.__ScanVirus_Macro_ExpendDB__(hash_data, X95M)
except :
pass
return ret
def __ScanVirus_Macro97__(self, data, target_macro) :
ret = None
try :
mac_data = ExtractMacroData_Macro97(data)
if mac_data == None : raise SystemError
hash_data = GetMD5_Macro(mac_data, target_macro)
ret = self.__ScanVirus_Macro_ExpendDB__(hash_data, target_macro)
except :
pass
return ret
def __ScanVirus_Macro_ExpendDB__(self, hash_data, target_macro) :
ret = None
try :
            fsize = hash_data[0] # size of the buffer the md5 was computed over
            fmd5 = hash_data[1] # md5 digest
            mac_size = hash_data[2] # actual macro size
            # pattern comparison
i_num = -1
if target_macro == X95M : macro_ptn = self.x95m_ptn
elif target_macro == X97M : macro_ptn = self.x97m_ptn
elif target_macro == W95M : macro_ptn = self.w95m_ptn
elif target_macro == W97M : macro_ptn = self.w97m_ptn
for i in range(len(macro_ptn)) :
vpattern = macro_ptn[i]
try :
                    t = vpattern[fsize] # is there a pattern bucket for this buffer size?
                    # check whether the first 6 bytes of the MD5 match
                    id = t[fmd5[0:6]]
                    # the remaining 10 bytes still have to be compared
                    i_num = id[0] # which x95m.iXX-style file holds the entry..
                    i_list = id[1] # ..and which list slot inside it
except :
pass
            if i_num != -1 : # a 6-byte MD5 prefix match was found
try :
if target_macro == X95M :
e_vlist = self.x95m_iptn[i_num]
elif target_macro == X97M :
e_vlist = self.x97m_iptn[i_num]
elif target_macro == W95M :
e_vlist = self.w95m_iptn[i_num]
elif target_macro == W97M :
e_vlist = self.w97m_iptn[i_num]
except :
if target_macro == X95M : ptn_name = 'x95m'
elif target_macro == X97M : ptn_name = 'x97m'
elif target_macro == W95M : ptn_name = 'w95m'
elif target_macro == W97M : ptn_name = 'w97m'
                    fname = '%s%s%s.i%02d' % (self.plugins, os.sep, ptn_name, i_num)
                    vdb = kavutil.VDB() # load the extended pattern file
e_vlist = vdb.Load(fname)
if e_vlist != None :
if target_macro == X95M : self.x95m_iptn[i_num] = e_vlist
elif target_macro == X97M : self.x97m_iptn[i_num] = e_vlist
elif target_macro == W95M : self.w95m_iptn[i_num] = e_vlist
elif target_macro == W97M : self.w97m_iptn[i_num] = e_vlist
                p_md5_10 = e_vlist[i_list][0] # remaining 10 bytes of the MD5
                p_mac_size = int(e_vlist[i_list][1]) # macro size
                p_vname = e_vlist[i_list][2] # virus name
                if (p_md5_10 == fmd5[6:]) and (p_mac_size == mac_size) : # full match
                    ret = (kernel.INFECTED, p_vname, i_num, i_list)
                elif p_md5_10 == fmd5[6:] : # only the md5 matches
s = p_vname + '.Gen'
ret = (kernel.SUSPECT, s, i_num, i_list)
except :
pass
return ret
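    # Summary of the lookup above (descriptive note based on the code as
    # written): the in-memory pattern maps buffer size -> first 6 MD5
    # bytes -> (i_num, i_list), which names an extended pattern file
    # (e.g. x95m.iNN) and a slot inside it; that file is lazy-loaded and
    # cached in self.*_iptn on first use; a full hit requires the
    # remaining 10 MD5 bytes plus the macro size to match (INFECTED),
    # while an MD5-only match is downgraded to a '.Gen' SUSPECT.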
#-----------------------------------------------------------------
# disinfect(self, filename, malwareID)
    # Disinfects malware.
    # Arguments : filename  - file name
    #           : malwareID - ID of the malware to disinfect
    # Returns   : whether disinfection succeeded
#-----------------------------------------------------------------
    def disinfect(self, filename, malwareID) : # disinfect malware
try :
'''
            # Is the ID received from the detection result 0?
            if malwareID == 0 :
                os.remove(filename) # delete the file
                return True # report disinfection complete
'''
except :
pass
        return False # report disinfection failure
#-----------------------------------------------------------------
# listvirus(self)
    # Returns the list of malware that can be detected/disinfected.
#-----------------------------------------------------------------
    def listvirus(self) : # list of detectable malware
        vlist = [] # declare a list variable
vlist.append('Virus.MSExcel.Laroux.A')
return vlist
#-----------------------------------------------------------------
# getinfo(self)
    # Returns key information about the engine module (version, author, ...).
#-----------------------------------------------------------------
def getinfo(self) :
        info = {} # declare a dict variable
        info['author'] = __author__ # author
        info['version'] = __version__ # version
        info['title'] = 'Macro Engine' # engine description
        info['kmd_name'] = 'macro' # engine file name
        # if the pattern date/time are missing, they default to the build time
        info['date'] = self.__date__ # pattern creation date
        info['time'] = self.__time__ # pattern creation time
        info['sig_num'] = self.__signum__ # number of signatures
return info
|
yezune/kicomav
|
Engine/plugins/macro.py
|
Python
|
gpl-2.0
| 19,013 | 0.017935 |
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
import os
#Functions for manipulating datetime objects
#CCYY-MM-DDThh:mm:ssZ
def parseDateClass(s):
year, month, day = s.split("-")
day, tail = day[:2], day[2:]
hour, minute, second = tail[1:].split(":")
second = second[:2]
year, month, day = int(year), int(month), int(day)
hour, minute, second = int(hour), int(minute), int(second)
return createDateClass(year, month, day, hour, minute, second)
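# Example of the expected input shape (illustration only):
# parseDateClass("2012-01-02T03:04:05Z") yields year=2012, month=1, day=2,
# hour=3, minute=4, second=5, which are handed to the platform-specific
# createDateClass defined below.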
if os.name != "java":
from datetime import datetime, timedelta
#Helper functions for working with a date/time class
def createDateClass(year, month, day, hour, minute, second):
return datetime(year, month, day, hour, minute, second)
def printDateClass(d):
#Split off fractional seconds, append 'Z'
return d.isoformat().split(".")[0]+"Z"
def getNow():
return datetime.utcnow()
def getHoursFromNow(hours):
return datetime.utcnow() + timedelta(hours=hours)
def getMinutesFromNow(minutes):
return datetime.utcnow() + timedelta(minutes=minutes)
def isDateClassExpired(d):
return d < datetime.utcnow()
def isDateClassBefore(d1, d2):
return d1 < d2
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
import java
import jarray
def createDateClass(year, month, day, hour, minute, second):
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.set(year, month-1, day, hour, minute, second)
return c
def printDateClass(d):
return "%04d-%02d-%02dT%02d:%02d:%02dZ" % \
(d.get(d.YEAR), d.get(d.MONTH)+1, d.get(d.DATE), \
d.get(d.HOUR_OF_DAY), d.get(d.MINUTE), d.get(d.SECOND))
def getNow():
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.get(c.HOUR) #force refresh?
return c
def getHoursFromNow(hours):
d = getNow()
d.add(d.HOUR, hours)
return d
def isDateClassExpired(d):
n = getNow()
return d.before(n)
def isDateClassBefore(d1, d2):
return d1.before(d2)
|
rebolinho/liveit.repository
|
script.video.F4mProxy/lib/f4mUtils/datefuncs.py
|
Python
|
gpl-2.0
| 2,355 | 0.005096 |
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import subprocess, time
last_ch = 0
class TvServerHandler(BaseHTTPRequestHandler):
def do_GET(self):
global last_ch
cmd = self.path.split('/')
if 'favicon.ico' in cmd:
return
ch = int(cmd[1])
if not ch or ch < 1:
ch = 1
if ch == last_ch:
return
last_ch = ch
p = subprocess.Popen("killall VLC",shell=True)
time.sleep(0.5)
cmd = "/Applications/VLC.app/Contents/MacOS/VLC -I dummy eyetv:// --sout='#std{access=http,mux=ts,dst=<your ip>:8484}' --sout-keep --autocrop --intf dummy --eyetv-channel=%s" % ch
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,close_fds=True)
time.sleep(0.5)
self.send_response(301)
self.send_header("Location", "http://<your ip>:8484?t=%f" % time.time())
self.end_headers()
return
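    # Flow note (descriptive): switching channels kills any running VLC,
    # spawns a new one streaming eyetv:// to <your ip>:8484, then sends a
    # 301 redirect pointing the browser at that stream with a
    # cache-busting timestamp.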
def do_POST(self):
pass
return
def main():
try:
server = HTTPServer(('',8485),TvServerHandler)
print 'server started'
server.serve_forever()
except KeyboardInterrupt:
print 'shutting down'
server.socket.close()
if __name__ == '__main__':
main()
|
mimepp/umspx
|
htdocs/umsp/plugins/eyetv/eyetv-controller.py
|
Python
|
gpl-3.0
| 1,293 | 0.037123 |
import kivy
kivy.require('1.9.1')
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.app import Builder
from kivy.metrics import dp
from kivy.graphics import Color, Line
from autosportlabs.racecapture.geo.geopoint import GeoPoint
from autosportlabs.uix.track.trackmap import TrackMapView
from utils import *
Builder.load_file('autosportlabs/uix/track/racetrackview.kv')
class RaceTrackView(BoxLayout):
def __init__(self, **kwargs):
super(RaceTrackView, self).__init__(**kwargs)
def loadTrack(self, track):
self.initMap(track)
def initMap(self, track):
self.ids.trackmap.setTrackPoints(track.map_points)
def remove_reference_mark(self, key):
self.ids.trackmap.remove_marker(key)
def add_reference_mark(self, key, color):
trackmap = self.ids.trackmap
if trackmap.get_marker(key) is None:
trackmap.add_marker(key, color)
def update_reference_mark(self, key, geo_point):
self.ids.trackmap.update_marker(key, geo_point)
def add_map_path(self, key, path, color):
self.ids.trackmap.add_path(key, path, color)
def remove_map_path(self, key):
self.ids.trackmap.remove_path(key)
def add_heat_values(self, key, heat_values):
self.ids.trackmap.add_heat_values(key, heat_values)
def remove_heat_values(self, key):
self.ids.trackmap.remove_heat_values(key)
|
ddimensia/RaceCapture_App
|
autosportlabs/uix/track/racetrackview.py
|
Python
|
gpl-3.0
| 1,499 | 0.007338 |
import unittest
from sikuli import *
from java.awt.event import KeyEvent
from javax.swing import JFrame
not_pressed = True
WAIT_TIME = 4
def pressed(event):
global not_pressed
not_pressed = False
print "hotkey pressed! %d %d" %(event.modifiers,event.keyCode)
class TestHotkey(unittest.TestCase):
def testAddHotkey(self):
self.assertTrue(Env.addHotkey(Key.F6, 0, pressed))
def testAddHotkeyReal(self):
#f = JFrame("hello")
global not_pressed
Env.addHotkey(Key.F6, 0, pressed)
self.assertTrue(not_pressed)
count = 0
while not_pressed and count < WAIT_TIME:
count += 1
wait(1)
keyDown(Key.F6)
keyUp(Key.F6)
self.assertFalse(not_pressed)
#f.dispose()
def testRemoveHotkey(self):
self.assertFalse(Env.removeHotkey(Key.F7, 0))
self.assertTrue(Env.addHotkey(Key.F7, 0, pressed))
self.assertTrue(Env.removeHotkey(Key.F7, 0))
def setUp(self):
global not_pressed
not_pressed = True
@classmethod
def tearDownClass(self):
print "clean up"
Env.cleanUp()
|
bx5974/sikuli
|
sikuli-script/src/test/python/test_hotkey.py
|
Python
|
mit
| 1,110 | 0.034234 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:22713")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:22713")
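# Usage sketch (assumed invocation): python bitrpc.py getbalance
# prompts for any optional arguments and prints the JSON-RPC result.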
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Capricoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Capricoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
Capricoinofficial/Capricoin
|
contrib/bitrpc/bitrpc.py
|
Python
|
mit
| 7,842 | 0.038128 |
# -*- coding: utf-8 -*-
import os
"""
Illustration for a tutorial exercise showing the time evolution of the
probability density for the equal-weight superposition of the n=1 state
and an arbitrary state n (to be chosen) in the infinite square well.
For simplicity, the constants are arranged so that E_1/hbar = 1.
"""
import numpy as np  # numerical toolbox
import matplotlib.pyplot as plt  # plotting toolbox
from matplotlib import animation  # for the progressive animation
# Second state n to observe (to be chosen)
n = 2
# Set (almost) all the parameters to 1
t0 = 0
dt = 0.1
L = 1
hbar = 1
h = hbar * 2 * np.pi
m = (2 * np.pi)**2
E1 = h**2 / (8 * m * L**2)
En = n**2 * E1  # infinite-well spectrum: E_n = n^2 * E_1
x = np.linspace(0, L, 1000)
def psi1(x, t):
return np.sin(np.pi * x / L) * np.exp(1j * E1 * t / hbar)
def psin(x, t):
return np.sin(n * np.pi * x / L) * np.exp(1j * En * t / hbar)
def psi(x, t):
return 1 / L**0.5 * (psi1(x, t) + psin(x, t))
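# For reference, the resulting density is
#   |psi(x,t)|^2 = (1/L) * (sin^2(pi x/L) + sin^2(n pi x/L)
#                  + 2 sin(pi x/L) sin(n pi x/L) cos((E_n - E_1) t / hbar)),
# so only the energy difference E_n - E_1 sets the beat frequency; the
# global phase convention (e^{+iEt} above) drops out of |psi|^2.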
fig = plt.figure()
line, = plt.plot(x, abs(psi(x, t0))**2)
plt.title('$t={}$'.format(t0))
plt.ylabel('$|\psi(x,t)|^2$')
plt.xlabel('$x$')
plt.plot(x, abs(psi1(x, t0))**2, '--', label='$|\psi_1|^2$')
plt.plot(x, abs(psin(x, t0))**2, '--', label='$|\psi_{}|^2$'.format(n))
plt.legend()
def init():
pass
def animate(i):
t = i * dt + t0
line.set_ydata(abs(psi(x, t))**2)
plt.title('$t={}$'.format(t))
anim = animation.FuncAnimation(fig, animate, frames=1000, interval=20)
plt.show()
os.system("pause")
|
NicovincX2/Python-3.5
|
Physique/Physique quantique/Mécanique quantique/principe_de_superposition_lineaire.py
|
Python
|
gpl-3.0
| 1,519 | 0.003333 |
from __future__ import unicode_literals, division, absolute_import
import re
from argparse import ArgumentParser, ArgumentTypeError
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget import options
from flexget.event import event
from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console
from flexget.utils.database import Session
from . import db
def do_cli(manager, options):
"""Handle regexp-list cli"""
action_map = {
'all': action_all,
'list': action_list,
'add': action_add,
'del': action_del,
'purge': action_purge,
}
action_map[options.regexp_action](options)
def action_all(options):
""" Show all regexp lists """
lists = db.get_regexp_lists()
header = ['#', 'List Name']
table_data = [header]
for regexp_list in lists:
table_data.append([regexp_list.id, regexp_list.name])
table = TerminalTable(options.table_type, table_data)
try:
console(table.output)
except TerminalTableError as e:
console('ERROR: %s' % str(e))
def action_list(options):
"""List regexp list"""
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}'.format(options.list_name))
return
header = ['Regexp']
table_data = [header]
regexps = db.get_regexps_by_list_id(
regexp_list.id, order_by='added', descending=True, session=session
)
for regexp in regexps:
regexp_row = [regexp.regexp or '']
table_data.append(regexp_row)
try:
table = TerminalTable(options.table_type, table_data)
console(table.output)
except TerminalTableError as e:
console('ERROR: %s' % str(e))
def action_add(options):
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}, creating'.format(options.list_name))
regexp_list = db.create_list(options.list_name, session=session)
regexp = db.get_regexp(list_id=regexp_list.id, regexp=options.regexp, session=session)
if not regexp:
console("Adding regexp {} to list {}".format(options.regexp, regexp_list.name))
db.add_to_list_by_name(regexp_list.name, options.regexp, session=session)
console(
'Successfully added regexp {} to regexp list {} '.format(
options.regexp, regexp_list.name
)
)
else:
console("Regexp {} already exists in list {}".format(options.regexp, regexp_list.name))
def action_del(options):
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}'.format(options.list_name))
return
regexp = db.get_regexp(list_id=regexp_list.id, regexp=options.regexp, session=session)
if regexp:
console('Removing regexp {} from list {}'.format(options.regexp, options.list_name))
session.delete(regexp)
else:
console(
'Could not find regexp {} in list {}'.format(
                    options.regexp, options.list_name
)
)
return
def action_purge(options):
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}'.format(options.list_name))
return
console('Deleting list %s' % options.list_name)
session.delete(regexp_list)
def regexp_type(regexp):
try:
re.compile(regexp)
return regexp
except re.error as e:
raise ArgumentTypeError(e)
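# Example (illustration only): regexp_type('fo[o') re-raises the underlying
# re.error as an ArgumentTypeError, so argparse rejects the value with a
# readable message instead of the task failing later on a bad pattern.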
@event('options.register')
def register_parser_arguments():
# Common option to be used in multiple subparsers
regexp_parser = ArgumentParser(add_help=False)
regexp_parser.add_argument('regexp', type=regexp_type, help="The regexp")
list_name_parser = ArgumentParser(add_help=False)
list_name_parser.add_argument(
'list_name', nargs='?', help='Name of regexp list to operate on', default='regexps'
)
# Register subcommand
parser = options.register_command('regexp-list', do_cli, help='View and manage regexp lists')
# Set up our subparsers
subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='regexp_action')
subparsers.add_parser('all', parents=[table_parser], help='Shows all existing regexp lists')
subparsers.add_parser(
'list', parents=[list_name_parser, table_parser], help='List regexp from a list'
)
subparsers.add_parser(
'add', parents=[list_name_parser, regexp_parser], help='Add a regexp to a list'
)
subparsers.add_parser(
'del', parents=[list_name_parser, regexp_parser], help='Remove a regexp from a list'
)
subparsers.add_parser(
'purge', parents=[list_name_parser], help='Removes an entire list. Use with caution!'
)
|
gazpachoking/Flexget
|
flexget/components/managed_lists/lists/regexp_list/cli.py
|
Python
|
mit
| 5,302 | 0.003584 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('flyerapp', '0007_auto_20150629_1135'),
]
operations = [
migrations.AddField(
model_name='schedule',
name='logic_delete',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='flight',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2015, 6, 30, 18, 59, 57, 180047), null=True, verbose_name=b'date published'),
),
migrations.AlterField(
model_name='schedule',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2015, 6, 30, 18, 59, 57, 180807), null=True, verbose_name=b'date published'),
),
]
|
luzeduardo/antonov225
|
flyer/flyerapp/migrations/0008_auto_20150630_1859.py
|
Python
|
gpl-2.0
| 924 | 0.002165 |
from __future__ import unicode_literals
from django.apps import AppConfig
class ApplicationsConfig(AppConfig):
name = 'applications'
def ready(self):
super(ApplicationsConfig, self).ready()
from applications.signals import create_draft_application, clean_draft_application, \
auto_delete_file_on_change, auto_delete_file_on_delete
create_draft_application
clean_draft_application
auto_delete_file_on_change
auto_delete_file_on_delete
|
hackupc/backend
|
applications/apps.py
|
Python
|
mit
| 506 | 0.001976 |
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from logging import getLogger
from eos.const.eos import EffectBuildStatus
from eos.const.eve import AttrId
from eos.const.eve import EffectId
from eos.eve_obj.effect import EffectFactory
from .modifier import make_drone_dmg_modifiers
from .modifier import make_missile_dmg_modifiers
from .modifier import make_missile_rof_modifiers
logger = getLogger(__name__)
def add_missile_rof_modifiers(effect):
if effect.modifiers:
msg = 'missile self skillreq rof effect has modifiers, overwriting them'
logger.warning(msg)
effect.modifiers = make_missile_rof_modifiers()
effect.build_status = EffectBuildStatus.custom
def _add_missile_dmg_modifiers(effect, attr_id):
if effect.modifiers:
msg = f'missile self skillreq damage effect {effect.id} has modifiers, overwriting them'
logger.warning(msg)
effect.modifiers = make_missile_dmg_modifiers(attr_id)
effect.build_status = EffectBuildStatus.custom
def add_missile_dmg_modifiers_em(effect):
_add_missile_dmg_modifiers(effect, AttrId.em_dmg)
def add_missile_dmg_modifiers_therm(effect):
_add_missile_dmg_modifiers(effect, AttrId.therm_dmg)
def add_missile_dmg_modifiers_kin(effect):
_add_missile_dmg_modifiers(effect, AttrId.kin_dmg)
def add_missile_dmg_modifiers_expl(effect):
_add_missile_dmg_modifiers(effect, AttrId.expl_dmg)
def add_drone_dmg_modifiers(effect):
if effect.modifiers:
msg = 'drone self skillreq dmg effect has modifiers, overwriting them'
logger.warning(msg)
effect.modifiers = make_drone_dmg_modifiers()
effect.build_status = EffectBuildStatus.custom
EffectFactory.register_instance_by_id(
add_missile_rof_modifiers,
EffectId.self_rof)
EffectFactory.register_instance_by_id(
add_missile_dmg_modifiers_em,
EffectId.missile_em_dmg_bonus)
EffectFactory.register_instance_by_id(
add_missile_dmg_modifiers_therm,
EffectId.missile_therm_dmg_bonus)
EffectFactory.register_instance_by_id(
add_missile_dmg_modifiers_kin,
EffectId.missile_kin_dmg_bonus2)
EffectFactory.register_instance_by_id(
add_missile_dmg_modifiers_expl,
EffectId.missile_expl_dmg_bonus)
EffectFactory.register_instance_by_id(
add_drone_dmg_modifiers,
EffectId.drone_dmg_bonus)
|
pyfa-org/eos
|
eos/eve_obj/custom/self_skillreq/__init__.py
|
Python
|
lgpl-3.0
| 3,165 | 0.000632 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <shadowapex@gmail.com>,
# Benjamin Bean <superman2k5@gmail.com>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# William Edwards <shadowapex@gmail.com>
#
#
# core.components.log Logging module.
#
#
import sys
import logging
from . import config as Config
# read the configuration file
config = Config.Config()
loggers = {}
# Set up logging if the configuration has it enabled
if config.debug_logging == "1":
for logger_name in config.loggers:
# Enable logging
logger = logging.getLogger(logger_name)
logger.setLevel(int(config.debug_level))
log_hdlr = logging.StreamHandler(sys.stdout)
log_hdlr.setLevel(logging.DEBUG)
log_hdlr.setFormatter(logging.Formatter("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
logger.addHandler(log_hdlr)
loggers[logger_name] = logger
|
andrefbsantos/Tuxemon
|
tuxemon/core/components/log.py
|
Python
|
gpl-3.0
| 1,644 | 0.000608 |
from django.db import models
from django.core.urlresolvers import reverse
from jsonfield import JSONField
import collections
# Create your models here.
class YelpvisState(models.Model):
title=models.CharField(max_length=255)
slug=models.SlugField(unique=True,max_length=255)
description = models.CharField(max_length=255)
content=models.TextField()
published=models.BooleanField(default=True)
created=models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-created']
def __unicode__(self):
return u'%s' % self.title
def get_absolute_url(self):
return reverse('blog:post', args=[self.slug])
class YelpvisCommentState(models.Model):
content=models.TextField()
pub_date = models.DateTimeField(auto_now_add=True)
vis_state = JSONField()
class Meta:
ordering = ['-pub_date']
def __unicode__(self):
return self.content
|
intuinno/vistalk
|
yelpvis/models.py
|
Python
|
mit
| 881 | 0.026107 |
# coding=UTF-8
# Author: Dennis Lutter <lad1337@gmail.com>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
"""Test show database functionality."""
from __future__ import print_function
import threading
from tests.legacy import test_lib as test
class DBBasicTests(test.AppTestDBCase):
"""Perform basic database tests."""
def setUp(self):
"""Unittest set up."""
super(DBBasicTests, self).setUp()
self.db = test.db.DBConnection()
def test_select(self):
self.db.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [0000])
class DBMultiTests(test.AppTestDBCase):
"""Perform multi-threaded test of the database."""
def setUp(self):
"""Unittest set up."""
super(DBMultiTests, self).setUp()
self.db = test.db.DBConnection()
def select(self):
"""Select from the database."""
self.db.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [0000])
def test_threaded(self):
"""Test multi-threaded selection from the database."""
for _ in range(4):
thread = threading.Thread(target=self.select)
thread.start()
|
fernandog/Medusa
|
tests/legacy/db_tests.py
|
Python
|
gpl-3.0
| 1,788 | 0.001119 |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_PropertyMixin(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage._helpers import _PropertyMixin
return _PropertyMixin
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _derivedClass(self, path=None):
class Derived(self._get_target_class()):
client = None
@property
def path(self):
return path
return Derived
def test_path_is_abstract(self):
mixin = self._make_one()
self.assertRaises(NotImplementedError, lambda: mixin.path)
def test_client_is_abstract(self):
mixin = self._make_one()
self.assertRaises(NotImplementedError, lambda: mixin.client)
def test_reload(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure changes is not a set, so we can observe a change.
derived._changes = object()
derived.reload(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'noAcl'})
# Make sure changes get reset by reload.
self.assertEqual(derived._changes, set())
def test__set_properties(self):
mixin = self._make_one()
self.assertEqual(mixin._properties, {})
VALUE = object()
mixin._set_properties(VALUE)
self.assertEqual(mixin._properties, VALUE)
def test__patch_property(self):
derived = self._derivedClass()()
derived._patch_property('foo', 'Foo')
self.assertEqual(derived._properties, {'foo': 'Foo'})
def test_patch(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure changes is non-empty, so we can observe a change.
BAR = object()
BAZ = object()
derived._properties = {'bar': BAR, 'baz': BAZ}
derived._changes = set(['bar']) # Ignore baz.
derived.patch(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
# Since changes does not include `baz`, we don't see it sent.
self.assertEqual(kw[0]['data'], {'bar': BAR})
# Make sure changes get reset by patch().
self.assertEqual(derived._changes, set())
class Test__scalar_property(unittest.TestCase):
def _call_fut(self, fieldName):
from google.cloud.storage._helpers import _scalar_property
return _scalar_property(fieldName)
def test_getter(self):
class Test(object):
def __init__(self, **kw):
self._properties = kw.copy()
do_re_mi = self._call_fut('solfege')
test = Test(solfege='Latido')
self.assertEqual(test.do_re_mi, 'Latido')
def test_setter(self):
class Test(object):
def _patch_property(self, name, value):
self._patched = (name, value)
do_re_mi = self._call_fut('solfege')
test = Test()
test.do_re_mi = 'Latido'
self.assertEqual(test._patched, ('solfege', 'Latido'))
class Test__base64_md5hash(unittest.TestCase):
def _call_fut(self, bytes_to_sign):
from google.cloud.storage._helpers import _base64_md5hash
return _base64_md5hash(bytes_to_sign)
def test_it(self):
from io import BytesIO
BYTES_TO_SIGN = b'FOO'
BUFFER = BytesIO()
BUFFER.write(BYTES_TO_SIGN)
BUFFER.seek(0)
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(SIGNED_CONTENT, b'kBiQqOnIz21aGlQrIp/r/w==')
def test_it_with_stubs(self):
import mock
class _Buffer(object):
def __init__(self, return_vals):
self.return_vals = return_vals
self._block_sizes = []
def read(self, block_size):
self._block_sizes.append(block_size)
return self.return_vals.pop()
BASE64 = _Base64()
DIGEST_VAL = object()
BYTES_TO_SIGN = b'BYTES_TO_SIGN'
BUFFER = _Buffer([b'', BYTES_TO_SIGN])
MD5 = _MD5(DIGEST_VAL)
patch = mock.patch.multiple(
'google.cloud.storage._helpers',
base64=BASE64, md5=MD5)
with patch:
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(BUFFER._block_sizes, [8192, 8192])
self.assertIs(SIGNED_CONTENT, DIGEST_VAL)
self.assertEqual(BASE64._called_b64encode, [DIGEST_VAL])
self.assertEqual(MD5._called, [None])
self.assertEqual(MD5.hash_obj.num_digest_calls, 1)
self.assertEqual(MD5.hash_obj._blocks, [BYTES_TO_SIGN])
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _MD5Hash(object):
def __init__(self, digest_val):
self.digest_val = digest_val
self.num_digest_calls = 0
self._blocks = []
def update(self, block):
self._blocks.append(block)
def digest(self):
self.num_digest_calls += 1
return self.digest_val
class _MD5(object):
def __init__(self, digest_val):
self.hash_obj = _MD5Hash(digest_val)
self._called = []
def __call__(self, data=None):
self._called.append(data)
return self.hash_obj
class _Base64(object):
def __init__(self):
self._called_b64encode = []
def b64encode(self, value):
self._called_b64encode.append(value)
return value
class _Client(object):
def __init__(self, connection):
self._connection = connection
|
dstrockis/outlook-autocategories
|
lib/unit_tests/test__helpers.py
|
Python
|
apache-2.0
| 6,951 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2017/10/18 17:13
# @Author : xxc727xxc (xxc727xxc@foxmail.com)
# @Version : 1.0.0
if __name__ == '__main__':
pass
|
DreamerBear/awesome-py3-webapp
|
www/biz/__init__.py
|
Python
|
gpl-3.0
| 181 | 0 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DiskOffering.available_size_kb'
db.add_column(u'physical_diskoffering', 'available_size_kb',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'DiskOffering.available_size_kb'
db.delete_column(u'physical_diskoffering', 'available_size_kb')
models = {
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'available_size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.enginetype': {
'Meta': {'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'equivalent_environment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Environment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_arbiter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['physical.Environment']", 'symmetrical': 'False'}),
'equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Plan']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical']
|
globocom/database-as-a-service
|
dbaas/physical/migrations/0025_auto__add_field_diskoffering_available_size_kb.py
|
Python
|
bsd-3-clause
| 11,926 | 0.00763 |
from pymacaron.log import pymlogger
import multiprocessing
from math import ceil
from pymacaron.config import get_config
log = pymlogger(__name__)
# Calculate resources available on this container hardware.
# Used by pymacaron-async, pymacaron-gcp and pymacaron-docker
def get_gunicorn_worker_count(cpu_count=None):
"""Return the number of gunicorn worker to run on this container hardware"""
if cpu_count:
return cpu_count * 2 + 1
return multiprocessing.cpu_count() * 2 + 1
def get_celery_worker_count(cpu_count=None):
"""Return the number of celery workers to run on this container hardware"""
conf = get_config()
if hasattr(conf, 'worker_count'):
# Start worker_count parrallel celery workers
return conf.worker_count
if cpu_count:
return cpu_count * 2
c = multiprocessing.cpu_count() * 2
# Minimum worker count == 2
if c < 2:
c == 2
return c
# Memory required, in Mb, by one gunicorn or celery worker:
GUNICORN_WORKER_MEM = 400
CELERY_WORKER_MEM = 200
def get_memory_limit(default_celery_worker_count=None, cpu_count=None):
"""Return the memory in Megabytes required to run pymacaron on this container hardware"""
# Let's calculate how much memory this pymacaron config requires for 1 container
celery_count = default_celery_worker_count
if not celery_count:
celery_count = get_celery_worker_count(cpu_count=cpu_count)
return ceil(get_gunicorn_worker_count(cpu_count=cpu_count) * GUNICORN_WORKER_MEM + celery_count * CELERY_WORKER_MEM)
def get_celery_worker_memory_limit():
return CELERY_WORKER_MEM * 1024
|
erwan-lemonnier/klue-microservice
|
pymacaron/resources.py
|
Python
|
bsd-2-clause
| 1,638 | 0.003053 |
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Help module for Plinth.
"""
import os
from apt.cache import Cache
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _, ugettext_lazy
from stronghold.decorators import public
from plinth import cfg, __version__
def init():
"""Initialize the Help module"""
menu = cfg.main_menu.add_urlname(ugettext_lazy('Documentation'),
'glyphicon-book', 'help:index')
menu.add_urlname(ugettext_lazy('Where to Get Help'), 'glyphicon-search',
'help:index_explicit', 5)
menu.add_urlname(ugettext_lazy('Manual'), 'glyphicon-info-sign',
'help:manual', 10)
menu.add_urlname(ugettext_lazy('About'), 'glyphicon-star', 'help:about',
100)
@public
def index(request):
"""Serve the index page"""
return TemplateResponse(request, 'help_index.html',
{'title': _('Documentation and FAQ')})
@public
def about(request):
"""Serve the about page"""
cache = Cache()
plinth = cache['plinth']
context = {
'title': _('About {box_name}').format(box_name=_(cfg.box_name)),
'version': __version__,
'new_version': not plinth.candidate.is_installed
}
return TemplateResponse(request, 'help_about.html', context)
@public
def manual(request):
"""Serve the manual page from the 'doc' directory"""
try:
with open(os.path.join(cfg.doc_dir, 'freedombox-manual.part.html'),
'r', encoding='utf-8') as input_file:
content = input_file.read()
except IOError:
raise Http404
return TemplateResponse(
request, 'help_manual.html',
{'title': _('{box_name} Manual').format(box_name=_(cfg.box_name)),
'content': content})
def status_log(request):
"""Serve the last 100 lines of plinth's status log"""
num_lines = 100
with open(cfg.status_log_file, 'r') as log_file:
data = log_file.readlines()
data = ''.join(data[-num_lines:])
context = {
'num_lines': num_lines,
'data': data
}
return TemplateResponse(request, 'statuslog.html', context)
|
freedomboxtwh/Plinth
|
plinth/modules/help/help.py
|
Python
|
agpl-3.0
| 2,917 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Anne Archibald <peridot.faceted@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
class ContainerError(ValueError):
"""Error signaling something went wrong with container handling"""
pass
class Container(object):
"""A container is an object that manages objects it contains.
The objects in a container each have a .container attribute that
points to the container. This attribute is managed by the container
itself.
This class is a base class that provides common container functionality,
to be used to simplify implementation of list and dict containers.
"""
    def _set_container(self, item):
        # An item can belong to at most one container at a time; re-adding it
        # anywhere (including the container it is already in) raises. This is
        # the behaviour the demo code at the bottom of this module relies on,
        # and silently moving the item would break for dict containers, which
        # have no remove().
        if hasattr(item, "container") and item.container is not None:
            raise ContainerError("Item %s was added to container %s but was already in container %s" % (item, self, item.container))
        item.container = self
def _unset_container(self, item):
if item.container is not self:
raise ContainerError("Item %s was removed from container %s but was not in it" % (item, self))
item.container = None
def _set_container_multi(self, items):
"""Put items in the container in an all-or-nothing way"""
r = []
try:
for i in items:
self._set_container(i)
r.append(i)
r = None
finally: # Make sure items don't get added to this if any fail
if r is not None:
for i in r:
try:
self._unset_container(i)
except ContainerError:
pass
def _unset_container_multi(self, items):
"""Remove items from the container in an all-or-nothing way"""
r = []
try:
for i in items:
self._unset_container(i)
r.append(i)
r = None
finally:
if r is not None:
for i in r:
try:
self._set_container(i)
except ContainerError:
pass
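# Note on the rollback pattern above: r collects the items processed so far
# and is reset to None only once every item has succeeded, so the finally
# block undoes the partial work whenever an exception escapes mid-loop.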
class ContainerList(list,Container):
"""A ContainerList is a list whose children know they're in it.
Each element in the ContainerList has a .container attribute which points
to the ContainerList itself. This container pointer is maintained automatically.
"""
def __init__(self, items=[], owner=None):
list.__init__(self, items)
self._set_container_multi(items)
self.owner = owner
def __repr__(self):
return "<CL %s>" % list.__repr__(self)
def append(self, item):
self._set_container(item)
list.append(self,item)
def extend(self, items):
self._set_container_multi(items)
list.extend(self,items)
def insert(self, i, item):
self._set_container(item)
list.insert(self,i,item)
def remove(self, item):
self._unset_container(item)
list.remove(self,item)
def pop(self, i=-1):
self._unset_container(self[i])
return list.pop(self,i)
# These don't work because they make the elements part of more than one list, or one list more than once
def __add__(self, other):
raise NotImplementedError
def __radd__(self, other):
raise NotImplementedError
def __imul__(self,other):
raise NotImplementedError
def __mul__(self, other):
raise NotImplementedError
def __rmul__(self,other):
raise NotImplementedError
# only works if other is not also a Container
def __iadd__(self, other):
self.extend(other)
return self
def __setitem__(self, key, value):
# FIXME: check slices work okay
if isinstance(key, slice):
self._unset_container_multi(self[key])
try:
self._set_container_multi(value)
except ContainerError:
self._set_container_multi(self[key])
raise
else:
self._unset_container(self[key])
try:
self._set_container(value)
except ContainerError:
self._set_container(self[key])
raise
list.__setitem__(self,key,value)
def __delitem__(self, key):
# FIXME: check slices work okay
if isinstance(key, slice):
self._unset_container_multi(self[key])
else:
self._unset_container(self[key])
list.__delitem__(self,key)
# Needed for python2, forbidden for python3
def __delslice__(self,i,j):
del self[slice(i,j,None)]
class ContainerDict(dict,Container):
"""A ContainerDict is a dict whose children know they're in it.
Each element in the ContainerDict has a .container attribute which points
to the ContainerDict itself. This container pointer is maintained automatically.
"""
def __init__(self, contents=None, **kwargs):
if contents is None:
dict.__init__(self, **kwargs)
else:
dict.__init__(self, contents, **kwargs)
self._set_container_multi(list(self.values()))
def __repr__(self):
return "<CD %s>" % dict.__repr__(self)
def __setitem__(self, key, value):
if key in self:
self._unset_container(self[key])
try:
self._set_container(value)
except ContainerError:
if key in self:
self._set_container(self[key])
raise
dict.__setitem__(self,key,value)
def __delitem__(self, key):
if key in self:
self._unset_container(self[key])
dict.__delitem__(self,key)
def pop(self, key):
if key in self:
self._unset_container(self[key])
return dict.pop(self,key)
def popitem(self):
key, value = dict.popitem(self)
self._unset_container(value)
return key, value
def setdefault(self, key, default=None):
if key not in self:
self._set_container(default)
        return dict.setdefault(self, key, default)
def update(self, other):
for (k,v) in list(other.items()):
self[k] = v
if __name__=='__main__':
class Gear(object):
def __init__(self, name, container=None):
self.name = name
self.container = container
def __repr__(self):
return "<G "+str(self.name)+">"
gears = [Gear(n) for n in range(10)]
a = Gear("A")
b = Gear("B")
c = Gear("C")
d = Gear("D")
e = Gear("E")
p = ContainerList([a,b,c])
print(p)
try:
p.append(a)
except ContainerError as err:
print(err)
else:
raise AssertionError
print(p[1])
print(p[::2])
p[1] = d
print(p)
p[1] = b
p[::2] = [d,e]
print(p)
del p[:]
p2 = ContainerList([a,b,c])
print(p2)
p2.extend([d,e])
print(p2)
print(p2.pop())
print(p2)
p2.remove(d)
print(p2)
p2 += [d,e]
print(p2)
try:
d = ContainerDict(a=a, b=b, c=c)
except ContainerError as err:
print(err)
else:
raise AssertionError
del p2[:]
d = ContainerDict(a=a, b=b, c=c)
print(d)
print(d["a"])
d["a"] = a
try:
d["a"] = b
except ContainerError as err:
print(err)
else:
raise AssertionError
del d["a"]
d["a"] = a
d.pop("a")
print(d)
d["a"] = a
k,v = d.popitem()
d[k] = v
d.setdefault("e",e)
d.setdefault("e",e)
print(d)
del d["e"]
d.update(dict(e=e))
print(d)
|
jwvhewitt/dmeternal
|
old_game/container.py
|
Python
|
gpl-2.0
| 8,515 | 0.00916 |
# View more python tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
# create data
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1 + 0.3
### create tensorflow structure start ###
### create tensorflow structure end ###
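# One possible solution for the structure section above (an illustrative
# sketch using the TF1-style API this tutorial series targets; the names are
# the conventional ones from the tutorials, not prescribed by this file):
# Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
# biases = tf.Variable(tf.zeros([1]))
# y = Weights * x_data + biases
# loss = tf.reduce_mean(tf.square(y - y_data))
# train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# init = tf.global_variables_initializer()
# sess = tf.Session()
# sess.run(init)
# The loop below would then call sess.run(train) once per step.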
# Very important: the variables must be initialized (sess.run(init)) before the training loop runs
for step in range(201):
pass
|
MediffRobotics/DeepRobotics
|
DeepLearnMaterials/tutorials/tensorflowTUT/tf5_example2/for_you_to_practice.py
|
Python
|
gpl-3.0
| 640 | 0.010938 |
# ---Libraries---
# Standard library
import os
import sys
import math
# Third-party libraries
import cv2
import numpy as np
import scipy.ndimage as ndimage
# Private libraries
import compute_OBIFs
import color_BIFs
sys.path.append(os.path.abspath("../"))
import utils
template_png='algorithms/inputFields/template.png'
amount_input_png='algorithms/inputFields/amount_template.png'
date_input_png='algorithms/inputFields/date_template.png'
def searchTemplateCenterPointIn(check, template, searchMap, step=1, threshold=-9999999):
fromIndex = [int(template.shape[0] / 2 + 1), int(template.shape[1] / 2 + 1)]
toIndex = [int(searchMap.shape[0] - template.shape[0] / 2), int(searchMap.shape[1] - template.shape[1] / 2)]
radios = [int(template.shape[0] / 2), int(template.shape[1] / 2)]
maxConv = threshold
maxCenterConv = [0, 0]
for centerConvX in range(fromIndex[0], toIndex[0]):
for centerConvY in range(fromIndex[1], toIndex[1]):
if searchMap[centerConvX, centerConvY] == 1:
convMatrix = check[centerConvX - radios[0]:centerConvX + radios[0] + template.shape[0]%2,
centerConvY - radios[1]:centerConvY + radios[1] + template.shape[1]%2] \
* template
conv = np.sum(convMatrix)
if maxConv < conv:
maxConv = conv
maxCenterConv = [centerConvX, centerConvY]
    print(maxConv)
return maxCenterConv
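# Note: the binary maps fed to the search above encode ink as +1 and
# background as -1 (see normalize/extract below), so the windowed elementwise
# product is a cross-correlation score: matching pixels add 1, mismatching
# pixels subtract 1, and the best alignment maximizes the sum.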
def normalize(image):
binary = np.array(image, dtype=np.int8, copy=True)
binary[image == 0] = 1
binary[image == 255] = -1
return binary
def binaryTemplate():
img_template = cv2.imread(template_png)
return utils.sanitize(img_template)
def dateTemplate():
img_template = cv2.imread(date_input_png)
return utils.sanitize(img_template)
def amountTemplate():
img_template = cv2.imread(amount_input_png)
return utils.sanitize(img_template)
def binaryTemplateFix():
img_template = cv2.imread(template_png)
return utils.sanitize(img_template, False)
# Extract input fields, the Region Of Interest (ROI), from bank check.
def extract(check):
template = binaryTemplate()
templateRadios = [template.shape[0] / 2, template.shape[1] / 2]
checkMap = np.array(check, dtype=np.int8)
checkMap[check == 0] = 1
checkMap[check > 0] = -1
searchFrom = [check.shape[0] / 2 - 10, check.shape[1] / 2 - 10]
searchTo = [check.shape[0] / 2 + 100, check.shape[1] / 2 + 10]
searchMatrix = np.zeros(check.shape, np.uint8)
searchMatrix[int(searchFrom[0]):int(searchTo[0]), int(searchFrom[1]):int(searchTo[1])] = 1
center = searchTemplateCenterPointIn(checkMap, template, searchMatrix)
inputFieldsRectangle = [[int(center[0] - templateRadios[0] - 1), int(center[0] + templateRadios[0])],
[int(center[1] - templateRadios[1]), int(center[1] + templateRadios[1])]]
roi = check[inputFieldsRectangle[0][0]:inputFieldsRectangle[0][1],
inputFieldsRectangle[1][0]:inputFieldsRectangle[1][1]]
return roi
def extractAmount(input_fields, clean = True):
template = amountTemplate()
template[template == -1] = 0
input_fields_map = normalize(input_fields)
amountX = 1018
amountY = 96
searchFrom = [amountY - 50, amountX - 50]
searchTo = [amountY + 50, amountX + 50]
searchMatrix = np.zeros(input_fields.shape, np.uint8)
searchMatrix[int(searchFrom[0]):int(searchTo[0]), int(searchFrom[1]):int(searchTo[1])] = 1
center = searchTemplateCenterPointIn(input_fields_map, template, searchMatrix)
inputFieldsRectangle = [[int(center[0] - template.shape[0]/2), int(center[0] + template.shape[0]/2)],
[int(center[1] - template.shape[1]/2), int(center[1] + template.shape[1]/2)]]
template[template == 0] = -1
template[template == 1] = 0
template[:,0:35] = 0
input_fields_clean = cleanBy(input_fields[inputFieldsRectangle[0][0]:inputFieldsRectangle[0][1],
inputFieldsRectangle[1][0]:inputFieldsRectangle[1][1]], template)
inputFieldsRectangle[1][1] = input_fields.shape[1] if inputFieldsRectangle[1][1] + 50 > input_fields.shape[1] \
else inputFieldsRectangle[1][1] + 50
inputFieldsRectangle[0][0] -= 20
roi = np.copy(input_fields[inputFieldsRectangle[0][0]:inputFieldsRectangle[0][1],
inputFieldsRectangle[1][0]:inputFieldsRectangle[1][1]])
if clean:
roi[20:roi.shape[0], 0:input_fields_clean.shape[1]] = input_fields_clean
return roi
def extractDate(input_fields):
template = dateTemplate()
input_fields_map = normalize(input_fields)
amountX = 683
amountY = 190
searchFrom = [amountY - 100, amountX - 100]
searchTo = [amountY + 100, amountX + 100]
searchMatrix = np.zeros(input_fields.shape, np.uint8)
searchMatrix[int(searchFrom[0]):int(searchTo[0]), int(searchFrom[1]):int(searchTo[1])] = 1
center = searchTemplateCenterPointIn(input_fields, template, searchMatrix)
inputFieldsRectangle = [[int(center[0] - 50), int(center[0] + 50)],
[int(center[1] - 113), int(center[1] + 113)]]
roi = input_fields[inputFieldsRectangle[0][0]:inputFieldsRectangle[0][1],
inputFieldsRectangle[1][0]:inputFieldsRectangle[1][1]]
return roi
def clean(check):
input_fields = extract(check)
input_fields_OBIFs = compute_OBIFs.computeOBIFs(input_fields)
empty_input_fields = binaryTemplateFix()
empty_input_fields_OBIFs = compute_OBIFs.computeOBIFs(empty_input_fields)
# input_fields[diff_map_not] = 255
input_fields_clone = cleanBy(input_fields, empty_input_fields)
# clean_input_fields_OBIFs = compute_OBIFs.computeOBIFs(input_fields)
diff_map = np.equal(input_fields_OBIFs, empty_input_fields_OBIFs)
# diff_map_clean = np.equal(input_fields_OBIFs, clean_input_fields_OBIFs)
# diff_map_not = np.not_equal(input_fields_OBIFs, empty_input_fields_OBIFs)
# input_fields_OBIFs[diff_map] = 30
# empty_input_fields_OBIFs[diff_map] = 30
if_obifs_color = color_BIFs.bifs_to_color_image(input_fields_OBIFs)
eif_obifs_color = color_BIFs.bifs_to_color_image(empty_input_fields_OBIFs)
# cif_obifs_color = color_BIFs.bifs_to_color_image(clean_input_fields_OBIFs)
if_obifs_color[diff_map] = 30
if_obifs_color[empty_input_fields_OBIFs == 0] = 30
eif_obifs_color[diff_map] = 30
# cif_obifs_color[diff_map_clean] = 30
cv2.imwrite("obifInput.png", if_obifs_color)
cv2.imwrite("obifEmptyInput.png", eif_obifs_color)
# cv2.imwrite("obifCleanInput.png", cif_obifs_color)
# diff_map[empty_input_fields != 0] = False
return input_fields_clone
def cleanBy(image, template_image):
image_clone = np.copy(image)
image_clone[template_image == 0] = 255
# kernel = np.zeros((5, 5), np.float16)
# kernel[1][1] = 1/6.
# kernel[1][2] = 1/6.
# kernel[1][3] = 1/6.
# kernel[3][2] = 1/6.
# kernel[3][2] = 1/6.
# kernel[3][3] = 1/6.
#
#
# pixel_matrix = ndimage.filters.convolve(image_clone, kernel, mode='constant')
# cv2.imwrite('test1.png', pixel_matrix)
#
# pixel_matrix[template_image != 0] = 255
return image_clone
# Test
# img_template = cv2.imread('inputFields/templateFix1.png')
#
# image = np.array(img_template, dtype=np.uint8)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# ret3, invers1 = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# ret, invers2 = cv2.threshold(invers1, 127, 255, cv2.THRESH_BINARY_INV)
# blur1 = cv2.GaussianBlur(image, (11, 11), 0)
# blur2 = cv2.GaussianBlur(image, (21, 21), 0)
# blur3 = cv2.GaussianBlur(image, (31, 31), 0)
# blur4 = cv2.GaussianBlur(image, (41, 41), 0)
#
#
# blur1 = np.array(blur1, dtype=np.uint8)
# blur2 = np.array(blur2, dtype=np.uint8)
# blur3 = np.array(blur3, dtype=np.uint8)
# blur4 = np.array(blur4, dtype=np.uint8)
#
# blur1 = cv2.cvtColor(blur1, cv2.COLOR_BGR2GRAY)
# blur2 = cv2.cvtColor(blur2, cv2.COLOR_BGR2GRAY)
# blur3 = cv2.cvtColor(blur3, cv2.COLOR_BGR2GRAY)
# blur4 = cv2.cvtColor(blur4, cv2.COLOR_BGR2GRAY)
#
# ret3, invers1 = cv2.threshold(blur1, 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# ret3, invers2 = cv2.threshold(blur2, 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# ret3, invers3 = cv2.threshold(blur3, 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# ret3, invers4 = cv2.threshold(blur4, 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#
# cv2.imwrite('inputFields/templateV1.png', invers1)
# cv2.imwrite('inputFields/templateV2.png', invers2)
# cv2.imwrite('inputFields/templateV3.png', invers3)
# cv2.imwrite('inputFields/templateV4.png', invers4)
# cv2.imwrite('inputFields/template.png', invers1)
# img_template = cv2.imread('inputFields/templateFix1.png')
#
# image = np.array(img_template, dtype=np.uint8)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# ret3, invers1 = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#
#
# cv2.imwrite('inputFields/templateFix1.png', invers1)
# Create the template function ROI Weight function
# The max value indicate on the ROI position.
#
# def checkTemplate(name):
# img_template = cv2.imread('inputFields/template.png')
# image = np.array(img_template, dtype=np.uint8)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# ret3, binary = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#
# binary = np.array(binary, dtype=np.int8)
#
# binary[binary == 0] = 1
# binary[binary == 255] = -1
#
# img_template = cv2.imread('../../assets/Checks/' + name.__str__() + '.png')
# image = utils.rotate_image_by_angle(img_template, -2.213)
# image = np.array(image, dtype=np.uint8)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# ret3, check = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#
# checkMap = np.array(check, dtype=np.int8)
#
# checkMap[check == 0] = 1
# checkMap[check == 255] = -1
#
# selectionFrom = [check.shape[0] / 2 - 10, check.shape[1] / 2 - 10]
# selectionTo = [check.shape[1] / 2 + 10, check.shape[1] / 2 + 10]
#
# selectionMatrix = np.zeros(check.shape,np.uint8)
# selectionMatrix[int(selectionFrom[0]):int(selectionTo[0]), int(selectionFrom[1]):int(selectionTo[1])] = 1
#
# center = searchTemplateCenterPointIn(checkMap, binary, selectionMatrix)
#
# binaryRadios = [binary.shape[0] / 2, binary.shape[1] / 2]
#
# binary[binary == 1] = 120
#
# for i in range(center[0]-binaryRadios[0]-1, center[0]+binaryRadios[0]):
# for j in range(center[1]-binaryRadios[1], center[1]+binaryRadios[1]):
# index = [i-center[0]+binaryRadios[0]-1,j-center[1]+binaryRadios[1]]
# if binary[index[0], index[1]] == 120:
# check[i,j] = 120
# # check[center[0]-binaryRadios[0]-1:center[0]+binaryRadios[0], center[1]-binaryRadios[1]:center[1]+binaryRadios[1]] = binary
#
# cv2.imwrite('inputFields/checkRes' + name.__str__() + '.png', check)
#
#
# checkTemplate(1)
# checkTemplate(2)
# checkTemplate(3)
# checkTemplate(4)
# checkTemplate(5)
# checkTemplate(6)
# checkTemplate(7)
# checkTemplate(8)
# print img_template
|
avicorp/firstLook
|
src/algorithms/check_input_fields.py
|
Python
|
apache-2.0
| 11,258 | 0.004264 |
import os.path
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from nbconvert import html
class Notebook(Directive):
"""Use nbconvert to insert a notebook into the environment.
This is based on the Raw directive in docutils
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
has_content = False
def run(self):
# check if raw html is supported
if not self.state.document.settings.raw_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
        # the node we build at the end is always raw HTML
        attributes = {'format': 'html'}
# get path to notebook
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
nb_path = os.path.normpath(os.path.join(source_dir,
self.arguments[0]))
nb_path = utils.relative_path(None, nb_path)
# convert notebook to html
exporter = html.HTMLExporter(template_file='full')
output, resources = exporter.from_filename(nb_path)
header = output.split('<head>', 1)[1].split('</head>',1)[0]
body = output.split('<body>', 1)[1].split('</body>',1)[0]
# add HTML5 scoped attribute to header style tags
header = header.replace('<style', '<style scoped="scoped"')
header = header.replace('body {\n overflow: visible;\n padding: 8px;\n}\n',
'')
header = header.replace("code,pre{", "code{")
# Filter out styles that conflict with the sphinx theme.
filter_strings = [
'navbar',
'body{',
'alert{',
'uneditable-input{',
'collapse{',
]
filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
line_begin = [
'pre{',
'p{margin'
]
filterfunc = lambda x: not any([s in x for s in filter_strings])
header_lines = filter(filterfunc, header.split('\n'))
filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
header_lines = filter(filterfunc, header_lines)
header = '\n'.join(header_lines)
# concatenate raw html lines
lines = ['<div class="ipynotebook">']
lines.append(header)
lines.append(body)
lines.append('</div>')
text = '\n'.join(lines)
# add dependency
self.state.document.settings.record_dependencies.add(nb_path)
attributes['source'] = nb_path
# create notebook node
nb_node = notebook('', text, **attributes)
(nb_node.source, nb_node.line) = \
self.state_machine.get_source_and_line(self.lineno)
return [nb_node]
class notebook(nodes.raw):
pass
def visit_notebook_node(self, node):
self.visit_raw(node)
def depart_notebook_node(self, node):
self.depart_raw(node)
def setup(app):
app.add_node(notebook,
html=(visit_notebook_node, depart_notebook_node))
app.add_directive('notebook', Notebook)
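# Usage sketch (assuming this file is on sys.path and listed as
# "notebook_sphinxext" in the Sphinx conf.py "extensions" list):
#
# .. notebook:: ../examples/example.ipynb
#
# The directive runs nbconvert's full HTML exporter on the notebook and
# embeds the result as a raw html node, with the notebook's styles scoped
# and filtered so they do not leak into the Sphinx theme.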
|
wbinventor/openmc
|
docs/sphinxext/notebook_sphinxext.py
|
Python
|
mit
| 3,717 | 0.001345 |
# Problem 28
# Starting with the number 1 and moving to the right in a clockwise direction a 5 by 5 spiral is formed as follows:
#
# 21 22 23 24 25
# 20 7 8 9 10
# 19 6 1 2 11
# 18 5 4 3 12
# 17 16 15 14 13
#
# It can be verified that the sum of the numbers on the diagonals is 101.
#
# What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral formed in the same way?
from enum import Enum
# constants for this problem
grid_rows = 1001 # 1001, for the final problem
grid_cols = grid_rows # make column/row distinction later on easier
max_idx = grid_rows - 1 #just to make life easier later
grid_print = False # set True to dump the grid after filling (only practical for small grids)
#first setup the grid
my_grid = [[0 for x in range(grid_rows)] for y in range(grid_cols)]
# find the starting point
starting_point = int(grid_rows / 2) # for an odd-sized grid this is the 0-based index of the center cell
# these are "grid maintenance" types of things
grid_filled = False
fill_directions = Enum('direction', 'right down left up')
current_direction = fill_directions.right
grid_value = 1 # the center cell of the spiral holds 1; the 'while' loop below fills the rest
# working variables, these will be used to keep track of the current position
candidate_col = starting_point
candidate_row = starting_point
my_grid[candidate_row][candidate_col] = grid_value
grid_value += 1 # this seeds the center of our spiral
while not grid_filled:
try:
if current_direction == fill_directions.right and my_grid[candidate_row][candidate_col + 1] == 0:
candidate_col += 1 # offset by one column
if my_grid[candidate_row + 1][candidate_col] == 0:
current_direction = fill_directions.down
elif current_direction == fill_directions.down and my_grid[candidate_row + 1][candidate_col] == 0:
candidate_row += 1
if my_grid[candidate_row][candidate_col - 1] == 0:
current_direction = fill_directions.left
elif current_direction == fill_directions.left and my_grid[candidate_row][candidate_col - 1] == 0:
candidate_col -= 1
if my_grid[candidate_row - 1][candidate_col] == 0:
current_direction = fill_directions.up
elif current_direction == fill_directions.up and my_grid[candidate_row - 1][candidate_col] == 0:
candidate_row -= 1
if my_grid[candidate_row][candidate_col + 1] == 0:
current_direction = fill_directions.right
else:
raise Exception("current_direction wasn't in the enum of possible directions: {0}".format(current_direction))
except IndexError as idxExc:
break
    if grid_value > grid_rows * grid_cols:
        # safety net: every cell is filled; in practice the loop exits via
        # the IndexError break when the walk steps past the grid edge
        grid_filled = True
else:
my_grid[candidate_row][candidate_col] = grid_value
grid_value += 1
if grid_print:
for x in range(grid_rows):
row_val = ""
for y in range(grid_cols):
row_val += "{0}\t".format(my_grid[x][y])
print("row {0}: {1}".format(x, row_val))
current_row = 0
running_total = 0
for i in range(grid_cols):
    running_total += my_grid[current_row][i]
    # add the anti-diagonal cell as well, skipping the center cell (value 1),
    # which both diagonals share
    running_total += my_grid[current_row][-i - 1] if my_grid[current_row][i] != 1 else 0
    current_row += 1
print("running_total is: {0}".format(running_total))
|
chriscallan/Euler
|
Probs_1_to_50/028_NumberSpiralDiagonals.py
|
Python
|
gpl-3.0
| 3,489 | 0.004013 |
from __future__ import print_function
from __future__ import absolute_import
from Components.Task import Task, Job, DiskspacePrecondition, Condition, ToolExistsPrecondition
from Components.Harddisk import harddiskmanager
from Screens.MessageBox import MessageBox
from .Project import iso639language
import Tools.Notifications
class png2yuvTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Creating menu video")
self.setTool("png2yuv")
self.args += ["-n1", "-Ip", "-f25", "-j", inputfile]
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
def processStderr(self, data):
print("[png2yuvTask]", data[:-1])
class mpeg2encTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Encoding menu video")
self.setTool("mpeg2enc")
self.args += ["-f8", "-np", "-a2", "-o", outputfile]
self.inputFile = inputfile
self.weighting = 25
def run(self, callback):
Task.run(self, callback)
self.container.readFromFile(self.inputFile)
def processOutputLine(self, line):
print("[mpeg2encTask]", line[:-1])
class spumuxTask(Task):
def __init__(self, job, xmlfile, inputfile, outputfile):
Task.__init__(self, job, "Muxing buttons into menu")
self.setTool("spumux")
self.args += [xmlfile]
self.inputFile = inputfile
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
self.container.readFromFile(self.inputFile)
def processStderr(self, data):
print("[spumuxTask]", data[:-1])
class MakeFifoNode(Task):
def __init__(self, job, number):
Task.__init__(self, job, "Make FIFO nodes")
self.setTool("mknod")
nodename = self.job.workspace + "/dvd_title_%d" % number + ".mpg"
self.args += [nodename, "p"]
self.weighting = 10
class LinkTS(Task):
def __init__(self, job, sourcefile, link_name):
Task.__init__(self, job, "Creating symlink for source titles")
self.setTool("ln")
self.args += ["-s", sourcefile, link_name]
self.weighting = 10
class CopyMeta(Task):
def __init__(self, job, sourcefile):
Task.__init__(self, job, "Copy title meta files")
self.setTool("cp")
from os import listdir
path, filename = sourcefile.rstrip("/").rsplit("/", 1)
tsfiles = listdir(path)
for file in tsfiles:
if file.startswith(filename + "."):
self.args += [path + '/' + file]
self.args += [self.job.workspace]
self.weighting = 15
class DemuxTask(Task):
def __init__(self, job, inputfile):
Task.__init__(self, job, "Demux video into ES")
title = job.project.titles[job.i]
self.global_preconditions.append(DiskspacePrecondition(title.estimatedDiskspace))
self.setTool("projectx")
self.args += [inputfile, "-demux", "-set", "ExportPanel.Streamtype.Subpicture=0", "-set", "ExportPanel.Streamtype.Teletext=0", "-out", self.job.workspace]
self.end = 300
self.prog_state = 0
self.weighting = 1000
self.cutfile = self.job.workspace + "/cut_%d.Xcl" % (job.i + 1)
self.cutlist = title.cutlist
self.currentPID = None
self.relevantAudioPIDs = []
self.getRelevantAudioPIDs(title)
self.generated_files = []
self.mplex_audiofiles = {}
self.mplex_videofile = ""
self.mplex_streamfiles = []
if len(self.cutlist) > 1:
self.args += ["-cut", self.cutfile]
def prepare(self):
self.writeCutfile()
def getRelevantAudioPIDs(self, title):
for audiotrack in title.properties.audiotracks:
if audiotrack.active.getValue():
self.relevantAudioPIDs.append(audiotrack.pid.getValue())
def processOutputLine(self, line):
line = line[:-1]
#print "[DemuxTask]", line
MSG_NEW_FILE = "---> new File: "
MSG_PROGRESS = "[PROGRESS] "
MSG_NEW_MP2 = "++> Mpg Audio: PID 0x"
MSG_NEW_AC3 = "++> AC3/DTS Audio: PID 0x"
if line.startswith(MSG_NEW_FILE):
file = line[len(MSG_NEW_FILE):]
if file[0] == "'":
file = file[1:-1]
self.haveNewFile(file)
elif line.startswith(MSG_PROGRESS):
progress = line[len(MSG_PROGRESS):]
self.haveProgress(progress)
elif line.startswith(MSG_NEW_MP2) or line.startswith(MSG_NEW_AC3):
try:
self.currentPID = str(int(line.split(': PID 0x', 1)[1].split(' ', 1)[0], 16))
except ValueError:
print("[DemuxTask] ERROR: couldn't detect Audio PID (projectx too old?)")
def haveNewFile(self, file):
print("[DemuxTask] produced file:", file, self.currentPID)
self.generated_files.append(file)
if self.currentPID in self.relevantAudioPIDs:
self.mplex_audiofiles[self.currentPID] = file
elif file.endswith("m2v"):
self.mplex_videofile = file
def haveProgress(self, progress):
#print "PROGRESS [%s]" % progress
MSG_CHECK = "check & synchronize audio file"
MSG_DONE = "done..."
if progress == "preparing collection(s)...":
self.prog_state = 0
elif progress[:len(MSG_CHECK)] == MSG_CHECK:
self.prog_state += 1
else:
try:
p = int(progress)
p = p - 1 + self.prog_state * 100
if p > self.progress:
self.progress = p
except ValueError:
pass
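	# projectx reports 0-100 within each pass and prints one
	# "check & synchronize audio file" line per new pass, so
	# prog_state * 100 + (p - 1) yields a cumulative progress value.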
def writeCutfile(self):
f = open(self.cutfile, "w")
f.write("CollectionPanel.CutMode=4\n")
for p in self.cutlist:
			s = p // 90000
			m = s // 60
			h = m // 60
			m %= 60
			s %= 60
f.write("%02d:%02d:%02d\n" % (h, m, s))
f.close()
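	# Example (illustrative): a cut point p = 5400000 is in 90 kHz PTS ticks,
	# i.e. 60 seconds, and is written to the Xcl cutfile as "00:01:00".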
def cleanup(self, failed):
print("[DemuxTask::cleanup]")
self.mplex_streamfiles = [self.mplex_videofile]
for pid in self.relevantAudioPIDs:
if pid in self.mplex_audiofiles:
self.mplex_streamfiles.append(self.mplex_audiofiles[pid])
print(self.mplex_streamfiles)
if failed:
import os
for file in self.generated_files:
try:
os.remove(file)
except OSError:
pass
class MplexTaskPostcondition(Condition):
def check(self, task):
if task.error == task.ERROR_UNDERRUN:
return True
return task.error is None
def getErrorMessage(self, task):
return {
task.ERROR_UNDERRUN: ("Can't multiplex source video!"),
task.ERROR_UNKNOWN: ("An unknown error occurred!")
}[task.error]
class MplexTask(Task):
ERROR_UNDERRUN, ERROR_UNKNOWN = list(range(2))
def __init__(self, job, outputfile, inputfiles=None, demux_task=None, weighting=500):
Task.__init__(self, job, "Mux ES into PS")
self.weighting = weighting
self.demux_task = demux_task
self.postconditions.append(MplexTaskPostcondition())
self.setTool("mplex")
self.args += ["-f8", "-o", outputfile, "-v1"]
if inputfiles:
self.args += inputfiles
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
	# We deliberately omit the ReturncodePostcondition here: mplex can exit non-zero with a harmless buffer under-run at the very end of some streams, which MplexTaskPostcondition tolerates.
def prepare(self):
self.error = None
if self.demux_task:
self.args += self.demux_task.mplex_streamfiles
def processOutputLine(self, line):
print("[MplexTask] ", line[:-1])
if line.startswith("**ERROR:"):
if line.find("Frame data under-runs detected") != -1:
self.error = self.ERROR_UNDERRUN
else:
self.error = self.ERROR_UNKNOWN
class RemoveESFiles(Task):
def __init__(self, job, demux_task):
Task.__init__(self, job, "Remove temp. files")
self.demux_task = demux_task
self.setTool("rm")
self.weighting = 10
def prepare(self):
self.args += ["-f"]
self.args += self.demux_task.generated_files
self.args += [self.demux_task.cutfile]
class DVDAuthorTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Authoring DVD")
self.weighting = 20
self.setTool("dvdauthor")
self.CWD = self.job.workspace
self.args += ["-x", self.job.workspace + "/dvdauthor.xml"]
self.menupreview = job.menupreview
def processOutputLine(self, line):
print("[DVDAuthorTask] ", line[:-1])
if not self.menupreview and line.startswith("STAT: Processing"):
self.callback(self, [], stay_resident=True)
elif line.startswith("STAT: VOBU"):
try:
progress = int(line.split("MB")[0].split(" ")[-1])
if progress:
self.job.mplextask.progress = progress
print("[DVDAuthorTask] update mplextask progress:", self.job.mplextask.progress, "of", self.job.mplextask.end)
except:
print("couldn't set mux progress")
class DVDAuthorFinalTask(Task):
def __init__(self, job):
Task.__init__(self, job, "dvdauthor finalize")
self.setTool("dvdauthor")
self.args += ["-T", "-o", self.job.workspace + "/dvd"]
class WaitForResidentTasks(Task):
def __init__(self, job):
Task.__init__(self, job, "waiting for dvdauthor to finalize")
def run(self, callback):
print("waiting for %d resident task(s) %s to finish..." % (len(self.job.resident_tasks), str(self.job.resident_tasks)))
self.callback = callback
		if len(self.job.resident_tasks) == 0:
callback(self, [])
class BurnTaskPostcondition(Condition):
RECOVERABLE = True
def check(self, task):
if task.returncode == 0:
return True
elif task.error is None or task.error is task.ERROR_MINUSRWBUG:
return True
return False
def getErrorMessage(self, task):
return {
task.ERROR_NOTWRITEABLE: _("Medium is not a writeable DVD!"),
task.ERROR_LOAD: _("Could not load Medium! No disc inserted?"),
task.ERROR_SIZE: _("Content does not fit on DVD!"),
task.ERROR_WRITE_FAILED: _("Write failed!"),
task.ERROR_DVDROM: _("No (supported) DVDROM found!"),
task.ERROR_ISOFS: _("Medium is not empty!"),
task.ERROR_FILETOOLARGE: _("TS file is too large for ISO9660 level 1!"),
task.ERROR_ISOTOOLARGE: _("ISO file is too large for this filesystem!"),
task.ERROR_UNKNOWN: _("An unknown error occurred!")
}[task.error]
class BurnTask(Task):
ERROR_NOTWRITEABLE, ERROR_LOAD, ERROR_SIZE, ERROR_WRITE_FAILED, ERROR_DVDROM, ERROR_ISOFS, ERROR_FILETOOLARGE, ERROR_ISOTOOLARGE, ERROR_MINUSRWBUG, ERROR_UNKNOWN = list(range(10))
def __init__(self, job, extra_args=[], tool="growisofs"):
Task.__init__(self, job, job.name)
self.weighting = 500
self.end = 120 # 100 for writing, 10 for buffer flush, 10 for closing disc
self.postconditions.append(BurnTaskPostcondition())
self.setTool(tool)
self.args += extra_args
def prepare(self):
self.error = None
def processOutputLine(self, line):
line = line[:-1]
print("[GROWISOFS] %s" % line)
progpos = line.find("%) @")
if line[8:14] == "done, ":
self.progress = float(line[:6])
elif progpos > 0:
self.progress = float(line[progpos - 4:progpos])
elif line.find("flushing cache") != -1:
self.progress = 100
elif line.find("closing disc") != -1:
self.progress = 110
elif line.startswith(":-["):
if line.find("ASC=30h") != -1:
self.error = self.ERROR_NOTWRITEABLE
elif line.find("ASC=24h") != -1:
self.error = self.ERROR_LOAD
elif line.find("SK=5h/ASC=A8h/ACQ=04h") != -1:
self.error = self.ERROR_MINUSRWBUG
else:
self.error = self.ERROR_UNKNOWN
print("BurnTask: unknown error %s" % line)
elif line.startswith(":-("):
if line.find("No space left on device") != -1:
self.error = self.ERROR_SIZE
elif self.error == self.ERROR_MINUSRWBUG:
print("*sigh* this is a known bug. we're simply gonna assume everything is fine.")
self.postconditions = []
elif line.find("write failed") != -1:
self.error = self.ERROR_WRITE_FAILED
elif line.find("unable to open64(") != -1 and line.find(",O_RDONLY): No such file or directory") != -1:
self.error = self.ERROR_DVDROM
elif line.find("media is not recognized as recordable DVD") != -1:
self.error = self.ERROR_NOTWRITEABLE
else:
self.error = self.ERROR_UNKNOWN
print("BurnTask: unknown error %s" % line)
elif line.startswith("FATAL:"):
if line.find("already carries isofs!"):
self.error = self.ERROR_ISOFS
else:
self.error = self.ERROR_UNKNOWN
print("BurnTask: unknown error %s" % line)
elif line.find("-allow-limited-size was not specified. There is no way do represent this file size. Aborting.") != -1:
self.error = self.ERROR_FILETOOLARGE
elif line.startswith("genisoimage: File too large."):
self.error = self.ERROR_ISOTOOLARGE
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
class RemoveWorkspaceFolder(Task):
def __init__(self, job):
Task.__init__(self, job, "Remove temp. files")
self.setTool("rm")
self.args += ["-rf", self.job.workspace]
self.weighting = 10
class CheckDiskspaceTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Checking free space")
		totalsize = 0
maxsize = 0
for title in job.project.titles:
titlesize = title.estimatedDiskspace
if titlesize > maxsize:
maxsize = titlesize
totalsize += titlesize
		job.estimateddvdsize = totalsize // 1024 // 1024
		totalsize += 50 * 1024 * 1024 # require an extra safety margin of 50 MB
		diskSpaceNeeded = totalsize + maxsize
		self.global_preconditions.append(DiskspacePrecondition(diskSpaceNeeded))
self.weighting = 5
def abort(self):
self.finish(aborted=True)
def run(self, callback):
self.callback = callback
failed_preconditions = self.checkPreconditions(True) + self.checkPreconditions(False)
if len(failed_preconditions):
callback(self, failed_preconditions)
return
Task.processFinished(self, 0)
class PreviewTask(Task):
def __init__(self, job, path):
Task.__init__(self, job, "Preview")
self.postconditions.append(PreviewTaskPostcondition())
self.job = job
self.path = path
self.weighting = 10
def run(self, callback):
self.callback = callback
if self.job.menupreview:
self.previewProject()
else:
import Screens.Standby
if Screens.Standby.inStandby:
self.previewCB(False)
else:
Tools.Notifications.AddNotificationWithCallback(self.previewCB, MessageBox, _("Do you want to preview this DVD before burning?"), timeout=60, default=False, domain="JobManager")
def abort(self):
self.finish(aborted=True)
def previewCB(self, answer):
if answer == True:
self.previewProject()
else:
self.closedCB(True)
def playerClosed(self):
if self.job.menupreview:
self.closedCB(True)
else:
Tools.Notifications.AddNotificationWithCallback(self.closedCB, MessageBox, _("Do you want to burn this collection to DVD medium?"), domain="JobManager")
def closedCB(self, answer):
if answer == True:
Task.processFinished(self, 0)
else:
Task.processFinished(self, 1)
def previewProject(self):
from Plugins.Extensions.DVDPlayer.plugin import DVDPlayer
self.job.project.session.openWithCallback(self.playerClosed, DVDPlayer, dvd_filelist=[self.path])
class PreviewTaskPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return "Cancel"
class ImagingPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return _("Failed") + ": python-imaging"
class ImagePrepareTask(Task):
def __init__(self, job):
Task.__init__(self, job, _("please wait, loading picture..."))
self.postconditions.append(ImagingPostcondition())
self.weighting = 20
self.job = job
self.Menus = job.Menus
def run(self, callback):
self.callback = callback
# we are doing it this weird way so that the TaskView Screen actually pops up before the spinner comes
from enigma import eTimer
self.delayTimer = eTimer()
self.delayTimer.callback.append(self.conduct)
self.delayTimer.start(10, 1)
def conduct(self):
try:
from ImageFont import truetype
from Image import open as Image_open
s = self.job.project.menutemplate.settings
(width, height) = s.dimensions.getValue()
self.Menus.im_bg_orig = Image_open(s.menubg.getValue())
if self.Menus.im_bg_orig.size != (width, height):
self.Menus.im_bg_orig = self.Menus.im_bg_orig.resize((width, height))
self.Menus.fontsizes = [s.fontsize_headline.getValue(), s.fontsize_title.getValue(), s.fontsize_subtitle.getValue()]
self.Menus.fonts = [(truetype(s.fontface_headline.getValue(), self.Menus.fontsizes[0])), (truetype(s.fontface_title.getValue(), self.Menus.fontsizes[1])), (truetype(s.fontface_subtitle.getValue(), self.Menus.fontsizes[2]))]
Task.processFinished(self, 0)
except:
Task.processFinished(self, 1)
class MenuImageTask(Task):
def __init__(self, job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename):
Task.__init__(self, job, "Create Menu %d Image" % menu_count)
self.postconditions.append(ImagingPostcondition())
self.weighting = 10
self.job = job
self.Menus = job.Menus
self.menu_count = menu_count
self.spuxmlfilename = spuxmlfilename
self.menubgpngfilename = menubgpngfilename
self.highlightpngfilename = highlightpngfilename
def run(self, callback):
self.callback = callback
#try:
import ImageDraw
import Image
import os
s = self.job.project.menutemplate.settings
s_top = s.margin_top.getValue()
s_bottom = s.margin_bottom.getValue()
s_left = s.margin_left.getValue()
s_right = s.margin_right.getValue()
s_rows = s.space_rows.getValue()
s_cols = s.space_cols.getValue()
nr_cols = s.cols.getValue()
nr_rows = s.rows.getValue()
thumb_size = s.thumb_size.getValue()
if thumb_size[0]:
from Image import open as Image_open
(s_width, s_height) = s.dimensions.getValue()
fonts = self.Menus.fonts
im_bg = self.Menus.im_bg_orig.copy()
im_high = Image.new("P", (s_width, s_height), 0)
im_high.putpalette(self.Menus.spu_palette)
draw_bg = ImageDraw.Draw(im_bg)
draw_high = ImageDraw.Draw(im_high)
if self.menu_count == 1:
headlineText = self.job.project.settings.name.getValue().decode("utf-8")
headlinePos = self.getPosition(s.offset_headline.getValue(), 0, 0, s_width, s_top, draw_bg.textsize(headlineText, font=fonts[0]))
draw_bg.text(headlinePos, headlineText, fill=self.Menus.color_headline, font=fonts[0])
spuxml = """<?xml version="1.0" encoding="utf-8"?>
<subpictures>
<stream>
<spu
highlight="%s"
transparent="%02x%02x%02x"
start="00:00:00.00"
force="yes" >""" % (self.highlightpngfilename, self.Menus.spu_palette[0], self.Menus.spu_palette[1], self.Menus.spu_palette[2])
#rowheight = (self.Menus.fontsizes[1]+self.Menus.fontsizes[2]+thumb_size[1]+s_rows)
menu_start_title = (self.menu_count - 1) * self.job.titles_per_menu + 1
menu_end_title = (self.menu_count) * self.job.titles_per_menu + 1
nr_titles = len(self.job.project.titles)
if menu_end_title > nr_titles:
menu_end_title = nr_titles + 1
col = 1
row = 1
for title_no in list(range(menu_start_title, menu_end_title)):
title = self.job.project.titles[title_no - 1]
			col_width = (s_width - s_left - s_right) // nr_cols
			row_height = (s_height - s_top - s_bottom) // nr_rows
			left = s_left + ((col - 1) * col_width) + s_cols // 2
			right = left + col_width - s_cols
			top = s_top + ((row - 1) * row_height) + s_rows // 2
			bottom = top + row_height - s_rows
			width = right - left
			height = bottom - top
if bottom > s_height:
bottom = s_height
#draw_bg.rectangle((left, top, right, bottom), outline=(255,0,0))
im_cell_bg = Image.new("RGBA", (width, height), (0, 0, 0, 0))
draw_cell_bg = ImageDraw.Draw(im_cell_bg)
im_cell_high = Image.new("P", (width, height), 0)
im_cell_high.putpalette(self.Menus.spu_palette)
draw_cell_high = ImageDraw.Draw(im_cell_high)
if thumb_size[0]:
thumbPos = self.getPosition(s.offset_thumb.getValue(), 0, 0, width, height, thumb_size)
box = (thumbPos[0], thumbPos[1], thumbPos[0] + thumb_size[0], thumbPos[1] + thumb_size[1])
try:
thumbIm = Image_open(title.inputfile.rsplit('.', 1)[0] + ".png")
im_cell_bg.paste(thumbIm, thumbPos)
except:
draw_cell_bg.rectangle(box, fill=(64, 127, 127, 127))
border = s.thumb_border.getValue()
if border:
draw_cell_high.rectangle(box, fill=1)
draw_cell_high.rectangle((box[0] + border, box[1] + border, box[2] - border, box[3] - border), fill=0)
titleText = title.formatDVDmenuText(s.titleformat.getValue(), title_no).decode("utf-8")
titlePos = self.getPosition(s.offset_title.getValue(), 0, 0, width, height, draw_bg.textsize(titleText, font=fonts[1]))
draw_cell_bg.text(titlePos, titleText, fill=self.Menus.color_button, font=fonts[1])
draw_cell_high.text(titlePos, titleText, fill=1, font=self.Menus.fonts[1])
subtitleText = title.formatDVDmenuText(s.subtitleformat.getValue(), title_no).decode("utf-8")
subtitlePos = self.getPosition(s.offset_subtitle.getValue(), 0, 0, width, height, draw_cell_bg.textsize(subtitleText, font=fonts[2]))
draw_cell_bg.text(subtitlePos, subtitleText, fill=self.Menus.color_button, font=fonts[2])
del draw_cell_bg
del draw_cell_high
im_bg.paste(im_cell_bg, (left, top, right, bottom), mask=im_cell_bg)
im_high.paste(im_cell_high, (left, top, right, bottom))
spuxml += """
<button name="button%s" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (str(title_no).zfill(2), left, right, top, bottom)
if col < nr_cols:
col += 1
else:
col = 1
row += 1
top = s_height - s_bottom - s_rows / 2
if self.menu_count < self.job.nr_menus:
next_page_text = s.next_page_text.getValue().decode("utf-8")
textsize = draw_bg.textsize(next_page_text, font=fonts[1])
pos = (s_width - textsize[0] - s_right, top)
draw_bg.text(pos, next_page_text, fill=self.Menus.color_button, font=fonts[1])
draw_high.text(pos, next_page_text, fill=1, font=fonts[1])
spuxml += """
<button name="button_next" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0], pos[0] + textsize[0], pos[1], pos[1] + textsize[1])
if self.menu_count > 1:
prev_page_text = s.prev_page_text.getValue().decode("utf-8")
textsize = draw_bg.textsize(prev_page_text, font=fonts[1])
pos = ((s_left + s_cols / 2), top)
draw_bg.text(pos, prev_page_text, fill=self.Menus.color_button, font=fonts[1])
draw_high.text(pos, prev_page_text, fill=1, font=fonts[1])
spuxml += """
<button name="button_prev" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0], pos[0] + textsize[0], pos[1], pos[1] + textsize[1])
del draw_bg
del draw_high
fd = open(self.menubgpngfilename, "w")
im_bg.save(fd, "PNG")
fd.close()
fd = open(self.highlightpngfilename, "w")
im_high.save(fd, "PNG")
fd.close()
spuxml += """
</spu>
</stream>
</subpictures>"""
f = open(self.spuxmlfilename, "w")
f.write(spuxml)
f.close()
Task.processFinished(self, 0)
#except:
#Task.processFinished(self, 1)
def getPosition(self, offset, left, top, right, bottom, size):
pos = [left, top]
if offset[0] != -1:
pos[0] += offset[0]
else:
			pos[0] += ((right - left) - size[0]) // 2
if offset[1] != -1:
pos[1] += offset[1]
else:
			pos[1] += ((bottom - top) - size[1]) // 2
return tuple(pos)
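	# Example (illustrative): getPosition((-1, 20), 0, 0, 720, 576, (100, 40))
	# returns (310, 20): an offset component of -1 means "center along that
	# axis", any other value is a fixed offset from the top-left corner.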
class Menus:
def __init__(self, job):
self.job = job
job.Menus = self
s = self.job.project.menutemplate.settings
self.color_headline = tuple(s.color_headline.getValue())
self.color_button = tuple(s.color_button.getValue())
self.color_highlight = tuple(s.color_highlight.getValue())
self.spu_palette = [0x60, 0x60, 0x60] + s.color_highlight.getValue()
ImagePrepareTask(job)
nr_titles = len(job.project.titles)
job.titles_per_menu = s.cols.getValue() * s.rows.getValue()
		job.nr_menus = (nr_titles + job.titles_per_menu - 1) // job.titles_per_menu
#a new menu_count every 4 titles (1,2,3,4->1 ; 5,6,7,8->2 etc.)
for menu_count in list(range(1, job.nr_menus + 1)):
num = str(menu_count)
spuxmlfilename = job.workspace + "/spumux" + num + ".xml"
menubgpngfilename = job.workspace + "/dvd_menubg" + num + ".png"
highlightpngfilename = job.workspace + "/dvd_highlight" + num + ".png"
MenuImageTask(job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename)
png2yuvTask(job, menubgpngfilename, job.workspace + "/dvdmenubg" + num + ".yuv")
menubgm2vfilename = job.workspace + "/dvdmenubg" + num + ".mv2"
mpeg2encTask(job, job.workspace + "/dvdmenubg" + num + ".yuv", menubgm2vfilename)
menubgmpgfilename = job.workspace + "/dvdmenubg" + num + ".mpg"
menuaudiofilename = s.menuaudio.getValue()
MplexTask(job, outputfile=menubgmpgfilename, inputfiles=[menubgm2vfilename, menuaudiofilename], weighting=20)
menuoutputfilename = job.workspace + "/dvdmenu" + num + ".mpg"
spumuxTask(job, spuxmlfilename, menubgmpgfilename, menuoutputfilename)
def CreateAuthoringXML_singleset(job):
nr_titles = len(job.project.titles)
mode = job.project.settings.authormode.getValue()
authorxml = []
authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
authorxml.append(' <dvdauthor dest="' + (job.workspace + "/dvd") + '" format="' + job.project.menutemplate.settings.video_format.getValue() + '">\n')
authorxml.append(' <vmgm>\n')
authorxml.append(' <menus lang="' + iso639language.get_dvd_id(job.project.menutemplate.settings.menulang.getValue()) + '">\n')
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n', )
if mode.startswith("menu"):
authorxml.append(' <post> jump titleset 1 menu; </post>\n')
else:
authorxml.append(' <post> jump title 1; </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' </vmgm>\n')
authorxml.append(' <titleset>\n')
if mode.startswith("menu"):
authorxml.append(' <menus lang="' + iso639language.get_dvd_id(job.project.menutemplate.settings.menulang.getValue()) + '">\n')
authorxml.append(' <video aspect="4:3"/>\n')
for menu_count in list(range(1, job.nr_menus + 1)):
if menu_count == 1:
authorxml.append(' <pgc entry="root">\n')
else:
authorxml.append(' <pgc>\n')
menu_start_title = (menu_count - 1) * job.titles_per_menu + 1
menu_end_title = (menu_count) * job.titles_per_menu + 1
if menu_end_title > nr_titles:
menu_end_title = nr_titles + 1
for i in list(range(menu_start_title, menu_end_title)):
authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump title ' + str(i) + '; </button>\n')
if menu_count > 1:
authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count - 1) + '; </button>\n')
if menu_count < job.nr_menus:
authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count + 1) + '; </button>\n')
menuoutputfilename = job.workspace + "/dvdmenu" + str(menu_count) + ".mpg"
authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' <titles>\n')
for i in list(range(nr_titles)):
chapters = ','.join(job.project.titles[i].getChapterMarks())
title_no = i + 1
title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
if job.menupreview:
LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
else:
MakeFifoNode(job, title_no)
if mode.endswith("linked") and title_no < nr_titles:
post_tag = "jump title %d;" % (title_no + 1)
elif mode.startswith("menu"):
post_tag = "call vmgm menu 1;"
else:
post_tag = ""
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
authorxml.append(' <post> ' + post_tag + ' </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </titles>\n')
authorxml.append(' </titleset>\n')
authorxml.append(' </dvdauthor>\n')
f = open(job.workspace + "/dvdauthor.xml", "w")
for x in authorxml:
f.write(x)
f.close()
def CreateAuthoringXML_multiset(job):
nr_titles = len(job.project.titles)
mode = job.project.settings.authormode.getValue()
authorxml = []
authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
authorxml.append(' <dvdauthor dest="' + (job.workspace + "/dvd") + '" jumppad="yes" format="' + job.project.menutemplate.settings.video_format.getValue() + '">\n')
authorxml.append(' <vmgm>\n')
authorxml.append(' <menus lang="' + iso639language.get_dvd_id(job.project.menutemplate.settings.menulang.getValue()) + '">\n')
authorxml.append(' <video aspect="4:3"/>\n')
if mode.startswith("menu"):
for menu_count in list(range(1, job.nr_menus + 1)):
if menu_count == 1:
authorxml.append(' <pgc>\n')
else:
authorxml.append(' <pgc>\n')
menu_start_title = (menu_count - 1) * job.titles_per_menu + 1
menu_end_title = (menu_count) * job.titles_per_menu + 1
if menu_end_title > nr_titles:
menu_end_title = nr_titles + 1
for i in list(range(menu_start_title, menu_end_title)):
authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump titleset ' + str(i) + ' title 1; </button>\n')
if menu_count > 1:
authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count - 1) + '; </button>\n')
if menu_count < job.nr_menus:
authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count + 1) + '; </button>\n')
menuoutputfilename = job.workspace + "/dvdmenu" + str(menu_count) + ".mpg"
authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
authorxml.append(' </pgc>\n')
else:
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n')
authorxml.append(' <post> jump titleset 1 title 1; </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' </vmgm>\n')
for i in list(range(nr_titles)):
title = job.project.titles[i]
authorxml.append(' <titleset>\n')
authorxml.append(' <menus lang="' + iso639language.get_dvd_id(job.project.menutemplate.settings.menulang.getValue()) + '">\n')
authorxml.append(' <pgc entry="root">\n')
authorxml.append(' <pre>\n')
authorxml.append(' jump vmgm menu entry title;\n')
authorxml.append(' </pre>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' <titles>\n')
for audiotrack in title.properties.audiotracks:
active = audiotrack.active.getValue()
if active:
format = audiotrack.format.getValue()
language = iso639language.get_dvd_id(audiotrack.language.getValue())
audio_tag = ' <audio format="%s"' % format
if language != "nolang":
audio_tag += ' lang="%s"' % language
audio_tag += ' />\n'
authorxml.append(audio_tag)
aspect = title.properties.aspect.getValue()
video_tag = ' <video aspect="' + aspect + '"'
if title.properties.widescreen.getValue() == "4:3":
video_tag += ' widescreen="' + title.properties.widescreen.getValue() + '"'
video_tag += ' />\n'
authorxml.append(video_tag)
chapters = ','.join(title.getChapterMarks())
title_no = i + 1
title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
if job.menupreview:
LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
else:
MakeFifoNode(job, title_no)
if mode.endswith("linked") and title_no < nr_titles:
post_tag = "jump titleset %d title 1;" % (title_no + 1)
elif mode.startswith("menu"):
post_tag = "call vmgm menu 1;"
else:
post_tag = ""
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
authorxml.append(' <post> ' + post_tag + ' </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </titles>\n')
authorxml.append(' </titleset>\n')
authorxml.append(' </dvdauthor>\n')
f = open(job.workspace + "/dvdauthor.xml", "w")
for x in authorxml:
f.write(x)
f.close()
def getISOfilename(isopath, volName):
from Tools.Directories import fileExists
i = 0
filename = isopath + '/' + volName + ".iso"
while fileExists(filename):
i = i + 1
filename = isopath + '/' + volName + str(i).zfill(3) + ".iso"
return filename
class DVDJob(Job):
def __init__(self, project, menupreview=False):
Job.__init__(self, "DVDBurn Job")
self.project = project
from time import strftime
from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S")
createDir(new_workspace, True)
self.workspace = new_workspace
self.project.workspace = self.workspace
self.menupreview = menupreview
self.conduct()
def conduct(self):
CheckDiskspaceTask(self)
if self.project.settings.authormode.getValue().startswith("menu") or self.menupreview:
Menus(self)
if self.project.settings.titlesetmode.getValue() == "multi":
CreateAuthoringXML_multiset(self)
else:
CreateAuthoringXML_singleset(self)
DVDAuthorTask(self)
nr_titles = len(self.project.titles)
if self.menupreview:
PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
else:
for self.i in list(range(nr_titles)):
self.title = self.project.titles[self.i]
link_name = self.workspace + "/source_title_%d.ts" % (self.i + 1)
title_filename = self.workspace + "/dvd_title_%d.mpg" % (self.i + 1)
LinkTS(self, self.title.inputfile, link_name)
demux = DemuxTask(self, link_name)
self.mplextask = MplexTask(self, outputfile=title_filename, demux_task=demux)
self.mplextask.end = self.estimateddvdsize
RemoveESFiles(self, demux)
WaitForResidentTasks(self)
PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
output = self.project.settings.output.getValue()
volName = self.project.settings.name.getValue()
if output == "medium":
self.name = _("Burn DVD")
tool = "growisofs"
burnargs = ["-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat"]
if self.project.size / (1024 * 1024) > self.project.MAX_SL:
burnargs += ["-use-the-force-luke=4gms", "-speed=1", "-R"]
elif output == "iso":
self.name = _("Create DVD-ISO")
tool = "genisoimage"
isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
burnargs = ["-o", isopathfile]
burnargs += ["-dvd-video", "-publisher", "Dreambox", "-V", volName, self.workspace + "/dvd"]
BurnTask(self, burnargs, tool)
RemoveWorkspaceFolder(self)
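# Usage sketch: a DVDJob is self-starting - constructing it with a prepared
# project immediately queues the whole task chain via conduct(), e.g.
#   DVDJob(project)                   # author, then burn or write an ISO
#   DVDJob(project, menupreview=True) # only render the menu preview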
class DVDdataJob(Job):
def __init__(self, project):
Job.__init__(self, "Data DVD Burn")
self.project = project
from time import strftime
from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S") + "/dvd/"
createDir(new_workspace, True)
self.workspace = new_workspace
self.project.workspace = self.workspace
self.conduct()
def conduct(self):
if self.project.settings.output.getValue() == "iso":
CheckDiskspaceTask(self)
nr_titles = len(self.project.titles)
for self.i in list(range(nr_titles)):
title = self.project.titles[self.i]
filename = title.inputfile.rstrip("/").rsplit("/", 1)[1]
link_name = self.workspace + filename
LinkTS(self, title.inputfile, link_name)
CopyMeta(self, title.inputfile)
output = self.project.settings.output.getValue()
volName = self.project.settings.name.getValue()
tool = "growisofs"
if output == "medium":
self.name = _("Burn DVD")
burnargs = ["-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat"]
if self.project.size / (1024 * 1024) > self.project.MAX_SL:
burnargs += ["-use-the-force-luke=4gms", "-speed=1", "-R"]
elif output == "iso":
tool = "genisoimage"
self.name = _("Create DVD-ISO")
isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
burnargs = ["-o", isopathfile]
if self.project.settings.dataformat.getValue() == "iso9660_1":
burnargs += ["-iso-level", "1"]
elif self.project.settings.dataformat.getValue() == "iso9660_4":
burnargs += ["-iso-level", "4", "-allow-limited-size"]
elif self.project.settings.dataformat.getValue() == "udf":
burnargs += ["-udf", "-allow-limited-size"]
burnargs += ["-publisher", "Dreambox", "-V", volName, "-follow-links", self.workspace]
BurnTask(self, burnargs, tool)
RemoveWorkspaceFolder(self)
class DVDisoJob(Job):
def __init__(self, project, imagepath):
Job.__init__(self, _("Burn DVD"))
self.project = project
self.menupreview = False
from Tools.Directories import getSize
if imagepath.endswith(".iso"):
#PreviewTask(self, imagepath)
burnargs = ["-Z", "/dev/" + harddiskmanager.getCD() + '=' + imagepath, "-dvd-compat", "-use-the-force-luke=tty"]
if getSize(imagepath) / (1024 * 1024) > self.project.MAX_SL:
burnargs += ["-use-the-force-luke=4gms", "-speed=1"]
else:
PreviewTask(self, imagepath + "/VIDEO_TS/")
volName = self.project.settings.name.getValue()
burnargs = ["-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat"]
if getSize(imagepath) / (1024 * 1024) > self.project.MAX_SL:
burnargs += ["-use-the-force-luke=4gms", "-speed=1", "-R"]
burnargs += ["-dvd-video", "-publisher", "Dreambox", "-V", volName, imagepath]
tool = "growisofs"
BurnTask(self, burnargs, tool)
|
openatv/enigma2
|
lib/python/Plugins/Extensions/DVDBurn/Process.py
|
Python
|
gpl-2.0
| 37,027 | 0.02536 |
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a dataset of SequenceExamples from NoteSequence protos.
This script will extract melodies and chords from NoteSequence protos and save
them to TensorFlow's SequenceExample protos for input to the improv RNN models.
"""
import os
from magenta.models.improv_rnn import improv_rnn_config_flags
from magenta.models.improv_rnn import improv_rnn_pipeline
from magenta.pipelines import pipeline
import tensorflow.compat.v1 as tf
flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS
flags.DEFINE_string(
'input', None,
'TFRecord to read NoteSequence protos from.')
flags.DEFINE_string(
'output_dir', None,
'Directory to write training and eval TFRecord files. The TFRecord files '
'are populated with SequenceExample protos.')
flags.DEFINE_float(
'eval_ratio', 0.1,
'Fraction of input to set aside for eval set. Partition is randomly '
'selected.')
flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
'or FATAL.')
def main(unused_argv):
tf.logging.set_verbosity(FLAGS.log)
config = improv_rnn_config_flags.config_from_flags()
pipeline_instance = improv_rnn_pipeline.get_pipeline(
config, FLAGS.eval_ratio)
FLAGS.input = os.path.expanduser(FLAGS.input)
FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
pipeline.run_pipeline_serial(
pipeline_instance,
pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type),
FLAGS.output_dir)
def console_entry_point():
tf.disable_v2_behavior()
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
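# Example invocation (paths and config name are illustrative):
#   improv_rnn_create_dataset --config=<config_name> \
#     --input=/tmp/notesequences.tfrecord \
#     --output_dir=/tmp/improv_rnn/sequence_examples --eval_ratio=0.10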
|
magenta/magenta
|
magenta/models/improv_rnn/improv_rnn_create_dataset.py
|
Python
|
apache-2.0
| 2,205 | 0.004082 |
'''Run small transformation functions over couchdb docs.
Each function should take a json python object (the doc), modify it and
hand it back. Not quite that slick yet; needs a way to pass in code or
to make this a decorator.
'''
import importlib
from harvester.collection_registry_client import Collection
from harvester.couchdb_init import get_couchdb
COUCHDB_VIEW = 'all_provider_docs/by_provider_name'
def run_on_couchdb_by_collection(func, collection_key=None):
    '''Run func over couchdb docs, optionally filtered by collection.
    If collection_key is None, run over all docs (can take a long time -
    not recommended). func is a function that takes a couchdb doc and
    returns the modified doc, or None if no changes were made.
    '''
_couchdb = get_couchdb()
v = _couchdb.view(COUCHDB_VIEW, include_docs='true', key=collection_key) \
if collection_key else _couchdb.view(COUCHDB_VIEW,
include_docs='true')
doc_ids = []
n = 0
for r in v:
n += 1
doc_new = func(r.doc)
        if doc_new and doc_new != r.doc:
_couchdb.save(doc_new)
doc_ids.append(r.doc['_id'])
if n % 100 == 0:
print '{} docs ran. Last doc:{}\n'.format(n, r.doc['_id'])
return doc_ids
def run_on_couchdb_doc(docid, func):
'''Run on a doc, by doc id'''
_couchdb = get_couchdb()
doc = _couchdb[docid]
mod_name, func_name = func.rsplit('.', 1)
fmod = importlib.import_module(mod_name)
ffunc = getattr(fmod, func_name)
doc_new = ffunc(doc)
if doc_new and doc_new != doc:
_couchdb.save(doc_new)
return True
return False
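# Example (doc id is illustrative): apply one of the transforms below to a
# single document, addressing the function by dotted path:
#   run_on_couchdb_doc('collection-key--record-id',
#                      'harvester.post_processing.'
#                      'run_transform_on_couchdb_docs.update_collection_description')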
C_CACHE = {}
def update_collection_description(doc):
cjson = doc['originalRecord']['collection'][0]
# get collection description
if 'description' not in cjson:
if cjson['@id'] in C_CACHE:
c = C_CACHE[cjson['@id']]
else:
c = Collection(url_api=cjson['@id'])
C_CACHE[cjson['@id']] = c
description = c['description'] if c['description'] else c['name']
print('DOC: {} DESCRIP: {}'.format(
doc['_id'], c['description'].encode('utf8')))
doc['originalRecord']['collection'][0]['description'] = description
doc['sourceResource']['collection'][0]['description'] = description
return doc
def add_rights_and_type_to_collection(doc):
cjson = doc['originalRecord']['collection'][0]
    # get the collection object (cached)
if cjson['@id'] in C_CACHE:
c = C_CACHE[cjson['@id']]
else:
c = Collection(url_api=cjson['@id'])
C_CACHE[cjson['@id']] = c
doc['originalRecord']['collection'][0]['rights_status'] = c['rights_status']
doc['originalRecord']['collection'][0]['rights_statement'] = c['rights_statement']
    doc['originalRecord']['collection'][0]['dcmi_type'] = c['dcmi_type']
if 'collection' in doc['sourceResource']:
doc['sourceResource']['collection'][0]['rights_status'] = c['rights_status']
doc['sourceResource']['collection'][0]['rights_statement'] = c['rights_statement']
doc['sourceResource']['collection'][0]['dcmi_type'] = c['dcmi_type']
else:
doc['sourceResource']['collection'] = doc['originalRecord']['collection']
return doc
|
ucldc/harvester
|
harvester/post_processing/run_transform_on_couchdb_docs.py
|
Python
|
bsd-3-clause
| 3,271 | 0.002446 |
#!python
from __future__ import with_statement
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import pandas as pd
import wordbatch.batcher
def decorator_apply(func, batcher=None, cache=None, vectorize=None):
def wrapper_func(*args, **kwargs):
return Apply(func, args=args[1:], kwargs=kwargs, batcher=batcher, cache=cache, vectorize=vectorize)\
.transform(args[0])
return wrapper_func
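# Sketch: decorator_apply turns a per-row function into a minibatched
# transform, e.g.
#   import numpy as np
#   sqrt_all = decorator_apply(np.sqrt)
#   sqrt_all([1.0, 4.0, 9.0])  # -> [1.0, 2.0, 3.0]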
def batch_transform(args):
f= args[1]
f_args= args[2]
f_kwargs= args[3]
if args[5] is not None:
from numba import vectorize
return vectorize(args[5], fastmath=True)(f)(*zip(*args[0]))
if args[4] is not None:
from functools import lru_cache
f= lru_cache(maxsize=args[4])(f)
#Applying per DataFrame row is very slow, use ApplyBatch instead
if isinstance(args[0], pd.DataFrame): return args[0].apply(lambda x: f(x, *f_args, **f_kwargs), axis=1)
return [f(row, *f_args, **f_kwargs) for row in args[0]]
class Apply(object):
#Applies a function to each row of a minibatch
def __init__(self, function, batcher=None, args=[], kwargs={}, cache=None, vectorize=None):
if batcher is None: self.batcher= wordbatch.batcher.Batcher()
else: self.batcher= batcher
self.function= function
self.args= [args]
self.kwargs= [kwargs]
self.cache = [cache]
self.vectorize = [vectorize]
def fit(self, data, input_split= False, batcher= None):
return self
def fit_transform(self, data, input_split=False, merge_output=True, minibatch_size=None, batcher=None):
return self.transform(data, input_split, merge_output, minibatch_size, batcher)
def transform(self, data, input_split=False, merge_output=True, minibatch_size=None, batcher=None):
if batcher is None: batcher = self.batcher
return batcher.process_batches(batch_transform, data,
[self.function] + self.args + self.kwargs + self.cache + self.vectorize,
input_split=input_split, merge_output=merge_output,
minibatch_size= minibatch_size)
# import wordbatch.batcher as batcher
# b= batcher.Batcher(minibatch_size=2)#, method="serial")
# import numpy as np
# a= Apply(np.power, b, [2],{})
# print(a.transform([1, 2, 3, 4]))
|
anttttti/Wordbatch
|
wordbatch/pipelines/apply.py
|
Python
|
gpl-2.0
| 2,258 | 0.030558 |
import os
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l -s --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\functions.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l -s --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\how_to_use.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l -s --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\problems.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter3_sums.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter3_sums.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter4_products.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter4_products.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter5_functions.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter5_functions.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapters678.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapters678.py")
os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal_multiprocess\\pacal\\examples\\springer_book\\Chapter3_sums.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapter3_sums.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapter4_products.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapter5_functions.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapters678.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapter9_applications.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\noncentral.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\depvars_demo.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\two_variables\\order_stats.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\two_variables\\regression.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\two_variables\\resistors.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\two_variables\\sum_dependent.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\linreg.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\diffeq_noise.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\kalman.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\noncentral.py")
|
jszymon/pacal
|
tests/examples/makerep.py
|
Python
|
gpl-3.0
| 5,081 | 0.011415 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v1_fpn_feature_extractor.
By using parameterized test decorator, this test serves for both Slim-based and
Keras-based Mobilenet V1 FPN feature extractors in SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_fpn_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV1FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v1_fpn_feature_extractor.
SSDMobileNetV1FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_depthwise=True,
use_explicit_padding=use_explicit_padding))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_384(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=False)
def test_preprocess_returns_correct_value_range(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=False)
def test_variable_count(self):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=False)
self.assertEqual(len(variables), 153)
def test_fused_batchnorm(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=False)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(
any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
if __name__ == '__main__':
tf.test.main()
|
tombstone/models
|
research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py
|
Python
|
apache-2.0
| 8,728 | 0.001948 |
# -*- coding: utf-8 -*-
"""
rio.blueprints.api_1
~~~~~~~~~~~~~~~~~~~~~
"""
from flask import Blueprint
bp = Blueprint('api_1', __name__)
|
soasme/rio
|
rio/blueprints/api_1.py
|
Python
|
mit
| 139 | 0 |
from .base import *
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
try:
from .local import *
except ImportError:
pass
MIDDLEWARE_CLASSES += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
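# Hypothetical local tweak: the debug toolbar only renders for addresses
# listed in INTERNAL_IPS, e.g.
# INTERNAL_IPS = ['127.0.0.1']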
|
greven/vagrant-django
|
project_name/settings/dev.py
|
Python
|
bsd-3-clause
| 233 | 0.012876 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Base64Encode
# Returns the specified text or file as a Base64 encoded string.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Base64Encode(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Base64Encode Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Base64Encode, self).__init__(temboo_session, '/Library/Utilities/Encoding/Base64Encode')
def new_input_set(self):
return Base64EncodeInputSet()
def _make_result_set(self, result, path):
return Base64EncodeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return Base64EncodeChoreographyExecution(session, exec_id, path)
class Base64EncodeInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Base64Encode
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Text(self, value):
"""
Set the value of the Text input for this Choreo. ((conditional, string) The text that should be Base64 encoded. Required unless providing a value for the URL input.)
"""
super(Base64EncodeInputSet, self)._set_input('Text', value)
def set_URL(self, value):
"""
Set the value of the URL input for this Choreo. ((conditional, string) A URL to a hosted file that should be Base64 encoded. Required unless providing a value for the Text input.)
"""
super(Base64EncodeInputSet, self)._set_input('URL', value)
class Base64EncodeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Base64Encode Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Base64EncodedText(self):
"""
Retrieve the value for the "Base64EncodedText" output from this Choreo execution. ((string) The Base64 encoded text.)
"""
return self._output.get('Base64EncodedText', None)
class Base64EncodeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return Base64EncodeResultSet(response, path)
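# Minimal usage sketch (assumes an authenticated TembooSession named `session`):
#   choreo = Base64Encode(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Text('hello world')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Base64EncodedText())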
|
jordanemedlock/psychtruths
|
temboo/core/Library/Utilities/Encoding/Base64Encode.py
|
Python
|
apache-2.0
| 3,302 | 0.00424 |
"""Define statements for retrieving the data for each of the types."""
CONSTRAINTS = """
select
o.owner,
o.constraint_name,
o.constraint_type,
o.table_name,
o.search_condition,
o.r_owner,
o.r_constraint_name,
o.delete_rule,
o.deferred,
o.deferrable
from %(p_ViewPrefix)s_constraints o
%(p_WhereClause)s
and exists
( select 1
from %(p_ViewPrefix)s_tables
where owner = o.owner
and table_name = o.table_name
)
and (o.generated = 'USER NAME' or o.constraint_type in ('P', 'U'))
order by decode(o.constraint_type, 'P', 1, 'U', 2, 'R', 3, 'C', 4),
o.owner, o.constraint_name"""
CONTEXTS = """
select
namespace,
schema,
package,
type
from dba_context o
%(p_WhereClause)s
order by namespace"""
INDEXES_ANY = """
select
o.owner,
o.index_name,
o.table_name,
o.tablespace_name,
o.uniqueness,
o.initial_extent,
o.next_extent,
o.min_extents,
o.max_extents,
o.pct_increase,
o.index_type,
o.partitioned,
o.temporary,
o.compression,
o.prefix_length,
o.ityp_owner,
o.ityp_name,
o.parameters
from %(p_ViewPrefix)s_indexes o
%(p_WhereClause)s
and o.index_type in ('NORMAL', 'NORMAL/REV', 'IOT - TOP', 'BITMAP',
'FUNCTION-BASED NORMAL', 'FUNCTION-BASED NORMAL/REV',
'DOMAIN')"""
INDEXES = INDEXES_ANY + """
and not exists
( select 1
from %(p_ViewPrefix)s_constraints
where owner = o.owner
and constraint_name = o.index_name
)
order by o.owner, o.index_name"""
INDEX_PARTITIONS = """
select
o.index_owner,
o.partition_name,
o.high_value,
o.tablespace_name,
o.initial_extent,
o.next_extent,
o.min_extent,
o.max_extent,
o.pct_increase
from %(p_ViewPrefix)s_ind_partitions o
%(p_WhereClause)s
order by o.partition_position"""
LIBRARIES = """
select
o.owner,
o.library_name,
o.file_spec
from %(p_ViewPrefix)s_libraries o
%(p_WhereClause)s
order by o.owner, o.library_name"""
LOBS = """
select
o.owner,
o.column_name,
o.table_name,
o.segment_name,
o.in_row
from %(p_ViewPrefix)s_lobs o
%(p_WhereClause)s
order by o.column_name"""
ROLES = """
select
o.role,
o.password_required
from dba_roles o
%(p_WhereClause)s
order by o.role"""
SEQUENCES = """
select
o.sequence_owner,
o.sequence_name,
to_char(min_value),
to_char(max_value),
to_char(increment_by),
cycle_flag,
order_flag,
to_char(cache_size),
to_char(last_number)
from %(p_ViewPrefix)s_sequences o
%(p_WhereClause)s
order by o.sequence_owner, o.sequence_name"""
SYNONYMS = """
select
o.owner,
o.synonym_name,
o.table_owner,
o.table_name,
o.db_link
from %(p_ViewPrefix)s_synonyms o
%(p_WhereClause)s
order by decode(o.owner, 'PUBLIC', 0, 1), o.owner, o.synonym_name"""
TABLES = """
select
o.owner,
o.table_name,
o.tablespace_name,
o.initial_extent,
o.next_extent,
o.min_extents,
o.max_extents,
o.pct_increase,
o.temporary,
o.partitioned,
o.duration,
o.iot_type
from %(p_ViewPrefix)s_tables o
%(p_WhereClause)s
and secondary = 'N'
order by o.owner, o.table_name"""
TABLE_PARTITIONS = """
select
o.table_owner,
o.partition_name,
o.high_value,
o.tablespace_name,
o.initial_extent,
o.next_extent,
o.min_extent,
o.max_extent,
o.pct_increase
from %(p_ViewPrefix)s_tab_partitions o
%(p_WhereClause)s
order by o.partition_position"""
TRIGGERS = """
select
o.owner,
o.trigger_name,
o.table_name,
o.description,
o.when_clause,
o.action_type,
o.trigger_body
from %(p_ViewPrefix)s_triggers o
%(p_WhereClause)s
order by o.owner, o.trigger_name"""
USERS = """
select
o.username,
o.default_tablespace,
o.temporary_tablespace
from dba_users o
%(p_WhereClause)s
order by o.username"""
VIEWS = """
select
o.owner,
o.view_name,
o.text
from %(p_ViewPrefix)s_views o
%(p_WhereClause)s
order by o.owner, o.view_name"""
|
marhar/cx_OracleTools
|
cx_PyOracleLib/cx_OracleObject/Statements.py
|
Python
|
bsd-3-clause
| 5,193 | 0.000193 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
class A:
#classic class
"""this is class A"""
pass
__slots__=('x','y')
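    # note: __slots__ has no effect on old-style (classic) classes, so
    # arbitrary attributes can still be assigned to instances of A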
def test(self):
# classic class test
"""this is A.test()"""
print "A class"
class B(object):
#new class
"""this is class B"""
__slots__=('x','y')
pass
def test(self):
# new class test
"""this is B.test()"""
print "B class"
if __name__ == '__main__':
a=A()
b=B()
print dir(a)
print dir(b)
#a.x=1
#b.x=1
#help(a)
#help(b)
|
heibanke/python_do_something
|
Code/Chapter5/base_classic_new_class.py
|
Python
|
apache-2.0
| 596 | 0.028523 |
"""Classes required to create a Bluetooth Peripheral."""
# python-bluezero imports
from bluezero import adapter
from bluezero import advertisement
from bluezero import async_tools
from bluezero import localGATT
from bluezero import GATT
from bluezero import tools
logger = tools.create_module_logger(__name__)
class Peripheral:
"""Create a Bluetooth BLE Peripheral"""
def __init__(self, adapter_address, local_name=None, appearance=None):
self.app = localGATT.Application()
self.srv_mng = GATT.GattManager(adapter_address)
self.services = []
self.characteristics = []
self.descriptors = []
self.primary_services = []
self.dongle = adapter.Adapter(adapter_address)
self.local_name = local_name
self.appearance = appearance
self.advert = advertisement.Advertisement(1, 'peripheral')
self.ad_manager = advertisement.AdvertisingManager(adapter_address)
self.mainloop = async_tools.EventLoop()
def add_service(self, srv_id, uuid, primary):
"""
Add the service information required
:param srv_id: integer between 0 & 9999 as unique reference
:param uuid: The Bluetooth uuid number for this service
:param primary: boolean for if this service should be advertised
"""
self.services.append(localGATT.Service(srv_id, uuid, primary))
if primary:
self.primary_services.append(uuid)
def add_characteristic(self, srv_id, chr_id, uuid, value,
notifying, flags,
read_callback=None, write_callback=None,
notify_callback=None):
"""
Add information for characteristic.
:param srv_id: integer of parent service that was added
:param chr_id: integer between 0 & 9999 as unique reference
:param uuid: The Bluetooth uuid number for this characteristic
:param value: Initial value. list of integers in little endian format
:param notifying: Boolean representing initial state of notifications
:param flags: Defines how the characteristic value can be used. See
Core spec "Table 3.5: Characteristic Properties bit field", and
"Table 3.8: Characteristic Extended. Properties bit field".
Allowed values:
- "broadcast"
- "read"
- "write-without-response"
- "write"
- "notify"
- "indicate"
- "authenticated-signed-writes"
- "extended-properties"
- "reliable-write"
- "writable-auxiliaries"
- "encrypt-read"
- "encrypt-write"
- "encrypt-authenticated-read"
- "encrypt-authenticated-write"
- "secure-read" (Server only)
- "secure-write" (Server only)
- "authorize"
:param read_callback: function to be called when read_value is called
by client. function should return python list of integers
representing new value of characteristic
:param write_callback: function to be called when write_value is called
by client. Function should have two parameters value and options.
value is python list of integers with new value of characteristic.
:param notify_callback: function to be called when notify_start or
notify_stop is called by client. Function should have two
parameters notifying and characteristic. The `characteristic`
is the instantiation of a localGAT.Characteristic class
"""
self.characteristics.append(localGATT.Characteristic(
srv_id, chr_id, uuid, value, notifying, flags,
read_callback, write_callback, notify_callback
))
def add_descriptor(self, srv_id, chr_id, dsc_id, uuid, value, flags):
"""
Add information for the GATT descriptor.
:param srv_id: integer of parent service that was added
:param chr_id: integer of parent characteristic that was added
:param dsc_id: integer between 0 & 9999 as unique reference
:param uuid: The Bluetooth uuid number for this characteristic
:param value: Initial value. list of integers in little endian format
:param flags: Defines how the descriptor value can be used.
Possible values:
- "read"
- "write"
- "encrypt-read"
- "encrypt-write"
- "encrypt-authenticated-read"
- "encrypt-authenticated-write"
- "secure-read" (Server Only)
- "secure-write" (Server Only)
- "authorize"
"""
self.descriptors.append(localGATT.Descriptor(
srv_id, chr_id, dsc_id, uuid, value, flags
))
def _create_advertisement(self):
self.advert.service_UUIDs = self.primary_services
if self.local_name:
self.advert.local_name = self.local_name
if self.appearance:
self.advert.appearance = self.appearance
def publish(self):
"""Create advertisement and make peripheral visible"""
for service in self.services:
self.app.add_managed_object(service)
for chars in self.characteristics:
self.app.add_managed_object(chars)
for desc in self.descriptors:
self.app.add_managed_object(desc)
self._create_advertisement()
if not self.dongle.powered:
self.dongle.powered = True
self.srv_mng.register_application(self.app, {})
self.ad_manager.register_advertisement(self.advert, {})
try:
self.mainloop.run()
except KeyboardInterrupt:
self.mainloop.quit()
self.ad_manager.unregister_advertisement(self.advert)
@property
def on_connect(self):
"""
Callback for when a device connects to the peripheral.
Callback can accept 0, 1, or 2 positional arguments
1: a device.Device instance of the connected target
2: the local adapter address followed by the remote address
"""
return self.dongle.on_connect
@on_connect.setter
def on_connect(self, callback):
self.dongle.on_connect = callback
@property
def on_disconnect(self):
"""
Callback for when a device disconnects from the peripheral.
Callback can accept 0, 1, or 2 positional arguments
1: a device.Device instance of the disconnected target
2: the local adapter address followed by the remote address
"""
return self.dongle.on_disconnect
@on_disconnect.setter
def on_disconnect(self, callback):
self.dongle.on_disconnect = callback
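# Minimal sketch (adapter address and UUIDs are illustrative):
#   mcu = Peripheral('00:00:00:00:5B:AD', local_name='Sensor')
#   mcu.add_service(srv_id=1, uuid='12341000-1234-1234-1234-123456789abc',
#                   primary=True)
#   mcu.add_characteristic(srv_id=1, chr_id=1,
#                          uuid='12341002-1234-1234-1234-123456789abc',
#                          value=[], notifying=False, flags=['read'],
#                          read_callback=lambda: [42])
#   mcu.publish()   # blocks in the event loop until interrupted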
|
ukBaz/python-bluezero
|
bluezero/peripheral.py
|
Python
|
mit
| 6,857 | 0 |
from django import forms
from apps.clientes.models import Cliente
from apps.clientes.choices import SEXO_CHOICES
import re
class ClienteForm(forms.ModelForm):
"""
    Declares the fields and widget attributes shown in the form
"""
sexo = forms.ChoiceField(choices=SEXO_CHOICES, required=True)
class Meta:
model = Cliente
fields = [
'nombre',
'sexo',
'direccion',
'email',
'fecha_nac',
]
labels = {
'nombre': 'Nombre',
'sexo': 'Sexo',
'direccion': 'Dirección',
'email': 'Email',
'fecha_nac': 'Fecha de Nacimiento',
}
widgets = {
'nombre': forms.TextInput(attrs={'class': 'form-control'}),
'direccion': forms.TextInput(attrs={'class': 'form-control'}),
'email': forms.EmailInput(attrs={'class': 'form-control'}),
'fecha_nac': forms.TextInput(attrs={'class': 'form-control'}),
}
def clean_nombre(self):
"""
        Validate that the name is between 5 and 15 characters
"""
nombre_lim = self.cleaned_data
nombre = nombre_lim.get('nombre')
if len(nombre) < 5:
raise forms.ValidationError(
"Debe de tener un mínimo de 5 caracteres")
elif len(nombre) > 15:
raise forms.ValidationError(
"Debe de tener un maxímo de 15 caracteres")
return nombre
def clean_email(self):
"""
        Validate that the email is well formed and not already registered
"""
email = self.cleaned_data['email']
if Cliente.objects.filter(email=email).exists():
raise forms.ValidationError("El Email ya esta dado de alta")
        if not(re.match('^[a-z0-9._-]+@[a-z0-9._-]+\.[a-z]{2,15}$',
email.lower())):
raise forms.ValidationError("No es un email correcto")
return email
def clean_direccion(self):
"""
        Validate that the address is between 5 and 15 characters
"""
direccion = self.cleaned_data['direccion']
if len(direccion) < 5:
raise forms.ValidationError(
"Debe de tener un mínimo de 5 caracteres")
elif len(direccion) > 15:
raise forms.ValidationError(
"Debe de tener un maxímo de 5 caracteres")
return direccion
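# Usage sketch (in a view): ClienteForm(request.POST).is_valid() runs the
# clean_* validators above and collects their messages in form.errors.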
|
axelleonhart/TrainingDjango
|
materiales/apps/clientes/forms.py
|
Python
|
lgpl-3.0
| 2,527 | 0.002777 |
#!/bin/python3
import sys
import os
import tempfile
import pprint
import logging
from logging import debug, info, warning, error
def process_info(line):
line = line.strip()
arr = line.split(':')
if len(arr) < 2:
return None, None
key = arr[0]
val = None
if key == "freq":
val = "{}Hz".format(arr[1].strip())
elif key == "signal":
val = "{}%".format(100 + int(float(arr[1].split()[0])))
elif key == "SSID":
val = arr[1].strip()
elif key == 'WPA':
val = True
elif key == "RSN":
val = True
elif key == "capability" and "Privacy" in arr[1]:
key = "Privacy"
val = True
return key, val
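# e.g. process_info("signal: -52.00 dBm") -> ("signal", "48%")
#      process_info("freq: 2412")        -> ("freq", "2412Hz")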
def print_scan(bsss):
for bss in bsss:
info = bsss[bss]
print("{} {}: ssid : {}".format("*" if "associated" in info
else " ",
bss,
info["SSID"]))
print(" signal : {}".format(info["signal"]))
print(" freq : {}".format(info["freq"]))
print(" security : {}".format(
"WPA2" if info.get("RSN", False) else
"WPA" if info.get("WPA", False) else
"WEP" if info.get("Privacy", False) else
"None"))
def main():
wifi_if = "wlp8s0"
if len(sys.argv) == 2:
wifi_if = sys.argv[1]
iw_out = tempfile.mktemp(suffix="iw", prefix="scan_wifi")
debug("iw output file: {}".format(iw_out))
r = os.system("sudo iw dev {} scan > {}".format(wifi_if, iw_out))
if r:
error("Error when scanning {}".format(wifi_if))
sys.exit(1)
f = open(iw_out, 'r')
    bsss = dict()
    cur_bss = None
for line in f.readlines():
if line.startswith("BSS "):
cur_bss = line[4:21]
bsss[cur_bss] = dict()
if line.endswith("associated\n"):
bsss[cur_bss]["associated"] = True
elif not cur_bss:
error("Not assosied BSS for cureent line: {}".format(line))
continue
else:
key, val = process_info(line)
if key and val:
bsss[cur_bss][key] = val
    f.close()
    print_scan(bsss)
os.remove(iw_out)
if __name__ == "__main__":
# logging.basicConfig(level=logging.DEBUG)
main()
|
lejenome/my_scripts
|
scan_wifi.py
|
Python
|
gpl-2.0
| 2,384 | 0.001678 |
import struct
import uuid
from enum import IntEnum
from typing import List, Optional, Set
from .sid import SID
class ACEFlag(IntEnum):
""" ACE type-specific control flags. """
OBJECT_INHERIT = 0x01
CONTAINER_INHERIT = 0x02
NO_PROPAGATE_INHERIT = 0x04
INHERIT_ONLY = 0x08
INHERITED = 0x10
SUCCESSFUL_ACCESS = 0x40
FAILED_ACCESS = 0x80
@property
def short_name(self) -> str:
""" The SDDL short name of the flag. """
short_names = {
"OBJECT_INHERIT": "OI",
"CONTAINER_INHERIT": "CI",
"NO_PROPAGATE_INHERIT": "NP",
"INHERIT_ONLY": "IO",
"INHERITED": "ID",
"SUCCESSFUL_ACCESS": "SA",
"FAILED_ACCESS": "FA",
}
return short_names[self.name]
class ACEType(IntEnum):
""" Type of the ACE. """
ACCESS_ALLOWED = 0
ACCESS_DENIED = 1
SYSTEM_AUDIT = 2
SYSTEM_ALARM = 3
ACCESS_ALLOWED_COMPOUND = 4
ACCESS_ALLOWED_OBJECT = 5
ACCESS_DENIED_OBJECT = 6
SYSTEM_AUDIT_OBJECT = 7
SYSTEM_ALARM_OBJECT = 8
ACCESS_ALLOWED_CALLBACK = 9
ACCESS_DENIED_CALLBACK = 10
ACCESS_ALLOWED_CALLBACK_OBJECT = 11
ACCESS_DENIED_CALLBACK_OBJECT = 12
SYSTEM_AUDIT_CALLBACK = 13
SYSTEM_ALARM_CALLBACK = 14
SYSTEM_AUDIT_CALLBACK_OBJECT = 15
SYSTEM_ALARM_CALLBACK_OBJECT = 16
SYSTEM_MANDATORY_LABEL = 17
SYSTEM_RESOURCE_ATTRIBUTE = 18
SYSTEM_SCOPED_POLICY_ID = 19
@property
def short_name(self) -> str:
""" The SDDL short name of the type. """
short_names = {
"ACCESS_ALLOWED": "A",
"ACCESS_DENIED": "D",
"SYSTEM_AUDIT": "AU",
"SYSTEM_ALARM": "AL",
"ACCESS_ALLOWED_COMPOUND": "",
"ACCESS_ALLOWED_OBJECT": "OA",
"ACCESS_DENIED_OBJECT": "OD",
"SYSTEM_AUDIT_OBJECT": "OU",
"SYSTEM_ALARM_OBJECT": "OL",
"ACCESS_ALLOWED_CALLBACK": "XA",
"ACCESS_DENIED_CALLBACK": "XD",
"ACCESS_ALLOWED_CALLBACK_OBJECT": "ZA",
"ACCESS_DENIED_CALLBACK_OBJECT": "ZD",
"SYSTEM_AUDIT_CALLBACK": "XU",
"SYSTEM_ALARM_CALLBACK": "XL",
"SYSTEM_AUDIT_CALLBACK_OBJECT": "ZU",
"SYSTEM_ALARM_CALLBACK_OBJECT": "ZL",
"SYSTEM_MANDATORY_LABEL": "ML",
"SYSTEM_RESOURCE_ATTRIBUTE": "RA",
"SYSTEM_SCOPED_POLICY_ID": "SP",
}
return short_names[self.name]
@property
def is_object_type(self) -> bool:
""" Flag for ACE types with objects. """
return self in (
ACEType.ACCESS_ALLOWED_OBJECT,
ACEType.ACCESS_DENIED_OBJECT,
ACEType.SYSTEM_AUDIT_OBJECT,
ACEType.SYSTEM_ALARM_OBJECT,
ACEType.ACCESS_ALLOWED_CALLBACK_OBJECT,
ACEType.ACCESS_DENIED_CALLBACK_OBJECT,
ACEType.SYSTEM_AUDIT_CALLBACK_OBJECT,
ACEType.SYSTEM_ALARM_CALLBACK_OBJECT,
)
class ACERight(IntEnum):
""" The rights of the ACE. """
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x4000000
GENERIC_EXECUTE = 0x20000000
GENERIC_ALL = 0x10000000
MAXIMUM_ALLOWED = 0x02000000
ACCESS_SYSTEM_SECURITY = 0x01000000
SYNCHRONIZE = 0x00100000
WRITE_OWNER = 0x00080000
WRITE_DACL = 0x00040000
READ_CONTROL = 0x00020000
DELETE = 0x00010000
DS_CONTROL_ACCESS = 0x00000100
DS_CREATE_CHILD = 0x00000001
DS_DELETE_CHILD = 0x00000002
ACTRL_DS_LIST = 0x00000004
DS_SELF = 0x00000008
DS_READ_PROP = 0x00000010
DS_WRITE_PROP = 0x00000020
DS_DELETE_TREE = 0x00000040
DS_LIST_OBJECT = 0x00000080
@property
def short_name(self) -> str:
""" The SDDL short name of the access right. """
short_names = {
"GENERIC_READ": "GR",
"GENERIC_WRITE": "GW",
"GENERIC_EXECUTE": "GX",
"GENERIC_ALL": "GA",
"MAXIMUM_ALLOWED": "MA",
"ACCESS_SYSTEM_SECURITY": "AS",
"SYNCHRONIZE": "SY",
"WRITE_OWNER": "WO",
"WRITE_DACL": "WD",
"READ_CONTROL": "RC",
"DELETE": "SD",
"DS_CONTROL_ACCESS": "CR",
"DS_CREATE_CHILD": "CC",
"DS_DELETE_CHILD": "DC",
"ACTRL_DS_LIST": "LC",
"DS_SELF": "SW",
"DS_READ_PROP": "RP",
"DS_WRITE_PROP": "WP",
"DS_DELETE_TREE": "DT",
"DS_LIST_OBJECT": "LO",
}
return short_names[self.name]
class ACLRevision(IntEnum):
""" The ACL revision. """
ACL_REVISION = 0x02
ACL_REVISION_DS = 0x04
class ACE:
"""
A class for the access control entry, that encodes the user rights
afforded to a principal.
:param ACEType ace_type: the type of the ACE.
:param Set[ACEFlag] flags: the set of flags for the ACE.
:param int mask: the access mask to encode the user rights as an int.
:param SID trustee_sid: the SID of the trustee.
:param uuid.UUID|None object_type: a UUID that identifies a property
set, property, extended right, or type of child object.
:param uuid.UUID|None inherited_object_type: a UUID that identifies the
type of child object that can inherit the ACE.
:param bytes application_data: optional application data.
"""
def __init__(
self,
ace_type: ACEType,
flags: Set[ACEFlag],
mask: int,
trustee_sid: SID,
object_type: Optional[uuid.UUID],
inherited_object_type: Optional[uuid.UUID],
application_data: bytes,
) -> None:
self.__type = ace_type
self.__flags = flags
self.__mask = mask
self.__object_type = object_type
self.__inherited_object_type = inherited_object_type
self.__trustee_sid = trustee_sid
self.__application_data = application_data
@classmethod
def from_binary(cls, data: bytes) -> "ACE":
"""
Create an ACE object from a binary blob.
:param bytes data: a little-endian byte ordered byte input.
:returns: A new ACE instance.
:rtype: ACE
:raises TypeError: when the parameter is not bytes.
:raises ValueError: when the input cannot be parsed as an ACE
object.
"""
try:
if not isinstance(data, bytes):
raise TypeError("The `data` parameter must be bytes")
object_type = None
inherited_object_type = None
application_data = None
ace_type, flags, size, mask = struct.unpack("<BBHL", data[:8])
pos = 8
if ACEType(ace_type).is_object_type:
obj_flag = struct.unpack("<I", data[8:12])[0]
pos += 4
if obj_flag & 0x00000001:
object_type = uuid.UUID(bytes_le=data[pos : pos + 16])
pos += 16
if obj_flag & 0x00000002:
inherited_object_type = uuid.UUID(bytes_le=data[pos : pos + 16])
pos += 16
trustee_sid = SID(bytes_le=data[pos:])
pos += trustee_sid.size
application_data = data[pos:size]
this = cls(
ACEType(ace_type),
{flg for flg in ACEFlag if flags & flg},
mask,
trustee_sid,
object_type,
inherited_object_type,
application_data,
)
return this
except struct.error as err:
raise ValueError(f"Not a valid binary ACE, {err}")
def __str__(self):
""" Return the SDDL string representation of the ACE object. """
flags = "".join(
flg.short_name for flg in sorted(self.flags, key=lambda f: f.value)
)
rights = "".join(
rgt.short_name for rgt in sorted(self.rights, key=lambda r: r.value)
)
object_guid = self.object_type if self.object_type else ""
inherit_object_guid = (
self.inherited_object_type if self.inherited_object_type else ""
)
sid = (
self.trustee_sid.sddl_alias
if self.trustee_sid.sddl_alias
else str(self.trustee_sid)
)
return f"({self.type.short_name};{flags};{rights};{object_guid};{inherit_object_guid};{sid})"
def to_binary(self) -> bytes:
"""
Convert ACE object to binary form with little-endian byte order.
:returns: Bytes of the binary ACE instance
:rtype: bytes
"""
size = self.size
data = bytearray(size)
struct.pack_into(
"<BBHL", data, 0, self.type.value, sum(self.flags), size, self.mask
)
pos = 8
if self.type.is_object_type:
obj_flag = 0x00000001 if self.object_type else 0
obj_flag |= 0x00000002 if self.inherited_object_type else 0
struct.pack_into("<L", data, pos, obj_flag)
pos += 4
if self.object_type:
data[pos : pos + 16] = self.object_type.bytes_le
pos += 16
if self.inherited_object_type:
data[pos : pos + 16] = self.inherited_object_type.bytes_le
pos += 16
data[pos : pos + self.trustee_sid.size] = self.trustee_sid.bytes_le
pos += self.trustee_sid.size
data[pos : pos + size] = self.application_data
return bytes(data)
@property
def type(self) -> ACEType:
""" The type of the ACE. """
return self.__type
@property
def flags(self) -> Set[ACEFlag]:
""" The flags of the ACE. """
return self.__flags
@property
def size(self) -> int:
""" The binary size of ACE in bytes. """
size = 8
if self.type.is_object_type:
size += 4
if self.object_type:
size += 16
if self.inherited_object_type:
size += 16
size += self.trustee_sid.size
size += len(self.application_data)
return size
@property
def mask(self) -> int:
""" The acces mask """
return self.__mask
@property
def rights(self) -> Set[ACERight]:
""" The set of ACERights based on the access mask."""
return {rgt for rgt in ACERight if self.mask & rgt}
@property
def object_type(self) -> Optional[uuid.UUID]:
""" The uuid of the object type. """
return self.__object_type
@property
def inherited_object_type(self) -> Optional[uuid.UUID]:
""" The uuid of the inherited object type. """
return self.__inherited_object_type
@property
def trustee_sid(self) -> SID:
""" The sid of the trustee. """
return self.__trustee_sid
@property
def application_data(self) -> bytes:
""" The possible application data. """
return self.__application_data
class ACL:
"""
The access control list (ACL) is used to specify a list of individual
access control entries (ACEs). An ACL and an array of ACEs comprise a
complete access control list.
:param ACLRevision revision: the revision of the ACL.
:param List[ACE] aces: list of :class:`ACE`.
"""
def __init__(self, revision: ACLRevision, aces: List[ACE]) -> None:
self.__revision = revision
self.__aces = aces
@classmethod
def from_binary(cls, data: bytes) -> "ACL":
"""
Create an ACL object from a binary blob.
:param bytes data: a little-endian byte ordered byte input.
:returns: A new ACL instance.
:rtype: ACL
:raises TypeError: when the parameter is not bytes.
:raises ValueError: when the input cannot be parsed as an ACL
object.
"""
try:
if not isinstance(data, bytes):
raise TypeError("The `data` parameter must be bytes")
# Unwanted values are the reserved sbz1, size and sbz2.
rev, _, _, count, _ = struct.unpack("<BBHHH", data[:8])
pos = 8
aces = []
for _ in range(count):
ace = ACE.from_binary(data[pos:])
aces.append(ace)
pos += ace.size
this = cls(ACLRevision(rev), aces)
return this
except struct.error as err:
raise ValueError(f"Not a valid binary ACL, {err}")
def to_binary(self) -> bytes:
"""
Convert ACL object to binary form with little-endian byte order.
:returns: Bytes of the binary ACL instance
:rtype: bytes
"""
size = self.size
data = bytearray(8)
struct.pack_into("<BBHHH", data, 0, self.revision, 0, size, len(self.aces), 0)
for ace in self.aces:
data.extend(ace.to_binary())
return bytes(data)
@property
def revision(self) -> ACLRevision:
""" The revision of ACL. """
return self.__revision
@property
def size(self) -> int:
""" The binary size in bytes. """
return 8 + sum(ace.size for ace in self.aces)
@property
def aces(self) -> List[ACE]:
""" The list of :class:`ACE` objects. """
return self.__aces
|
Noirello/PyLDAP
|
src/bonsai/active_directory/acl.py
|
Python
|
mit
| 13,344 | 0.000749 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# StartOS Device Manager(ydm).
# Copyright (C) 2011 ivali, Inc.
# hechao <hechao@ivali.com>, 2011.
__author__="hechao"
__date__ ="$2011-12-20 16:36:20$"
import gc
from xml.parsers import expat
from hwclass import *
class Device:
def __init__(self, dev_xml):
self.description = ''
self.product = ''
self.vendor = ''
self.version = ''
self.businfo = ''
self.logicalname = ''
self.date = ''
self.serial = ''
self.capacity = ''
self.width = ''
self.clock = ''
self.slot = ''
self.size = ''
self.config = {}
self.capability = []
self.attr = {}
self.dev_type = {}
self.pcid = {}
self._parser = expat.ParserCreate()
self._parser.buffer_size = 102400
self._parser.StartElementHandler = self.start_handler
self._parser.CharacterDataHandler = self.data_handler
self._parser.EndElementHandler = self.end_handler
self._parser.returns_unicode = False
fd = file(dev_xml)
self._parser.ParseFile(fd)
fd.close()
def start_handler(self, tag, attrs):
self.flag = tag
if tag == "node":
self.attr = attrs
elif tag == "setting":
self.config.setdefault(attrs["id"], attrs["value"])
elif tag == "capability":
self.capability.append(attrs["id"])
def data_handler(self, data):
if(data == '\n'):
return
if(data.isspace()):
return
if self.flag == "description":
self.description = data.strip()
elif self.flag == "product":
self.product = data.strip()
elif self.flag == "vendor":
self.vendor = data.strip()
elif self.flag == "businfo":
self.businfo = data.strip()
elif self.flag == "logicalname":
self.logicalname = data.strip()
elif self.flag == "version":
self.version = data.strip()
elif self.flag == "date":
self.date = data.strip()
elif self.flag == "serial":
self.serial = data.strip()
elif self.flag == "capacity":
self.capacity = data.strip()
elif self.flag == "width":
self.width = data.strip()
elif self.flag == "clock":
self.clock = data.strip()
elif self.flag == "slot":
self.slot = data.strip()
elif self.flag == "size":
self.size = data.strip()
def end_handler(self, tag):
if tag == "node":
if self.attr["class"] == "system":
system = System(self.description, self.product, self.vendor, self.version, \
self.serial, self.width, self.config, self.capability)
self.dev_type.setdefault((0, "system"), []).append(system)
elif self.attr["id"].split(":")[0] == "cpu" and self.attr["class"] == "processor":
cpu = Cpu(self.description, self.product, self.vendor, self.version, \
self.businfo, self.serial, self.slot, self.size, self.capacity, self.width, self.clock, self.config, self.capability)
self.dev_type.setdefault((1, "cpu"), []).append(cpu)
elif self.attr["id"].split(":")[0] == "cache" and self.attr["class"] == "memory":
cache = Cache(self.description, self.product, self.vendor, self.version, self.slot, self.size)
self.dev_type.setdefault((1, "cpu"), []).append(cache)
elif (self.attr["id"] == "core" or self.attr["id"] == "board") and self.attr["class"] == "bus":
motherboard = Motherboard(self.description, self.product, self.vendor, self.version, self.serial)
self.dev_type.setdefault((2, "motherboard"), []).append(motherboard)
elif self.attr["id"] == "firmware" and self.attr["class"] == "memory":
bios = Bios(self.description, self.product, self.vendor, self.version, \
self.date, self.size, self.capability)
self.dev_type.setdefault((2, "motherboard"), []).append(bios)
elif self.attr["id"].split(":")[0] == "memory" and self.attr["class"] == "memory":
memory = Memory(self.description, self.product, self.vendor, self.version, \
self.slot, self.size)
self.dev_type.setdefault((3, "memory"), []).append(memory)
elif self.attr["id"].split(":")[0] == "bank" and self.attr["class"] == "memory":
bank = Bank(self.description, self.product, self.vendor, self.version, \
self.serial, self.slot, self.size, self.width, self.clock)
self.dev_type.setdefault((3, "memory"), []).append(bank)
elif self.attr["id"].split(":")[0] == "display" and self.attr["class"] == "display":
display = Display(self.description, self.product, self.vendor, self.version, \
self.businfo, self.config, self.capability)
self.dev_type.setdefault((4, "display"), []).append(display)
self.pcid[display.pcid] = "display"
if get_monitor():
monitor = Monitor("", "", "", "")
self.dev_type.setdefault((5, "monitor"), [monitor])#.append(monitor)
elif self.attr["id"].split(":")[0] == "disk" and self.attr["class"] == "disk":
disk = Disk(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.serial, self.size, self.config, self.capability)
self.dev_type.setdefault((6, "disk"), []).append(disk)
elif self.attr["id"].split(":")[0] == "cdrom" and self.attr["class"] == "disk":
cdrom = Cdrom(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.config, self.capability)
self.dev_type.setdefault((7, "cdrom"), []).append(cdrom)
elif self.attr["class"] == "storage" and self.attr["handle"]:
storage = Storage(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.serial, self.config, self.capability)
self.dev_type.setdefault((8, "storage"), []).append(storage)
elif (self.attr["class"] == "network") or (self.attr["id"].split(":")[0] == "bridge" \
and self.attr["class"] == "bridge"):
network = Network(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.serial, self.capacity, self.config, self.capability)
self.dev_type.setdefault((9, "network"), []).append(network)
self.pcid[network.pcid] = "network"
elif self.attr["class"] == "multimedia":
media = Multimedia(self.description, self.product, self.vendor, self.version, \
self.businfo, self.config, self.capability)
self.dev_type.setdefault((10, "multimedia"), []).append(media)
self.pcid[media.pcid] = "multimedia"
elif self.attr["class"] == "input":
imput = Imput(self.description, self.product, self.vendor, self.version, \
self.businfo, self.config, self.capability)
self.dev_type.setdefault((11, "input"), []).append(imput)
self.pcid[imput.pcid] = "input"
elif self.attr["id"].split(":")[0] != "generic" and self.attr["class"] == "generic":
generic = Generic(self.description, self.product, self.vendor, self.version, \
self.businfo, self.serial, self.config, self.capability)
self.dev_type.setdefault((12, "generic"), []).append(generic)
self.pcid[generic.pcid] = "generic"
elif self.attr["id"].split(":")[0] != "communication" and self.attr["class"] == "communication":
modem = Modem(self.description, self.product, self.vendor, self.version, \
self.businfo, self.serial, self.config, self.capability)
self.dev_type.setdefault((12, "generic"), []).append(modem)
elif self.attr["id"].split(":")[0] == "battery" and self.attr["class"] == "power":
power = Power(self.description, self.product, self.vendor, self.version, \
self.slot, self.capacity, self.config)
self.dev_type.setdefault((12, "generic"), []).append(power)
self.clear()
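    # reset all per-element fields so the next parsed node starts from a clean slate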
def clear(self):
self.description = ''
self.product = ''
self.vendor = ''
self.version = ''
self.businfo = ''
self.logicalname = ''
self.date = ''
self.serial = ''
self.capacity = ''
self.width = ''
self.clock = ''
self.slot = ''
self.size = ''
self.config = {}
self.capability = []
self.attr = {}
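    # close() drops the reference to the underlying parser and forces a
    # collection pass so it is released promptly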
def close(self):
del self._parser
gc.collect()
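# A minimal standalone demonstration (not part of the original module) of the
# grouping pattern used throughout the handler above: dev_type maps
# (sort_order, category) tuples to lists, so setdefault(key, []).append(obj)
# creates the bucket on first use and appends on every later match, and
# sorting the keys yields a stable category order for display.
if __name__ == "__main__":
    dev_type = {}
    for order, category, obj in [(3, "memory", "bank0"),
                                 (3, "memory", "bank1"),
                                 (6, "disk", "sda")]:
        dev_type.setdefault((order, category), []).append(obj)
    print(sorted(dev_type.items()))
    # [((3, 'memory'), ['bank0', 'bank1']), ((6, 'disk'), ['sda'])]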
|
jun-zhang/device-manager
|
src/lib/ydevicemanager/devices.py
|
Python
|
gpl-2.0
| 8,950 | 0.01676 |
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for writers.android_policy_writer'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
import unittest
from xml.dom import minidom
from writers import writer_unittest_common
from writers import android_policy_writer
class AndroidPolicyWriterUnittest(writer_unittest_common.WriterUnittestCommon):
'''Unit tests to test assumptions in Android Policy Writer'''
def testPolicyWithoutItems(self):
# Test an example policy without items.
policy = {
'name': '_policy_name',
'caption': '_policy_caption',
'desc': 'This is a long policy caption. More than one sentence '
'in a single line because it is very important.\n'
'Second line, also important'
}
writer = android_policy_writer.GetWriter({})
writer.Init()
writer.BeginTemplate()
writer.WritePolicy(policy)
    self.assertEqual(
writer._resources.toxml(), '<resources>'
'<string name="_policy_nameTitle">_policy_caption</string>'
'<string name="_policy_nameDesc">This is a long policy caption. More '
'than one sentence in a single line because it is very '
'important.\nSecond line, also important'
'</string>'
'</resources>')
def testPolicyWithItems(self):
    # Test an example policy with items, including platform-restricted ones.
    policy = {
        'name': '_policy_name',
        'caption': '_policy_caption',
        'desc': '_policy_desc_first.\nadditional line',
        'items': [{
            'caption': '_caption1',
            'value': '_value1',
        }, {
            'caption': '_caption2',
            'value': '_value2',
        }, {
            'caption': '_caption3',
            'value': '_value3',
            'supported_on': [{'platform': 'win'}, {'platform': 'win7'}]
        }, {
            'caption': '_caption4',
            'value': '_value4',
            'supported_on': [{'platform': 'android'}, {'platform': 'win7'}]
        }]
    }
writer = android_policy_writer.GetWriter({})
writer.Init()
writer.BeginTemplate()
writer.WritePolicy(policy)
    self.assertEqual(
writer._resources.toxml(), '<resources>'
'<string name="_policy_nameTitle">_policy_caption</string>'
'<string name="_policy_nameDesc">_policy_desc_first.\n'
'additional line</string>'
'<string-array name="_policy_nameEntries">'
'<item>_caption1</item>'
'<item>_caption2</item>'
'<item>_caption4</item>'
'</string-array>'
'<string-array name="_policy_nameValues">'
'<item>_value1</item>'
'<item>_value2</item>'
'<item>_value4</item>'
'</string-array>'
'</resources>')
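# A hedged sketch (an illustrative assumption, not the writer's actual
# implementation) of the platform filtering the tests above exercise: an item
# is kept only when it has no supported_on restriction or at least one entry
# targets android, which is why _caption3 (win/win7 only) is dropped while
# _caption4 survives.
def _is_android_supported_sketch(item):
  platforms = [s.get('platform') for s in item.get('supported_on', [])]
  return not platforms or 'android' in platforms
# e.g. _is_android_supported_sketch({'supported_on': [{'platform': 'win'}]})
# is False, while an item with no supported_on key is True.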
if __name__ == '__main__':
unittest.main()
|
endlessm/chromium-browser
|
components/policy/tools/template_writers/writers/android_policy_writer_unittest.py
|
Python
|
bsd-3-clause
| 3,381 | 0.001479 |
#!/usr/bin/env python
# reads data from wind direction thingy (see README)
# labels follow those set out in the Wunderground PWS API:
# http://wiki.wunderground.com/index.php/PWS_-_Upload_Protocol
#
# SOURCES:
# RETURNS: a SensorReading object holding the wind direction in degrees
# CREATED: 2017-08-02
# ORIGINAL SOURCE: https://github.com/dirtchild/weatherPi [please do not remove this line]
# MODIFIED: see https://github.com/dirtchild/weatherPi
from SensorData import SensorReading
import time
import Adafruit_ADS1x15
import convertors
import sys
sys.path.append("../")
from config import *
def getReading():
# Choose a gain of 1 for reading voltages from 0 to 4.09V.
# Or pick a different gain to change the range of voltages that are read:
# - 2/3 = +/-6.144V
# - 1 = +/-4.096V
# - 2 = +/-2.048V
# - 4 = +/-1.024V
# - 8 = +/-0.512V
# - 16 = +/-0.256V
# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.
GAIN = 16
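    # (with GAIN = 16 the full-scale input range is +/-0.256V, per the table
    # above; the assumption is that the vane's divider output stays that small)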
# the channel on the ADC to use
CHANNEL = 0
    # Create an ADS1115 ADC (16-bit) instance and start continuous conversion
adc = Adafruit_ADS1x15.ADS1115()
adc.start_adc(CHANNEL, gain=GAIN)
start = time.time()
totalVoltage = 0
cnt = 0
#DEBUG
#print("[PRE]adc.get_last_result()[",adc.get_last_result(),"]")
while (time.time() - start) <= 5.0:
        # the ADC will sometimes return spurious negative results, so retry
        # until a positive sample comes back
        thisRead = -1
        while thisRead < 1:
            thisRead = adc.get_last_result()
        #DEBUG
        #print(cnt,": thisRead[",thisRead,"]")
        # samples read about a decimal place too high, so divide by 10 to bring
        # them into a measurable range (an acknowledged hack pending proper
        # calibration of the divider)
        totalVoltage += thisRead / 10
        cnt += 1
time.sleep(0.5)
#DEBUG
#print("[POST]adc.get_last_result()[",adc.get_last_result(),"]")
# Stop continuous conversion. After this point you can't get data from get_last_result!
adc.stop_adc()
avgVoltage = totalVoltage / cnt
#DEBUG
#print("avgVoltage[",avgVoltage,"] = totalVoltage[",totalVoltage,"] / cnt[",cnt,"] (G:[",GAIN,"] C:[",CHANNEL,"])")
return(SensorReading("winddir", "winddir", convertors.voltToDeg(avgVoltage,WIND_READ_VOLT,WIND_DIR_MOUNT_ADJ), "degree angle"))
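# A hypothetical sketch of the conversion convertors.voltToDeg performs (that
# module is not included here, so the exact shape below is an assumption): a
# linear vane maps its output voltage across 0-360 degrees of the reference
# voltage, then the fixed mounting offset is added and the angle is wrapped
# back into [0, 360).
def volt_to_deg_sketch(volts, ref_volts, mount_offset_deg):
    return ((volts / ref_volts) * 360.0 + mount_offset_deg) % 360.0
# e.g. volt_to_deg_sketch(1.65, 3.3, 0.0) returns 180.0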
# for testing
def main():
    print(getReading())

if __name__ == "__main__":
    main()
|
dirtchild/weatherPi
|
weatherSensors/windDirection.py
|
Python
|
gpl-3.0
| 2,252 | 0.030195 |