```
import numpy as np
from helper.poly_fit import poly_fit_timescales
import matplotlib
import matplotlib.pyplot as plt
fs = 13
matplotlib.rcParams['font.size']=fs
matplotlib.rcParams['lines.markersize']=8
from allsn_info import get_at2019dge, get_iPTF14gqr, get_sn2005ek, get_iPTF16hgs, get_sn2010X, \
get_sn2019bkc, get_sn2018gep, get_sn2018kzr, get_ptf10iuv, get_ptf09dav, \
get_sn2002bj, get_iPTF16asu
```
### Read light curves of subluminous fast transients
AT2019dge (this work)
```
tb0 = get_at2019dge()['tb']
tb0 = tb0[tb0['filter'].values=='r']
x0 = tb0["tmax_rf"].values + 0.2 # previously relative to g-band, change to r-band
y0 = tb0['mag0_abs'].values
ey0 = tb0['emag'].values
r0 = poly_fit_timescales(x0, y0, ey0, name = "AT2019dge")
r0
```
iPTF14gqr (De et al. 2018)
```
tb1 = get_iPTF14gqr()
tb1 = tb1[tb1['filter'].values=='r ']
x1 = tb1["tmax_rf"].values[3:]
y1 = tb1['mag0_abs'].values[3:]
ey1 = tb1['emag'].values[3:]
r1 = poly_fit_timescales(x1, y1, ey1, name = "iPTF14gqr")
r1["tau_rise"] = 1
r1["tau_rise_lim"] = 2
r1
```
SN2005ek (Drout et al. 2013)
```
tb2 = get_sn2005ek()
tb2 = tb2[tb2["filter"].values=="R"]
x2 = tb2["tmax_rf"].values
y2 = tb2['mag0_abs'].values
ey2 = tb2['emag'].values
r2 = poly_fit_timescales(x2, y2, ey2, name = "SN2005ek")
r2
```
iPTF16hgs (De et al. 2018)
```
tb3 = get_iPTF16hgs()
ix = np.any([tb3["filter"].values=='r',
np.all([tb3["filter"].values=='o', tb3["tmax_rf"].values==min(tb3["tmax_rf"].values)], axis=0)
], axis=0)
tb3 = tb3[ix]
tb3 = tb3.sort_values(by=['tmax_rf'])
x3 = tb3["tmax_rf"].values
y3 = tb3['mag0_abs'].values
ey3 = tb3['emag'].values
#%matplotlib notebook
r3 = poly_fit_timescales(x3, y3, ey3, name = "iPTF16hgs")
r3["tau_rise"] = (81.32 - 69.01) / (1 + 0.017)
```
SN2010X (Kasliwal et al. 2010)
```
tb4 = get_sn2010X() # yes
tb4 = tb4[tb4["filter"].values=="r"]
x4 = tb4["tmax_rf"].values
y4 = tb4['mag0_abs'].values
ey4 = tb4['emag'].values
r4 = poly_fit_timescales(x4, y4, ey4, name = "SN2010X")
```
SN2019bkc (Chen et al. 2020). Add ZTF $g$-band discovery epoch.
```
tb5 = get_sn2019bkc()
ix = np.any([tb5["filter"].values=='r',
np.all([tb5["filter"].values=='g', tb5["tmax_rf"].values==min(tb5["tmax_rf"].values)], axis=0)
], axis=0)
tb5 = tb5[ix]
# Add g-band detection
tb5 = tb5.sort_values(by=['tmax_rf'])
x5 = tb5["tmax_rf"].values
y5 = tb5['mag0_abs'].values
ey5 = tb5['emag'].values
r5 = poly_fit_timescales(x5, y5, ey5, name = "SN2019bkc")
```
SN2018gep
```
tb6 = get_sn2018gep()
tb6 = tb6[tb6["filter"].values=="r"]
x6 = tb6["tmax_rf"].values
y6 = tb6['mag0_abs'].values
ey6 = tb6['emag'].values
r6 = poly_fit_timescales(x6, y6, ey6, name = 'SN2018gep')
plt.ylim(-0.1, 2.5)
r6["tau_decay"] = -99
```
SN2018kzr (McBrien et al. 2019)
```
tb7 = get_sn2018kzr()
tb7 = tb7[tb7["filter"].values=="r"]
x7 = tb7["tmax_rf"].values
y7 = tb7['mag0_abs'].values
ey7 = tb7['emag'].values
r7 = poly_fit_timescales(x7, y7, ey7, name = 'SN2018kzr')
```
PTF09dav (Sullivan et al. 2011)
```
tb8 = get_ptf09dav()
x8 = tb8["tmax_rf"].values
y8 = tb8['mag0_abs'].values
ey8 = tb8['emag'].values
r8 = poly_fit_timescales(x8, y8, ey8, name = 'PTF09dav')
```
SN2002bj (Poznanski et al. 2010)
```
tb9 = get_sn2002bj()
ix = tb9["filter"].values == "r"
x9 = tb9["tmax_rf"].values[ix]
y9 = tb9['mag0_abs'].values[ix]
ey9 = tb9['emag'].values[ix]
r9 = poly_fit_timescales(x9, y9, ey9, name = 'SN2002bj')
```
PTF10iuv (Kasliwal et al. 2012)
```
tb10 = get_ptf10iuv()
ix = tb10["filter"].values=='r'
tb10 = tb10[ix]
x10 = tb10["tmax_rf"].values
y10 = tb10['mag0_abs'].values
ey10 = tb10['emag'].values
r10 = poly_fit_timescales(x10, y10, ey10, name = 'PTF10iuv')
```
iPTF16asu (Whitesides et al. 2017)
```
tb11 = get_iPTF16asu()
ix = tb11["filter"].values=='g' # but this is rest-frame r-band
tb11 = tb11[ix]
x11 = tb11["tmax_rf"].values
y11 = tb11['mag0_abs'].values
ey11 = tb11['emag'].values
r11 = poly_fit_timescales(x11, y11, ey11, name = 'iPTF16asu')
r11["tau_decay"] = -99
```
KSN2015K (Rest et al. 2018)
I cannot find the photometric data, but from Figure 2 of that paper I estimate a half-light rise time of ~1.35 days.
```
def add_timescale_circle(r0, ax, ax2):
name = r0["name"]
if name == "AT2019dge":
z1 = 2
z2 = 3
ms = 15
marker="*"
else:
z1 = 1
z2 = 2
ms = 6
marker="o"
color_rise = "k"
color_decay = "k"
trise = r0["tau_rise"]
tdecay = r0["tau_decay"]
decaylim = r0['tau_decay_lim']
riselim = r0['tau_rise_lim']
Mpeak = r0["Mpeak"]
xpos = trise + 0.2
xpos2 = tdecay + 0.2
ypos = Mpeak
ypos2 = Mpeak
fontsize = fs
colorr = "k"
if name[:2]=="SN" or name[:2]=="AT":
tt = name[4:]
elif name[:4] == "iPTF" or name[:4]=="OGLE":
tt = name[4:]
elif name[:3]=="PTF":
tt = name[3:]
else:
tt = name
if tt=="10X":
ypos+=0.02
if tt=="02bj":
ypos2+=0.02
if tt=="09dav":
ypos+=0.05
if tt == "19dge":
fontsize+=2
xpos2 -=2.5
xpos -= 2.
ypos += 0.25
colorr = "r"
color_rise = "r"
color_decay = "r"
if tt=="19bkc":
ypos2 += 0.1
xpos2 -= 2
if tt=="05ek":
ypos2 -= 0.1
xpos2 -= 1
if tt == "10iuv":
ypos += 0.12
xpos -= 0.3
ypos2 += 0.13
xpos2 -= 0.7
if trise!=-99 and tt!="19bkc":
if riselim!=True:
ax.plot(trise, Mpeak, marker=marker, markersize = ms, color = color_rise, zorder = z2)
else:
ax.plot(trise, Mpeak, marker=marker, markersize = ms, markerfacecolor = "white", color = color_rise, zorder = z2)
ax.text(xpos, ypos+0.05, tt, color=colorr, fontsize = fontsize)
if tdecay!=-99:
if decaylim!=True:
ax2.plot(tdecay, Mpeak, marker=marker, markersize = ms, color = color_decay, zorder = z2)
else:
ax2.plot(tdecay, Mpeak, marker=marker, markersize = ms, markerfacecolor = "white", color = color_decay, zorder = z2)
ax2.text(xpos2, ypos2+0.05, tt, color=colorr, fontsize = fontsize)
def adjust_comparefig(ax2, isrise = True):
if isrise == True:
ybottom = -15.3
yupper = -20.8
xmin = 0
xmax = 13
else:
ybottom = -15.5
yupper = -19
xmin = 0
xmax = 13
ax2.set_ylim(ybottom, yupper)
ax2.set_xlim(xmin, xmax)
if isrise == False:
xmajor = 2
xminor = 0.5
else:
xmajor = 2
xminor = 0.5
ax2.xaxis.set_major_locator(plt.MultipleLocator(xmajor))
ax2.xaxis.set_minor_locator(plt.MultipleLocator(xminor))
if isrise == False:
yminor = 0.1
ymajor = 0.5
else:
yminor = 0.2
ymajor = 1
ax2.yaxis.set_major_locator(plt.MultipleLocator(ymajor))
ax2.yaxis.set_minor_locator(plt.MultipleLocator(yminor))
ax2.tick_params(which = 'major', length = 4, top=True, right=True)
ax2.tick_params(which = 'minor', length = 2, top=True, right=True)
#ax2.set_ylabel('Peak magnitude ($r$-band)', fontsize=fs)
ax2.set_ylabel(r'$M_{\rm peak}$'+" ($r$-band)", fontsize=fs+2)
if isrise == True:
ax2.set_xlabel(r"$t_{\rm rise}$"+" (rest-frame days)", fontsize=fs+1)
else:
ax2.set_xlabel(r"$t_{\rm decay}$"+" (rest-frame days)", fontsize=fs+1)
xnum = 5.8
ynum = 10.5
fig = plt.figure(figsize=(xnum, ynum))
ax = plt.subplot(211)
ax2 = plt.subplot(212)
add_timescale_circle(r0, ax, ax2)
# iPTF14gqr 1st peak rise
ax.plot(r1['tau_rise'], y1[0], 'o', ms=6, color= "k")
ax.arrow(r1['tau_rise'], y1[0], -0.8*0.5, 0, color = 'k', zorder = 6, head_width = 0.08, head_length = 0.4*0.5)
ax.arrow(r1['tau_rise'], y1[0], 0, -0.8*0.25, color = 'k', zorder = 6, head_width = 0.15, head_length = 0.4*0.25)
xoff = 1.2
yoff = +0.5
ax.text(0.1+xoff, -17.4+yoff, "14gqr")
ax.text(0.1+xoff, -17.1+yoff, "(1st peak)")
# iPTF14gqr, 2nd peak decay
Mpeak = r1["Mpeak"]
ax2.plot(r1['tau_decay'], Mpeak, 'o', ms=4, color= "k")
ax2.arrow(r1['tau_decay'], Mpeak, 0.8, 0, color = 'k', zorder = 6, head_width = 0.07, head_length = 0.4)
ax2.text(r1['tau_decay'], Mpeak-0.1, "14gqr (2nd peak)")
add_timescale_circle(r2, ax, ax2)
# iPTF16hgs 1st peak rise
ax.plot(r3["tau_rise"], y3[0], 'o', ms=6, color= "k")
ax.arrow(r3['tau_rise'], y3[0], -0.8*0.7, 0, color = 'k', zorder = 6, head_width = 0.08, head_length = 0.4*0.7)
ax.arrow(r3['tau_rise'], y3[0], 0, -0.8*0.25, color = 'k', zorder = 6, head_width = 0.15, head_length = 0.4*0.25)
ax.text(r3["tau_rise"]-4.8, y3[0]-0.1, "16hgs (1st peak)")
# iPTF16hgs 2nd peak decay
Mpeak = r3["Mpeak"]
ax2.plot(r3["tau_decay"], Mpeak, 'o', ms=6, color= "k")
ax2.text(r3["tau_decay"]-5.2, Mpeak, "16hgs (2nd peak)")
"""
# SN2019ehk, 1st peak rise
ax.plot(2.9, -16.36, 'o', ms=6, color= "k", zorder = 6)
#ax.arrow(2.9, -16.36, 0, -0.8*0.25, color = 'k', zorder = 6, head_width = 0.15, head_length = 0.4*0.25)
ax.plot([2.9, 2.9], [-16.36, -17.7], "k-", zorder = 6)
ax.plot([2.9-0.1, 2.9+0.1], [-17.7, -17.7], "k-", zorder = 6)
ax.text(3, -16.8, "19ehk (1st-peak)")
"""
add_timescale_circle(r4, ax, ax2)
add_timescale_circle(r5, ax, ax2)
add_timescale_circle(r6, ax, ax2)
add_timescale_circle(r7, ax, ax2)
add_timescale_circle(r8, ax, ax2)
add_timescale_circle(r9, ax, ax2)
add_timescale_circle(r10, ax, ax2)
add_timescale_circle(r11, ax, ax2)
# 15K (Rest et al. 2018)
ax.plot(1.35, -18.75, 'o', ms=6, color= "k")
ax.text(1.45, -18.7, "15K", color= "k")
# AT2018cow (Perley et al. 2019)
ax.plot(1.5, -19.9, 'o', ms=6, color= "k")
ax.text(1.65, -19.7, "18cow", color= "k")
# Koala
ax.plot(1.8, -20.1, 'o', ms=6, color= "k")
ax.text(1.95, -20.08, "Koala", color= "k")
adjust_comparefig(ax, isrise = True)
adjust_comparefig(ax2, isrise = False)
plt.tight_layout()
plt.savefig("../paper/figures/compare_mag.pdf")
#plt.close()
```
## Distributions and the Central Limit Theorem
Today we are going to introduce:
- Statistical distributions
- The Central Limit Theorem
```
import pandas as pd
import numpy as np
import altair as alt
import helpers.plotting as pt
from helpers.svg_wrapper import SVGImg
from helpers.distributions import *
pt.enable_slide_theme()
pt.import_lato_font_in_notebook()
%%html
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
"HTML-CSS" : {
mtextFontInherit: true,
}
});
</script>
```
### How to describe the possible outcomes of an experiment? Distributions!
Think about **throwing a** six-sided **[die](https://en.wikipedia.org/wiki/Dice)**:
- The possible outcomes are the integers (whole numbers) 1, 2, 3, 4, 5, 6.
- Each outcome should have the same **probability: 1/6**.
- The outcomes are described by the **discrete uniform distribution** on the integers 1-6.
```
SVGImg('images/die.svg', width='15%', output_dir='slides')
```
## There are discrete and continuous probability distributions!
For simplicity, we will keep the uniform distribution as an example for the moment.
### In the discrete case,
- we can specify the probability of each possible outcome.
- Mathematically this is done via a "[probability mass function](https://en.wikipedia.org/wiki/Probability_mass_function)".
- All the probabilities add up to one (one out of all possible outcomes will happen).
```
plot_uniform_probability_mass_function(20)
```
Above: the discrete uniform distribution with minimum 2, maximum set by slider.
### A continuous example: a spinning disc with markings that can rotate freely.
- If you spin it, it will stop at a random angle.
- If we say it stopped at e.g. 18°, we mean it was closest to that marking.
- There are infinitely many angles between 17.5° and 18.5°.
- The probability of hitting any specific one is zero.
- The probability of ending up somewhere in this 1° interval, however, is 1/360!
```
SVGImg('images/spinner.svg', width='50%', output_dir='slides')
```
### For a continuous distribution,
- There are infinitely many possible outcomes.
- The [probability density](https://en.wikipedia.org/wiki/Probability_density_function) describes which outcomes may occur.
- The **probability** for an outcome to be in an interval <br>is equal to the **area** under the probability density.
### Example: the continuous uniform distribution
```
plot_continuous_uniform_density()
```
Left: the continuous uniform distribution (line) between 0 and a variable maximum.<br>
Right: the probability that an outcome falls between selection min and selection max.
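A quick numerical check of the probability-as-area idea, using the spinner example above (a minimal plain-numpy sketch, independent of the interactive plot): the exact area under the flat density over a 1° interval should match the fraction of simulated spins that land there.
```
import numpy as np

rng = np.random.default_rng(0)

low, high = 0.0, 360.0    # the spinner: uniform between 0° and 360°
a, b = 17.5, 18.5         # the 1° interval around the 18° marking

# Exact probability = area under the flat density of height 1/(high - low)
exact = (b - a) / (high - low)

# Monte Carlo estimate: fraction of random spins landing in [a, b]
spins = rng.uniform(low, high, size=1_000_000)
estimate = np.mean((spins >= a) & (spins <= b))

print(f"exact: {exact:.5f}, simulated: {estimate:.5f}")  # both ≈ 1/360 ≈ 0.00278
```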
## There are many different probability distributions!
Let's look at a very common one: the [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution).
## The normal distribution...
- is also called the bell curve or Gaussian distribution.
- It is specified by its mean and its variance or standard deviation.
```
plot_normal_distribution()
```
### When we sample from a normal distribution...
- the standard deviation tells us what fraction of the observations we expect to fall within a given distance of the mean.
- This is called the [68–95–99.7 rule](https://en.wikipedia.org/wiki/68–95–99.7_rule) (a quick numerical check follows the figure below).
```
SVGImg('images/68-95-99.svg', width='50%', output_dir='slides')
```
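Here is that numerical check (a minimal plain-numpy sketch, independent of the helper plots used in these slides):
```
import numpy as np

rng = np.random.default_rng(42)
samples = rng.normal(loc=0.0, scale=1.0, size=1_000_000)

for k in (1, 2, 3):
    frac = np.mean(np.abs(samples) <= k)   # fraction within k standard deviations
    print(f"within {k} sd: {frac:.4f}")    # ≈ 0.6827, 0.9545, 0.9973
```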
### The sums of independent random numbers are often approximately normally distributed!
- This is why the normal distribution is so common.
- Random fluctuations and errors in particular are often caused<br> by many independent contributions.
- [Remember](2_practical_basics.slides.html) that the [standard error](https://en.wikipedia.org/wiki/Standard_error) is modelled as a normal distribution!
### Example: the sum of the points from several dice
```
plot_sum_of_n_dice(7)
```
### The Central Limit Theorem
Take a random sample of
- N independent observations
- with a finite variance.
In the limit of large N, the distribution of
$$\frac{\text{Sample Mean} - \text{Population Mean}}{\sqrt{\text{Population Variance } / \text{ N}}}$$
converges towards a standard normal distribution (i.e. mean 0 and variance 1).
See also: [Mathworld](https://mathworld.wolfram.com/CentralLimitTheorem.html), [Wikipedia](https://en.wikipedia.org/wiki/Central_limit_theorem)
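This convergence is easy to see in a short simulation. Below is a minimal sketch (plain numpy, independent of the helper functions used in these slides) that standardizes the means of many samples drawn from a uniform population, whose mean is 0.5 and variance is 1/12, and checks that the result behaves like a standard normal:
```
import numpy as np

rng = np.random.default_rng(1)
pop_mean, pop_var = 0.5, 1.0 / 12.0   # uniform(0, 1) population
n_samples, N = 20_000, 500            # 20,000 samples of N = 500 observations each

# Standardize each sample mean as in the theorem above
sample_means = rng.uniform(0, 1, size=(n_samples, N)).mean(axis=1)
z = (sample_means - pop_mean) / np.sqrt(pop_var / N)

# If the CLT holds, z should be close to standard normal:
# mean ~0, variance ~1, and ~95% of the values within ±1.96
print(z.mean(), z.var(), np.mean(np.abs(z) <= 1.96))
```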
## The Central Limit Theorem has many applications, but...
- Some distributions need more samples for a good normal approximation.
- Correlations slow down the convergence.
- Not all distributions have a finite variance.
- Not all distributions have a finite mean.
#### Therefore...
- make sure to always look at your raw numbers.
- Consider a [normality test](https://en.wikipedia.org/wiki/Normality_test) or ask a statistician.
- We will have another session on what to do if your data is not normal.
## Examples where sums don't converge towards a normal distribution
- The [Cauchy distribution](https://en.wikipedia.org/wiki/Cauchy_distribution) and [Lévy distribution](https://en.wikipedia.org/wiki/Lévy_distribution) are stable under aggregation, i.e. sums of them do not converge towards a normal distribution (see the sketch below).
- Price changes in financial markets have a (just barely) finite variance, but it [changes](https://en.wikipedia.org/wiki/Heteroscedasticity) over time. E.g. daily price changes are sums of many small ones, yet even their logarithms exhibit far more [extreme values](https://www.statisticshowto.com/heavy-tailed-distribution/) than a normal distribution would predict.
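A minimal sketch of the first point (plain numpy): the running mean of normal samples settles down as more data arrives, while the running mean of Cauchy samples keeps jumping around no matter how many observations we average.
```
import numpy as np

rng = np.random.default_rng(7)
n = 1_000_000
normal_samples = rng.normal(size=n)
cauchy_samples = rng.standard_cauchy(size=n)

for k in (10**3, 10**4, 10**5, 10**6):
    print(f"n={k:>7}: normal mean {normal_samples[:k].mean():+.4f}, "
          f"cauchy mean {cauchy_samples[:k].mean():+.4f}")
# The normal running mean approaches 0; the Cauchy running mean does not converge.
```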
#### See also:
- [This game](http://seesaw.neuro.uni-bremen.de) generates extreme events.
- An explanation of [infinite mean and variance](https://stats.stackexchange.com/a/91515)
## Summary
- There are discrete 🎲 and continuous ⚪️ random distributions
- For continuous distributions, the area under the probability density gives us a probability 🗻
- Sums of many independent random variables with finite variance converge towards a normal distribution 🔔
- Correlations or extreme events can lead to exceptions 🤪
To wrap up, check out the [Galton board](https://en.wikipedia.org/wiki/Bean_machine), e.g. in this [video](https://www.youtube.com/watch?v=Vo9Esp1yaC8).
## In the next session,
we will take a deeper dive into…
- comparing two different statistical distributions,
- statistical significance and p-values,
- statistical power and effect sizes.
# Hand tuning hyperparameters
**Learning Objectives:**
* Use the `LinearRegressor` class in TensorFlow to predict median housing price, at the granularity of city blocks, based on one input feature
* Evaluate the accuracy of a model's predictions using Root Mean Squared Error (RMSE)
* Improve the accuracy of a model by hand-tuning its hyperparameters
The data is based on 1990 census data from California. This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. Using only one input feature -- the number of rooms -- predict house value.
## Set Up
In this first cell, we'll load the necessary libraries.
```
import math
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
print(tf.__version__)
tf.logging.set_verbosity(tf.logging.INFO)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
```
Next, we'll load our data set.
```
df = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_train.csv", sep=",")
```
## Examine the data
It's a good idea to get to know your data a little bit before you work with it.
We'll print out a quick summary of a few useful statistics on each column.
This will include things like mean, standard deviation, max, min, and various quantiles.
```
df.head()
df.describe()
```
In this exercise, we'll be trying to predict median_house_value. It will be our label (sometimes also called a target). Can we use total_rooms as our input feature? What's going on with the values for that feature?
This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. Let's create a different, more appropriate feature. Because we are predicting the price of a single house, we should try to make all our features correspond to a single house as well.
```
df['num_rooms'] = df['total_rooms'] / df['households']
df.describe()
# Split into train and eval
np.random.seed(seed=1) #makes split reproducible
msk = np.random.rand(len(df)) < 0.8
traindf = df[msk]
evaldf = df[~msk]
```
## Build the first model
In this exercise, we'll be trying to predict `median_house_value`. It will be our label (sometimes also called a target). We'll use `num_rooms` as our input feature.
To train our model, we'll use the [LinearRegressor](https://www.tensorflow.org/api_docs/python/tf/estimator/LinearRegressor) estimator. The Estimator takes care of a lot of the plumbing, and exposes a convenient way to interact with data, training, and evaluation.
```
OUTDIR = './housing_trained'
def train_and_evaluate(output_dir, num_train_steps):
estimator = tf.estimator.LinearRegressor(
model_dir = output_dir,
feature_columns = [tf.feature_column.numeric_column('num_rooms')])
#Add rmse evaluation metric
def rmse(labels, predictions):
pred_values = tf.cast(predictions['predictions'],tf.float64)
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
estimator = tf.contrib.estimator.add_metrics(estimator,rmse)
train_spec=tf.estimator.TrainSpec(
input_fn = tf.estimator.inputs.pandas_input_fn(x = traindf[["num_rooms"]],
y = traindf["median_house_value"], # note the scaling
num_epochs = None,
shuffle = True),
max_steps = num_train_steps)
eval_spec=tf.estimator.EvalSpec(
input_fn = tf.estimator.inputs.pandas_input_fn(x = evaldf[["num_rooms"]],
y = evaldf["median_house_value"], # note the scaling
num_epochs = 1,
shuffle = False),
steps = None,
start_delay_secs = 1, # start evaluating after N seconds
throttle_secs = 10, # evaluate every N seconds
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Run training
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
train_and_evaluate(OUTDIR, num_train_steps = 100)
```
## 1. Scale the output
Let's scale the target values so that the default parameters are more appropriate.
```
SCALE = 100000
OUTDIR = './housing_trained'
def train_and_evaluate(output_dir, num_train_steps):
estimator = tf.estimator.LinearRegressor(
model_dir = output_dir,
feature_columns = [tf.feature_column.numeric_column('num_rooms')])
#Add rmse evaluation metric
def rmse(labels, predictions):
pred_values = tf.cast(predictions['predictions'],tf.float64)
return {'rmse': tf.metrics.root_mean_squared_error(labels*SCALE, pred_values*SCALE)}
estimator = tf.contrib.estimator.add_metrics(estimator,rmse)
train_spec=tf.estimator.TrainSpec(
input_fn = tf.estimator.inputs.pandas_input_fn(x = traindf[["num_rooms"]],
y = traindf["median_house_value"] / SCALE, # note the scaling
num_epochs = None,
shuffle = True),
max_steps = num_train_steps)
eval_spec=tf.estimator.EvalSpec(
input_fn = tf.estimator.inputs.pandas_input_fn(x = evaldf[["num_rooms"]],
y = evaldf["median_house_value"] / SCALE, # note the scaling
num_epochs = 1,
shuffle = False),
steps = None,
start_delay_secs = 1, # start evaluating after N seconds
throttle_secs = 10, # evaluate every N seconds
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Run training
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
train_and_evaluate(OUTDIR, num_train_steps = 100)
```
## 2. Change learning rate and batch size
Can you come up with better parameters?
```
SCALE = 100000
OUTDIR = './housing_trained'
def train_and_evaluate(output_dir, num_train_steps):
myopt = tf.train.FtrlOptimizer(learning_rate = 0.2) # note the learning rate
estimator = tf.estimator.LinearRegressor(
model_dir = output_dir,
feature_columns = [tf.feature_column.numeric_column('num_rooms')],
optimizer = myopt)
#Add rmse evaluation metric
def rmse(labels, predictions):
pred_values = tf.cast(predictions['predictions'],tf.float64)
return {'rmse': tf.metrics.root_mean_squared_error(labels*SCALE, pred_values*SCALE)}
estimator = tf.contrib.estimator.add_metrics(estimator,rmse)
train_spec=tf.estimator.TrainSpec(
input_fn = tf.estimator.inputs.pandas_input_fn(x = traindf[["num_rooms"]],
y = traindf["median_house_value"] / SCALE, # note the scaling
num_epochs = None,
batch_size = 512, # note the batch size
shuffle = True),
max_steps = num_train_steps)
eval_spec=tf.estimator.EvalSpec(
input_fn = tf.estimator.inputs.pandas_input_fn(x = evaldf[["num_rooms"]],
y = evaldf["median_house_value"] / SCALE, # note the scaling
num_epochs = 1,
shuffle = False),
steps = None,
start_delay_secs = 1, # start evaluating after N seconds
throttle_secs = 10, # evaluate every N seconds
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Run training
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
train_and_evaluate(OUTDIR, num_train_steps = 100)
```
### Is there a standard method for tuning the model?
This is a commonly asked question. The short answer is that the effects of different hyperparameters are data dependent. So there are no hard and fast rules; you'll need to run tests on your data.
Here are a few rules of thumb that may help guide you:
* Training error should steadily decrease, steeply at first, and should eventually plateau as training converges.
* If the training has not converged, try running it for longer.
* If the training error decreases too slowly, increasing the learning rate may help it decrease faster.
* But sometimes the exact opposite may happen if the learning rate is too high.
* If the training error varies wildly, try decreasing the learning rate.
* Lower learning rate plus larger number of steps or larger batch size is often a good combination.
* Very small batch sizes can also cause instability. First try larger values like 100 or 1000, and decrease until you see degradation.
Again, never go strictly by these rules of thumb, because the effects are data dependent. Always experiment and verify.
## 3. Try adding more features
See if you can do any better by adding more features.
Don't take more than 5 minutes on this portion.
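One possible starting point is sketched below. It reuses the same estimator setup as above, but passes several numeric columns instead of one. The extra column names (`median_income`, `housing_median_age`) are assumptions about this CSV; check `df.describe()` and adjust as needed.
```
FEATURES = ['num_rooms', 'median_income', 'housing_median_age']  # assumed columns; verify with df.describe()

def train_and_evaluate_more_features(output_dir, num_train_steps):
  myopt = tf.train.FtrlOptimizer(learning_rate = 0.2)
  estimator = tf.estimator.LinearRegressor(
                       model_dir = output_dir,
                       feature_columns = [tf.feature_column.numeric_column(f) for f in FEATURES],
                       optimizer = myopt)

  def rmse(labels, predictions):
    pred_values = tf.cast(predictions['predictions'], tf.float64)
    return {'rmse': tf.metrics.root_mean_squared_error(labels*SCALE, pred_values*SCALE)}
  estimator = tf.contrib.estimator.add_metrics(estimator, rmse)

  train_spec = tf.estimator.TrainSpec(
                       input_fn = tf.estimator.inputs.pandas_input_fn(x = traindf[FEATURES],
                                              y = traindf["median_house_value"] / SCALE,
                                              num_epochs = None,
                                              batch_size = 512,
                                              shuffle = True),
                       max_steps = num_train_steps)
  eval_spec = tf.estimator.EvalSpec(
                       input_fn = tf.estimator.inputs.pandas_input_fn(x = evaldf[FEATURES],
                                              y = evaldf["median_house_value"] / SCALE,
                                              num_epochs = 1,
                                              shuffle = False),
                       steps = None,
                       start_delay_secs = 1,
                       throttle_secs = 10)
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)

shutil.rmtree(OUTDIR, ignore_errors = True)  # start fresh each time
train_and_evaluate_more_features(OUTDIR, num_train_steps = 100)
```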
# Tutorial
In this tutorial, we give a brief introduction to the quantization and pruning techniques upon which QSPARSE is built. Using our library, we guide you through building an image classification neural network whose weights and activations are both fully quantized and pruned to a given sparsity level.
> If you are already familiar with quantization and pruning methods and want to learn the programming syntax, please fast forward to [Building Network with QSPARSE](#building-network-with-qsparse).
## Preliminaries
Quantization and pruning are core techniques used to reduce the inference costs of deep neural networks and have been studied extensively. Approaches to quantization are often divided into two categories:
1. Post-training quantization
2. Quantization aware training
The former applies quantization after a network has been trained, while the latter quantizes the network during training, thereby reducing the quantization error throughout the training process and usually yielding superior performance.
Pruning techniques are often divided into unstructured and structured approaches, which define whether and how a pre-defined topology is imposed, e.g. channel-wise pruning.
Here, we focus on applying quantization and unstructured pruning during training.
<figure style="text-align:center;font-style:italic">
<img src="../docs/assets/network_diagram-p1.svg" />
<figcaption>Conceptual diagram of the computational graph of a network whose weights and activations are quantized and pruned using QSPARSE.</figcaption>
</figure>
In QSPARSE, we implement the quantization and pruning as independent operators, which can be applied on both weights and activations, as demonstrated in the figure above.
### Uniform Quantization
We denote the uniform quantization operation as $Q_u(\mathbf{x}, d)$, where $\mathbf{x}$ denotes the input to the operator (i.e. weights or activations), $N$ denotes the total number of bits used to represent weights and activations, and $d$ denotes the number of bits used to represent the fractional part (i.e. the position of the decimal point from the right; we will refer to $d$ as the decimal bits).
$$
Q_u(\mathbf{x}, d) = \text{clip}(\lfloor\mathbf{x} \times 2^{d}\rfloor, -2^{N-1}, 2^{N-1}-1) / 2^d
$$
Straight-through estimator (STE) is applied to calculate gradients in the backward computation.
$$
\frac{\partial Loss}{\partial \mathbf{x}} = \text{clip}(\frac{\partial Loss}{\partial Q_u(\mathbf{x}, d)}, -2^{N-d-1}, 2^{N-d-1} - 2^{-d})
$$
However, STE is known to be sensitive to weight initialization; therefore, we design the quantization operator $\text{Quantize}$ as follows. Starting from the original full-precision network, we delay the quantization of the network to later training stages and calculate the optimal decimal bits $d^*$ by minimizing the quantization error after a given number of update steps $t_q$.
$$
\text{Quantize}(\mathbf{x}_t) = \begin{cases}
\mathbf{x}_t & t < t_q \\
Q_u(\mathbf{x}_t, d^*) & t \ge t_q \\
\end{cases}
$$
$$
d^* = \arg \min_{d} \Vert Q_u(\mathbf{x}_{t_q}, d) - \mathbf{x}_{t_q} \Vert^2
$$
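To make the notation concrete, here is a small numpy sketch of $Q_u(\mathbf{x}, d)$ and the search for $d^*$. This is only an illustration of the formulas above, not the code QSPARSE uses internally.
```
import numpy as np

def quantize_uniform(x, d, bits=8):
    """Q_u(x, d): keep `bits` total bits, `d` of them for the fractional part."""
    scaled = np.floor(x * 2.0**d)
    clipped = np.clip(scaled, -2**(bits - 1), 2**(bits - 1) - 1)
    return clipped / 2.0**d

def best_decimal_bits(x, bits=8):
    """d* = argmin_d ||Q_u(x, d) - x||^2, searched over 0 <= d < bits."""
    errors = [np.sum((quantize_uniform(x, d, bits) - x) ** 2) for d in range(bits)]
    return int(np.argmin(errors))

x = np.random.randn(1000) * 0.1
d_star = best_decimal_bits(x)
print(d_star, np.abs(quantize_uniform(x, d_star) - x).max())
```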
### Magnitude-based Unstructured Pruning
We denote the unstructured pruning operator $\textbf{Prune}(\mathbf{x}, s)$ as element-wise multiplication between $\mathbf{x}$ and $\mathbf{M}_{\mathbf{x},s}$, where $\mathbf{x}$ denotes the input to the operator (i.e., weights or activations), $s$ denotes the target sparsity as measured by the percentage of zero-valued elements, and $\mathbf{M}_{\mathbf{x},s}$ denotes a binary mask.
$$
P(\mathbf{x}, s) = \mathbf{x} \circ \mathbf{M}_{\mathbf{x},s}
$$
Given that $(i,j)$ are the row and column indices, respectively, the binary mask $\mathbf{M}_{\mathbf{x},s}$ is calculated as below, where $\text{quantile}(\mathbf{x}, a)$ is the $a$-th quantile of $\mathbf{x}$.
$$
\mathbf{M}_{\mathbf{x},s}^{(i,j)} = \begin{cases}
1 & |\mathbf{x}^{(i, j)}| \ge \text{quantile}(|\mathbf{x}|, s) \\
0 & \text{otherwise}
\end{cases}
$$
As proposed by [Zhu et al.](https://arxiv.org/pdf/1710.01878.pdf), the sparsity level $s$ is controlled and updated according to a sparsification schedule at time steps $t_p + i \Delta t_p$ with $i \in \{1,2,\dots,n\}$, where $t_p$, $\Delta t_p$, and $n$ are hyperparameters that represent the starting pruning step, the pruning frequency, and the total number of pruning iterations, respectively.
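Similarly, the magnitude-based mask amounts to a few lines of numpy. Again, this is an illustrative sketch of the formulas, not QSPARSE's internal implementation.
```
import numpy as np

def magnitude_mask(x, sparsity):
    """M_{x,s}: 1 where |x| >= the s-th quantile of |x|, else 0."""
    threshold = np.quantile(np.abs(x), sparsity)
    return (np.abs(x) >= threshold).astype(x.dtype)

def prune_by_magnitude(x, sparsity):
    """P(x, s) = x * M_{x,s}."""
    return x * magnitude_mask(x, sparsity)

x = np.random.randn(4, 4)
pruned = prune_by_magnitude(x, 0.5)
print((pruned == 0).mean())  # ≈ 0.5 of the entries are zeroed
```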
## Building Network with QSPARSE
With the above methods in mind, we will now use QSPARSE to build a quantized and sparse network on top of the full-precision network below, borrowed from the official pytorch [MNIST example](https://github.com/pytorch/examples/blob/master/mnist/main.py).
```
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.bn2 = nn.BatchNorm2d(64)
self.fc1 = nn.Linear(9216, 128)
self.bn3 = nn.BatchNorm1d(128)
self.fc2 = nn.Linear(128, 10)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = F.relu(self.bn3(self.fc1(x)))
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
Net()
```
### Weight Quantization and Pruning
<figure style="text-align:center;font-style:italic">
<img src="../docs/assets/network_diagram-p2.svg" />
<figcaption>The part of diagram in red corresponds to weight quantization and pruning.</figcaption>
</figure>
We can easily create a weight quantized and pruned layer with QSPARSE. Take the convolution as an example:
```
from qsparse import prune, quantize, set_qsparse_options
set_qsparse_options(log_on_created=False)
conv = quantize(prune(nn.Conv2d(1, 32, 3),
sparsity=0.5, start=200,
interval=10, repetition=4),
bits=8, timeout=100)
conv
```
We can see that `prune` and `quantize` layers are injected. The resulting layer behaves identically to `nn.Conv2d`, except that `conv.weight` returns a quantized and pruned version of the vanilla weight. The hyperparameters map to QSPARSE arguments as shown in the table below.
| Param | QSPARSE Argument |
|--------------|-----------------------|
| $N$ | `bits` |
| $t_q$ | `timeout` |
| $s$ | `sparsity` |
| $t_p$ | `start` |
| $n$ | `repetition` |
| $\Delta t_p$ | `interval` |
Both the `prune` and `quantize` layers maintain an internal counter that records the number of training steps that have passed. The counter values can be accessed through the `_n_updates` attribute. Based on the arguments specified above, `conv.weight` will be quantized from step 100 and pruned with 50% sparsity from step 240, which can be verified by:
```
data = torch.rand((1, 1, 32, 32))
for _ in range(241):
conv(data)
conv.quantize._n_updates
(conv.weight * (2**conv.quantize.decimal)
- (conv.weight * (2**conv.quantize.decimal)).int()).sum().item()
print(len(conv.prune.mask.nonzero()) / np.prod(conv.prune.mask.shape))
print(np.all((conv.weight.detach().numpy() == 0)
== (conv.prune.mask.detach().numpy() == 0)))
```
The `mask` and `decimal` attributes denote the binary mask for pruning and the number of fractional bits for quantization, which we will revisit in [Inspecting Parameters of a Pruned/Quantized Model](../advanced_usage/#inspecting-parameters-of-a-prunedquantized-model). The `prune` and `quantize` functions are compatible with any pytorch module as long as its parameters can be accessed through its `weight` attribute. Take a fully-connected layer as another example:
```
quantize(prune(nn.Linear(128, 10), 0.5), 8)
```
### Activation Quantization and Pruning
<figure style="text-align:center;font-style:italic">
<img src="../docs/assets/network_diagram-p3.svg" />
<figcaption>The part of diagram in red corresponds to activation quantization and pruning.</figcaption>
</figure>
To prune and quantize the output of a convolution, we can insert `quantize` and `prune` directly into the computation graph:
```
nn.Sequential(
conv,
prune(sparsity=0.5, start=200, interval=10, repetition=4),
quantize(bits=8, timeout=100),
nn.ReLU()
)
```
Similarly, the output of `conv` will be quantized from step 100 and pruned with 50% sparsity from step 240.
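As a quick sanity check (a sketch mirroring the weight check above; it reuses the `conv` and `data` defined earlier, and the exact fraction may vary), we can run such a block for 241 steps and confirm that a large share of the output activations end up exactly zero once the pruning schedule completes:
```
feat = nn.Sequential(
    conv,
    prune(sparsity=0.5, start=200, interval=10, repetition=4),
    quantize(bits=8, timeout=100),
    nn.ReLU()
)
for _ in range(241):
    out = feat(data)
# After step 240 the activation mask reaches 50% sparsity, and ReLU zeroes
# additional negative values, so most entries of `out` should be exactly zero.
print((out == 0).float().mean().item())
```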
### Building a Network with Both Weight and Activation Quantized and Pruned
Using the techniques introduced above, we can reimplement `Net` so that it gains joint quantization and pruning training capabilities, with full transparency and minimal effort:
```
class NetPQ(nn.Module):
def __init__(self, epoch_size=100):
super(NetPQ, self).__init__()
# input quantization, quantize at epoch 10
self.qin = quantize(bits=8, timeout=epoch_size * 10)
# For the sake of simplicity, we ignore the `timeout,start,repetition,
# interval` parameters in the following.
self.conv1 = quantize(nn.Conv2d(1, 32, 3, 1), 8)
self.bn1 = nn.BatchNorm2d(32)
self.p1, self.q1 = prune(sparsity=0.5), quantize(bits=8)
self.conv2 = quantize(prune(nn.Conv2d(32, 64, 3, 1), 0.5), 8)
self.bn2 = nn.BatchNorm2d(64)
self.p2, self.q2 = prune(sparsity=0.5), quantize(bits=8)
self.fc1 = quantize(prune(nn.Linear(9216, 128), 0.5), 8)
self.bn3 = nn.BatchNorm1d(128)
self.p3, self.q3 = prune(sparsity=0.5), quantize(bits=8)
self.fc2 = quantize(nn.Linear(128, 10), 8)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
def forward(self, x):
x = self.qin(x)
x = F.relu(self.q1(self.p1(self.bn1(self.conv1(x)))))
x = F.relu(self.q2(self.p2(self.bn2(self.conv2(x)))))
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = F.relu(self.q3(self.p3(self.bn3(self.fc1(x)))))
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
NetPQ()
```
The network created by `NetPQ` is a pytorch module that only consists of its original components and `PruneLayer / QuantizeLayer` introduced by `prune` and `quantize`.
It does not require you to modify the training loop or even the weight initialization code, and it also supports [resuming training from checkpoints](../advanced_usage/#resuming-from-checkpoint).
The full example of training an MNIST classifier with different pruning and quantization configurations can be found at [examples/mnist.py](https://github.com/mlzxy/qsparse/blob/main/examples/). More examples can be found [here](https://github.com/mlzxy/qsparse-examples).
## Summary
In this tutorial, we introduce some basics about joint quantization and pruning training, and the implementation of this training paradigm with QSPARSE. Next, we introduce more [advanced usage](../advanced_usage/).
# Data Wrangling
## Data Flow

Mining data directly from GitHub, `Viz` is powered by the [GitHub API](https://developer.github.com/v3/) and leverages the following:
* [`github3.py`](https://github.com/sigmavirus24/github3.py) to access the GitHub API through Python.
* [`pandas`](https://github.com/pydata/pandas) in the following [IPython Notebook](https://github.com/donnemartin/viz/blob/master/githubstats/data_wrangling.ipynb) for data wrangling.
* [Google Maps API](https://developers.google.com/maps/?hl=en) through [`geocoder`](https://github.com/DenisCarriere/geocoder) for location data.
* [Tableau Public](https://public.tableau.com/s/) for visualizations.*
In the future, [Google BigQuery](https://cloud.google.com/bigquery/) along with [GitHub Archive](https://www.githubarchive.org/) could also supplement the GitHub API.
## Imports
```
import re
import pandas as pd
```
## Prepare Repo Data
Load the repos data and drop duplicates:
```
repos = pd.read_csv("data/2017/repos-dump.csv", quotechar='"', skipinitialspace=True)
print('Shape before dropping duplicates', repos.shape)
repos = repos.drop_duplicates(subset='full_name', keep='last')
print('Shape after dropping duplicates', repos.shape)
repos.head()
```
Separate out the `user` and `repo` from `full_name` into new columns:
```
def extract_user(line):
return line.split('/')[0]
def extract_repo(line):
return line.split('/')[1]
repos['user'] = repos['full_name'].str[:].apply(extract_user)
repos['repo'] = repos['full_name'].str[:].apply(extract_repo)
print(repos.shape)
repos.head()
```
## Prepare User Data
Load the users data and drop duplicates:
```
users = pd.read_csv("data/2017/user-geocodes-dump.csv", quotechar='"', skipinitialspace=True)
print('Shape before dropping duplicates', users.shape)
users = users.drop_duplicates(subset='id', keep='last')
print('Shape after dropping duplicates', users.shape)
users.head()
```
Rename column `id` to `user`:
```
users.rename(columns={'id': 'user'}, inplace=True)
users.head()
```
## Merge Repo and User Data
Left join repos and users:
```
repos_users = pd.merge(repos, users, on='user', how='left')
print('Shape repos:', repos.shape)
print('Shape users:', users.shape)
print('Shape repos_users:', repos_users.shape)
repos_users.head()
```
## Tidy Up Repo and User Data
Re-order the columns:
```
repos_users = repos_users.reindex_axis(['full_name',
'repo',
'description',
'stars',
'forks',
'language',
'user',
'name',
'type',
'location',
'lat',
'long',
'city',
'country'], axis=1)
print(repos_users.shape)
repos_users.head()
```
## Add Overall Ranks
Rank each element based on number of stars:
```
repos_users['rank'] = repos_users['stars'].rank(ascending=False)
print(repos_users.shape)
repos_users.head()
```
## Verify Results: Users
Equivalent [GitHub search query](https://github.com/search?utf8=%E2%9C%93&q=created%3A2017-01-01..2017-12-31+stars%3A%3E%3D100+user%3Adonnemartin&type=Repositories&ref=searchresults): `created:2017-01-01..2017-12-31 stars:>=100 user:donnemartin`
*Note: The data might be slightly off, as the search query will take into account data up to when the query was executed. Data in this notebook was mined on January 1, 2018 to 'freeze' the results for the year 2017. The longer you run the search after January 1, 2018, the larger the discrepancy.*
```
repos_users[repos_users['user'] == 'donnemartin']
```
## Verify Results: Python Repos
Equivalent [GitHub search query](https://github.com/search?utf8=%E2%9C%93&q=created%3A2017-01-01..2017-12-31+stars%3A%3E%3D100+language%3Apython&type=Repositories&ref=searchresults): `created:2017-01-01..2017-12-31 stars:>=100 language:python`
*Note: The data might be slightly off, as the search query will take into account data up to when the query was executed. Data in this notebook was mined on January 1, 2018 to 'freeze' the results for the year 2017. The longer you run the search after January 1, 2018, the larger the discrepancy.*
```
print(repos_users[repos_users['language'] == 'Python'].shape)
repos_users[repos_users['language'] == 'Python'].head()
```
## Verify Results: Overall Repos
Equivalent [GitHub search query](https://github.com/search?utf8=%E2%9C%93&q=created%3A2017-01-01..2017-12-31+stars%3A%3E%3D100&type=Repositories&ref=searchresults): `created:2017-01-01..2017-12-31 stars:>=100`
*Note: The data might be slightly off, as the search query will take into account data up to when the query was executed. Data in this notebook was mined on January 1, 2018 to 'freeze' the results for the year 2017. The longer you run the search after January 1, 2018, the larger the discrepancy.*
```
print(repos_users.shape)
repos_users.head()
```
## Output Results
Write out the results to csv to visualize in Tableau:
```
users.to_csv('data/2017/users.csv', index=False)
repos_users.to_csv('data/2017/repos-users-geocodes.csv', index=False)
repos_users.to_csv('data/2017/repos-users.csv', index=False)
repos_rank = repos_users.reindex_axis(['full_name', 'rank'], axis=1)
repos_rank.to_csv('data/2017/repos-ranks.csv', index=False)
```
```
import pandas as pd
# !git clone https://github.com/wshuyi/demo-chinese-text-classification-lstm-keras.git
from pathlib import Path
mypath = Path(".")
# Path("demo-chinese-text-classification-lstm-keras")
df = pd.read_csv(mypath/'dianping.csv')
df.head()
df.info()
# !pip install jieba
import jieba
df['text'] = df.comment.apply(lambda x: " ".join(jieba.cut(x)))
df.head()
df = df[['text', 'sentiment']]
df.head()
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
maxlen = 100
max_words = 10000
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(df.text)
sequences = tokenizer.texts_to_sequences(df.text)
type(sequences)
len(sequences)
len(sequences[0])
len(sequences[:1][0])
[len(sequence) for sequence in sequences[:5]]
data = pad_sequences(sequences, maxlen=maxlen, value = 0.0)
```
Long sentences are truncated to `maxlen`, and short sentences are padded with 0.
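For intuition, a minimal sketch on toy sequences (not the dianping data) showing both behaviours:
```
from keras.preprocessing.sequence import pad_sequences

toy = [[1, 2, 3, 4, 5, 6], [7, 8]]
# With maxlen=4 the long sequence keeps its last 4 tokens (default 'pre'
# truncation) and the short one is left-padded with the fill value 0.
print(pad_sequences(toy, maxlen=4, value=0.0))
# [[3 4 5 6]
#  [0 0 7 8]]
```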
```
print(data.shape)
print(maxlen)
word_index = tokenizer.word_index
type(word_index)
[str(key)+": "+str(value) for key, value in word_index.items()][:5]
labels = np.array(df.sentiment)
labels[:5]
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
training_samples = int(len(indices) * .8)
validation_samples = len(indices) - training_samples
training_samples
validation_samples
X_train = data[:training_samples]
y_train = labels[:training_samples]
X_valid = data[training_samples: training_samples + validation_samples]
y_valid = labels[training_samples: training_samples + validation_samples]
X_train[:5,:5]
# !pip install gensim
from gensim.models import KeyedVectors
?KeyedVectors.load_word2vec_format
zh_model = KeyedVectors.load_word2vec_format('refs/Tencent_AILab_ChineseEmbedding.txt', binary=False, limit=100000)
```
Loading the full embedding file is slow, so we limit the number of word vectors; if you have better hardware, it is worth trying more.
```
zh_model.vectors.shape
zh_model.vectors[0].shape
list(iter(zh_model.vocab))[:5]
embedding_dim = len(zh_model[next(iter(zh_model.vocab))])
embedding_dim
print('max: ', zh_model.vectors.max())
print('min: ', zh_model.vectors.min())
embedding_matrix = np.random.uniform(zh_model.vectors.min(), zh_model.vectors.max(), [max_words, embedding_dim])
```
For the random initialization, see https://stackoverflow.com/questions/11873741/sampling-random-floats-on-a-range-in-numpy
```
embedding_matrix = (embedding_matrix - 0.5) * 2
zh_model.get_vector("的").shape
zh_model.get_vector("李").shape
for word, i in word_index.items():
if i < max_words:
try:
embedding_vector = zh_model.get_vector(word)
embedding_matrix[i] = embedding_vector
except:
            pass # if no pretrained vector exists for this word, simply skip it and keep the default random vector.
```
This is also why we adjusted the random matrix earlier so that its distribution roughly matches that of the pretrained vectors.
```
embedding_matrix.shape
```
Reference: https://github.com/chen0040/keras-sentiment-analysis-web-api/blob/master/keras_sentiment_analysis/library/lstm.py
```
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense, LSTM, Dropout, Bidirectional
LSTM_units = 16
model = Sequential()
model.add(Embedding(max_words, embedding_dim))
model.add(Dropout(0.2))
model.add(Bidirectional(LSTM(units=LSTM_units, dropout=0.2, recurrent_dropout=0.2, input_shape=(max_words, embedding_dim))))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.layers[0].set_weights([embedding_matrix])
# model.layers[0].trainable = False # not run here: uncomment to freeze the layer and use the pretrained vectors as-is.
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(X_train, y_train,
epochs=10,
batch_size=32,
validation_data=(X_valid, y_valid))
# supervised learning
model.save("sentiment_model-Bidirectional-LSTM-with-w2v-Tencent_AILab-v.1.0.0.h5")
```
1. `zh.vec`: `1600/1600 [==============================] - 7s 4ms/step - loss: 0.4032 - acc: 0.8313 - val_loss: 0.4158 - val_acc: 0.8200`
1. `w2v-Tencent_AILab`: `1600/1600 [==============================] - 6s 3ms/step - loss: 0.2360 - acc: 0.9156 - val_loss: 0.3585 - val_acc: 0.8600`
This result is clearly much better than the one obtained with `zh.vec`.
```
import matplotlib.pyplot as plt
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.legend(['training', 'validation'], loc='upper left')
plt.title('Training and validation accuracy')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training', 'validation'], loc='upper left')
plt.title('Training and validation loss')
plt.show()
```
About 5 epochs is the turning point where overfitting sets in.
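Given that, one option (a sketch, not run in this notebook) is to let Keras stop training automatically with an `EarlyStopping` callback:
```
from keras.callbacks import EarlyStopping

# Stop once val_loss has not improved for 2 consecutive epochs and restore
# the best weights seen so far (restore_best_weights needs Keras >= 2.2.3).
early_stop = EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)

history = model.fit(X_train, y_train,
                    epochs=20,
                    batch_size=32,
                    validation_data=(X_valid, y_valid),
                    callbacks=[early_stop])
```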
```
from utils import *
from azureml.core import Workspace
# Configure experiment
ws = Workspace.from_config()
# Create or get training cluster
aml_cluster = get_aml_cluster(ws, cluster_name="cpu-cluster")
aml_cluster.wait_for_completion(show_output=True)
# Create a run configuration
run_conf = get_run_config(['numpy', 'pandas', 'scikit-learn', 'tensorflow'])
from azureml.core import Dataset
dataset = Dataset.get_by_name(ws, name='titanic')
data_in = dataset.as_named_input('titanic')
from azureml.core import Datastore
from azureml.pipeline.core import PipelineData
datastore = Datastore.get(ws, datastore_name="mldata")
data_train = PipelineData('train', datastore=datastore)
data_test = PipelineData('test', datastore=datastore)
from azureml.data import OutputFileDatasetConfig
data_out = OutputFileDatasetConfig(name="predictions", destination=(datastore, 'titanic/predictions'))
data_out = data_out.read_delimited_files().register_on_complete('titanic.pred')
from azureml.pipeline.steps import PythonScriptStep
step_1 = PythonScriptStep(name='Preprocessing',
script_name="preprocess_output.py",
source_directory="code",
arguments=[
"--input", data_in,
"--out-train", data_train,
"--out-test", data_test],
inputs=[data_in],
outputs=[data_train, data_test],
runconfig=run_conf,
compute_target=aml_cluster)
from azureml.pipeline.core.graph import PipelineParameter
lr_param = PipelineParameter(name="lr_arg", default_value=0.01)
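# lr_arg defaults to 0.01 but can be overridden per run, e.g. through the
# "ParameterAssignments" field of the REST trigger request further below.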
from azureml.pipeline.steps import PythonScriptStep
step_2 = PythonScriptStep(name='Training',
script_name="train_output.py",
source_directory="code",
arguments=[
"--in-train", data_train,
"--in-test", data_test,
"--output", data_out,
"--learning-rate", lr_param],
inputs=[data_train, data_test],
outputs=[data_out],
runconfig=run_conf,
compute_target=aml_cluster)
from azureml.pipeline.core import Pipeline
pipeline = Pipeline(ws, steps=[step_1, step_2])
pipeline.validate()
service = pipeline.publish(name="AzureML Published Pipeline", version="1.0")
print(service)
service_id = service.id
service_endpoint = service.endpoint
from azureml.pipeline.core import PipelineEndpoint
endpoint = PipelineEndpoint.publish(ws, pipeline=service, name="AzureML Published Pipeline Endpoint", description="Mastering Azure Machine Learning")
print(endpoint)
service_id = endpoint.id
service_endpoint = endpoint.endpoint
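# The PipelineEndpoint gives a stable URL: newer published pipeline versions
# can later be placed behind the same endpoint without changing callers.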
from azureml.core.authentication import AzureCliAuthentication
cli_auth = AzureCliAuthentication()
aad_token = cli_auth.get_authentication_header()
import requests
request = {
"ExperimentName": "azureml-pipeline-trigger",
"ParameterAssignments": {
"lr_arg": 0.05
}
}
response = requests.post(service_endpoint, headers=aad_token, json=request)
print(response.json())
# service.disable()
# endpoint.disable()
```
# Use Pytorch to recognize hand-written digits with `ibm-watson-machine-learning`
This notebook demonstrates the use of the PyTorch ML library with the Watson Machine Learning service. It contains steps and code to work with the [ibm-watson-machine-learning](https://pypi.python.org/pypi/ibm-watson-machine-learning) library available in the PyPI repository. It also introduces commands for getting the model and training data, persisting the model, deploying the model, and scoring it.
Some familiarity with Python is helpful. This notebook uses Python 3.
## Learning goals
The learning goals of this notebook are:
- Download an externally trained PyTorch model and its dataset.
- Persist the external model in the Watson Machine Learning repository.
- Deploy the model for online scoring using the client library.
- Score sample records using the client library.
## Contents
This notebook contains the following parts:
1. [Setup](#setup)
2. [Download externally created Pytorch model and data](#download)
3. [Persist externally created Pytorch ONNX model](#persistence)
4. [Deploy and score](#scoring)
5. [Clean up](#cleanup)
6. [Summary and next steps](#summary)
<a id="setup"></a>
## 1. Set up the environment
Before you use the sample code in this notebook, you must perform the following setup tasks:
- Contact your Cloud Pak for Data administrator and ask for your account credentials
### Connection to WML
Authenticate to the Watson Machine Learning service on IBM Cloud Pak for Data. You need to provide the platform `url`, your `username`, and your `password`.
```
username = 'PASTE YOUR USERNAME HERE'
password = 'PASTE YOUR PASSWORD HERE'
url = 'PASTE THE PLATFORM URL HERE'
wml_credentials = {
"username": username,
"password": password,
"url": url,
"instance_id": 'openshift',
"version": '3.5'
}
```
### Install and import the `ibm-watson-machine-learning` package
**Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener noreferrer">here</a>.
```
!pip install -U ibm-watson-machine-learning
from ibm_watson_machine_learning import APIClient
client = APIClient(wml_credentials)
```
### Working with spaces
First of all, you need to create a space that will be used for your work. If you do not have a space already created, you can use `{PLATFORM_URL}/ml-runtime/spaces?context=icp4data` to create one.
- Click New Deployment Space
- Create an empty space
- Go to space `Settings` tab
- Copy `space_id` and paste it below
**Tip**: You can also use the SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Space%20management.ipynb).
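For reference, a minimal sketch of creating a space with the SDK (the space name is just an example, and the exact metadata fields and helper names can vary between client releases):
```
space_metadata = {
    client.spaces.ConfigurationMetaNames.NAME: "pytorch-mnist-space"  # example name
}
space_details = client.spaces.store(meta_props=space_metadata)
space_id = client.spaces.get_id(space_details)
```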
**Action**: Assign space ID below
```
space_id = 'PASTE YOUR SPACE ID HERE'
```
You can use `list` method to print all existing spaces.
```
client.spaces.list(limit=10)
```
To be able to interact with all resources available in Watson Machine Learning, you need to set the **space** that you will be using.
```
client.set.default_space(space_id)
```
<a id="download"></a>
## 2. Download externally created Pytorch model and data
In this section, you will download an externally created PyTorch model and the data used to train it.
```
import os
import wget
data_dir = 'MNIST_DATA'
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
model_path = os.path.join(data_dir, 'mnist_pytorch.tar.gz')
if not os.path.isfile(model_path):
wget.download('https://github.com/IBM/watson-machine-learning-samples/raw/master/cpd3.5/models/pytorch/mnist_pytorch.tar.gz', out=data_dir)
data_dir = 'MNIST_DATA'
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
filename = os.path.join(data_dir, 'mnist.npz')
if not os.path.isfile(filename):
wget.download('https://s3.amazonaws.com/img-datasets/mnist.npz', out=data_dir)
import numpy as np
dataset = np.load(filename)
x_test = dataset['x_test']
```
<a id="persistence"></a>
## 3. Persist externally created Pytorch ONNX model
In this section, you will learn how to store your model in Watson Machine Learning repository by using the IBM Watson Machine Learning SDK.
### 3.1: Publish model
#### Publish model in Watson Machine Learning repository.
Define the model name, author name, and email.
```
software_spec_uid = client.software_specifications.get_id_by_name("default_py3.7")
metadata = {
    client.repository.ModelMetaNames.NAME: 'External pytorch model',
    client.repository.ModelMetaNames.TYPE: 'pytorch-onnx_1.3',
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid
}
published_model = client.repository.store_model(
model=model_path,
meta_props=metadata)
```
### 3.2: Get model details
```
import json
published_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(published_model_uid)
print(json.dumps(model_details, indent=2))
```
### 3.3 Get all models
```
models_details = client.repository.list_models()
```
<a id="scoring"></a>
## 4. Deploy and score
In this section you will learn how to create an online deployment and score a new data record by using the IBM Watson Machine Learning SDK.
### 4.1: Create model deployment
#### Create online deployment for published model
```
metadata = {
client.deployments.ConfigurationMetaNames.NAME: "Deployment of external pytorch model",
client.deployments.ConfigurationMetaNames.ONLINE: {}
}
created_deployment = client.deployments.create(published_model_uid, meta_props=metadata)
```
**Note**: Here we use the deployment URL saved in the `published_model` object. In the next section, we show how to retrieve the deployment URL from the Watson Machine Learning instance.
```
deployment_uid = client.deployments.get_uid(created_deployment)
```
Now you can print an online scoring endpoint.
```
scoring_endpoint = client.deployments.get_scoring_href(created_deployment)
print(scoring_endpoint)
```
You can also list existing deployments.
```
client.deployments.list()
```
### 4.2: Get deployment details
```
client.deployments.get_details(deployment_uid)
```
### 4.3: Score
You can use the method below to send a test scoring request against the deployed model.
Let's first visualize the two samples from the dataset that we'll use for scoring.
```
%matplotlib inline
import matplotlib.pyplot as plt
for i, image in enumerate([x_test[0], x_test[1]]):
plt.subplot(2, 2, i + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
```
Prepare scoring payload with records to score.
```
score_0 = [x_test[0].tolist()]
score_1 = [x_test[1].tolist()]
scoring_payload = {"input_data": [{"values": [score_0, score_1]}]}
```
Use ``client.deployments.score()`` method to run scoring.
```
predictions = client.deployments.score(deployment_uid, scoring_payload)
```
Let's print the result of predictions.
```
print(json.dumps(predictions, indent=2))
```
As you can see, the prediction probabilities point to the correct classes for the test samples displayed above.
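For example, a quick sketch of pulling out the predicted digits from the response (this assumes the usual `{"predictions": [{"values": [...]}]}` layout; adjust the indexing if your model's output is nested differently):
```
import numpy as np

scored_values = predictions['predictions'][0]['values']
predicted_digits = [int(np.argmax(np.ravel(v))) for v in scored_values]
print(predicted_digits)
```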
<a id="cleanup"></a>
## 5. Clean up
If you want to clean up all created assets:
- experiments
- trainings
- pipelines
- model definitions
- models
- functions
- deployments
please follow up this sample [notebook](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Machine%20Learning%20artifacts%20management.ipynb).
<a id="summary"></a>
## 6. Summary and next steps
You successfully completed this notebook! You learned how to use Pytorch machine learning library as well as Watson Machine Learning for model creation and deployment.
Check out our [Online Documentation](https://dataplatform.cloud.ibm.com/docs/content/analyze-data/wml-setup.html) for more samples, tutorials, documentation, how-tos, and blog posts.
### Authors
**Daniel Ryszka**, Software Engineer
Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
# TTS Inference Model Selection
This notebook can be used to generate audio samples using either NeMo's pretrained models or NeMo TTS models that you have trained yourself. It supports all NeMo TTS models and is intended to showcase the different models and how their results differ.
# License
> Copyright 2020 NVIDIA. All Rights Reserved.
>
> Licensed under the Apache License, Version 2.0 (the "License");
> you may not use this file except in compliance with the License.
> You may obtain a copy of the License at
>
> http://www.apache.org/licenses/LICENSE-2.0
>
> Unless required by applicable law or agreed to in writing, software
> distributed under the License is distributed on an "AS IS" BASIS,
> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
> See the License for the specific language governing permissions and
> limitations under the License.
```
"""
You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# # If you're using Google Colab and not running locally, uncomment and run this cell.
# !apt-get install sox libsndfile1 ffmpeg
# !pip install wget unidecode
# BRANCH = 'main'
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[tts]
```
## Models
First we pick the models that we want to use. Currently supported models are:
End-to-End Models:
- [FastPitch_HifiGan_E2E](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_e2e_fastpitchhifigan)
- [FastSpeech2_HifiGan_E2E](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_e2e_fastspeech2hifigan)
Spectrogram Generators:
- [Tacotron 2](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_tacotron2)
- [Glow-TTS](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_glowtts)
- [TalkNet](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_talknet)
- [FastPitch](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_fastpitch)
- [FastSpeech2](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_fastspeech_2)
- [Mixer-TTS](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_lj_mixertts)
- [Mixer-TTS-X](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_lj_mixerttsx)
Audio Generators
- [WaveGlow](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_waveglow_88m)
- [SqueezeWave](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_squeezewave)
- [UniGlow](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_uniglow)
- [MelGAN](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_melgan)
- [HiFiGAN](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_hifigan)
- [UnivNet](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_lj_univnet)
- Griffin-Lim
```
from ipywidgets import Select, HBox, Label
from IPython.display import display
supported_e2e = ["fastpitch_hifigan", "fastspeech2_hifigan", None]
supported_spec_gen = ["tacotron2", "glow_tts", "talknet", "fastpitch", "fastspeech2", "mixertts", "mixerttsx", None]
supported_audio_gen = ["waveglow", "squeezewave", "uniglow", "melgan", "hifigan", "univnet", "griffin-lim", None]
print("Select the model(s) that you want to use. Please choose either 1 end-to-end model or 1 spectrogram generator and 1 vocoder.")
e2e_selector = Select(options=supported_e2e, value=None)
spectrogram_generator_selector = Select(options=supported_spec_gen, value=None)
audio_generator_selector = Select(options=supported_audio_gen, value=None)
display(HBox([e2e_selector, Label("OR"), spectrogram_generator_selector, Label("+"), audio_generator_selector]))
e2e_model = e2e_selector.value
spectrogram_generator = spectrogram_generator_selector.value
audio_generator = audio_generator_selector.value
if e2e_model is None and spectrogram_generator is None and audio_generator is None:
raise ValueError("No models were chosen. Please return to the previous step and choose either 1 end-to-end model or 1 spectrogram generator and 1 vocoder.")
if e2e_model and (spectrogram_generator or audio_generator):
raise ValueError(
"An end-to-end model was chosen and either a spectrogram generator or a vocoder was also selected. For end-to-end models, please select `None` "
"in the second and third column to continue. For the two step pipeline, please select `None` in the first column to continue."
)
if (spectrogram_generator and audio_generator is None) or (audio_generator and spectrogram_generator is None):
raise ValueError("In order to continue with the two step pipeline, both the spectrogram generator and the audio generator must be chosen, but one was `None`")
```
## Load model checkpoints
Next we load the pretrained model provided by NeMo. All NeMo models have two functions to help with this
- list_available_models(): This function will return a list of all pretrained checkpoints for that model
- from_pretrained(): This function will download the pretrained checkpoint, load it, and return an instance of the model
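For example, `list_available_models()` from the list above can be used to see which checkpoints are available for the spectrogram generators (the same classmethod exists on `Vocoder` and `TextToWaveform`):
```
from nemo.collections.tts.models.base import SpectrogramGenerator

# Each entry describes one pretrained checkpoint hosted on NGC.
for model_info in SpectrogramGenerator.list_available_models():
    print(model_info.pretrained_model_name)
```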
Below we will use `from_pretrained` to load the chosen models from above.
```
from omegaconf import OmegaConf, open_dict
import torch
from nemo.collections.tts.models.base import SpectrogramGenerator, Vocoder, TextToWaveform
def load_spectrogram_model():
override_conf = None
from_pretrained_call = SpectrogramGenerator.from_pretrained
if spectrogram_generator == "tacotron2":
from nemo.collections.tts.models import Tacotron2Model
pretrained_model = "tts_en_tacotron2"
elif spectrogram_generator == "glow_tts":
from nemo.collections.tts.models import GlowTTSModel
pretrained_model = "tts_en_glowtts"
import wget
from pathlib import Path
if not Path("cmudict-0.7b").exists():
filename = wget.download("http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b")
filename = str(Path(filename).resolve())
else:
filename = str(Path("cmudict-0.7b").resolve())
conf = SpectrogramGenerator.from_pretrained(pretrained_model, return_config=True)
if "params" in conf.parser:
conf.parser.params.cmu_dict_path = filename
else:
conf.parser.cmu_dict_path = filename
override_conf = conf
elif spectrogram_generator == "talknet":
from nemo.collections.tts.models import TalkNetSpectModel
pretrained_model = "tts_en_talknet"
from_pretrained_call = TalkNetSpectModel.from_pretrained
elif spectrogram_generator == "fastpitch":
from nemo.collections.tts.models import FastPitchModel
pretrained_model = "tts_en_fastpitch"
elif spectrogram_generator == "fastspeech2":
from nemo.collections.tts.models import FastSpeech2Model
pretrained_model = "tts_en_fastspeech2"
elif spectrogram_generator == "mixertts":
from nemo.collections.tts.models import MixerTTSModel
pretrained_model = "tts_en_lj_mixertts"
elif spectrogram_generator == "mixerttsx":
from nemo.collections.tts.models import MixerTTSModel
pretrained_model = "tts_en_lj_mixerttsx"
else:
raise NotImplementedError
model = from_pretrained_call(pretrained_model, override_config_path=override_conf)
return model
def load_vocoder_model():
RequestPseudoInverse = False
TwoStagesModel = False
strict=True
if audio_generator == "waveglow":
from nemo.collections.tts.models import WaveGlowModel
pretrained_model = "tts_waveglow"
strict=False
elif audio_generator == "squeezewave":
from nemo.collections.tts.models import SqueezeWaveModel
pretrained_model = "tts_squeezewave"
elif audio_generator == "uniglow":
from nemo.collections.tts.models import UniGlowModel
pretrained_model = "tts_uniglow"
elif audio_generator == "melgan":
from nemo.collections.tts.models import MelGanModel
pretrained_model = "tts_melgan"
elif audio_generator == "hifigan":
from nemo.collections.tts.models import HifiGanModel
spectrogram_generator2ft_hifigan = {
"mixertts": "tts_en_lj_hifigan_ft_mixertts",
"mixerttsx": "tts_en_lj_hifigan_ft_mixerttsx"
}
pretrained_model = spectrogram_generator2ft_hifigan.get(spectrogram_generator, "tts_hifigan")
elif audio_generator == "univnet":
from nemo.collections.tts.models import UnivNetModel
pretrained_model = "tts_en_lj_univnet"
elif audio_generator == "griffin-lim":
from nemo.collections.tts.models import TwoStagesModel
cfg = {'linvocoder': {'_target_': 'nemo.collections.tts.models.two_stages.GriffinLimModel',
'cfg': {'n_iters': 64, 'n_fft': 1024, 'l_hop': 256}},
'mel2spec': {'_target_': 'nemo.collections.tts.models.two_stages.MelPsuedoInverseModel',
'cfg': {'sampling_rate': 22050, 'n_fft': 1024,
'mel_fmin': 0, 'mel_fmax': 8000, 'mel_freq': 80}}}
model = TwoStagesModel(cfg)
TwoStagesModel = True
else:
raise NotImplementedError
if not TwoStagesModel:
model = Vocoder.from_pretrained(pretrained_model, strict=strict)
return model
def load_e2e_model():
if e2e_model == "fastpitch_hifigan":
from nemo.collections.tts.models import FastPitchHifiGanE2EModel
pretrained_model = "tts_en_e2e_fastpitchhifigan"
elif e2e_model == "fastspeech2_hifigan":
from nemo.collections.tts.models import FastSpeech2HifiGanE2EModel
pretrained_model = "tts_en_e2e_fastspeech2hifigan"
else:
raise NotImplementedError
model = TextToWaveform.from_pretrained(pretrained_model)
return model
emodel = None
spec_gen = None
vocoder = None
if e2e_model:
emodel = load_e2e_model().eval().cuda()
else:
spec_gen = load_spectrogram_model().eval().cuda()
vocoder = load_vocoder_model().eval().cuda()
```
## Inference
Now that we have downloaded the model checkpoints and loaded them into memory, let's define a short `infer` helper function that takes a string and our models, and produces speech.
Notice that the NeMo TTS model interface is fairly simple and standardized across all models.
End-to-end models have two helper functions:
- parse(): Accepts raw python strings and returns a torch.tensor that represents tokenized text
- convert_text_to_waveform(): Accepts a batch of tokenized text and returns a torch.tensor that represents a batch of raw audio
Mel Spectrogram generators have two helper functions:
- parse(): Accepts raw python strings and returns a torch.tensor that represents tokenized text
- generate_spectrogram(): Accepts a batch of tokenized text and returns a torch.tensor that represents a batch of spectrograms
Vocoders have just one helper function:
- convert_spectrogram_to_audio(): Accepts a batch of spectrograms and returns a torch.tensor that represents a batch of raw audio
```
def infer(end2end_model, spec_gen_model, vocoder_model, str_input):
parser_model = end2end_model or spec_gen_model
with torch.no_grad():
parsed = parser_model.parse(str_input)
if end2end_model is None:
gen_spec_kwargs = {}
if spectrogram_generator == "mixerttsx":
gen_spec_kwargs["raw_texts"] = [str_input]
spectrogram = spec_gen_model.generate_spectrogram(tokens=parsed, **gen_spec_kwargs)
audio = vocoder_model.convert_spectrogram_to_audio(spec=spectrogram)
if audio_generator == "hifigan":
audio = vocoder_model._bias_denoise(audio, spectrogram).squeeze(1)
else:
spectrogram = None
audio = end2end_model.convert_text_to_waveform(tokens=parsed)[0]
if spectrogram is not None:
if isinstance(spectrogram, torch.Tensor):
spectrogram = spectrogram.to('cpu').numpy()
if len(spectrogram.shape) == 3:
spectrogram = spectrogram[0]
if isinstance(audio, torch.Tensor):
audio = audio.to('cpu').numpy()
return spectrogram, audio
```
Now that everything is set up, let's provide the text that we want our models to speak.
```
text_to_generate = input("Input what you want the model to say: ")
spec, audio = infer(emodel, spec_gen, vocoder, text_to_generate)
```
# Results
After our model generates the audio, let's go ahead and play it. We can also visualize the spectrogram produced by the first-stage model if a spectrogram generator was used.
```
import IPython.display as ipd
import numpy as np
from PIL import Image
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
ipd.Audio(audio, rate=22050)
%matplotlib inline
if spec is not None:
imshow(spec, origin="lower")
plt.show()
```
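If you want to keep the generated sample, one option (a sketch; it assumes the `soundfile` package is installed, which is not among this notebook's dependencies) is to write it out as a WAV file:
```
import soundfile as sf

# The models above generate audio at a 22050 Hz sampling rate.
sf.write("generated_speech.wav", audio, samplerate=22050)
```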
I want to sanity-test that tabulating the HODs from two different SHAMs will give different clustering.
```
import numpy as np
import astropy
from itertools import izip
from pearce.mocks import compute_prim_haloprop_bins, cat_dict
from pearce.mocks.customHODModels import *
from halotools.utils.table_utils import compute_conditional_percentiles
from halotools.mock_observables import hod_from_mock
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
mag_cut = -21
min_ptcl = 200
PMASS = 591421440.0000001 #chinchilla 400/ 2048
#catalog = np.loadtxt('ab_sham_hod_data_cut.npy')
catalog = astropy.table.Table.read('abmatched_halos.hdf5', format = 'hdf5')
cosmo_params = {'simname':'chinchilla', 'Lbox':400.0, 'scale_factors':[0.658, 1.0]}
cat = cat_dict[cosmo_params['simname']](**cosmo_params)#construct the specified catalog!
cat.load_catalog(1.0)
catalog.colnames
vpeak_catalog = catalog[np.logical_and(catalog['halo_mvir'] > min_ptcl*cat.pmass, catalog['halo_vpeak_mag'] <=mag_cut)]
mpeak_catalog = catalog[np.logical_and(catalog['halo_mvir'] > min_ptcl*cat.pmass, catalog['halo_vvir_mag'] <=mag_cut)]
from math import ceil
def compute_mass_bins(prim_haloprop, dlog10_prim_haloprop=0.05):
lg10_min_prim_haloprop = np.log10(np.min(prim_haloprop))-0.001
lg10_max_prim_haloprop = np.log10(np.max(prim_haloprop))+0.001
num_prim_haloprop_bins = (lg10_max_prim_haloprop-lg10_min_prim_haloprop)/dlog10_prim_haloprop
return np.logspace(
lg10_min_prim_haloprop, lg10_max_prim_haloprop,
num=int(ceil(num_prim_haloprop_bins)))
halo_mass = catalog['halo_mvir'][catalog['halo_mvir'] > min_ptcl*cat.pmass]
haloprop_bins = compute_mass_bins(halo_mass, 0.2)
mbc = (haloprop_bins[1:]+haloprop_bins[:-1])/2.0
cen_hods, sat_hods = [], []
for galaxy_catalog in (vpeak_catalog, mpeak_catalog):
cenmask = galaxy_catalog['halo_upid']==-1
satmask = galaxy_catalog['halo_upid']>0
cen_hods.append(hod_from_mock(galaxy_catalog['halo_mvir_host_halo'][cenmask], halo_mass, haloprop_bins)[0])
sat_hods.append(hod_from_mock(galaxy_catalog['halo_mvir_host_halo'][satmask], halo_mass, haloprop_bins)[0])
plt.plot(mbc, cen_hods[-1]+ sat_hods[-1])
plt.loglog()
from pearce.mocks.customHODModels import *
#rp_bins = np.logspace(-1,1.5,20)
rp_bins = np.logspace(-1.1,1.6, 18)
bin_centers = (rp_bins[1:]+rp_bins[:-1])/2
for cen_hod, sat_hod in zip(cen_hods, sat_hods):
print cen_hod
print sat_hod
cat.load_model(1.0, HOD=(TabulatedCens, TabulatedSats), hod_kwargs = {'prim_haloprop_vals': mbc,
#'sec_haloprop_key': 'halo_%s'%(mag_type),
'cen_hod_vals':cen_hod,
'sat_hod_vals':sat_hod})# ,
#'split':0.7})
cat.populated_once = False
cat.populate({})
xi = cat.calc_xi(rp_bins)
print xi
break
plt.plot(bin_centers,xi)
plt.loglog();
from halotools.mock_observables import wp, tpcf
min_logmass, max_logmass = 9.0, 17.0
from halotools.mock_observables import tpcf_one_two_halo_decomp
#mock_wp = cat.calc_wp(rp_bins, RSD= False)
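# MAP packs the assembly-bias parameters in the order given by `names` below:
# (central amplitude, satellite amplitude, central slope, satellite slope).
# This first run uses zero amplitudes, i.e. a no-assembly-bias baseline.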
MAP = np.array([ 0.0, 0.0,5,5])
names = ['mean_occupation_centrals_assembias_param1','mean_occupation_satellites_assembias_param1',\
'mean_occupation_centrals_assembias_slope1','mean_occupation_satellites_assembias_slope1']
params = dict(zip(names, MAP))
mock_wps = []
mock_wps_1h, mock_wps_2h = [],[]
mock_nds = []
for i in xrange(10):
cat.populate(params)
#cut_idx = cat.model.mock.galaxy_table['gal_type'] == 'centrals'
mass_cut = np.logical_and(np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) > min_logmass,\
np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) <= max_logmass)
#mass_cut = np.logical_and(mass_cut, cut_idx)
#mock_nds.append(len(cut_idx)/cat.Lbox**3)
mock_pos = np.c_[cat.model.mock.galaxy_table[mass_cut]['x'],\
cat.model.mock.galaxy_table[mass_cut]['y'],\
cat.model.mock.galaxy_table[mass_cut]['z']]
mock_wps.append(tpcf(mock_pos, rp_bins , period=cat.Lbox, num_threads=1))
oneh, twoh = tpcf_one_two_halo_decomp(mock_pos,cat.model.mock.galaxy_table[mass_cut]['halo_hostid'],\
rp_bins , period=cat.Lbox, num_threads=1)
mock_wps_1h.append(oneh)
mock_wps_2h.append(twoh)
mock_wps = np.array(mock_wps)
mock_wp_no_ab = np.mean(mock_wps, axis = 0)
wp_errs = np.std(mock_wps, axis = 0)
mock_wps_1h = np.array(mock_wps_1h)
mock_wp_no_ab_1h = np.mean(mock_wps_1h, axis = 0)
mock_wps_2h = np.array(mock_wps_2h)
mock_wp_no_ab_2h = np.mean(mock_wps_2h, axis = 0)
mock_nds = np.array(mock_nds)
mock_nd = np.mean(mock_nds)
nd_err = np.std(mock_nds)
#mock_wp = cat.calc_wp(rp_bins, RSD= False)
#MAP = np.array([ 0.38800666, -0.49540832, 3, 3])
MAP = np.array([1.0, -1.0, 5.0, 3.0])
params = dict(zip(names, MAP))
mock_wps = []
mock_wps_1h, mock_wps_2h = [],[]
mock_nds = []
for i in xrange(10):
cat.populate(params)
#cut_idx = cat.model.mock.galaxy_table['gal_type'] == 'centrals'
#mock_nds.append(len(cut_idx)/cat.Lbox**3)
mass_cut = np.logical_and(np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) > min_logmass,\
np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) <= max_logmass)
#mass_cut = np.logical_and(mass_cut, cut_idx)
#mock_nds.append(len(cut_idx)/cat.Lbox**3)
mock_pos = np.c_[cat.model.mock.galaxy_table[mass_cut]['x'],\
cat.model.mock.galaxy_table[mass_cut]['y'],\
cat.model.mock.galaxy_table[mass_cut]['z']]
mock_wps.append(tpcf(mock_pos, rp_bins , period=cat.Lbox, num_threads=1))
oneh, twoh = tpcf_one_two_halo_decomp(mock_pos,cat.model.mock.galaxy_table[mass_cut]['halo_hostid'],\
rp_bins , period=cat.Lbox, num_threads=1)
mock_wps_1h.append(oneh)
mock_wps_2h.append(twoh)
mock_wps = np.array(mock_wps)
mock_wp_ab = np.mean(mock_wps, axis = 0)
wp_errs = np.std(mock_wps, axis = 0)
mock_wps_1h = np.array(mock_wps_1h)
mock_wp_ab_1h = np.mean(mock_wps_1h, axis = 0)
mock_wps_2h = np.array(mock_wps_2h)
mock_wp_ab_2h = np.mean(mock_wps_2h, axis = 0)
mock_nds = np.array(mock_nds)
mock_nd = np.mean(mock_nds)
nd_err = np.std(mock_nds)
#mock_wp = cat.calc_wp(rp_bins, RSD= False)
MAP = np.array([ 1.0, -1.0,5,5])
names = ['mean_occupation_centrals_assembias_param1','mean_occupation_satellites_assembias_param1',\
'mean_occupation_centrals_assembias_slope1','mean_occupation_satellites_assembias_slope1']
params = dict(zip(names, MAP))
mock_wps = []
mock_wps_1h, mock_wps_2h = [],[]
mock_nds = []
for i in xrange(10):
cat.populate(params)
#cut_idx = cat.model.mock.galaxy_table['gal_type'] == 'centrals'
#mock_nds.append(len(cut_idx)/cat.Lbox**3)
mass_cut = np.logical_and(np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) > min_logmass,\
np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) <= max_logmass)
#mass_cut = np.logical_and(mass_cut, cut_idx)
#mock_nds.append(len(cut_idx)/cat.Lbox**3)
mock_pos = np.c_[cat.model.mock.galaxy_table[mass_cut]['x'],\
cat.model.mock.galaxy_table[mass_cut]['y'],\
cat.model.mock.galaxy_table[mass_cut]['z']]
mock_wps.append(tpcf(mock_pos, rp_bins , period=cat.Lbox, num_threads=1))
oneh, twoh = tpcf_one_two_halo_decomp(mock_pos,cat.model.mock.galaxy_table[mass_cut]['halo_hostid'],\
rp_bins , period=cat.Lbox, num_threads=1)
mock_wps_1h.append(oneh)
mock_wps_2h.append(twoh)
mock_wps = np.array(mock_wps)
mock_wp_max_ab = np.mean(mock_wps, axis = 0)
wp_errs = np.std(mock_wps, axis = 0)
mock_wps_1h = np.array(mock_wps_1h)
mock_wp_max_ab_1h = np.mean(mock_wps_1h, axis = 0)
mock_wps_2h = np.array(mock_wps_2h)
mock_wp_max_ab_2h = np.mean(mock_wps_2h, axis = 0)
mock_nds = np.array(mock_nds)
mock_nd = np.mean(mock_nds)
nd_err = np.std(mock_nds)
#mock_wp = cat.calc_wp(rp_bins, RSD= False)
MAP = np.array([ 1.0, 0.0,5,5])
names = ['mean_occupation_centrals_assembias_param1','mean_occupation_satellites_assembias_param1',\
'mean_occupation_centrals_assembias_slope1','mean_occupation_satellites_assembias_slope1']
params = dict(zip(names, MAP))
mock_wps = []
mock_wps_1h, mock_wps_2h = [],[]
mock_nds = []
for i in xrange(10):
cat.populate(params)
#cut_idx = cat.model.mock.galaxy_table['gal_type'] == 'centrals'
#mock_nds.append(len(cut_idx)/cat.Lbox**3)
mass_cut = np.logical_and(np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) > min_logmass,\
np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) <= max_logmass)
#mass_cut = np.logical_and(mass_cut, cut_idx)
#mock_nds.append(len(cut_idx)/cat.Lbox**3)
mock_pos = np.c_[cat.model.mock.galaxy_table[mass_cut]['x'],\
cat.model.mock.galaxy_table[mass_cut]['y'],\
cat.model.mock.galaxy_table[mass_cut]['z']]
mock_wps.append(tpcf(mock_pos, rp_bins , period=cat.Lbox, num_threads=1))
oneh, twoh = tpcf_one_two_halo_decomp(mock_pos,cat.model.mock.galaxy_table[mass_cut]['halo_hostid'],\
rp_bins , period=cat.Lbox, num_threads=1)
mock_wps_1h.append(oneh)
mock_wps_2h.append(twoh)
mock_wps = np.array(mock_wps)
mock_wp_max_cen_ab = np.mean(mock_wps, axis = 0)
wp_errs = np.std(mock_wps, axis = 0)
mock_wps_1h = np.array(mock_wps_1h)
mock_wp_max_cen_ab_1h = np.mean(mock_wps_1h, axis = 0)
mock_wps_2h = np.array(mock_wps_2h)
mock_wp_max_cen_ab_2h = np.mean(mock_wps_2h, axis = 0)
mock_nds = np.array(mock_nds)
mock_nd = np.mean(mock_nds)
nd_err = np.std(mock_nds)
#mock_wp = cat.calc_wp(rp_bins, RSD= False)
MAP = np.array([ 0.0, -1.0,5,5])
names = ['mean_occupation_centrals_assembias_param1','mean_occupation_satellites_assembias_param1',\
'mean_occupation_centrals_assembias_slope1','mean_occupation_satellites_assembias_slope1']
params = dict(zip(names, MAP))
mock_wps = []
mock_wps_1h, mock_wps_2h = [],[]
mock_nds = []
for i in xrange(10):
cat.populate(params)
#cut_idx = cat.model.mock.galaxy_table['gal_type'] == 'centrals'
#mock_nds.append(len(cut_idx)/cat.Lbox**3)
mass_cut = np.logical_and(np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) > min_logmass,\
np.log10(cat.model.mock.galaxy_table['halo_mvir'] ) <= max_logmass)
#mass_cut = np.logical_and(mass_cut, cut_idx)
#mock_nds.append(len(cut_idx)/cat.Lbox**3)
mock_pos = np.c_[cat.model.mock.galaxy_table[mass_cut]['x'],\
cat.model.mock.galaxy_table[mass_cut]['y'],\
cat.model.mock.galaxy_table[mass_cut]['z']]
mock_wps.append(tpcf(mock_pos, rp_bins , period=cat.Lbox, num_threads=1))
oneh, twoh = tpcf_one_two_halo_decomp(mock_pos,cat.model.mock.galaxy_table[mass_cut]['halo_hostid'],\
rp_bins , period=cat.Lbox, num_threads=1)
mock_wps_1h.append(oneh)
mock_wps_2h.append(twoh)
mock_wps = np.array(mock_wps)
mock_wp_max_sat_ab = np.mean(mock_wps, axis = 0)
wp_errs = np.std(mock_wps, axis = 0)
mock_wps_1h = np.array(mock_wps_1h)
mock_wp_max_sat_ab_1h = np.mean(mock_wps_1h, axis = 0)
mock_wps_2h = np.array(mock_wps_2h)
mock_wp_max_sat_ab_2h = np.mean(mock_wps_2h, axis = 0)
mock_nds = np.array(mock_nds)
mock_nd = np.mean(mock_nds)
nd_err = np.std(mock_nds)
catalog.colnames
#catalog = astropy.table.Table.read('abmatched_halos.hdf5', format = 'hdf5')
#halo_catalog_orig = catalog[np.logical_and(catalog['halo_mvir'] > min_ptcl*cat.pmass, catalog['halo_vpeak_mag'] <=mag_cut)]
#halo_catalog_orig = catalog[np.logical_and( \
# np.logical_and(catalog['halo_shuffled_host_mvir'] > 10**min_logmass,\
# catalog['halo_shuffled_host_mvir'] < 10**max_logmass),\
# catalog['halo_vvir_mag'] <=mag_cut)]
mag_type = 'vpeak' # assumed value: the SHAM magnitude column to cut on ('vpeak' or 'vvir'); not defined elsewhere in this notebook
mag_cut = catalog['halo_%s_mag'%mag_type] <= mag_cut
cut_idx = catalog['halo_upid'] >= -1
mass_cut = np.logical_and(np.log10(catalog['halo_mvir_host_halo']) > min_logmass,\
np.log10(catalog['halo_mvir_host_halo']) <= max_logmass)
mass_cut = np.logical_and(mass_cut, cut_idx)
halo_catalog_orig = catalog[np.logical_and(mag_cut, mass_cut)]
print len(halo_catalog_orig)
centrals_idx = np.where(halo_catalog_orig['halo_upid']>=-1)[0]
sham_pos = np.c_[halo_catalog_orig['halo_x'],\
halo_catalog_orig['halo_y'],\
halo_catalog_orig['halo_z']]
sham_wp = tpcf(sham_pos, rp_bins , period=cat.Lbox, num_threads=1)
print sham_wp
host_ids = halo_catalog_orig['halo_upid']
host_ids[centrals_idx] = halo_catalog_orig[centrals_idx]['halo_id']
sham_wp_1h, sham_wp_2h = tpcf_one_two_halo_decomp(sham_pos,host_ids, rp_bins , period=cat.Lbox, num_threads=1)
```
```
#sham_nd = len(halo_catalog_orig[centrals_idx])/(cat.Lbox**3)
sham_nd = len(halo_catalog_orig)/(cat.Lbox**3)
sham_nfw_pos = np.c_[halo_catalog_orig['halo_nfw_x'],\
halo_catalog_orig['halo_nfw_y'],\
halo_catalog_orig['halo_nfw_z']]
sham_nfw_wp = tpcf(sham_nfw_pos, rp_bins, period=cat.Lbox, num_threads=1)
sham_nfw_wp_1h, sham_nfw_wp_2h = tpcf_one_two_halo_decomp(sham_nfw_pos,host_ids,\
rp_bins, period=cat.Lbox, num_threads=1)
```
```
halo_catalog.colnames
shuffle_type = 'shuffled'
mass_cut = np.logical_and(np.log10(halo_catalog['halo_%s_host_mvir'%shuffle_type]) > min_logmass,\
np.log10(halo_catalog['halo_%s_host_mvir'%shuffle_type]) < max_logmass)
cut_idx = halo_catalog['halo_%s_upid'%shuffle_type] >= -1
mass_cut = np.logical_and(mass_cut, cut_idx)
sham_shuffled_pos = np.c_[halo_catalog[mass_cut]['halo_%s_x'%shuffle_type],\
halo_catalog[mass_cut]['halo_%s_y'%shuffle_type],\
                          halo_catalog[mass_cut]['halo_%s_z'%shuffle_type]]
sham_shuffled_wp = tpcf(sham_shuffled_pos, rp_bins , period=cat.Lbox, num_threads=1)
centrals_idx = halo_catalog[mass_cut]['halo_%s_upid'%shuffle_type]>=-1
host_ids = halo_catalog[mass_cut]['halo_%s_upid'%shuffle_type]
host_ids[centrals_idx] = halo_catalog[mass_cut][centrals_idx]['halo_id']
sham_shuffled_wp_1h, sham_shuffled_wp_2h = tpcf_one_two_halo_decomp(sham_shuffled_pos, host_ids,\
rp_bins , period=cat.Lbox, num_threads=1)
shuffle_type = 'sh_shuffled'
mass_cut = np.logical_and(np.log10(halo_catalog['halo_%s_host_mvir'%shuffle_type]) > min_logmass,\
np.log10(halo_catalog['halo_%s_host_mvir'%shuffle_type]) < max_logmass)
cut_idx = halo_catalog['halo_%s_upid'%shuffle_type] >= -1
mass_cut = np.logical_and(mass_cut, cut_idx)
sham_sh_shuffled_pos = np.c_[halo_catalog[mass_cut]['halo_%s_x'%shuffle_type],\
halo_catalog[mass_cut]['halo_%s_y'%shuffle_type],\
halo_catalog[mass_cut]['halo_%s_z'%shuffle_type]]
sham_sh_shuffled_wp = tpcf(sham_sh_shuffled_pos, rp_bins , period=cat.Lbox, num_threads=1)
centrals_idx = halo_catalog[mass_cut]['halo_%s_upid'%shuffle_type]>=-1
host_ids = halo_catalog[mass_cut]['halo_%s_upid'%shuffle_type]
host_ids[centrals_idx] = halo_catalog[mass_cut][centrals_idx]['halo_id']
sham_sh_shuffled_wp_1h, sham_sh_shuffled_wp_2h = tpcf_one_two_halo_decomp(sham_sh_shuffled_pos, host_ids,\
rp_bins , period=cat.Lbox, num_threads=1)
shuffle_type = 'sh_shuffled_cen'
mass_cut = np.logical_and(np.log10(halo_catalog['halo_%s_host_mvir'%shuffle_type]) > min_logmass,\
np.log10(halo_catalog['halo_%s_host_mvir'%shuffle_type]) < max_logmass)
cut_idx = halo_catalog['halo_%s_upid'%shuffle_type] >= -1
mass_cut = np.logical_and(mass_cut, cut_idx)
sham_sh_shuffled_pos = np.c_[halo_catalog[mass_cut]['halo_%s_x'%shuffle_type],\
halo_catalog[mass_cut]['halo_%s_y'%shuffle_type],\
halo_catalog[mass_cut]['halo_%s_z'%shuffle_type]]
sham_sh_shuffled_cen_wp = tpcf(sham_sh_shuffled_pos, rp_bins , period=cat.Lbox, num_threads=1)
centrals_idx = halo_catalog[mass_cut]['halo_%s_upid'%shuffle_type]>=-1
host_ids = halo_catalog[mass_cut]['halo_%s_upid'%shuffle_type]
host_ids[centrals_idx] = halo_catalog[mass_cut][centrals_idx]['halo_id']
sham_sh_shuffled_cen_wp_1h, sham_sh_shuffled_cen_wp_2h = tpcf_one_two_halo_decomp(sham_sh_shuffled_pos, host_ids,\
rp_bins , period=cat.Lbox, num_threads=1)
shuffle_type = 'sh_shuffled_sats'
mass_cut = np.logical_and(np.log10(halo_catalog['halo_%s_host_mvir'%shuffle_type]) > min_logmass,\
np.log10(halo_catalog['halo_%s_host_mvir'%shuffle_type]) < max_logmass)
cut_idx = halo_catalog['halo_%s_upid'%shuffle_type] >= -1
mass_cut = np.logical_and(mass_cut, cut_idx)
sham_sh_shuffled_pos = np.c_[halo_catalog[mass_cut]['halo_%s_x'%shuffle_type],\
halo_catalog[mass_cut]['halo_%s_y'%shuffle_type],\
halo_catalog[mass_cut]['halo_%s_z'%shuffle_type]]
sham_sh_shuffled_sat_wp = tpcf(sham_sh_shuffled_pos, rp_bins , period=cat.Lbox, num_threads=1)
centrals_idx = halo_catalog[mass_cut]['halo_%s_upid'%shuffle_type]>=-1
host_ids = halo_catalog[mass_cut]['halo_%s_upid'%shuffle_type]
host_ids[centrals_idx] = halo_catalog[mass_cut][centrals_idx]['halo_id']
sham_sh_shuffled_sat_wp_1h, sham_sh_shuffled_sat_wp_2h = tpcf_one_two_halo_decomp(sham_sh_shuffled_pos, host_ids,\
rp_bins , period=cat.Lbox, num_threads=1)
#shuffled_nd = len(cut_idx)/(cat.Lbox**3)
shuffled_nd = len(halo_catalog)/(cat.Lbox**3)
print sham_nd,shuffled_nd, mock_nd
print sham_nd-mock_nd, nd_err
print (sham_nd-mock_nd)/nd_err
plt.figure(figsize=(10,8))
plt.errorbar(bin_centers, mock_wp_no_ab,yerr=wp_errs, label = 'no ab model')
plt.errorbar(bin_centers, mock_wp_ab,yerr=wp_errs, label = 'ab model')
plt.errorbar(bin_centers, mock_wp_max_ab,yerr=wp_errs, label = 'max model')
plt.plot(bin_centers, sham_wp, label = 'sham')
plt.plot(bin_centers, sham_nfw_wp, label = 'nfw-ized sham')
plt.plot(bin_centers, sham_shuffled_wp, label = 'shuffled & nfw-ized sham')
plt.plot(bin_centers, sham_sh_shuffled_wp, label = 'sh shuffled & nfw-ized sham')
plt.plot(bin_centers, sham_sh_shuffled_cen_wp, label = 'sh shuffled cen & nfw-ized sham')
plt.plot(bin_centers, sham_sh_shuffled_sat_wp, label = 'sh shuffled sat & nfw-ized sham')
plt.loglog()
plt.legend(loc='best',fontsize = 15)
plt.xlim([1e-1, 5e0]);
#plt.ylim([1,15000])
plt.xlabel(r'$r_p$',fontsize = 15)
plt.ylabel(r'$w(r_p)$',fontsize = 15)
plt.title(r'$w(r_p)$ comparison for HOD+AB and NFW-ized SHAM', fontsize = 20)
```
```
print sham_sh_shuffled_wp_2h/sham_shuffled_wp_2h
plt.figure(figsize=(10,8))
plt.plot(bin_centers, mock_wp_no_ab_2h, label = 'no ab model')
plt.plot(bin_centers, mock_wp_ab_2h, label = 'ab model')
plt.plot(bin_centers, mock_wp_max_ab_2h, label = 'max model')
plt.plot(bin_centers, sham_wp_2h, label = 'sham')
plt.plot(bin_centers, sham_nfw_wp_2h, label = 'nfw-ized sham')
plt.plot(bin_centers, sham_shuffled_wp_2h, label = 'shuffled & nfw-ized sham')
plt.plot(bin_centers, sham_sh_shuffled_wp_2h, label = 'sh shuffled & nfw-ized sham')
plt.plot(bin_centers, sham_sh_shuffled_cen_wp_2h, label = 'sh shuffled cen & nfw-ized sham')
plt.plot(bin_centers, sham_sh_shuffled_sat_wp_2h, label = 'sh shuffled sat & nfw-ized sham')
plt.loglog()
plt.legend(loc='best',fontsize = 15)
plt.xlim([1e-1, 5e0]);
#plt.ylim([1,15000])
plt.xlabel(r'$r_p$',fontsize = 15)
plt.ylabel(r'$w(r_p)$',fontsize = 15)
plt.title(r'$w(r_p)$ comparison for HOD+AB and NFW-ized SHAM', fontsize = 20)
```
```
%%bash
ls *xi*.npy
np.savetxt('mock_xi_%s.npy'%mag_type,mock_wp_no_ab)
np.savetxt('mock_xi_ab_%s.npy'%mag_type,mock_wp_ab)
np.savetxt('mock_xi_max_ab_%s.npy'%mag_type,mock_wp_max_ab)
np.savetxt('mock_xi_max_cen_ab_%s.npy'%mag_type,mock_wp_max_cen_ab)
np.savetxt('mock_xi_max_sat_ab_%s.npy'%mag_type,mock_wp_max_sat_ab)
np.savetxt('sham_xi_%s.npy'%mag_type, sham_wp)
np.savetxt('sham_shuffle_xi_%s.npy'%(mag_type), sham_shuffled_wp)
np.savetxt('sham_sh_shuffle_xi_%s.npy'%(mag_type), sham_sh_shuffled_wp)
np.savetxt('sham_nfw_xi_%s.npy'%mag_type, sham_nfw_wp)
np.savetxt('sham_sh_shuffle_cen_xi_%s.npy'%(mag_type), sham_sh_shuffled_cen_wp)
np.savetxt('sham_sh_shuffle_sat_xi_%s.npy'%(mag_type), sham_sh_shuffled_sat_wp)
np.savetxt('mock_xi_%s_1h.npy'%mag_type,mock_wp_no_ab_1h)
np.savetxt('mock_xi_ab_%s_1h.npy'%mag_type,mock_wp_ab_1h)
np.savetxt('mock_xi_max_ab_%s_1h.npy'%mag_type,mock_wp_max_ab_1h)
np.savetxt('mock_xi_max_cen_ab_%s_1h.npy'%mag_type,mock_wp_max_cen_ab_1h)
np.savetxt('mock_xi_max_sat_ab_%s_1h.npy'%mag_type,mock_wp_max_sat_ab_1h)
np.savetxt('sham_xi_%s_1h.npy'%mag_type, sham_wp_1h)
np.savetxt('sham_shuffle_xi_%s_1h.npy'%(mag_type), sham_shuffled_wp_1h)
np.savetxt('sham_sh_shuffle_xi_%s_1h.npy'%(mag_type), sham_sh_shuffled_wp_1h)
np.savetxt('sham_nfw_xi_%s_1h.npy'%mag_type, sham_nfw_wp_1h)
np.savetxt('sham_sh_shuffle_cen_xi_%s_1h.npy'%(mag_type), sham_sh_shuffled_cen_wp_1h)
np.savetxt('sham_sh_shuffle_sat_xi_%s_1h.npy'%(mag_type), sham_sh_shuffled_sat_wp_1h)
np.savetxt('mock_xi_%s_2h.npy'%mag_type,mock_wp_no_ab_2h)
np.savetxt('mock_xi_ab_%s_2h.npy'%mag_type,mock_wp_ab_2h)
np.savetxt('mock_xi_max_ab_%s_2h.npy'%mag_type,mock_wp_max_ab_2h)
np.savetxt('mock_xi_max_cen_ab_%s_2h.npy'%mag_type,mock_wp_max_cen_ab_2h)
np.savetxt('mock_xi_max_sat_ab_%s_2h.npy'%mag_type,mock_wp_max_sat_ab_2h)
np.savetxt('sham_xi_%s_2h.npy'%mag_type, sham_wp_2h)
np.savetxt('sham_shuffle_xi_%s_2h.npy'%(mag_type), sham_shuffled_wp_2h)
np.savetxt('sham_sh_shuffle_xi_%s_2h.npy'%(mag_type), sham_sh_shuffled_wp_2h)
np.savetxt('sham_nfw_xi_%s_2h.npy'%mag_type, sham_nfw_wp_2h)
np.savetxt('sham_sh_shuffle_cen_xi_%s_2h.npy'%(mag_type), sham_sh_shuffled_cen_wp_2h)
np.savetxt('sham_sh_shuffle_sat_xi_%s_2h.npy'%(mag_type), sham_sh_shuffled_sat_wp_2h)
plt.figure(figsize=(10,8))
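# Note: mock_wp is not defined anywhere in this notebook (only the per-model means such as mock_wp_no_ab are); this ratio plot presumably relies on a variable left over from an earlier session.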
plt.errorbar(bin_centers, mock_wp/mock_wp,yerr=wp_errs/mock_wp, label = 'model/model')
plt.plot(bin_centers, sham_wp/mock_wp, label = 'sham/model')
plt.plot(bin_centers, sham_nfw_wp/mock_wp, label = 'nfw-ized sham/model')
plt.plot(bin_centers, sham_shuffled_wp/mock_wp, label = 'shuffled & nfw-ized sham/model')
plt.xscale('log')
plt.legend(loc='best')
plt.xlim([1e-1, 5e0]);
#plt.ylim([0.8,1.2]);
plt.xlabel(r'$r_p$',fontsize = 15)
plt.ylabel(r'$w_{SHAM}(r_p)/w_{HOD+AB}(r_p)$',fontsize = 15)
plt.title(r'$w(r_p)$ ratio for HOD+AB and NFW-ized SHAM', fontsize = 20)
print mock_wps/sham_shuffled_wp
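# Note: ab_vals is likewise not defined in this notebook; it is assumed to come from an earlier session.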
colors = sns.color_palette("coolwarm", ab_vals.shape[0])
plt.figure(figsize=(10,8))
#plt.errorbar(bin_centers, mock_wp/sham_shuffled_wp,yerr=wp_errs/mock_wp, label = 'model/model')
for mwp, a, c in zip(mock_wps, ab_vals, colors):
plt.plot(bin_centers,mwp/sham_shuffled_wp, c =c, label = a)
plt.plot(bin_centers, sham_wp/sham_shuffled_wp, label = 'sham/model')
plt.plot(bin_centers, sham_nfw_wp/sham_shuffled_wp, label = 'nfw-ized sham/model')
plt.plot(bin_centers, sham_shuffled_wp/sham_shuffled_wp, label = 'shuffled & nfw-ized sham/model')
plt.xscale('log')
plt.legend(loc='best')
plt.xlim([1e-1, 5e0]);
#plt.ylim([0.8,1.2]);
plt.xlabel(r'$r_p$',fontsize = 15)
plt.ylabel(r'$w_{SHAM}(r_p)/w_{HOD+AB}(r_p)$',fontsize = 15)
plt.title(r'$w(r_p)$ ratio for HOD+AB and NFW-ized SHAM', fontsize = 20)
```
# Genotype Risk Analysis Program
This program plots the area under the filtered blood slide prevalence curve for specific genotype(s), to show their risk.
## Parser Function
This is the parser function, taken directly from the `BoniLabMDR` file.
```
import pandas as pd
# Source: https://thispointer.com/python-how-to-insert-lines-at-the-top-of-a-file/
'''
Default columns that are selected are:
#0 - Current Time
#2 - Year
#8 - Population
#12 - Blood Slide Prevalence
#22-149 - Parasite Count by Genotypes
'''
def parse(file_name, interested_col = [0,2,8,12] + list(range(22,150))):
headline = "current_time\tsclock_to_time\tyear\tmonth\tday\tseasonal_fac\ttreated_p_5-\ttreated_p_5+\tpopulation\tsep\tEIR_loc_yr\tsep\tblood_slide_prev\tbsp_2_10\tbsp_0_5\tsep\tmonthly_new_inf\tsep\tmon_treatment\tsep\tmon_clinical_ep\tsep\tKNY--C1x\tKNY--C1X\tKNY--C2x\tKNY--C2X\tKNY--Y1x\tKNY--Y1X\tKNY--Y2x\tKNY--Y2X\tKYY--C1x\tKYY--C1X\tKYY--C2x\tKYY--C2X\tKYY--Y1x\tKYY--Y1X\tKYY--Y2x\tKYY--Y2X\tKNF--C1x\tKNF--C1X\tKNF--C2x\tKNF--C2X\tKNF--Y1x\tKNF--Y1X\tKNF--Y2x\tKNF--Y2X\tKYF--C1x\tKYF--C1X\tKYF--C2x\tKYF--C2X\tKYF--Y1x\tKYF--Y1X\tKYF--Y2x\tKYF--Y2X\tKNYNYC1x\tKNYNYC1X\tKNYNYC2x\tKNYNYC2X\tKNYNYY1x\tKNYNYY1X\tKNYNYY2x\tKNYNYY2X\tKYYYYC1x\tKYYYYC1X\tKYYYYC2x\tKYYYYC2X\tKYYYYY1x\tKYYYYY1X\tKYYYYY2x\tKYYYYY2X\tKNFNFC1x\tKNFNFC1X\tKNFNFC2x\tKNFNFC2X\tKNFNFY1x\tKNFNFY1X\tKNFNFY2x\tKNFNFY2X\tKYFYFC1x\tKYFYFC1X\tKYFYFC2x\tKYFYFC2X\tKYFYFY1x\tKYFYFY1X\tKYFYFY2x\tKYFYFY2X\tTNY--C1x\tTNY--C1X\tTNY--C2x\tTNY--C2X\tTNY--Y1x\tTNY--Y1X\tTNY--Y2x\tTNY--Y2X\tTYY--C1x\tTYY--C1X\tTYY--C2x\tTYY--C2X\tTYY--Y1x\tTYY--Y1X\tTYY--Y2x\tTYY--Y2X\tTNF--C1x\tTNF--C1X\tTNF--C2x\tTNF--C2X\tTNF--Y1x\tTNF--Y1X\tTNF--Y2x\tTNF--Y2X\tTYF--C1x\tTYF--C1X\tTYF--C2x\tTYF--C2X\tTYF--Y1x\tTYF--Y1X\tTYF--Y2x\tTYF--Y2X\tTNYNYC1x\tTNYNYC1X\tTNYNYC2x\tTNYNYC2X\tTNYNYY1x\tTNYNYY1X\tTNYNYY2x\tTNYNYY2X\tTYYYYC1x\tTYYYYC1X\tTYYYYC2x\tTYYYYC2X\tTYYYYY1x\tTYYYYY1X\tTYYYYY2x\tTYYYYY2X\tTNFNFC1x\tTNFNFC1X\tTNFNFC2x\tTNFNFC2X\tTNFNFY1x\tTNFNFY1X\tTNFNFY2x\tTNFNFY2X\tTYFYFC1x\tTYFYFC1X\tTYFYFC2x\tTYFYFC2X\tTYFYFY1x\tTYFYFY1X\tTYFYFY2x\tTYFYFY2X\tsep\tKNY--C1x\tKNY--C1X\tKNY--C2x\tKNY--C2X\tKNY--Y1x\tKNY--Y1X\tKNY--Y2x\tKNY--Y2X\tKYY--C1x\tKYY--C1X\tKYY--C2x\tKYY--C2X\tKYY--Y1x\tKYY--Y1X\tKYY--Y2x\tKYY--Y2X\tKNF--C1x\tKNF--C1X\tKNF--C2x\tKNF--C2X\tKNF--Y1x\tKNF--Y1X\tKNF--Y2x\tKNF--Y2X\tKYF--C1x\tKYF--C1X\tKYF--C2x\tKYF--C2X\tKYF--Y1x\tKYF--Y1X\tKYF--Y2x\tKYF--Y2X\tKNYNYC1x\tKNYNYC1X\tKNYNYC2x\tKNYNYC2X\tKNYNYY1x\tKNYNYY1X\tKNYNYY2x\tKNYNYY2X\tKYYYYC1x\tKYYYYC1X\tKYYYYC2x\tKYYYYC2X\tKYYYYY1x\tKYYYYY1X\tKYYYYY2x\tKYYYYY2X\tKNFNFC1x\tKNFNFC1X\tKNFNFC2x\tKNFNFC2X\tKNFNFY1x\tKNFNFY1X\tKNFNFY2x\tKNFNFY2X\tKYFYFC1x\tKYFYFC1X\tKYFYFC2x\tKYFYFC2X\tKYFYFY1x\tKYFYFY1X\tKYFYFY2x\tKYFYFY2X\tTNY--C1x\tTNY--C1X\tTNY--C2x\tTNY--C2X\tTNY--Y1x\tTNY--Y1X\tTNY--Y2x\tTNY--Y2X\tTYY--C1x\tTYY--C1X\tTYY--C2x\tTYY--C2X\tTYY--Y1x\tTYY--Y1X\tTYY--Y2x\tTYY--Y2X\tTNF--C1x\tTNF--C1X\tTNF--C2x\tTNF--C2X\tTNF--Y1x\tTNF--Y1X\tTNF--Y2x\tTNF--Y2X\tTYF--C1x\tTYF--C1X\tTYF--C2x\tTYF--C2X\tTYF--Y1x\tTYF--Y1X\tTYF--Y2x\tTYF--Y2X\tTNYNYC1x\tTNYNYC1X\tTNYNYC2x\tTNYNYC2X\tTNYNYY1x\tTNYNYY1X\tTNYNYY2x\tTNYNYY2X\tTYYYYC1x\tTYYYYC1X\tTYYYYC2x\tTYYYYC2X\tTYYYYY1x\tTYYYYY1X\tTYYYYY2x\tTYYYYY2X\tTNFNFC1x\tTNFNFC1X\tTNFNFC2x\tTNFNFC2X\tTNFNFY1x\tTNFNFY1X\tTNFNFY2x\tTNFNFY2X\tTYFYFC1x\tTYFYFC1X\tTYFYFC2x\tTYFYFC2X\tTYFYFY1x\tTYFYFY1X\tTYFYFY2x\tTYFYFY2X\tsep\t\t"
# Start - Cited Codes
# define name of temporary dummy file
dummy_file = file_name[:-4] + '_parsed.txt'
# open original file in read mode and dummy file in write mode
with open(file_name, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
# Write given line to the dummy file
write_obj.write(headline + '\n')
# Read lines from original file one by one and append them to the dummy file
for line in read_obj:
write_obj.write(line)
# End - Cited Codes
df = pd.read_csv(dummy_file, sep='\t')
# Check if file is single-location'd
if len(df.columns) == 282:
# Return tailored df
df = df.iloc[:,interested_col]
return df
# Error if file not single-location'd
return None
```
## Main Function - Start Here
This part defines some variables first, then plots the selected blood slide prevalence against time, with the selected area under the curve shaded.
```
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# user defined variables
filepath='2.txt'
startyear=17
endyear = 20
title=''
xlabel='Year'
ylabel='Population (filtered)'
burnin_year = 10
# parse the output file
df = parse(file_name=filepath)
# estimate bsp (people infected) by specific genotype(s)
df['bsp_portion'] = df['blood_slide_prev'] * df.filter(regex='TYY..Y2.', axis=1).sum(axis=1)
df['people'] = df['population'] * df['bsp_portion'] / 100
startyear += burnin_year
endyear += burnin_year
fig = plt.figure()
fig.patch.set_facecolor('white')
plt.rcParams['figure.figsize'] = [15, 8]
ax = fig.add_subplot(111)
scale_x = 365
ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format((x-burnin_year*365)/scale_x))
ax.plot(df['current_time'], df['people'])
ax.fill_between(df['current_time'], df['people'],
where=((startyear*365<df['current_time']) & (df['current_time']<endyear*365)),
alpha=0.25)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5*scale_x)) # 5-year mark
ax.xaxis.set_major_formatter(ticks_x)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.grid()
```
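An aside on the genotype selection above: `DataFrame.filter(regex=...)` keeps only the columns whose names match the pattern, and `.sum(axis=1)` adds them row by row. A minimal sketch (the toy values are made up; the column names are genotypes taken from the headline string above):
```
import pandas as pd

# Two of these columns match 'TYY..Y2.' ('.' matches any single character), one does not
toy = pd.DataFrame({'TYY--Y2x': [0.1, 0.2],
                    'TYY--Y2X': [0.3, 0.1],
                    'KNY--C1x': [0.5, 0.4]})
print(toy.filter(regex='TYY..Y2.', axis=1).sum(axis=1))  # row sums: 0.4 and 0.3
```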
Then this part uses `np.trapz` to calculate the area of the selected (shaded) region.
```
import numpy as np
df2 = df.copy()
# select target years and columns
# people
yaxis = df2.loc[(startyear*365<df2['current_time']) & (df2['current_time']<endyear*365)]['people'].values
xaxis = df2.loc[(startyear*365<df2['current_time']) & (df2['current_time']<endyear*365)]['current_time'].values
area = np.trapz(yaxis, x=xaxis)
area
```
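For intuition, `np.trapz(y, x=x)` integrates `y` over `x` with the trapezoidal rule. A quick sanity check on a toy series where the exact answer is known:
```
import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = 2.0 * x                   # area under y = 2x from 0 to 3 is 9
print(np.trapz(y, x=x))       # 9.0, exact here because the integrand is linear
```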
# Convolutional Layer
In this notebook, we visualize four filtered outputs (a.k.a. activation maps) of a convolutional layer.
In this example, *we* are defining four filters that are applied to an input image by initializing the **weights** of a convolutional layer, but a trained CNN will learn the values of these weights.
<img src='https://github.com/karanchhabra99/deep-learning-v2-pytorch/blob/master/convolutional-neural-networks/conv-visualization/notebook_ims/conv_layer.gif?raw=1' height=60% width=60% />
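For intuition, "applying a filter" means sliding it across the image and taking a weighted sum of the pixels under it at each position (a cross-correlation). Below is a minimal NumPy sketch of that idea, separate from the PyTorch layer defined later; the toy image is made up, and the kernel reuses the same pattern as `filter_1` below:
```
import numpy as np

def apply_filter(image, kernel):
    # Naive "valid"-mode cross-correlation of a 2D kernel over a 2D image
    kh, kw = kernel.shape
    out = np.zeros((image.shape[0] - kh + 1, image.shape[1] - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.sum(image[i:i+kh, j:j+kw] * kernel)
    return out

toy_img = np.zeros((6, 6)); toy_img[:, 3:] = 1.0   # dark left half, bright right half
edge_kernel = np.array([[-1, -1, 1, 1]] * 4)       # same pattern as filter_1 below
print(apply_filter(toy_img, edge_kernel))          # strongest response along the vertical edge
```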
### Import the image
```
from google.colab import drive
drive.mount('/content/gdrive')
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = '/content/gdrive/My Drive/Colab Notebooks/Cat_Dog_data/train/Dog/873.jpg'
# load color image
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32")/255
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
```
### Define and visualize the filters
```
import numpy as np
## TODO: Feel free to modify the numbers here, to try out another filter!
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)
# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# For an example, print out the values of filter 1
print('Filter 1: \n', filter_1)
# visualize all four filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
width, height = filters[i].shape
for x in range(width):
for y in range(height):
ax.annotate(str(filters[i][x][y]), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if filters[i][x][y]<0 else 'black')
```
## Define a convolutional layer
The various layers that make up any neural network are documented [here](http://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll start by defining a:
* Convolutional layer
Initialize a single convolutional layer so that it contains all your created filters. Note that you are not training this network; you are initializing the weights in a convolutional layer so that you can visualize what happens after a forward pass through this network!
#### `__init__` and `forward`
To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the forward behavior of a network that applies those initialized layers to an input (`x`) in the function `forward`. In PyTorch we convert all inputs into the Tensor datatype, which is similar to a list data type in Python.
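A quick note on shapes: `nn.Conv2d` expects a 4-D input of shape (batch, channels, height, width), which is why the grayscale image is later wrapped with two `unsqueeze` calls. A small illustration on an arbitrary dummy array:
```
import numpy as np
import torch

dummy = np.random.rand(5, 7).astype("float32")          # a toy "grayscale image"
x = torch.from_numpy(dummy).unsqueeze(0).unsqueeze(1)   # add batch and channel dimensions
print(x.shape)                                          # torch.Size([1, 1, 5, 7])
```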
Below, I define the structure of a class called `Net` that has a convolutional layer that can contain four 4x4 grayscale filters.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
# define a neural network with a single convolutional layer with four filters
class Net(nn.Module):
def __init__(self, weight):
super(Net, self).__init__()
# initializes the weights of the convolutional layer to be the weights of the 4 defined filters
k_height, k_width = weight.shape[2:]
# assumes there are 4 grayscale filters
self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
self.conv.weight = torch.nn.Parameter(weight)
def forward(self, x):
# calculates the output of a convolutional layer
# pre- and post-activation
conv_x = self.conv(x)
activated_x = F.relu(conv_x)
# returns both layers
return conv_x, activated_x
# instantiate the model and set the weights
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)
# print out the layer in the network
print(model)
```
### Visualize the output of each filter
First, we'll define a helper function, `viz_layer`, that takes in a specific layer and a number of filters (optional argument) and displays the output of that layer once an image has been passed through.
```
# helper function for visualizing the output of a given layer
# default number of filters is 4
def viz_layer(layer, n_filters= 4):
fig = plt.figure(figsize=(20, 20))
for i in range(n_filters):
ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[])
# grab layer outputs
ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')
ax.set_title('Output %s' % str(i+1))
```
Let's look at the output of a convolutional layer, before and after a ReLU activation function is applied.
```
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
# get the convolutional layer (pre and post activation)
conv_layer, activated_layer = model(gray_img_tensor)
# visualize the output of a conv layer
viz_layer(conv_layer)
```
#### ReLU activation
In this model, we've used an activation function that scales the output of the convolutional layer. We've chosen a ReLU function to do this, and this function simply turns all negative pixel values into 0s (black). See the equation pictured below for input pixel values, `x`.
<img src='https://github.com/karanchhabra99/deep-learning-v2-pytorch/blob/master/convolutional-neural-networks/conv-visualization/notebook_ims/relu_ex.png?raw=1' height=50% width=50% />
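Equivalently, ReLU(x) = max(0, x). A minimal check of the function itself, separate from the layer outputs visualized below:
```
import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 0.0, 1.5])
print(F.relu(x))               # tensor([0.0000, 0.0000, 0.0000, 1.5000])
print(torch.clamp(x, min=0))   # same result: negatives are zeroed out
```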
```
# after a ReLu is applied
# visualize the output of an activated conv layer
viz_layer(activated_layer)
```
```
trait RNG {
def nextInt: (Int, RNG)
}
case class SimpleRNG(seed: Long) extends RNG {
def nextInt: (Int, RNG) = {
val newSeed = (seed * 0x5DEECE66DL + 0xBL) & 0xFFFFFFFFFFFFL
val nextRNG = SimpleRNG(newSeed)
val n = (newSeed >>> 16).toInt
(n, nextRNG)
}
}
val rng = SimpleRNG(42)
val (n1, rng2) = rng.nextInt
```
## 6.1
```
def nonNegativeInt(rng: RNG):(Int,RNG) = {
val(n1,rng2) = rng.nextInt
(if(n1 <0) -(n1+1) else n1,rng2)
}
nonNegativeInt(SimpleRNG(4))
```
## 6.2
```
if(n1 < 0) -(n1+1) else n1
def double(rng:RNG): (Double, RNG) = {
val (n1, rng2) = nonNegativeInt(rng)
(n1.toDouble/Int.MaxValue.toDouble,rng2)
}
double(SimpleRNG(10))
```
## 6.3
```
def intDouble(rng:RNG): ((Int,Double), RNG) = {
val (n2, rng2) = nonNegativeInt(rng)
val (n3, rng3) = double(rng2)
((n2,n3), rng3)
}
def doubleInt(rng:RNG): ((Double,Int), RNG) = {
val (n2, rng2) = double(rng)
val (n3, rng3) = nonNegativeInt(rng2)
((n2,n3), rng3)
}
def double3(rng:RNG): ((Double,Double,Double),RNG) = {
val (n2, rng2) = double(rng)
val (n3, rng3) = double(rng2)
val (n4, rng4) = double(rng3)
((n2,n3,n4),rng4)
}
```
## 6.4
```
def ints(count: Int)(rng:RNG): (List[Int],RNG) = {
if (count <= 0)
(List(), rng)
else{
val (x, r1) = nonNegativeInt(rng)
val (xs, r2) = ints(count - 1)(r1)
(x :: xs, r2)
}
}
ints(5)(SimpleRNG(10))
type Rand[+A] = RNG => (A,RNG)
val int2:Rand[Int] = rng => rng.nextInt
val int: Rand[Int] = _.nextInt
def unit[A](a: A): Rand[A] = rng => (a, rng)
def map[A, B](s: Rand[A])(f: A => B): Rand[B] = {
rng => {
val (a, rng2) = s(rng)
(f(a), rng2)
}
}
def map2[A,B](s: RNG => (A,RNG))(f: A=>B): RNG => (B,RNG) = {
rng => {
val (a,rng2) = s(rng)
(f(a),rng2)
}
}
```
When implementing `map`, I wondered where `rng` suddenly came from, <br>
but seen as a whole, <br>
the body is `rng => (f(a), rng2)`, <br>
which is exactly the shape of `Rand[B]`, i.e. `RNG => (B, RNG)`.
```
def nonNegativeEven:Rand[Int] = {
map(nonNegativeInt)(i => i - i % 2)
}
```
Normally you would write `def foo(param): returnType`,
but since `Rand` itself is registered as a type alias for `RNG => (A, RNG)`, writing it as above
means the same thing as `nonNegativeEven(rng: RNG): (A, RNG)`.
```
nonNegativeEven(SimpleRNG(10))
def double:Rand[Double] = {
map(nonNegativeInt)(i => i/Int.MaxValue.toDouble)
}
double(SimpleRNG(10))
double(SimpleRNG(10))
```
## 6.6
```
def map2[A,B,C](ra: Rand[A], rb: Rand[B])(f: (A,B) => C) : Rand[C] = {
rng1 => {
val (a,rng2) = ra(rng1)
val (b,rng3) = rb(rng2)
(f(a,b),rng3)
}
}
def both[A,B](ra: Rand[A],rb: Rand[B]): Rand[(A,B)] = map2(ra,rb)((_,_))
val randIntDouble: Rand[(Int,Double)] = {
both(nonNegativeInt,double)
}
randIntDouble(SimpleRNG(11))
val x = List(1,2,3)
List.fill(x.size)(x.map(_*2).head)
```
## 6.7
Hard, tricky..
Hint: remember that a List is also ultimately processed two elements at a time
=> e.g. for 1 to 10, f(1, f(2, f(3, 4))) ...
## 6.8
The continuation `g` has type `A => (rng => (b, rng1))`, i.e. `A => Rand[B]`.
```
def flatMap[A,B](s:Rand[A])(g:A => Rand[B]): Rand[B] = {
rng => {
val (a,rng1) = s(rng)
val (b,rng2) = g(a)(rng1)
(b,rng2)
}
}
def nonNegativeLessThan(n: Int): Rand[Int] = {
flatMap(nonNegativeInt) { a =>
val mod = a % n
if (a + (n - 1) - mod >= 0) unit(mod)
else nonNegativeLessThan(n)
}
}
```
```scala
def map[A, B](s: Rand[A])(f: A => B): Rand[B] = {
rng => {
val (a, rng2) = s(rng)
(f(a), rng2)
}
}
def map2[A,B,C](ra: Rand[A], rb: Rand[B])(f: (A,B) => C) : Rand[C] = {
rng1 => {
val (a,rng2) = ra(rng1)
val (b,rng3) = rb(rng2)
(f(a,b),rng3)
}
}
```
## 6.9
```
def map[A,B](s:Rand[A])(f:A => B):Rand[B] = {
flatMap(s)(a => unit(f(a)))
}
def map2[A,B,C](ra:Rand[A], rb: Rand[B])(f: (A,B) => C): Rand[C] = {
flatMap(ra)(a => {
rng => {
val(b,rng2) = rb(rng)
(f(a,b),rng2)
}
})
}
def map2[A,B,C](ra:Rand[A], rb: Rand[B])(f: (A,B) => C): Rand[C] = {
flatMap(ra)(a => {
map(rb)(b => f(a,b))
})
}
type State[S,+A] = S => (A,S)
case class State[S,+A](run: S => (A,S)) {
def test():Unit = {
println("hello World")
}
def flatMap[B](g:A => State[S,B]): State[S,B] = State({
(s:S) => {
val (a,rng1) = run(s)
val (b,rng2) = g(a).run(rng1)
(b,rng2)}
})
def map[B](f:A => B):State[S,B] = flatMap(a => State(s => (f(a),s)))
def map2[B,C](rb:State[S,B])(f: (A,B) => C): State[S,C] = {
flatMap(a => {
rb.map(b => f(a,b))
})
}
}
def get[S]: State[S, Int] = State(s => (1,s))
```
<a href="https://colab.research.google.com/github/unburied/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/LS_DS_111_A_First_Look_at_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Lambda School Data Science - A First Look at Data
## Lecture - let's explore Python DS libraries and examples!
The Python Data Science ecosystem is huge. You've seen some of the big pieces - pandas, scikit-learn, matplotlib. What parts do you want to see more of?
I'm excited to learn about Theano and all of its high-level children. Getting into neural networks feels like it would be the best end goal, especially with Tesla's recent announcement about its advancements using advanced neural networks. These are exciting times for this spectrum of data science and I am eager to learn more about it.
## Assignment - now it's your turn
Pick at least one Python DS library, and using documentation/examples reproduce in this notebook something cool. It's OK if you don't fully understand it or get it 100% working, but do put in effort and look things up.
I hope this is ok. I actually worked on this a little after the precourse assignment was done. I feel like it demonstrates something cool and practical(ish) at the same time.
https://colab.research.google.com/drive/1WMdcl9USQjuDE5n0JKwR0j9l-uqd7zXc
### Assignment questions
After you've worked on some code, answer the following questions in this text block:
1. Describe in a paragraph of text what you did and why, as if you were writing an email to somebody interested but nontechnical.
In the project I linked above, I was able to make some loose predictions about the company that I currently work for. We receive monthly profit reports from the CEO. I was able to gather almost 2 years' worth of data and feed it to a linear regression model. Some of the cool libraries I was able to use and learn about were google.colab, to import files from my computer, and datetime, to convert the date data to usable variables. This was on top of the scikit-learn library, which houses the linear regression model I used to make the predictions. I wanted to see approximately when CD sales were going to become unprofitable, and when our growth product, books, would intersect and surpass them as the product keeping the company afloat. The linear regression model helps with this goal, as it takes the values I submitted and creates a line of best fit from that data. You can then trace the line past the data you have to make predictions. At the current rate, CD sales and book sales intersect in October next year, and books will steadily climb into a profitable product while CD sales continue to decline significantly.
2. What was the most challenging part of what you did?
The challenging parts were importing the CSV file I created in Excel from my computer. It turns out Colab can be a little tricky with that, and apparently a library was created to make this easier. Also, converting the dates to usable data was not trivial. I still need to gain a better understanding of what datetime.toordinal actually does.
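For what it's worth, `datetime.toordinal()` simply returns the proleptic Gregorian ordinal of a date, i.e. the number of days elapsed since January 1 of year 1, which is why it is a convenient way to turn dates into a single numeric feature for a regression. A quick illustration (the dates here are arbitrary examples):
```python
from datetime import datetime

d = datetime(2019, 5, 1)
print(d.toordinal())                  # 737180 -> days since 0001-01-01
print(datetime.fromordinal(737180))   # 2019-05-01 00:00:00, converting back
```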
3. What was the most interesting thing you learned?
Besides the programming aspects of overcoming the above challenges, I learned firsthand that the linear regression model is very limited. For instance, the prediction is that CD sales bottom out in 5 years. I find that unrealistic, and actually expect they will plateau at some point and hover well above the zero mark for at least the next decade.
4. What area would you like to explore with more time?
I would like to discover other models that could allow the predicted data to plateau, or in a sense, output logarithmic regression as opposed to strictly linear. Also, researching what's under the hood of datetime and toordinal.
## Stretch goals and resources
Following are *optional* things for you to take a look at. Focus on the above assignment first, and make sure to commit and push your changes to GitHub (and since this is the first assignment of the sprint, open a PR as well).
- [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/)
- [scikit-learn documentation](http://scikit-learn.org/stable/documentation.html)
- [matplotlib documentation](https://matplotlib.org/contents.html)
- [Awesome Data Science](https://github.com/bulutyazilim/awesome-datascience) - a list of many types of DS resources
Stretch goals:
- Find and read blogs, walkthroughs, and other examples of people working through cool things with data science - and share with your classmates!
- Write a blog post (Medium is a popular place to publish) introducing yourself as somebody learning data science, and talking about what you've learned already and what you're excited to learn more about.
Awesome walkthrough on association analysis:
https://pbpython.com/market-basket-analysis.html
My first blog post ever:
https://medium.com/@davi86m/conflicted-science-f767e94a1217
# The Image Classification Dataset
One of the most widely used datasets for image classification is the MNIST dataset :cite:`LeCun.Bottou.Bengio.ea.1998`.
It contains binary images of handwritten digits.
However, it is so simple that it is unsuitable for distinguishing between stronger models and weaker ones.
We will therefore focus our discussion in the coming sections
on the qualitatively similar, but comparatively complex Fashion-MNIST
dataset :cite:`Xiao.Rasul.Vollgraf.2017`, which was released in 2017.
```
%matplotlib inline
import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l
d2l.use_svg_display()
```
## Reading the Dataset
We can [**download and read the Fashion-MNIST dataset into memory via the built-in functions in the framework.**]
```
# `ToTensor` converts the image data from PIL type to 32-bit floating point
# tensors. It divides all numbers by 255 so that all pixel values are between
# 0 and 1
trans = transforms.ToTensor()
mnist_train = torchvision.datasets.FashionMNIST(
root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
root="../data", train=False, transform=trans, download=True)
```
Fashion-MNIST consists of images from 10 categories, each represented
by 6000 images in the training dataset and by 1000 in the test dataset.
A test set is used for evaluating model performance and not for training.
Consequently the training set and the test set
contain 60000 and 10000 images, respectively.
```
len(mnist_train), len(mnist_test)
```
The height and width of each input image are both 28 pixels.
Note that the dataset consists of grayscale images, whose number of channels is 1.
```
mnist_train[0][0].shape
```
The images in Fashion-MNIST are associated with the following categories:
t-shirt, trousers, pullover, dress, coat, sandal, shirt, sneaker, bag, and ankle boot.
The following function converts between numeric label indices and their names in text.
```
def get_fashion_mnist_labels(labels): #@save
"""Return text labels for the Fashion-MNIST dataset."""
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
return [text_labels[int(i)] for i in labels]
```
We can now create a function to visualize these examples.
```
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): #@save
"""Plot a list of images."""
figsize = (num_cols * scale, num_rows * scale)
_, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
axes = axes.flatten()
for i, (ax, img) in enumerate(zip(axes, imgs)):
if torch.is_tensor(img):
# Tensor Image
ax.imshow(img.numpy(), cmap="gray")
else:
# PIL Image
ax.imshow(img)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if titles:
ax.set_title(titles[i])
return axes
```
Here are [**the images and their corresponding labels**] (in text)
for the first few examples in the training dataset.
```
X, y = next(iter(data.DataLoader(mnist_train, batch_size=16)))
show_images(X.reshape(16, 28, 28), 2, 8, titles=get_fashion_mnist_labels(y));
```
## Reading a Minibatch
To make our life easier when reading from the training and test sets,
we use the built-in data iterator rather than creating one from scratch.
Recall that at each iteration, a data iterator
[**reads a minibatch of data with size `batch_size` each time.**]
We also randomly shuffle the examples for the training data iterator.
```
batch_size = 256
def get_dataloader_workers(): #@save
"""Use 4 processes to read the data."""
return 4
train_iter = data.DataLoader(mnist_train, batch_size, shuffle=True,
num_workers=get_dataloader_workers())
```
Let us look at the time it takes to read the training data.
```
timer = d2l.Timer()
for X, y in train_iter:
continue
f'{timer.stop():.2f} sec'
```
## Putting All Things Together
Now we define [**the `load_data_fashion_mnist` function
that obtains and reads the Fashion-MNIST dataset.**]
It returns the data iterators for both the training set and validation set.
In addition, it accepts an optional argument to resize images to another shape.
```
def load_data_fashion_mnist(batch_size, resize=None): #@save
"""Download the Fashion-MNIST dataset and then load it into memory."""
trans = [transforms.ToTensor()]
if resize:
trans.insert(0, transforms.Resize(resize))
trans = transforms.Compose(trans)
mnist_train = torchvision.datasets.FashionMNIST(
root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
root="../data", train=False, transform=trans, download=True)
return (data.DataLoader(mnist_train, batch_size, shuffle=True,
num_workers=get_dataloader_workers()),
data.DataLoader(mnist_test, batch_size, shuffle=False,
num_workers=get_dataloader_workers()))
```
Below we test the image resizing feature of the `load_data_fashion_mnist` function
by specifying the `resize` argument.
```
train_iter, test_iter = load_data_fashion_mnist(32, resize=64)
for X, y in train_iter:
print(X.shape, X.dtype, y.shape, y.dtype)
break
```
## Abstract
## Extensive Feature Extraction for Stock Prediction
Forecasting future stock prices is a very hard problem to solve. An efficient predictive model that correctly forecasts future trends is crucial for hedge funds and algorithmic trading, especially algorithmic trading, where errors must be minimal because millions of dollars are at stake on each trade. Portfolio optimization strategies then need to be backtested on historical data after predicting future stock prices.
Stock prices depend upon many factors such as overall market behaviour, other stocks, index funds, global news, etc. We will try to capture many of these in our features.
In this project, we will look at this problem in many ways to predict the closing prices -
- 1. We will start by extracting features and seeing which perform well for predicting each stock. We will extract the various technical indicators described below.
- 2. Then we will check correlation and perform feature selection using RFECV (recursive feature elimination with cross-validation) with a random forest to select the best features (see the sketch after this list).
- 3. Then we will wrap this feature extraction in a pipeline and convert the entire code into a pipeline so anyone can easily run it and get the extracted feature data for each stock.
- 4. Next we will use time-lagged data as features and create features based on previous days' closing prices and previous days' index fund prices.
- 5. Then we will train several different algorithms - Linear Regression, Random Forest, XGBoost, LSTM and GRU - for forecasting the next day's price, and test and evaluate them on historical stock data.
- 6. We will also create a pipeline for this to train many stocks with many algorithms in just one go.
- 7. We will evaluate on around 2 years of data, which is a long period, so if our models stay close to the actual prices overall, we are doing great. The metrics we will use are MAE, MAPE, R2 and RMSE. The final metric we will use to compare models is MAE (Mean Absolute Error).
- 8. We will also check the importance of the various features using Random Forest and XGBoost.
- 9. We will pick the best algorithm from these and tune the number of lagged days to use for forecasting for each input type, such as the stock price and other index funds' previous prices.
- 10. For LSTM, we will use lagged previous-day prices over a lookback period of 30-60 days.
- 11. Then we will create a portfolio of these stocks and build a strategy using the Sharpe ratio to optimize the portfolio and allocate a fund's money effectively.
- 12. As future scope, we will also try to create a dashboard to show a comparison of the two portfolios before and after optimization.
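As a rough sketch of the feature-selection step (step 2 above), something along these lines could be used with scikit-learn; `X` and `y` are hypothetical placeholders for the feature matrix and the next-day closing price that are built later in this notebook:
```python
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import RFECV
from sklearn.model_selection import TimeSeriesSplit

def select_features(X, y):
    """Recursive feature elimination with CV, scored by (negative) MAE."""
    selector = RFECV(
        estimator=RandomForestRegressor(n_estimators=100, random_state=42),
        step=1,                          # drop one feature per iteration
        cv=TimeSeriesSplit(n_splits=5),  # respect time ordering in the folds
        scoring='neg_mean_absolute_error',
    )
    selector.fit(X, y)
    return X.columns[selector.support_]  # names of the retained features
```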
## Feature Extraction
We will create features using various technical indicators and lagged prices, as described below. All of these features have something to offer for forecasting: some tell us about the trend, some give a signal when the stock is overbought or oversold, and some portray the strength of the price trend.
#### Bollinger Bands
A Bollinger Band® is a technical analysis tool defined by a set of lines plotted two standard deviations (positively and negatively) away from a simple moving average (SMA) of the stock's price.
Bollinger Bands allow traders to monitor and take advantage of shifts in price volatilities
Main Components of a Bollinger Bands
- Upper Band: The upper band is simply two standard deviations above the moving average of a stock’s price.
- Middle Band: The middle band is simply the moving average of the stock’s price.
- Lower Band: Two standard deviations below the moving average is the lower band.
#### Simple Moving Average (SMA)
A simple moving average (SMA) calculates the average of a selected range of prices, usually closing prices, by the number of periods in that range.
SMA is basically the average price of the given time period, with equal weighting given to the price of each period.
Formula: SMA = ( Sum ( Price, n ) ) / n
#### Exponential moving average (EMA)
An exponential moving average (EMA) is a type of moving average (MA) that places a greater weight and significance on the most recent data points. The exponential moving average is also referred to as the exponentially weighted moving average. An exponentially weighted moving average reacts more significantly to recent price changes than a simple moving average (SMA), which applies an equal weight to all observations in the period.
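As a toy illustration of the difference in weighting, pandas can compute both directly (the prices below are made up):
```python
import pandas as pd

close = pd.Series([100, 102, 101, 105, 107, 110, 108, 111, 115, 114], dtype=float)
sma_5 = close.rolling(window=5).mean()            # equal weight on the last 5 prices
ema_5 = close.ewm(span=5, adjust=False).mean()    # more weight on the most recent prices
print(pd.DataFrame({'Close': close, 'SMA5': sma_5, 'EMA5': ema_5}).round(2))
```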
#### Average true range (ATR)
The average true range (ATR) is a technical analysis indicator that measures market volatility by decomposing the entire range of an asset price for that period.
ATR measures market volatility. It is typically derived from the 14-day moving average of a series of true range indicators.
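Later in this notebook `talib.ATR` is used; the sketch below only approximates the same idea with a plain rolling mean of the true range (talib applies Wilder smoothing, so the values will differ slightly). It assumes a hypothetical DataFrame with High/Low/Close columns:
```python
import pandas as pd

def average_true_range(df: pd.DataFrame, period: int = 14) -> pd.Series:
    """Approximate ATR: rolling mean of the true range."""
    prev_close = df['Close'].shift(1)
    true_range = pd.concat([
        df['High'] - df['Low'],             # today's high-low range
        (df['High'] - prev_close).abs(),    # gap up from yesterday's close
        (df['Low'] - prev_close).abs(),     # gap down from yesterday's close
    ], axis=1).max(axis=1)
    return true_range.rolling(window=period).mean()
```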
#### Average Directional Index (ADX)
ADX stands for Average Directional Movement Index and can be used to help measure the overall strength of a trend. The ADX indicator is an average of expanding price range values.
ADX indicates the strength of a trend in price time series. It is a combination of the negative and positive directional movements indicators computed over a period of n past days corresponding to the input window length (typically 14 days)
#### Commodity Channel Index (CCI)
Commodity Channel Index (CCI) is a momentum-based oscillator used to help determine when an investment vehicle is reaching a condition of being overbought or oversold. It is also used to assess price trend direction and strength.
CCI = (typical price − MA) / (0.015 * mean deviation)
typical price = (high + low + close) / 3
p = number of periods (20 is commonly used)
MA = moving average of the typical price = (sum of the typical prices over p periods) / p
mean deviation = (sum of |typical price − MA| over p periods) / p
#### Rate-of-change (ROC)
ROC measures the percentage change in price between the current price and the price a certain number of periods ago.
#### Relative Strength Index (RSI)
RSI compares the size of recent gains to recent losses, it is intended to reveal the strength or weakness of a price trend from a range of closing prices over a time period.
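The notebook below relies on `talib.RSI`; as a simplified sketch of what the indicator does (using plain rolling averages of gains and losses rather than Wilder's smoothing):
```python
import pandas as pd

def relative_strength_index(close: pd.Series, period: int = 14) -> pd.Series:
    """Simplified RSI in the 0-100 range."""
    delta = close.diff()
    avg_gain = delta.clip(lower=0).rolling(window=period).mean()
    avg_loss = (-delta.clip(upper=0)).rolling(window=period).mean()
    rs = avg_gain / avg_loss
    return 100 - 100 / (1 + rs)
```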
#### William’s %R
Williams %R, also known as the Williams Percent Range, is a type of momentum indicator that moves between 0 and -100 and measures overbought and oversold levels. The Williams %R may be used to find entry and exit points in the market.
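Again, `talib.WILLR` is used later; the underlying formula is roughly the following (assuming a hypothetical DataFrame with High/Low/Close columns):
```python
import pandas as pd

def williams_r(df: pd.DataFrame, period: int = 14) -> pd.Series:
    """Williams %R: position of the close within the recent high-low range, scaled to [-100, 0]."""
    highest_high = df['High'].rolling(window=period).max()
    lowest_low = df['Low'].rolling(window=period).min()
    return (highest_high - df['Close']) / (highest_high - lowest_low) * -100
```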
#### Stochastic %K
A stochastic oscillator is a momentum indicator comparing a particular closing price of a security to a range of its prices over a certain period of time.
It compares a close price and its price interval during a period of n past days and gives a signal meaning that a stock is oversold or overbought.
This is a step-by-step notebook for extracting features for stock prediction. We will be using the Alpha Vantage API to extract the stock prices for the previous 15 years.
```
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from alpha_vantage.timeseries import TimeSeries
%matplotlib inline
os.chdir(r'N:\STOCK ADVISOR BOT')
ALPHA_VANTAGE_API_KEY = 'XAGC5LBB1SI9RDLW'
ts = TimeSeries(key= ALPHA_VANTAGE_API_KEY, output_format='pandas')
df_NFLX, NFLX_info = ts.get_daily('NFLX', outputsize='full')
df_NFLX
NFLX_info
df_NFLX = df_NFLX.rename(columns={'1. open' : 'Open', '2. high': 'High', '3. low':'Low', '4. close': 'Close', '5. volume': 'Volume' })
df_NFLX = df_NFLX.rename_axis(['Date'])
df_NFLX
#sorting index
NFLX = df_NFLX.sort_index(ascending=True, axis=0)
#slicing the data for 15 years from '2004-01-02' to today
NFLX = NFLX.loc['2004-01-02':]
NFLX
NFLX['Close'].plot(figsize=(10, 7))
plt.title("Netflix Stock Price", fontsize=17)
plt.ylabel('Price', fontsize=14)
plt.xlabel('Time', fontsize=14)
plt.grid(which="major", color='k', linestyle='-.', linewidth=0.5)
plt.show()
```
### Feature Extraction for Predicting Stock Prices
### 1. Using Index Fund Nasdaq-100 ETF QQQ's Previous Day & Moving Average price as a feature
```
QQQ, QQQ_info = ts.get_daily('QQQ', outputsize='full')
QQQ = QQQ.rename(columns={'1. open' : 'Open', '2. high': 'High', '3. low':'Low', '4. close': 'QQQ_Close', '5. volume': 'Volume' })
QQQ = QQQ.rename_axis(['Date'])
QQQ = QQQ.drop(columns=['Open', 'High', 'Low', 'Volume'])
#sorting index
QQQ = QQQ.sort_index(ascending=True, axis=0)
#slicing the data for 15 years from '2004-01-02' to today
QQQ = QQQ.loc['2004-01-02':]
QQQ
QQQ['QQQ(t-1)'] = QQQ.QQQ_Close.shift(periods=1)
QQQ['QQQ(t-2)'] = QQQ.QQQ_Close.shift(periods=2)
QQQ['QQQ(t-5)'] = QQQ.QQQ_Close.shift(periods=5)
QQQ
QQQ['QQQ_MA10'] = QQQ.QQQ_Close.rolling(window=10).mean()
#QQQ['QQQ_MA10_t'] = QQQ.QQQ_ClosePrev1.rolling(window=10).mean()
QQQ['QQQ_MA20'] = QQQ.QQQ_Close.rolling(window=20).mean()
QQQ['QQQ_MA50'] = QQQ.QQQ_Close.rolling(window=50).mean()
```
### 2. Creating more features and technical indicators from the Netflix stock itself
### Bollinger Bands
Bollinger Bands allow traders to monitor and take advantage of shifts in price volatilities
#### Main Components of a Bollinger Bands
- Upper Band: The upper band is simply two standard deviations above the moving average of a stock’s price.
- Middle Band: The middle band is simply the moving average of the stock’s price.
- Lower Band: Two standard deviations below the moving average is the lower band.
```
NFLX['MA_20'] = NFLX.Close.rolling(window=20).mean()
NFLX['SD20'] = NFLX.Close.rolling(window=20).std()
NFLX['Upper_Band'] = NFLX.Close.rolling(window=20).mean() + (NFLX['SD20']*2)
NFLX['Lower_Band'] = NFLX.Close.rolling(window=20).mean() - (NFLX['SD20']*2)
NFLX.tail()
NFLX[['Close', 'MA_20', 'Upper_Band', 'Lower_Band']].plot(figsize=(12,6))
plt.title('20 Day Bollinger Band for Netflix')
plt.ylabel('Price (USD)')
plt.show();
```
### Shifting for Lagged data - Adding Previous Day prices
```
NFLX['NFLX_Close(t-1)'] = NFLX.Close.shift(periods=1)
NFLX['NFLX_Close(t-2)'] = NFLX.Close.shift(periods=2)
NFLX['NFLX_Close(t-5)'] = NFLX.Close.shift(periods=5)
NFLX['NFLX_Close(t-10)'] = NFLX.Close.shift(periods=10)
NFLX['NFLX_Open(t-1)'] = NFLX.Open.shift(periods=1)
NFLX.head(20)
```
### Simple Moving Averages for different periods
```
NFLX['MA5'] = NFLX.Close.rolling(window=5).mean()
NFLX['MA10'] = NFLX.Close.rolling(window=10).mean()
NFLX['MA20'] = NFLX.Close.rolling(window=20).mean()
NFLX['MA50'] = NFLX.Close.rolling(window=50).mean()
NFLX['MA200'] = NFLX.Close.rolling(window=200).mean()
NFLX[['Close', 'MA20', 'MA200', 'MA50']].plot()
plt.show()
```
### Moving Average Convergence Divergence (MACD)
```
NFLX['EMA_12'] = NFLX.Close.ewm(span=12, adjust=False).mean()
NFLX['EMA_26'] = NFLX.Close.ewm(span=26, adjust=False).mean()
NFLX['MACD'] = NFLX['EMA_12'] - NFLX['EMA_26']
NFLX['MACD_EMA'] = NFLX.MACD.ewm(span=9, adjust=False).mean()
NFLX[['MACD', 'MACD_EMA']].plot()
plt.show()
```
### Exponential Moving Averages
```
NFLX['EMA10'] = NFLX.Close.ewm(span=10, adjust=False).mean().fillna(0)
NFLX['EMA20'] = NFLX.Close.ewm(span=20, adjust=False).mean().fillna(0)
NFLX['EMA50'] = NFLX.Close.ewm(span=50, adjust=False).mean().fillna(0)
NFLX['EMA100'] = NFLX.Close.ewm(span=100, adjust=False).mean().fillna(0)
NFLX['EMA200'] = NFLX.Close.ewm(span=200, adjust=False).mean().fillna(0)
```
### Average True Range
```
import talib
NFLX['ATR'] = talib.ATR(NFLX['High'].values, NFLX['Low'].values, NFLX['Close'].values, timeperiod=14)
```
### Average Directional Index
```
NFLX['ADX'] = talib.ADX(NFLX['High'], NFLX['Low'], NFLX['Close'], timeperiod=14)
```
### Commodity Channel Index
```
tp = (NFLX['High'] + NFLX['Low'] + NFLX['Close']) / 3
ma = tp.rolling(window=20).mean()                 # 20-period moving average of the typical price
md = (tp - ma).abs().rolling(window=20).mean()    # 20-period mean deviation
NFLX['CCI'] = (tp - ma) / (0.015 * md)
```
### Rate of Change
```
NFLX['ROC'] = ((NFLX['Close'] - NFLX['Close'].shift(10)) / (NFLX['Close'].shift(10)))*100
```
### Relative Strength Index
```
NFLX['RSI'] = talib.RSI(NFLX.Close.values, timeperiod=14)
```
### William's %R
```
NFLX['William%R'] = talib.WILLR(NFLX.High.values, NFLX.Low.values, NFLX.Close.values, 14)
```
### Stochastic %K
```
NFLX['SO%K'] = ((NFLX.Close - NFLX.Low.rolling(window=14).min()) / (NFLX.High.rolling(window=14).max() - NFLX.Low.rolling(window=14).min())) * 100
```
### Standard Deviation of the Last 5 Days' Returns
```
NFLX['per_change'] = NFLX.Close.pct_change()
NFLX['STD5'] = NFLX.per_change.rolling(window=5).std()
NFLX.columns
```
### 3. Using S&P 500 Index
```
SnP, SnP_info = ts.get_daily('INX', outputsize='full')
SnP = SnP.rename(columns={'1. open' : 'Open', '2. high': 'High', '3. low':'Low', '4. close': 'SnP_Close', '5. volume': 'Volume' })
SnP = SnP.rename_axis(['Date'])
SnP = SnP.drop(columns=['Open', 'High', 'Low', 'Volume'])
#sorting index
SnP = SnP.sort_index(ascending=True, axis=0)
#slicing the data for 15 years from '2004-01-02' to today
SnP = SnP.loc['2004-01-02':]
SnP
SnP['SnP(t-1)'] = SnP.SnP_Close.shift(periods=1)
SnP['SnP(t-5)'] = SnP.SnP_Close.shift(periods=5)
SnP
```
### Merging of all columns
```
NFLX = NFLX.merge(QQQ, left_index=True, right_index=True)
NFLX
NFLX = NFLX.merge(SnP, left_index=True, right_index=True)
NFLX
NFLX.columns
# Remove unwanted columns
NFLX = NFLX.drop(columns=['MA_20', 'per_change', 'EMA_12', 'EMA_26'])
```
### Force Index
```
NFLX['ForceIndex1'] = NFLX.Close.diff(1) * NFLX.Volume
NFLX['ForceIndex20'] = NFLX.Close.diff(20) * NFLX.Volume
```
### Adding the Next day Close Price Column which needs to be predicted using Machine Learning Models
```
NFLX['NFLX_Close(t+1)'] = NFLX.Close.shift(-1)
NFLX
NFLX.shape
NFLX = NFLX.dropna()
NFLX.shape
NFLX = NFLX.rename(columns={'Close': 'NFLX_Close(t)'})
NFLX
```
### Extract Features from Date
```
NFLX['Date_Col'] = NFLX.index
from datetime import datetime
def extract_date_features(date_val):
Day = date_val.day
DayofWeek = date_val.dayofweek
Dayofyear = date_val.dayofyear
Week = date_val.week
Is_month_end = date_val.is_month_end.real
Is_month_start = date_val.is_month_start.real
Is_quarter_end = date_val.is_quarter_end.real
Is_quarter_start = date_val.is_quarter_start.real
Is_year_end = date_val.is_year_end.real
Is_year_start = date_val.is_year_start.real
Is_leap_year = date_val.is_leap_year.real
Year = date_val.year
Month = date_val.month
return Day, DayofWeek, Dayofyear, Week, Is_month_end, Is_month_start, Is_quarter_end, Is_quarter_start, Is_year_end, Is_year_start, Is_leap_year, Year, Month
funct = lambda x: pd.Series(extract_date_features(x))
NFLX[['Day', 'DayofWeek', 'DayofYear', 'Week', 'Is_month_end', 'Is_month_start',
'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start', 'Is_leap_year', 'Year', 'Month']] = NFLX.Date_Col.apply(funct)
NFLX
NFLX.columns
NFLX.shape
```
#### I have extracted some 55 new features for predicting stock prices. Some analyze the short-term trend, some tell us about long-term trends. I will now build machine learning models based on these features
### Save the Features in CSV
```
NFLX.to_csv('NETFLIX.csv')
```
### License
MIT License
Copyright (c) 2020 Avinash Chourasiya
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Section 2: Moving Beyond Static Visualizations
Static visualizations are limited in how much information they can show. To move beyond these limitations, we can create animated and/or interactive visualizations. Animations make it possible for our visualizations to tell a story through movement of the plot components (e.g., bars, points, lines). Interactivity makes it possible to explore the data visually by hiding and displaying information based on user interest. In this section, we will focus on creating animated visualizations using Matplotlib before moving on to create interactive visualizations in the next section.
## Animating cumulative values over time
In the previous section, we made a couple of visualizations to help us understand the number of Stack Overflow questions per library and how it changed over time. However, each of these came with some limitations.
We made a bar plot that captured the total number of questions per library, but it couldn't show us the growth in pandas questions over time (or how the growth rate changed over time):
<div style="text-align: center;">
<img width="500" src="https://raw.githubusercontent.com/stefmolin/python-data-viz-workshop/main/media/bar_plot.png" alt="bar plot">
</div>
We also made an area plot showing the number of questions per day over time for the top 4 libraries, but by limiting the libraries shown we lost some information:
<div style="text-align: center;">
<img width="800" src="https://raw.githubusercontent.com/stefmolin/python-data-viz-workshop/main/media/area_plot.png" alt="area plot">
</div>
Both of these visualizations gave us insight into the dataset. For example, we could see that pandas has by far the largest number of questions and has been growing at a faster rate than the other libraries. While this comes from studying the plots, an animation would make this much more obvious and, at the same time, capture the exponential growth in pandas questions that helped pandas overtake both Matplotlib and NumPy in cumulative questions.
Let's use Matplotlib to create an animated bar plot of cumulative questions over time to show this. We will do so in the following steps:
1. Create a dataset of cumulative questions per library over time.
2. Import the `FuncAnimation` class.
3. Write a function for generating the initial plot.
4. Write a function for generating annotations and plot text.
5. Define the plot update function.
6. Bind arguments to the update function.
7. Animate the plot.
#### 1. Create a dataset of cumulative questions per library over time.
We will start by reading in our Stack Overflow dataset, but this time, we will calculate the total number of questions per month and then calculate the cumulative value over time:
```
import pandas as pd
questions_per_library = pd.read_csv(
'../data/stackoverflow.zip', parse_dates=True, index_col='creation_date'
).loc[:,'pandas':'bokeh'].resample('1M').sum().cumsum().reindex(
pd.date_range('2008-08', '2021-10', freq='M')
).fillna(0)
questions_per_library.tail()
```
*Source: [Stack Exchange Network](https://api.stackexchange.com/docs/search)*
#### 2. Import the `FuncAnimation` class.
To create animations with Matplotlib, we will be using the `FuncAnimation` class, so let's import it now:
```
from matplotlib.animation import FuncAnimation
```
At a minimum, we will need to provide the following when instantiating a `FuncAnimation` object:
- The `Figure` object to draw on.
- A function to call at each frame to update the plot.
In the next few steps, we will work on the logic for these.
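As a bare-bones sketch of the call we are building toward (the update logic here is just a placeholder):
```python
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()

def update(frame):
    """Called once per frame; real update logic goes here."""
    ax.set_title(f'frame {frame}')

anim = FuncAnimation(fig, update, frames=range(10))  # keep a reference so it isn't garbage collected
```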
#### 3. Write a function for generating the initial plot.
Since we are required to pass in a `Figure` object and bake all the plot update logic into a function, we will start by building up an initial plot. Here, we create a bar plot with bars of width 0, so that they don't show up for now. The y-axis is set up so that the libraries with the most questions overall are at the top:
```
import matplotlib.pyplot as plt
from matplotlib import ticker
def bar_plot(data):
fig, ax = plt.subplots(figsize=(8, 6))
sort_order = data.last('1M').squeeze().sort_values().index
bars = [
bar.set_label(label) for label, bar in
zip(sort_order, ax.barh(sort_order, [0] * data.shape[1]))
]
ax.set_xlabel('total questions', fontweight='bold')
ax.set_xlim(0, 250_000)
ax.xaxis.set_major_formatter(ticker.EngFormatter())
ax.xaxis.set_tick_params(labelsize=12)
ax.yaxis.set_tick_params(labelsize=12)
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
fig.tight_layout()
return fig, ax
```
This gives us a plot that we can update:
```
%matplotlib inline
bar_plot(questions_per_library)
```
#### 4. Write a function for generating annotations and plot text.
We will also need to initialize annotations for each of the bars and some text to show the date in the animation (month and year):
```
def generate_plot_text(ax):
annotations = [
ax.annotate(
'', xy=(0, bar.get_y() + bar.get_height()/2),
ha='left', va='center'
) for bar in ax.patches
]
time_text = ax.text(
0.9, 0.1, '', transform=ax.transAxes, fontsize=15,
horizontalalignment='center', verticalalignment='center'
)
return annotations, time_text
```
*Tip: We are passing in `transform=ax.transAxes` when we place our time text in order to specify the location in terms of the `Axes` object's coordinates instead of basing it off the data in the plot so that it is easier to place.*
#### 5. Define the plot update function.
Next, we will make our plot update function. This will be called at each frame. We will extract that frame's data (the cumulative questions by that month), and then update the widths of each of the bars. If the values are greater than 0, we will also annotate the bar. At every frame, we will also need to update our time annotation (`time_text`):
```
def update(frame, *, ax, df, annotations, time_text):
data = df.loc[frame, :]
# update bars
for rect, text in zip(ax.patches, annotations):
col = rect.get_label()
if data[col]:
rect.set_width(data[col])
text.set_x(data[col])
text.set_text(f' {data[col]:,.0f}')
# update time
time_text.set_text(frame.strftime('%b\n%Y'))
```
#### 6. Bind arguments to the update function.
The last step before creating our animation is to create a function that will assemble everything we need to pass to `FuncAnimation`. Note that our `update()` function requires multiple parameters, but we would be passing in the same values every time (since we would only change the value for `frame`). To make this simpler, we create a [partial function](https://docs.python.org/3/library/functools.html#functools.partial), which **binds** values to each of those arguments so that we only have to pass in `frame` when we call the partial. This is essentially a [closure](https://www.programiz.com/python-programming/closure), where `bar_plot_init()` is the enclosing function and `update()` is the nested function, which we defined in the previous code block for readability:
```
from functools import partial
def bar_plot_init(questions_per_library):
fig, ax = bar_plot(questions_per_library)
annotations, time_text = generate_plot_text(ax)
bar_plot_update = partial(
update, ax=ax, df=questions_per_library,
annotations=annotations, time_text=time_text
)
return fig, ax, bar_plot_update
```
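If the `partial()` mechanics are unfamiliar, here is the idea in isolation (a toy example unrelated to the plot):
```python
from functools import partial

def power(base, exponent):
    return base ** exponent

square = partial(power, exponent=2)  # exponent is now bound; only base remains free
print(square(5))  # 25
```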
#### 7. Animate the plot.
Finally, we are ready to create our animation. We will call the `bar_plot_init()` function from the previous code block to generate the `Figure` object, `Axes` object, and partial function for the update of the plot. Then, we pass in the `Figure` object and update function when initializing our `FuncAnimation` object. We also specify the `frames` argument as the index of our DataFrame (the dates) and that the animation shouldn't repeat because we will save it as an MP4 video:
```
fig, ax, update_func = bar_plot_init(questions_per_library)
ani = FuncAnimation(
fig, update_func, frames=questions_per_library.index, repeat=False
)
ani.save(
'../media/stackoverflow_questions.mp4',
writer='ffmpeg', fps=10, bitrate=100, dpi=300
)
plt.close()
```
**Important**: The `FuncAnimation` object **must** be assigned to a variable when creating it; otherwise, without any references to it, Python will garbage collect it – ending the animation. For more information on garbage collection in Python, check out [this](https://stackify.com/python-garbage-collection/) article.
Now, let's view the animation we just saved as an MP4 file:
```
from IPython import display
display.Video(
'../media/stackoverflow_questions.mp4', width=600, height=400,
embed=True, html_attributes='controls muted autoplay'
)
```
## Animating distributions over time
As with the previous example, the histograms of daily Manhattan subway entries in 2018 (from the first section of the workshop) don't tell the whole story of the dataset because the distributions changed drastically in 2020 and 2021:
<div style="text-align: center;">
<img width="700" src="https://raw.githubusercontent.com/stefmolin/python-data-viz-workshop/main/media/2018_subway_entries_histogram.png" alt="Histograms of daily Manhattan subway entries in 2018">
</div>
We will make an animated version of these histograms that enables us to see the distributions changing over time. Note that this example will have two key differences from the previous one. The first is that we will be animating subplots rather than a single plot, and the second is that we will use a technique called **blitting** to only update the portion of the subplots that has changed. This requires that we return the [*artists*](https://matplotlib.org/stable/tutorials/intermediate/artists.html) that need to be redrawn in the plot update function.
To make this visualization, we will work through these steps:
1. Create a dataset of daily subway entries.
2. Determine the bin ranges for the histograms.
3. Write a function for generating the initial histogram subplots.
4. Write a function for generating an annotation for the time period.
5. Define the plot update function.
6. Bind arguments for the update function.
7. Animate the plot.
#### 1. Create a dataset of daily subway entries.
As we did previously, we will read in the subway dataset, which contains the total entries and exits per day per borough:
```
subway = pd.read_csv(
'../data/NYC_subway_daily.csv', parse_dates=['Datetime'],
index_col=['Borough', 'Datetime']
)
subway_daily = subway.unstack(0)
subway_daily.head()
```
*Source: The above dataset was resampled from [this](https://www.kaggle.com/eddeng/nyc-subway-traffic-data-20172021?select=NYC_subway_traffic_2017-2021.csv) dataset provided by Kaggle user [Edden](https://www.kaggle.com/eddeng).*
For this visualization, we will just be working with the entries in Manhattan:
```
manhattan_entries = subway_daily['Entries']['M']
```
#### 2. Determine the bin ranges for the histograms.
Before we can set up the subplots, we have to calculate the bin ranges for the histograms so our animation is smooth. NumPy provides the `histogram()` function, which gives us both the number of data points in each bin and the bin ranges, respectively. We will also be using this function to update the histograms during the animation:
```
import numpy as np
count_per_bin, bin_ranges = np.histogram(manhattan_entries, bins=30)
```
#### 3. Write a function for generating the initial histogram subplots.
Next, we will handle the logic for building our initial histogram, packaging it in a function:
```
def subway_histogram(data, bins, date_range):
weekday_mask = data.index.weekday < 5
configs = [
{'label': 'Weekend', 'mask': ~weekday_mask, 'ymax': 60},
{'label': 'Weekday', 'mask': weekday_mask, 'ymax': 120}
]
_, bin_ranges = np.histogram(data, bins=bins)
fig, axes = plt.subplots(1, 2, figsize=(8, 4), sharex=True)
for ax, config in zip(axes, configs):
_, _, config['hist'] = ax.hist(
data[config['mask']].loc[date_range], bin_ranges, ec='black'
)
ax.xaxis.set_major_formatter(ticker.EngFormatter())
ax.set(
xlim=(0, None), ylim=(0, config['ymax']),
xlabel=f'{config["label"]} Entries'
)
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
axes[0].set_ylabel('Frequency')
fig.suptitle('Histogram of Daily Subway Entries in Manhattan')
fig.tight_layout()
return fig, axes, bin_ranges, configs
```
Notice that our plot this time starts out with data already – this is because we want to show the change in the distribution of daily entries in the last year:
```
_ = subway_histogram(manhattan_entries, bins=30, date_range='2017')
```
#### 4. Write a function for generating an annotation for the time period.
We will once again include some text that indicates the time period as the animation runs. This is similar to what we had in the previous example:
```
def add_time_text(ax):
time_text = ax.text(
0.15, 0.9, '', transform=ax.transAxes, fontsize=15,
horizontalalignment='center', verticalalignment='center'
)
return time_text
```
#### 5. Define the plot update function.
Now, we will create our update function. This time, we have to update both subplots and return any artists that need to be redrawn since we are going to use blitting:
```
def update(frame, *, data, configs, time_text, bin_ranges):
artists = []
time = frame.strftime('%b\n%Y')
if time != time_text.get_text():
time_text.set_text(time)
artists.append(time_text)
for config in configs:
time_frame_mask = \
(data.index > frame - pd.Timedelta(days=365)) & (data.index <= frame)
counts, _ = np.histogram(
data[time_frame_mask & config['mask']],
bin_ranges
)
for count, rect in zip(counts, config['hist'].patches):
if count != rect.get_height():
rect.set_height(count)
artists.append(rect)
return artists
```
#### 6. Bind arguments for the update function.
As our final step before generating the animation, we bind our arguments to the update function using a partial function:
```
def histogram_init(data, bins, initial_date_range):
fig, axes, bin_ranges, configs = subway_histogram(data, bins, initial_date_range)
update_func = partial(
update, data=data, configs=configs,
time_text=add_time_text(axes[0]),
bin_ranges=bin_ranges
)
return fig, axes, update_func
```
#### 7. Animate the plot.
Finally, we will animate the plot using `FuncAnimation` like before. Notice that this time we are passing in `blit=True`, so that only the artists that we returned in the `update()` function are redrawn. We are specifying to make updates for each day in the data starting on August 1, 2019:
```
fig, axes, update_func = histogram_init(
manhattan_entries, bins=30, initial_date_range=slice('2017', '2019-07')
)
ani = FuncAnimation(
fig, update_func, frames=manhattan_entries['2019-08':'2021'].index,
repeat=False, blit=True
)
ani.save(
'../media/subway_entries_subplots.mp4',
writer='ffmpeg', fps=30, bitrate=500, dpi=300
)
plt.close()
```
*Tip: We are using a `slice` object to pass a date range for pandas to use with `loc[]`. More information on `slice()` can be found [here](https://docs.python.org/3/library/functions.html?highlight=slice#slice).*
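For example, on a small throwaway series (created here purely to illustrate the equivalence; it is not part of the subway data), both spellings select the same rows:
```
# slice('2017', '2019-07') with loc[] behaves like the usual colon-based date slicing
dates = pd.date_range('2017-01-01', '2019-12-31', freq='D')
s = pd.Series(range(len(dates)), index=dates)
s.loc[slice('2017', '2019-07')].equals(s.loc['2017':'2019-07'])  # True
```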
Our animation makes it easy to see the change in the distributions over time:
```
from IPython import display
display.Video(
'../media/subway_entries_subplots.mp4', width=600, height=400,
embed=True, html_attributes='controls muted autoplay'
)
```
## Animating geospatial data with HoloViz
[HoloViz](https://holoviz.org/) provides multiple high-level tools that aim to simplify data visualization in Python. For this example, we will be looking at [HoloViews](https://holoviews.org/) and [GeoViews](https://geoviews.org/), which extends HoloViews for use with geographic data. HoloViews abstracts away some of the plotting logic, removing boilerplate code and making it possible to easily switch backends (e.g., switch from Matplotlib to Bokeh for JavaScript-powered, interactive plotting). To wrap up our discussion on animation, we will use GeoViews to create an animation of earthquakes per month in 2020 on a map of the world.
To make this visualization, we will work through the following steps:
1. Use GeoPandas to read in our data.
2. Handle HoloViz imports and set up the Matplotlib backend.
3. Define a function for plotting earthquakes on a map using GeoViews.
4. Create a mapping of frames to plots using HoloViews.
5. Animate the plot.
#### 1. Use GeoPandas to read in our data.
Our dataset is in GeoJSON format, so the best way to read it in will be to use [GeoPandas](https://geopandas.org/), which is a library that makes working with geospatial data in Python easier. It builds on top of pandas, so we don't have to learn any additional syntax for this example.
Here, we import GeoPandas and then use the `read_file()` function to read the earthquakes GeoJSON data into a `GeoDataFrame` object:
```
import geopandas as gpd
earthquakes = gpd.read_file('../data/earthquakes.geojson').assign(
time=lambda x: pd.to_datetime(x.time, unit='ms'),
month=lambda x: x.time.dt.month
)[['geometry', 'mag', 'time', 'month']]
earthquakes.shape
```
Our data looks like this:
```
earthquakes.head()
```
*Source: [USGS API](https://earthquake.usgs.gov/fdsnws/event/1/)*
#### 2. Handle HoloViz imports and set up the Matplotlib backend.
Since our earthquakes dataset contains geometries, we will use GeoViews in addition to HoloViews to create our animation. For this example, we will be using the [Matplotlib backend](http://holoviews.org/user_guide/Plotting_with_Matplotlib.html):
```
import geoviews as gv
import geoviews.feature as gf
import holoviews as hv
gv.extension('matplotlib')
```
#### 3. Define a function for plotting earthquakes on a map using GeoViews.
Next, we will write a function to plot each earthquake as a point on the world map. Since our dataset has geometries, we can use that information to place the points, coloring each point by the earthquake magnitude. Note that, since earthquake magnitudes are measured on a logarithmic scale, some magnitudes are negative:
```
import calendar
def plot_earthquakes(data, month_num):
points = gv.Points(
data.query(f'month == {month_num}'),
kdims=['longitude', 'latitude'], # key dimensions (for coordinates here)
vdims=['mag'] # value dimensions (for modifying the plot here)
).redim.range(mag=(-2, 10), latitude=(-90, 90))
# create an overlay by combining Cartopy features and the points with *
overlay = gf.land * gf.coastline * gf.borders * points
return overlay.opts(
gv.opts.Points(color='mag', cmap='fire_r', colorbar=True, alpha=0.75),
gv.opts.Overlay(
global_extent=False, title=f'{calendar.month_name[month_num]}', fontscale=2
)
)
```
Our function returns an `Overlay` of earthquakes (represented as `Points`) on a map of the world. Under the hood, GeoViews uses [Cartopy](https://scitools.org.uk/cartopy/docs/latest/) to create the map:
```
plot_earthquakes(earthquakes, 1).opts(
fig_inches=(6, 3), aspect=2, fig_size=250, fig_bounds=(0.07, 0.05, 0.87, 0.95)
)
```
*Tip: One thing that makes working with geospatial data difficult is handling [projections](https://en.wikipedia.org/wiki/Map_projection). When working with datasets that use different projections, GeoViews can help align them – check out their tutorial [here](https://geoviews.org/user_guide/Projections.html).*
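As a rough sketch of what that looks like (the `crs` and `projection` options here are based on that guide and have not been run in this notebook, so treat them as assumptions), you declare the coordinate system the data is stored in and let GeoViews reproject it for display:
```
import cartopy.crs as ccrs
# assumed API: tell GeoViews our longitude/latitude values are plate carrée coordinates ...
points = gv.Points(
    earthquakes.query('month == 1'),
    kdims=['longitude', 'latitude'], vdims=['mag'],
    crs=ccrs.PlateCarree()
)
# ... and display the overlay on a different projection
(gf.coastline * points).opts(projection=ccrs.Robinson())
```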
#### 4. Create a mapping of frames to plots using HoloViews.
We will create a `HoloMap` of the frames to include in our animation. This maps the frame to the plot that should be rendered at that frame:
```
frames = {
month_num: plot_earthquakes(earthquakes, month_num)
for month_num in range(1, 13)
}
holomap = hv.HoloMap(frames)
```
#### 5. Animate the plot.
Now, we will output our `HoloMap` as a GIF animation, which may take a while to run:
```
hv.output(
holomap.opts(
fig_inches=(6, 3), aspect=2, fig_size=250,
fig_bounds=(0.07, 0.05, 0.87, 0.95)
), holomap='gif', fps=5
)
```
To save the animation to a file, run the following code:
```python
hv.save(
holomap.opts(
fig_inches=(6, 3), aspect=2, fig_size=250,
fig_bounds=(0.07, 0.05, 0.87, 0.95)
), 'earthquakes.gif', fps=5
)
```
## Up Next: Building Interactive Visualizations for Data Exploration
There are no exercises in this section, but let's take a 5-minute break for you to review the content here along with some additional resources on animation:
- `matplotlib.animation` [API overview](https://matplotlib.org/stable/api/animation_api.html)
- `FuncAnimation` [documentation](https://matplotlib.org/stable/api/_as_gen/matplotlib.animation.FuncAnimation.html)
- Matplotlib animation [examples](https://matplotlib.org/stable/api/animation_api.html#examples)
- Matplotlib's list of [3rd-party animation libraries](https://matplotlib.org/stable/thirdpartypackages/index.html#animations)
- Using HoloViews with the [Matplotlib backend](http://holoviews.org/user_guide/Plotting_with_Matplotlib.html)
# Noise Estimation and Adaptive Encoding for Asymmetric Quantum Error Correcting Codes
_Jan Florjanczyk, Supervisor: Todd A. Brun_
```
%matplotlib inline
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
from matplotlib import cm
from glob import glob
import pandas as pd
import numpy as np
import scipy as sp
import scipy.special  # for comb; SciPy submodules are not guaranteed to be imported implicitly
import scipy.stats    # for sem and linregress
import seaborn as sns
sns.set_style("whitegrid")
from drift_qec.oneangledephasing import *
fig = plt.figure(figsize=(14,10))
axs = ["", ""]
axs[0] = fig.add_subplot(121, projection='3d')
axs[1] = fig.add_subplot(122, projection='3d')
axs[0].set_aspect("equal")
axs[0].set_frame_on(False)
axs[0].w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].grid(False)
axs[1].set_aspect("equal")
axs[1].set_frame_on(False)
axs[1].w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].grid(False)
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:20j]
x=np.cos(u)*np.sin(v)
y=np.sin(u)*np.sin(v)
z=np.cos(v)
axs[0].plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm)
axs[1].plot_wireframe(x, y, z, color=[0.9, 0.9, 0.9], linewidth=0.75)
x=0.2*np.cos(u)*np.sin(v)
y=0.2*np.sin(u)*np.sin(v)
z=1.0*np.cos(v)
axs[1].plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm)
fig = plt.figure(figsize=(14,10))
axs = ["", ""]
axs[0] = fig.add_subplot(121, projection='3d')
axs[1] = fig.add_subplot(122, projection='3d')
axs[0].set_aspect("equal")
axs[0].set_frame_on(False)
axs[0].w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].grid(False)
axs[1].set_aspect("equal")
axs[1].set_frame_on(False)
axs[1].w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].grid(False)
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:20j]
x=np.cos(u)*np.sin(v)
y=np.sin(u)*np.sin(v)
z=np.cos(v)
axs[0].plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm)
axs[1].plot_wireframe(x, y, z, color=[0.9, 0.9, 0.9], linewidth=0.75)
x0=0.2*np.cos(u)*np.sin(v)
y0=0.2*np.sin(u)*np.sin(v)
z0=1.0*np.cos(v)
x = x0*np.sin(-1.2) + z0*np.cos(-1.2)
y = y0
z = x0*np.cos(-1.2) - z0*np.sin(-1.2)
axs[1].plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm)
fig = plt.figure(figsize=(14,10))
axs = ["", ""]
axs[0] = fig.add_subplot(121, projection='3d')
axs[1] = fig.add_subplot(122, projection='3d')
axs[0].set_aspect("equal")
axs[0].set_frame_on(False)
axs[0].w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].grid(False)
axs[1].set_aspect("equal")
axs[1].set_frame_on(False)
axs[1].w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].grid(False)
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:20j]
x=np.cos(u)*np.sin(v)
y=np.sin(u)*np.sin(v)
z=np.cos(v)
axs[0].plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm)
axs[1].plot_wireframe(x, y, z, color=[0.9, 0.9, 0.9], linewidth=0.25)
k, p = 0.5, 0.6
px, py, pz = p, 0, k*p
x0=(1.0 - py - pz)*np.cos(u)*np.sin(v)
y0=(1.0 - px - pz)*np.sin(u)*np.sin(v)
z0=(1.0 - px - py)*np.cos(v)
x1 = x0*np.sin(0.5) + y0*np.cos(0.5)
y1 = x0*np.cos(0.5) - y0*np.sin(0.5)
z1 = z0
x = x1*np.sin(-0.8) + z1*np.cos(-0.8)
y = y1
z = x1*np.cos(-0.8) - z1*np.sin(-0.8)
axs[1].plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm)
fig = plt.figure(figsize=(14,10))
axs = ["", ""]
axs[0] = fig.add_subplot(121, projection='3d')
axs[1] = fig.add_subplot(122, projection='3d')
axs[0].set_aspect("equal")
axs[0].set_frame_on(False)
axs[0].w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[0].grid(False)
axs[1].set_aspect("equal")
axs[1].set_frame_on(False)
axs[1].w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axs[1].grid(False)
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:20j]
x=np.cos(u)*np.sin(v)
y=np.sin(u)*np.sin(v)
z=np.cos(v)
axs[0].plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm)
axs[1].plot_wireframe(x, y, z, color=[0.9, 0.9, 0.9], linewidth=0.25)
k, p = 0.5, 0.6
px, py, pz = p, 0, k*p
x=(1.0 - py - pz)*np.cos(u)*np.sin(v)
y=(1.0 - px - pz)*np.sin(u)*np.sin(v)
z=(1.0 - px - py)*np.cos(v)
axs[1].plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm)
```
## Fixed angle dephasing channel
```
def uncorr_rates(N, t, ps):
puncorrs = np.zeros(ps.shape)
for idx, p in enumerate(ps):
puncorr = 0.0
for k in np.arange(t, N):
            # scipy.misc.comb was removed in SciPy 1.0; scipy.special.comb is its replacement
            puncorr = puncorr + sp.special.comb(N, k) * ((1-p) ** (N-k)) * (p ** k)
puncorrs[idx] = puncorr
return puncorrs
df = pd.read_csv("data/OneAngleDephasingFixed/src.csv", index_col=0)
df = df.loc[df["time"] < df["max_t"], :]
ps = np.unique(df["rate"].values)  # error rates present in the data (`times` is only defined further below)
opt = pd.DataFrame({"rate": ps, "max_opt_t": 1.0 / uncorr_rates(15, 4, ps)})
df = pd.merge(df, opt, how="left")
# df = df.loc[df["time"] < df["max_opt_t"], :]
df = df.loc[df["time"] < 1.0 / df["p_uncorrectable"], :]
times = df[["rate", "time"]].groupby(["rate"]).aggregate([np.mean, sp.stats.sem])
times.columns=["mean", "sem"]
x = np.log(times["mean"].index)
y = np.log(times["mean"].values)
ps = times.index.values
slope, intercept, r_value, p_value, std_err = sp.stats.linregress(x,y)
f = np.exp(intercept + x * slope)
fig, ax = plt.subplots(1, 1, figsize=(9, 6))
ax.loglog(times.index, times["mean"], marker="D", ls="",
color=sns.color_palette()[0], label="[[15, 1, 3]] code with adaptive basis")
# ax.loglog(times.index, times["mean"] - times["sem"], ls="--", color=sns.color_palette()[0])
# ax.loglog(times.index, times["mean"] + times["sem"], ls="--", color=sns.color_palette()[0])
ax.loglog(times.index, f, color=sns.color_palette()[0], ls="-",
label="Effective code distance {:1.5f}".format(-2*slope-1), alpha=0.5)
ax.loglog(times.index, 16.0/(63.0 * 3.141592 * (times.index.values ** 2)),
color=sns.color_palette()[0], label="[[15, 1, 3]] code without adaptive basis")
ax.loglog(times.index, 1.0/(uncorr_rates(15, 4, ps)),
color=sns.color_palette()[0], label="[[15, 1, 3]] code optimal", ls="--")
ax.loglog(times.index, 1.0/(uncorr_rates(7, 1, ps)),
color=sns.color_palette()[1], label="[[7, 1, 3]] code")
ax.loglog(times.index, 1.0/(uncorr_rates(23, 4, ps)),
color=sns.color_palette()[2], label="[[23, 1, 7]] code")
ax.axis([1e-6, 1e-1, 1e1, 1e21])
ax.set_title("Expected time until uncorrectable error")
ax.set_xlabel("Dephasing channel error rate $p$")
ax.set_ylabel("Lifetime [cycles]")
ax.legend(frameon=True)
fig.savefig("figures/fixedangledephasinglifetimes.pdf")
```
## Drifting angle dephasing channel
```
max_time = 1000
params = {"Theta": Theta(max_time, grains=10000, sigma=0.03)}
constants = {"p": Constant(0.003, "p")}
estimator = OneAngleDephasingEstimator(params, constants)
channel = OneAngleDephasingChannel(15, max_time)
report = Report("One Angle Dephasing")
time = 0
while time < max_time:
s = channel.error(estimator.params, estimator.constants, time)
estimator.update(s, time)
report.record(s, time)
time = time + 1
report.exit(time, "oot", estimator)
fig, ax = plt.subplots(figsize=(7, 5))
report.plot(ax, weightson=True)
ax.legend(frameon=True, loc=4)
ax.set_title("Dephasing angle Theta and estimate")
ax.set_ylabel("Angle (radians)")
ax.set_xlabel("Error correction cycle")
fig.savefig("figures/driftingangledephasingrun.pdf")
df = pd.concat([pd.read_csv(path) for path in glob("data/OneAngleDephasingDriftMore/*.csv")])
s = df.groupby("error_rate").aggregate([np.mean, sp.stats.sem]).reset_index()
s
x = np.log(s[("error_rate", )])
y = np.log(s[("exit_time", "mean")])
slope, intercept, r_value, p_value, std_err = sp.stats.linregress(x,y)
ps = s[("error_rate", )].values
xmin = -4.5
xmax = -1.5
xn = 9
f = np.exp(intercept) * (np.logspace(-4, -2, xn) ** slope)
fig, ax = plt.subplots(1, 1, figsize=(9, 6))
plt.loglog(s[("error_rate", )], s[("exit_time", "mean")], ls="", marker="o",
color=sns.color_palette()[0], label="[[15, 1, 3]] code with adaptive basis")
# plt.loglog(s[("error_rate", )], s[("exit_time", "mean")] - s[("exit_time", "sem")],
# ls="--", color=sns.color_palette()[0])
# plt.loglog(s[("error_rate", )], s[("exit_time", "mean")] + s[("exit_time", "sem")],
# ls="--", color=sns.color_palette()[0])
ax.loglog(np.logspace(-4, -2, xn), f, color=sns.color_palette()[0], ls="-",
label="Effective code distance {:1.2f}".format(-2*slope-1), alpha=0.3)
ax.loglog(s[("error_rate", )], 16.0/(63.0 * 3.141592 * (s[("error_rate", )].values ** 2)),
color=sns.color_palette()[0], label="[[15, 1, 3]] code without adaptive basis")
ax.loglog(s[("error_rate", )], 1.0/(uncorr_rates(15, 4, ps)),
color=sns.color_palette()[0], label="[[15, 1, 3]] code optimal", ls="--")
ax.loglog(s[("error_rate", )], 1.0/(uncorr_rates(7, 1, ps)),
color=sns.color_palette()[1], label="[[7, 1, 3]] code")
ax.loglog(s[("error_rate", )], 1.0/(uncorr_rates(23, 4, ps)),
color=sns.color_palette()[2], label="[[23, 1, 7]] code")
labels = ["{:1.2f}".format(x) for x in np.linspace(xmin, xmax, xn)]
plt.xticks(np.logspace(xmin, xmax, xn), labels)
plt.axis([(10 ** xmin), (10 ** xmax), 1e1, 1e13])
plt.legend(frameon=True)
plt.title("Expected time until uncorrectable error")
plt.xlabel("Dephasing channel error rate $p$")
plt.ylabel("Lifetime [cycles]")
```
## Drift rate
```
from glob import glob
files = glob("data/Archive/*.csv")
dfs = [pd.read_csv(fname) for fname in files]
df = pd.concat(dfs)
df.columns = ["error_rate", "drift_rate", "exit_time", "exit_status"]
error_rates = np.unique(df["error_rate"])
t = df.pivot_table(index="drift_rate", columns="error_rate", values="exit_time")
s = df.pivot_table(index="drift_rate", columns="error_rate", values="exit_time", aggfunc=lambda x: sp.stats.sem(x))
fig, ax = plt.subplots(1, 1, figsize=(9, 6))
for idx, error_rate in enumerate(error_rates):
x = t.loc[:, error_rate].index
y = t.loc[:, error_rate].values
e = s.loc[:, error_rate].values
ax.loglog(x, y, marker="D", ls="",
color=sns.color_palette()[idx], label="error rate {:1.3f}".format(error_rate))
ax.loglog(x, y+e, ls="--", color=sns.color_palette()[idx])
ax.loglog(x, y-e, ls="--", color=sns.color_palette()[idx])
    ax.axhline(1.0 / uncorr_rates(15, 2, np.array([error_rate]))[0], color=sns.color_palette()[idx], ls="-")
ax.set_title("Expected time until uncorrectable error")
ax.set_xlabel("Drift rate (random walk step size)")
ax.set_ylabel("Lifetime [cycles]")
ax.legend(frameon=True)
from glob import glob
files = glob("data/Archive/*.csv")
dfs = [pd.read_csv(fname) for fname in files]
df = pd.concat(dfs)
df.columns = ["error_rate", "drift_rate", "exit_time", "exit_status"]
error_rates = np.unique(df["error_rate"])
t = df.pivot_table(index="drift_rate", columns="error_rate", values="exit_time")
s = df.pivot_table(index="drift_rate", columns="error_rate", values="exit_time", aggfunc=lambda x: sp.stats.sem(x))
fig, ax = plt.subplots(1, 1, figsize=(9, 6))
for idx, error_rate in enumerate(error_rates):
baseline = 1.0 / uncorr_rates(15, 2, np.array([error_rate]))[0]
x = t.loc[:, error_rate].index
y = t.loc[:, error_rate].values - baseline
e = s.loc[:, error_rate].values
ax.loglog(x, y, marker="D", ls="",
color=sns.color_palette()[idx], label="error rate {:1.3f}".format(error_rate))
ax.loglog(x, y+e, ls="--", color=sns.color_palette()[idx])
ax.loglog(x, y-e, ls="--", color=sns.color_palette()[idx])
ax.set_title("Expected time until uncorrectable error")
ax.set_xlabel("Drift rate (random walk step size)")
ax.set_ylabel("Lifetime increase [cycles]")
ax.legend(frameon=True)
```
<a href="https://colab.research.google.com/github/Educat8n/Reinforcement-Learning-for-Game-Playing-and-More/blob/main/Module3/Module_3.2_DDPG.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Policy Gradients
* DQN and its variants have been very successful in solving problems where the state space is continuous and action space is discrete.
* For example, in Atari games, the input space consists of raw pixels, but actions are discrete - [up, down, left, right, no-op].
###### How do we solve a problem with a continuous action space?
* Use a policy gradient algorithm.
* In policy gradient methods the policy $\pi(a|s)$ is approximated directly.
The neural network learns a policy for selecting actions that maximizes the reward by adjusting its weights with gradient ascent on the expected return, hence the name: policy gradients (see the expression below).
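As background (this is the standard statement of the policy gradient theorem, not something computed explicitly in the code below), the gradient of the expected return $J(\theta)$ for a stochastic policy $\pi_\theta$ is

$$
\nabla_\theta J(\theta) = \mathbb{E}_{s,\,a \sim \pi_\theta}\!\left[\nabla_\theta \log \pi_\theta(a \mid s)\, Q^{\pi_\theta}(s, a)\right],
$$

so ascending this gradient makes high-value actions more likely.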
# Deep deterministic policy gradients
* Two networks; one called the actor network and the other called the critic network.
* The actor network approximates the optimal policy deterministically.
* It outputs the most preferred action.
* The critic evaluates the optimal action value function using the actor's most preferred action.
# DDPG: Architecture

# DDPG: Training
* Train the critic network just as in DQN.
* Try to minimize the difference between the estimated Q-value and the target Q-value.
* The gradient of the Q-value with respect to the action is then propagated back to train the actor network.
### If the critic is good enough, it will push the actor toward actions that maximize the value function.
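In equations (these mirror the `update()` method implemented later in this notebook), with actor $\mu$, critic $Q$, and slowly updated target networks $\mu'$ and $Q'$, each training step minimizes, over a minibatch of $N$ transitions $(s_i, a_i, r_i, s'_i)$,

$$
L_{\text{critic}} = \frac{1}{N}\sum_{i}\Big(r_i + \gamma\, Q'\big(s'_i, \mu'(s'_i)\big) - Q(s_i, a_i)\Big)^2
\qquad \text{and} \qquad
L_{\text{actor}} = -\frac{1}{N}\sum_{i} Q\big(s_i, \mu(s_i)\big).
$$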
```
# On Colab, run these install commands (they can be skipped locally if the packages are already installed).
# Box2D is a 2D physics engine used by some Gym environments.
!pip install box2d-py
# And for visualization on Colab install
!pip install pyglet
!apt-get install -y xvfb python-opengl > /dev/null 2>&1
!pip install gym pyvirtualdisplay > /dev/null 2>&1
# Source: https://keras.io/examples/rl/ddpg_pendulum/
import warnings
warnings.filterwarnings('ignore')
import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
from IPython import display as ipythondisplay ## Needed on colab
## Need to set a virtual Display on colab, else ipythondisplay would not work
from pyvirtualdisplay import Display
display = Display(visible=0, size=(400, 300))
display.start()
```
## Gym Environment with Continuous Action
* Pendulum
```
env = gym.make("Pendulum-v0")
obs = env.reset()
img = env.render(mode='rgb_array')
env.close()
plt.imshow(img)
num_states = env.observation_space.shape[0]
print("Size of State Space -> {}".format(num_states))
num_actions = env.action_space.shape[0]
print("Size of Action Space -> {}".format(num_actions))
upper_bound = env.action_space.high[0]
lower_bound = env.action_space.low[0]
print("Max Value of Action -> {}".format(upper_bound))
print("Min Value of Action -> {}".format(lower_bound))
```
# DDPG: Actor Network
```
def get_actor():
    # Initialize weights between -3e-3 and 3e-3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1, activation="tanh", kernel_initializer=last_init)(out)
# Our upper bound is 2.0 for Pendulum.
outputs = outputs * upper_bound
model = tf.keras.Model(inputs, outputs)
return model
```
# DDPG: Critic Network
```
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(16, activation="relu")(state_input)
state_out = layers.Dense(32, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(32, activation="relu")(action_input)
    # Both inputs are passed through separate layers before being concatenated
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
    # Outputs a single value for a given state-action pair
model = tf.keras.Model([state_input, action_input], outputs)
return model
```
# Exploration vs exploitation
* Let us add noise
```
class OUActionNoise:
def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
self.theta = theta
self.mean = mean
self.std_dev = std_deviation
self.dt = dt
self.x_initial = x_initial
self.reset()
def __call__(self):
# Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
x = (
self.x_prev
+ self.theta * (self.mean - self.x_prev) * self.dt
+ self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
)
# Store x into x_prev
# Makes next noise dependent on current one
self.x_prev = x
return x
def reset(self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)
def noisy_policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
# We make sure action is within bounds
legal_action = np.clip(sampled_actions, lower_bound, upper_bound)
return [np.squeeze(legal_action)]
```
# Replay Buffer and Training
```
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
        # It tells us the number of times record() was called.
self.buffer_counter = 0
        # Instead of a list of tuples, as in the classic experience-replay formulation,
        # we use a separate np.array for each element of the tuple
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
    # Takes an (s, a, r, s') observation tuple as input
    def record(self, obs_tuple):
        # Wrap the index around once buffer_capacity is exceeded,
        # replacing the oldest records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
```
# Soft Update
```
# This updates the target network parameters slowly,
# based on the rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
```
## Train the model
```
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# Takes about 4 min to train
for ep in range(total_episodes):
prev_state = env.reset()
episodic_reward = 0
while True:
# Uncomment this to see the Actor in action
# But not in a python notebook.
# env.render()
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
action = noisy_policy(tf_prev_state, ou_noise)
        # Receive state and reward from the environment.
state, reward, done, info = env.step(action)
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward))
avg_reward_list.append(avg_reward)
```
# DDPG Reward Plot
```
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Episodic Reward")
plt.show()
# Save the weights
actor_model.save_weights("pendulum_actor.h5")
critic_model.save_weights("pendulum_critic.h5")
target_actor.save_weights("pendulum_target_actor.h5")
target_critic.save_weights("pendulum_target_critic.h5")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
obs = env.reset()
frames = []  # list to store the rendered frames at each step
for _ in range(300):
frames.append(env.render(mode='rgb_array'))
obs = tf.expand_dims(tf.convert_to_tensor(obs), 0)
obs,reward,done, _ = env.step(actor_model(obs))
if done:
break
```
# Visualize the trained agent
```
patch = plt.imshow(frames[0])
plt.axis('off')
def animate(i):
patch.set_data(frames[i])
anim = animation.FuncAnimation(plt.gcf(), animate, \
frames=len(frames), interval=100)
# For Colab
from IPython.display import HTML
HTML(anim.to_html5_video())
```
|
github_jupyter
|
# In colab please uncomment this to install Atari
# Box2d is a 2D physics engine.
!pip install box2d-py
# And for visualization on Colab install
!pip install pyglet
!apt-get install -y xvfb python-opengl > /dev/null 2>&1
!pip install gym pyvirtualdisplay > /dev/null 2>&1
# Source: https://keras.io/examples/rl/ddpg_pendulum/
import warnings
warnings.filterwarnings('ignore')
import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
from IPython import display as ipythondisplay ## Needed on colab
## Need to set a virtual Display on colab, else ipythondisplay would not work
from pyvirtualdisplay import Display
display = Display(visible=0, size=(400, 300))
display.start()
env = gym.make("Pendulum-v0")
obs = env.reset()
img = env.render(mode='rgb_array')
env.close()
plt.imshow(img)
num_states = env.observation_space.shape[0]
print("Size of State Space -> {}".format(num_states))
num_actions = env.action_space.shape[0]
print("Size of Action Space -> {}".format(num_actions))
upper_bound = env.action_space.high[0]
lower_bound = env.action_space.low[0]
print("Max Value of Action -> {}".format(upper_bound))
print("Min Value of Action -> {}".format(lower_bound))
def get_actor():
# Initialize weights between -3e-3 and 3-e3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1, activation="tanh", kernel_initializer=last_init)(out)
# Our upper bound is 2.0 for Pendulum.
outputs = outputs * upper_bound
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(16, activation="relu")(state_input)
state_out = layers.Dense(32, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(32, activation="relu")(action_input)
# Both are passed through seperate layer before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
# Outputs single value for give state-action
model = tf.keras.Model([state_input, action_input], outputs)
return model
class OUActionNoise:
def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
self.theta = theta
self.mean = mean
self.std_dev = std_deviation
self.dt = dt
self.x_initial = x_initial
self.reset()
def __call__(self):
# Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
x = (
self.x_prev
+ self.theta * (self.mean - self.x_prev) * self.dt
+ self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
)
# Store x into x_prev
# Makes next noise dependent on current one
self.x_prev = x
return x
def reset(self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)
def noisy_policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
# We make sure action is within bounds
legal_action = np.clip(sampled_actions, lower_bound, upper_bound)
return [np.squeeze(legal_action)]
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
# Its tells us num of times record() was called.
self.buffer_counter = 0
# Instead of list of tuples as the exp.replay concept go
# We use different np.arrays for each tuple element
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
# Takes (s,a,r,s') obervation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This update target parameters slowly
# Based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# Takes about 4 min to train
for ep in range(total_episodes):
prev_state = env.reset()
episodic_reward = 0
while True:
# Uncomment this to see the Actor in action
# But not in a python notebook.
# env.render()
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
action = noisy_policy(tf_prev_state, ou_noise)
# Recieve state and reward from environment.
state, reward, done, info = env.step(action)
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward))
avg_reward_list.append(avg_reward)
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
plt.show()
# Save the weights
actor_model.save_weights("pendulum_actor.h5")
critic_model.save_weights("pendulum_critic.h5")
target_actor.save_weights("pendulum_target_actor.h5")
target_critic.save_weights("pendulum_target_critic.h5")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
obs = env.reset()
frames = [] # array to store state space at each step
for _ in range(300):
frames.append(env.render(mode='rgb_array'))
obs = tf.expand_dims(tf.convert_to_tensor(obs), 0)
obs,reward,done, _ = env.step(actor_model(obs))
if done:
break
patch = plt.imshow(frames[0])
plt.axis('off')
def animate(i):
patch.set_data(frames[i])
anim = animation.FuncAnimation(plt.gcf(), animate, \
frames=len(frames), interval=100)
# For Colab
from IPython.display import HTML
HTML(anim.to_html5_video())
```
## repeat for MLP
%matplotlib inline
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.preprocessing import OneHotEncoder
# load mnist
mnist = fetch_openml('mnist_784')
X, y = mnist.data, mnist.target
X.shape, y.shape
# preprocessing
X = X.T / 255.0
y = OneHotEncoder().fit_transform(y.astype('int32').reshape(-1,1)).toarray().T
X.shape, y.shape
# make train/test split
m = 60000
X_train, X_test = X[:,:m], X[:,m:]
y_train, y_test = y[:,:m], y[:,m:]
# shuffle
seed = 123456
np.random.seed(seed)
shuffle = np.random.permutation(m)
X_train, y_train = X_train[:, shuffle], y_train[:,shuffle]
X_train.shape, y_train.shape
# build MLP
n_samples = 60000
input_dims = 784
hidden_dims = 64
output_dims = 10
# weights/bias
W1 = np.random.randn(hidden_dims, input_dims)
b1 = np.zeros((hidden_dims, 1))
W2 = np.random.randn(output_dims, hidden_dims)
b2 = np.zeros((output_dims, 1))
# training
lr = 0.1
for ep in range(1000):
# forward pass
    Z1 = W1 @ X_train + b1 # (64, 784) @ (784, 60000) + (64, 1)
    A1 = 1 / (1 + np.exp(-Z1)) # sigmoid: 64 * 60000
    Z2 = W2 @ A1 + b2 # (10, 64) @ (64, 60000) + (10, 1)
A2 = np.exp(Z2) / np.exp(Z2).sum(axis = 0) # 10 * 60000, prob for each class
# calculate loss
    L = -np.sum(y_train * np.log(A2))/n_samples # scalar cross-entropy loss
# backward pass
    dZ2 = A2 - y_train # 10 * 60000, dL/dZ2 = Y_hat - Y (softmax + cross-entropy gradient)
    dW2 = dZ2 @ A1.T / n_samples # (10, 60000) @ (60000, 64) / 60000 ==> (10, 64)
db2 = dZ2.sum(axis = 1, keepdims = True)/n_samples # (10, 1) <== (10, 60000).sum(axis = 1, keepdims= True)
    dA1 = W2.T @ dZ2 # (10, 64).T @ (10, 60000) ==> (64, 60000)
dZ1 = dA1 * A1 * (1 - A1) # d_sigmoid
dW1 = dZ1 @ X_train.T / n_samples
db1 = dZ1.sum(axis=1, keepdims = True)/n_samples
# update W/b
W1 -= lr * dW1
W2 -= lr * dW2
b1 -= lr * db1
b2 -= lr * db2
# print
print('\nThe loss at epoch #%2d is %2.4f'%(ep, L) if ep%100 == 0 else '', end = ' ')
# test
Z1 = W1 @ X_test + b1
A1 = 1 / (1 + np.exp(-Z1))
Z2 = W2 @ A1 + b2
Z2 = Z2 - Z2.max(axis = 0) # shift logits for numerical stability (softmax is shift-invariant)
A2 = np.exp(Z2)
A2 = A2/A2.sum(axis = 0)
# results
preds = np.argmax(A2, axis = 0)
truth = np.argmax(y_test, axis = 0)
print('\n\nclassification_report')
print(classification_report(truth, preds))
print('\n\naccuracy_score')
print(accuracy_score(truth, preds))
print('\n\nconfusion_matrix')
print(confusion_matrix(truth, preds))
```
```
# load and plot dataset
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot
# load dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
series = read_csv('test_mt.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# summarize first few rows
print(series.head())
# line plot
series.plot()
pyplot.show()
series
"3-01",339.7
"3-02",440.4
"3-03",315.9
"3-04",439.3
"3-05",401.3
"3-06",437.4
"3-07",575.5
"3-08",407.6
"3-09",682.0
"3-10",475.3
"3-11",581.3
"3-12",646.9
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
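# Illustrative usage of series_to_supervised on a toy series (assumed example, not from the original notebook):
#   series_to_supervised([[1], [2], [3], [4], [5]], n_in=1, n_out=3)
# builds columns var1(t-1), var1(t), var1(t+1), var1(t+2); after the NaN rows produced by shifting are
# dropped, two fully observed windows remain: [1, 2, 3, 4] and [2, 3, 4, 5].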
supervised = series_to_supervised(raw_values, 1, 3) # note: relies on raw_values and the pandas imports defined in the cells below
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.metrics import mean_squared_error
from math import sqrt
from matplotlib import pyplot
# date-time parsing function for loading the dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
# transform series into train and test sets for supervised learning
def prepare_data(series, n_test, n_lag, n_seq):
# extract raw values
raw_values = series.values
raw_values = raw_values.reshape(len(raw_values), 1)
# transform into supervised learning problem X, y
supervised = series_to_supervised(raw_values, n_lag, n_seq)
supervised_values = supervised.values
# split into train and test sets
train, test = supervised_values[0:-n_test], supervised_values[-n_test:]
return train, test
# make a persistence forecast
def persistence(last_ob, n_seq):
return [last_ob for i in range(n_seq)]
# evaluate the persistence model
def make_forecasts(train, test, n_lag, n_seq):
forecasts = list()
for i in range(len(test)):
X, y = test[i, 0:n_lag], test[i, n_lag:]
# make forecast
forecast = persistence(X[-1], n_seq)
# store the forecast
forecasts.append(forecast)
return forecasts
# evaluate the RMSE for each forecast time step
def evaluate_forecasts(test, forecasts, n_lag, n_seq):
for i in range(n_seq):
actual = test[:,(n_lag+i)]
predicted = [forecast[i] for forecast in forecasts]
rmse = sqrt(mean_squared_error(actual, predicted))
print('t+%d RMSE: %f' % ((i+1), rmse))
# plot the forecasts in the context of the original dataset
def plot_forecasts(series, forecasts, n_test):
# plot the entire dataset in blue
pyplot.plot(series.values)
# plot the forecasts in red
for i in range(len(forecasts)):
off_s = len(series) - n_test + i - 1
off_e = off_s + len(forecasts[i]) + 1
xaxis = [x for x in range(off_s, off_e)]
yaxis = [series.values[off_s]] + forecasts[i]
pyplot.plot(xaxis, yaxis, color='red')
# show the plot
pyplot.show()
# load dataset
series = read_csv('test_mt.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# configure
n_lag = 1
n_seq = 3
n_test = 10
# prepare data
train, test = prepare_data(series, n_test, n_lag, n_seq)
# make forecasts
forecasts = make_forecasts(train, test, n_lag, n_seq)
# evaluate forecasts
evaluate_forecasts(test, forecasts, n_lag, n_seq)
# plot forecasts
plot_forecasts(series, forecasts, n_test+2)
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
from matplotlib import pyplot
from numpy import array
# date-time parsing function for loading the dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# transform series into train and test sets for supervised learning
def prepare_data(series, n_test, n_lag, n_seq):
# extract raw values
raw_values = series.values
# transform data to be stationary
diff_series = difference(raw_values, 1)
diff_values = diff_series.values
diff_values = diff_values.reshape(len(diff_values), 1)
# rescale values to -1, 1
scaler = MinMaxScaler(feature_range=(-1, 1))
scaled_values = scaler.fit_transform(diff_values)
scaled_values = scaled_values.reshape(len(scaled_values), 1)
# transform into supervised learning problem X, y
supervised = series_to_supervised(scaled_values, n_lag, n_seq)
supervised_values = supervised.values
# split into train and test sets
train, test = supervised_values[0:-n_test], supervised_values[-n_test:]
return scaler, train, test
# fit an LSTM network to training data
def fit_lstm(train, n_lag, n_seq, n_batch, nb_epoch, n_neurons):
# reshape training into [samples, timesteps, features]
X, y = train[:, 0:n_lag], train[:, n_lag:]
X = X.reshape(X.shape[0], 1, X.shape[1])
# design network
model = Sequential()
model.add(LSTM(n_neurons, batch_input_shape=(n_batch, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(y.shape[1]))
model.compile(loss='mean_squared_error', optimizer='adam')
# fit network
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=n_batch, verbose=0, shuffle=False)
model.reset_states()
return model
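# Note (added for clarity, not in the original notebook): because the LSTM is stateful, hidden state
# carries over between batches within an epoch; calling model.reset_states() after each manual epoch
# clears it so every pass over the training data starts from a fresh state.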
# make one forecast with an LSTM
def forecast_lstm(model, X, n_batch):
# reshape input pattern to [samples, timesteps, features]
X = X.reshape(1, 1, len(X))
# make forecast
forecast = model.predict(X, batch_size=n_batch)
# convert to array
return [x for x in forecast[0, :]]
# make forecasts over the test set with the fitted LSTM
def make_forecasts(model, n_batch, train, test, n_lag, n_seq):
forecasts = list()
for i in range(len(test)):
X, y = test[i, 0:n_lag], test[i, n_lag:]
# make forecast
forecast = forecast_lstm(model, X, n_batch)
# store the forecast
forecasts.append(forecast)
return forecasts
# invert differenced forecast
def inverse_difference(last_ob, forecast):
# invert first forecast
inverted = list()
inverted.append(forecast[0] + last_ob)
# propagate difference forecast using inverted first value
for i in range(1, len(forecast)):
inverted.append(forecast[i] + inverted[i-1])
return inverted
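# Sketch of how inverse_difference undoes the differencing (toy numbers, assumed for illustration):
#   with last_ob = 13 and a forecast of differences [2, 1, 4], the inverted forecast is
#   [13 + 2, 15 + 1, 16 + 4] = [15, 16, 20], i.e. each predicted change is accumulated onto the
#   last real observation.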
# inverse data transform on forecasts
def inverse_transform(series, forecasts, scaler, n_test):
inverted = list()
for i in range(len(forecasts)):
# create array from forecast
forecast = array(forecasts[i])
forecast = forecast.reshape(1, len(forecast))
# invert scaling
inv_scale = scaler.inverse_transform(forecast)
inv_scale = inv_scale[0, :]
# invert differencing
index = len(series) - n_test + i - 1
last_ob = series.values[index]
inv_diff = inverse_difference(last_ob, inv_scale)
# store
inverted.append(inv_diff)
return inverted
# evaluate the RMSE for each forecast time step
def evaluate_forecasts(test, forecasts, n_lag, n_seq):
for i in range(n_seq):
actual = [row[i] for row in test]
predicted = [forecast[i] for forecast in forecasts]
rmse = sqrt(mean_squared_error(actual, predicted))
print('t+%d RMSE: %f' % ((i+1), rmse))
# plot the forecasts in the context of the original dataset
def plot_forecasts(series, forecasts, n_test):
# plot the entire dataset in blue
pyplot.plot(series.values)
# plot the forecasts in red
for i in range(len(forecasts)):
off_s = len(series) - n_test + i - 1
off_e = off_s + len(forecasts[i]) + 1
xaxis = [x for x in range(off_s, off_e)]
yaxis = [series.values[off_s]] + forecasts[i]
pyplot.plot(xaxis, yaxis, color='red')
# show the plot
pyplot.show()
# load dataset
series = read_csv('test_mt.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# configure
n_lag = 1
n_seq = 3
n_test = 10
n_epochs = 1500
n_batch = 1
n_neurons = 1
# prepare data
scaler, train, test = prepare_data(series, n_test, n_lag, n_seq)
# fit model
model = fit_lstm(train, n_lag, n_seq, n_batch, n_epochs, n_neurons)
# make forecasts
forecasts = make_forecasts(model, n_batch, train, test, n_lag, n_seq)
# inverse transform forecasts and test
forecasts = inverse_transform(series, forecasts, scaler, n_test+2)
actual = [row[n_lag:] for row in test]
actual = inverse_transform(series, actual, scaler, n_test+2)
# evaluate forecasts
evaluate_forecasts(actual, forecasts, n_lag, n_seq)
# plot forecasts
plot_forecasts(series, forecasts, n_test+2)
import pandas as pd
df=pd.read_csv("household_power_consumption.csv")
df.head()
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(RepeatVector(n_outputs))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(100, activation='relu')))
model.add(TimeDistributed(Dense(1)))
model.compile(loss='mse', optimizer='adam')
# multivariate multi-step encoder-decoder lstm
from math import sqrt
from numpy import split
from numpy import array
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
# split a univariate dataset into train/test sets
def split_dataset(data):
# split into standard weeks
train, test = data[1:-328], data[-328:-6]
# restructure into windows of weekly data
train = array(split(train, len(train)/7))
test = array(split(test, len(test)/7))
return train, test
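# Illustration (hypothetical shapes, assuming one row per day with 8 columns as in a daily-resampled
# version of the household power data): split_dataset() would return arrays shaped (n_weeks, 7, 8);
# the test slice data[-328:-6] spans 322 days, i.e. 46 standard weeks.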
# evaluate one or more weekly forecasts against expected values
def evaluate_forecasts(actual, predicted):
scores = list()
# calculate an RMSE score for each day
for i in range(actual.shape[1]):
# calculate mse
mse = mean_squared_error(actual[:, i], predicted[:, i])
# calculate rmse
rmse = sqrt(mse)
# store
scores.append(rmse)
# calculate overall RMSE
s = 0
for row in range(actual.shape[0]):
for col in range(actual.shape[1]):
s += (actual[row, col] - predicted[row, col])**2
score = sqrt(s / (actual.shape[0] * actual.shape[1]))
return score, scores
# summarize scores
def summarize_scores(name, score, scores):
s_scores = ', '.join(['%.1f' % s for s in scores])
print('%s: [%.3f] %s' % (name, score, s_scores))
# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=7):
# flatten data
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2]))
X, y = list(), list()
in_start = 0
# step over the entire history one time step at a time
for _ in range(len(data)):
# define the end of the input sequence
in_end = in_start + n_input
out_end = in_end + n_out
# ensure we have enough data for this instance
if out_end <= len(data):
X.append(data[in_start:in_end, :])
y.append(data[in_end:out_end, 0])
# move along one time step
in_start += 1
return array(X), array(y)
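# Sketch of the sliding window built by to_supervised (description added for clarity, not from the
# original notebook): with n_input=14 and n_out=7, each X[i] holds 14 consecutive time steps of all
# features and y[i] holds the next 7 values of the first column, with the window advanced one step at a time.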
# train the model
def build_model(train, n_input):
# prepare data
train_x, train_y = to_supervised(train, n_input)
# define parameters
verbose, epochs, batch_size = 0, 50, 16
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
# define model
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(RepeatVector(n_outputs))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(100, activation='relu')))
model.add(TimeDistributed(Dense(1)))
model.compile(loss='mse', optimizer='adam')
# fit network
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)
return model
# make a forecast
def forecast(model, history, n_input):
# flatten data
data = array(history)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))
# retrieve last observations for input data
input_x = data[-n_input:, :]
# reshape into [1, n_input, n]
input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1]))
# forecast the next week
yhat = model.predict(input_x, verbose=0)
# we only want the vector forecast
yhat = yhat[0]
return yhat
# evaluate a single model
def evaluate_model(train, test, n_input):
# fit model
model = build_model(train, n_input)
# history is a list of weekly data
history = [x for x in train]
# walk-forward validation over each week
predictions = list()
for i in range(len(test)):
# predict the week
yhat_sequence = forecast(model, history, n_input)
# store the predictions
predictions.append(yhat_sequence)
# get real observation and add to history for predicting the next week
history.append(test[i, :])
# evaluate predictions days for each week
predictions = array(predictions)
score, scores = evaluate_forecasts(test[:, :, 0], predictions)
return score, scores
# load the new file
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime'])
# split into train and test
train, test = split_dataset(dataset.values)
# evaluate model and get scores
n_input = 14
score, scores = evaluate_model(train, test, n_input)
# summarize scores
summarize_scores('lstm', score, scores)
# plot scores
days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
pyplot.plot(days, scores, marker='o', label='lstm')
pyplot.show()
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime'])
df=pd.read_csv("household_power_consumption.csv")
df
df = pd.read_csv('household_power_consumption.txt',sep = ';',
parse_dates={'dt':['Date','Time']},
infer_datetime_format=True,
low_memory=False, na_values=['nan','?'],
index_col='dt')
df=pd.DataFrame()
# multivariate multi-step encoder-decoder lstm
from math import sqrt
from numpy import split
from numpy import array
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
# split a univariate dataset into train/test sets
def split_dataset(data):
# split into standard weeks
train, test = data[1:-328], data[-328:-6]
# restructure into windows of weekly data
train = array(split(train, len(train)/7))
test = array(split(test, len(test)/7))
return train, test
# evaluate one or more weekly forecasts against expected values
def evaluate_forecasts(actual, predicted):
scores = list()
# calculate an RMSE score for each day
for i in range(actual.shape[1]):
# calculate mse
mse = mean_squared_error(actual[:, i], predicted[:, i])
# calculate rmse
rmse = sqrt(mse)
# store
scores.append(rmse)
# calculate overall RMSE
s = 0
for row in range(actual.shape[0]):
for col in range(actual.shape[1]):
s += (actual[row, col] - predicted[row, col])**2
score = sqrt(s / (actual.shape[0] * actual.shape[1]))
return score, scores
# summarize scores
def summarize_scores(name, score, scores):
s_scores = ', '.join(['%.1f' % s for s in scores])
print('%s: [%.3f] %s' % (name, score, s_scores))
# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=7):
# flatten data
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2]))
X, y = list(), list()
in_start = 0
# step over the entire history one time step at a time
for _ in range(len(data)):
# define the end of the input sequence
in_end = in_start + n_input
out_end = in_end + n_out
# ensure we have enough data for this instance
if out_end <= len(data):
X.append(data[in_start:in_end, :])
y.append(data[in_end:out_end, 0])
# move along one time step
in_start += 1
return array(X), array(y)
# train the model
def build_model(train, n_input):
# prepare data
train_x, train_y = to_supervised(train, n_input)
# define parameters
verbose, epochs, batch_size = 0, 50, 16
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
# define model
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(RepeatVector(n_outputs))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(100, activation='relu')))
model.add(TimeDistributed(Dense(1)))
model.compile(loss='mse', optimizer='adam')
# fit network
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)
return model
# make a forecast
def forecast(model, history, n_input):
# flatten data
data = array(history)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))
# retrieve last observations for input data
input_x = data[-n_input:, :]
# reshape into [1, n_input, n]
input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1]))
# forecast the next week
yhat = model.predict(input_x, verbose=0)
# we only want the vector forecast
yhat = yhat[0]
return yhat
# evaluate a single model
def evaluate_model(train, test, n_input):
# fit model
model = build_model(train, n_input)
# history is a list of weekly data
history = [x for x in train]
# walk-forward validation over each week
predictions = list()
for i in range(len(test)):
# predict the week
yhat_sequence = forecast(model, history, n_input)
# store the predictions
predictions.append(yhat_sequence)
# get real observation and add to history for predicting the next week
history.append(test[i, :])
# evaluate predictions days for each week
predictions = array(predictions)
score, scores = evaluate_forecasts(test[:, :, 0], predictions)
return score, scores
# load the new file
dataset = pd.read_csv('household_power_consumption.txt',sep = ';',
header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']
)
# dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime'])
# split into train and test
train, test = split_dataset(dataset.values)
# evaluate model and get scores
n_input = 14
score, scores = evaluate_model(train, test, n_input)
# summarize scores
summarize_scores('lstm', score, scores)
# plot scores
days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
pyplot.plot(days, scores, marker='o', label='lstm')
pyplot.show()
# load and clean-up data
from numpy import nan
from numpy import isnan
from pandas import read_csv
from pandas import to_numeric
from numpy import split
from numpy import array
from math import sqrt
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import ConvLSTM2D
# fill missing values with a value at the same time one day ago
def fill_missing(values):
one_day = 60 * 24
for row in range(values.shape[0]):
for col in range(values.shape[1]):
if isnan(values[row, col]):
values[row, col] = values[row - one_day, col]
# load all data
dataset = read_csv('household_power_consumption.txt', sep=';', header=0, low_memory=False, infer_datetime_format=True, parse_dates={'datetime':[0,1]}, index_col=['datetime'])
# mark all missing values
dataset.replace('?', nan, inplace=True)
# make dataset numeric
dataset = dataset.astype('float32')
# fill missing
fill_missing(dataset.values)
# add a column for the remainder of sub metering
values = dataset.values
dataset['sub_metering_4'] = (values[:,0] * 1000 / 60) - (values[:,4] + values[:,5] + values[:,6])
# save updated dataset
dataset.to_csv('household_power_consumption.csv')
dataset
# multivariate multi-step encoder-decoder lstm
from math import sqrt
from numpy import split
from numpy import array
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
# split a univariate dataset into train/test sets
def split_dataset(data):
# split into standard weeks
train, test = data[1:-328], data[-328:-6]
# restructure into windows of weekly data
train = array(split(train, len(train)/7))
test = array(split(test, len(test)/7))
return train, test
# evaluate one or more weekly forecasts against expected values
def evaluate_forecasts(actual, predicted):
scores = list()
# calculate an RMSE score for each day
for i in range(actual.shape[1]):
# calculate mse
mse = mean_squared_error(actual[:, i], predicted[:, i])
# calculate rmse
rmse = sqrt(mse)
# store
scores.append(rmse)
# calculate overall RMSE
s = 0
for row in range(actual.shape[0]):
for col in range(actual.shape[1]):
s += (actual[row, col] - predicted[row, col])**2
score = sqrt(s / (actual.shape[0] * actual.shape[1]))
return score, scores
# summarize scores
def summarize_scores(name, score, scores):
s_scores = ', '.join(['%.1f' % s for s in scores])
print('%s: [%.3f] %s' % (name, score, s_scores))
# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=7):
# flatten data
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2]))
X, y = list(), list()
in_start = 0
# step over the entire history one time step at a time
for _ in range(len(data)):
# define the end of the input sequence
in_end = in_start + n_input
out_end = in_end + n_out
# ensure we have enough data for this instance
if out_end <= len(data):
X.append(data[in_start:in_end, :])
y.append(data[in_end:out_end, 0])
# move along one time step
in_start += 1
return array(X), array(y)
# train the model
def build_model(train, n_input):
# prepare data
train_x, train_y = to_supervised(train, n_input)
# define parameters
verbose, epochs, batch_size = 0, 50, 16
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
# define model
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(RepeatVector(n_outputs))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(100, activation='relu')))
model.add(TimeDistributed(Dense(1)))
model.compile(loss='mse', optimizer='adam')
# fit network
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)
return model
# make a forecast
def forecast(model, history, n_input):
# flatten data
data = array(history)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))
# retrieve last observations for input data
input_x = data[-n_input:, :]
# reshape into [1, n_input, n]
input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1]))
# forecast the next week
yhat = model.predict(input_x, verbose=0)
# we only want the vector forecast
yhat = yhat[0]
return yhat
# evaluate a single model
def evaluate_model(train, test, n_input):
# fit model
model = build_model(train, n_input)
# history is a list of weekly data
history = [x for x in train]
# walk-forward validation over each week
predictions = list()
for i in range(len(test)):
# predict the week
yhat_sequence = forecast(model, history, n_input)
# store the predictions
predictions.append(yhat_sequence)
# get real observation and add to history for predicting the next week
history.append(test[i, :])
# evaluate predictions days for each week
predictions = array(predictions)
score, scores = evaluate_forecasts(test[:, :, 0], predictions)
return score, scores
# load the new file
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime'])
# split into train and test
train, test = split_dataset(dataset.values)
# evaluate model and get scores
n_input = 14
score, scores = evaluate_model(train, test, n_input)
# summarize scores
summarize_scores('lstm', score, scores)
# plot scores
days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
pyplot.plot(days, scores, marker='o', label='lstm')
pyplot.show()
```
|
github_jupyter
|
# load and plot dataset
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot
# load dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
series = read_csv('test_mt.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# summarize first few rows
print(series.head())
# line plot
series.plot()
pyplot.show()
series
"3-01",339.7
"3-02",440.4
"3-03",315.9
"3-04",439.3
"3-05",401.3
"3-06",437.4
"3-07",575.5
"3-08",407.6
"3-09",682.0
"3-10",475.3
"3-11",581.3
"3-12",646.9
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
supervised = series_to_supervised(raw_values, 1, 3)
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.metrics import mean_squared_error
from math import sqrt
from matplotlib import pyplot
# date-time parsing function for loading the dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
# transform series into train and test sets for supervised learning
def prepare_data(series, n_test, n_lag, n_seq):
# extract raw values
raw_values = series.values
raw_values = raw_values.reshape(len(raw_values), 1)
# transform into supervised learning problem X, y
supervised = series_to_supervised(raw_values, n_lag, n_seq)
supervised_values = supervised.values
# split into train and test sets
train, test = supervised_values[0:-n_test], supervised_values[-n_test:]
return train, test
# make a persistence forecast
def persistence(last_ob, n_seq):
return [last_ob for i in range(n_seq)]
# evaluate the persistence model
def make_forecasts(train, test, n_lag, n_seq):
forecasts = list()
for i in range(len(test)):
X, y = test[i, 0:n_lag], test[i, n_lag:]
# make forecast
forecast = persistence(X[-1], n_seq)
# store the forecast
forecasts.append(forecast)
return forecasts
# evaluate the RMSE for each forecast time step
def evaluate_forecasts(test, forecasts, n_lag, n_seq):
for i in range(n_seq):
actual = test[:,(n_lag+i)]
predicted = [forecast[i] for forecast in forecasts]
rmse = sqrt(mean_squared_error(actual, predicted))
print('t+%d RMSE: %f' % ((i+1), rmse))
# plot the forecasts in the context of the original dataset
def plot_forecasts(series, forecasts, n_test):
# plot the entire dataset in blue
pyplot.plot(series.values)
# plot the forecasts in red
for i in range(len(forecasts)):
off_s = len(series) - n_test + i - 1
off_e = off_s + len(forecasts[i]) + 1
xaxis = [x for x in range(off_s, off_e)]
yaxis = [series.values[off_s]] + forecasts[i]
pyplot.plot(xaxis, yaxis, color='red')
# show the plot
pyplot.show()
# load dataset
series = read_csv('test_mt.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# configure
n_lag = 1
n_seq = 3
n_test = 10
# prepare data
train, test = prepare_data(series, n_test, n_lag, n_seq)
# make forecasts
forecasts = make_forecasts(train, test, n_lag, n_seq)
# evaluate forecasts
evaluate_forecasts(test, forecasts, n_lag, n_seq)
# plot forecasts
plot_forecasts(series, forecasts, n_test+2)
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
from matplotlib import pyplot
from numpy import array
# date-time parsing function for loading the dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# transform series into train and test sets for supervised learning
def prepare_data(series, n_test, n_lag, n_seq):
# extract raw values
raw_values = series.values
# transform data to be stationary
diff_series = difference(raw_values, 1)
diff_values = diff_series.values
diff_values = diff_values.reshape(len(diff_values), 1)
# rescale values to -1, 1
scaler = MinMaxScaler(feature_range=(-1, 1))
scaled_values = scaler.fit_transform(diff_values)
scaled_values = scaled_values.reshape(len(scaled_values), 1)
# transform into supervised learning problem X, y
supervised = series_to_supervised(scaled_values, n_lag, n_seq)
supervised_values = supervised.values
# split into train and test sets
train, test = supervised_values[0:-n_test], supervised_values[-n_test:]
return scaler, train, test
# fit an LSTM network to training data
def fit_lstm(train, n_lag, n_seq, n_batch, nb_epoch, n_neurons):
# reshape training into [samples, timesteps, features]
X, y = train[:, 0:n_lag], train[:, n_lag:]
X = X.reshape(X.shape[0], 1, X.shape[1])
# design network
model = Sequential()
model.add(LSTM(n_neurons, batch_input_shape=(n_batch, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(y.shape[1]))
model.compile(loss='mean_squared_error', optimizer='adam')
# fit network
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=n_batch, verbose=0, shuffle=False)
model.reset_states()
return model
# make one forecast with an LSTM,
def forecast_lstm(model, X, n_batch):
# reshape input pattern to [samples, timesteps, features]
X = X.reshape(1, 1, len(X))
# make forecast
forecast = model.predict(X, batch_size=n_batch)
# convert to array
return [x for x in forecast[0, :]]
# evaluate the persistence model
def make_forecasts(model, n_batch, train, test, n_lag, n_seq):
forecasts = list()
for i in range(len(test)):
X, y = test[i, 0:n_lag], test[i, n_lag:]
# make forecast
forecast = forecast_lstm(model, X, n_batch)
# store the forecast
forecasts.append(forecast)
return forecasts
# invert differenced forecast
def inverse_difference(last_ob, forecast):
# invert first forecast
inverted = list()
inverted.append(forecast[0] + last_ob)
# propagate difference forecast using inverted first value
for i in range(1, len(forecast)):
inverted.append(forecast[i] + inverted[i-1])
return inverted
# inverse data transform on forecasts
def inverse_transform(series, forecasts, scaler, n_test):
inverted = list()
for i in range(len(forecasts)):
# create array from forecast
forecast = array(forecasts[i])
forecast = forecast.reshape(1, len(forecast))
# invert scaling
inv_scale = scaler.inverse_transform(forecast)
inv_scale = inv_scale[0, :]
# invert differencing
index = len(series) - n_test + i - 1
last_ob = series.values[index]
inv_diff = inverse_difference(last_ob, inv_scale)
# store
inverted.append(inv_diff)
return inverted
# evaluate the RMSE for each forecast time step
def evaluate_forecasts(test, forecasts, n_lag, n_seq):
for i in range(n_seq):
actual = [row[i] for row in test]
predicted = [forecast[i] for forecast in forecasts]
rmse = sqrt(mean_squared_error(actual, predicted))
print('t+%d RMSE: %f' % ((i+1), rmse))
# plot the forecasts in the context of the original dataset
def plot_forecasts(series, forecasts, n_test):
# plot the entire dataset in blue
pyplot.plot(series.values)
# plot the forecasts in red
for i in range(len(forecasts)):
off_s = len(series) - n_test + i - 1
off_e = off_s + len(forecasts[i]) + 1
xaxis = [x for x in range(off_s, off_e)]
yaxis = [series.values[off_s]] + forecasts[i]
pyplot.plot(xaxis, yaxis, color='red')
# show the plot
pyplot.show()
# load dataset
series = read_csv('test_mt.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# configure
n_lag = 1
n_seq = 3
n_test = 10
n_epochs = 1500
n_batch = 1
n_neurons = 1
# prepare data
scaler, train, test = prepare_data(series, n_test, n_lag, n_seq)
# fit model
model = fit_lstm(train, n_lag, n_seq, n_batch, n_epochs, n_neurons)
# make forecasts
forecasts = make_forecasts(model, n_batch, train, test, n_lag, n_seq)
# inverse transform forecasts and test
forecasts = inverse_transform(series, forecasts, scaler, n_test+2)
actual = [row[n_lag:] for row in test]
actual = inverse_transform(series, actual, scaler, n_test+2)
# evaluate forecasts
evaluate_forecasts(actual, forecasts, n_lag, n_seq)
# plot forecasts
plot_forecasts(series, forecasts, n_test+2)
import pandas as pd
df=pd.read_csv("household_power_consumption.csv")
df.head()
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(RepeatVector(n_outputs))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(100, activation='relu')))
model.add(TimeDistributed(Dense(1)))
model.compile(loss='mse', optimizer='adam')
# multivariate multi-step encoder-decoder lstm
from math import sqrt
from numpy import split
from numpy import array
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
# split a univariate dataset into train/test sets
def split_dataset(data):
# split into standard weeks
train, test = data[1:-328], data[-328:-6]
# restructure into windows of weekly data
train = array(split(train, len(train)/7))
test = array(split(test, len(test)/7))
return train, test
# evaluate one or more weekly forecasts against expected values
def evaluate_forecasts(actual, predicted):
scores = list()
# calculate an RMSE score for each day
for i in range(actual.shape[1]):
# calculate mse
mse = mean_squared_error(actual[:, i], predicted[:, i])
# calculate rmse
rmse = sqrt(mse)
# store
scores.append(rmse)
# calculate overall RMSE
s = 0
for row in range(actual.shape[0]):
for col in range(actual.shape[1]):
s += (actual[row, col] - predicted[row, col])**2
score = sqrt(s / (actual.shape[0] * actual.shape[1]))
return score, scores
# summarize scores
def summarize_scores(name, score, scores):
s_scores = ', '.join(['%.1f' % s for s in scores])
print('%s: [%.3f] %s' % (name, score, s_scores))
# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=7):
# flatten data
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2]))
X, y = list(), list()
in_start = 0
# step over the entire history one time step at a time
for _ in range(len(data)):
# define the end of the input sequence
in_end = in_start + n_input
out_end = in_end + n_out
# ensure we have enough data for this instance
if out_end <= len(data):
X.append(data[in_start:in_end, :])
y.append(data[in_end:out_end, 0])
# move along one time step
in_start += 1
return array(X), array(y)
# train the model
def build_model(train, n_input):
# prepare data
train_x, train_y = to_supervised(train, n_input)
# define parameters
verbose, epochs, batch_size = 0, 50, 16
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
# define model
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(RepeatVector(n_outputs))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(100, activation='relu')))
model.add(TimeDistributed(Dense(1)))
model.compile(loss='mse', optimizer='adam')
# fit network
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)
return model
# make a forecast
def forecast(model, history, n_input):
# flatten data
data = array(history)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))
# retrieve last observations for input data
input_x = data[-n_input:, :]
# reshape into [1, n_input, n]
input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1]))
# forecast the next week
yhat = model.predict(input_x, verbose=0)
# we only want the vector forecast
yhat = yhat[0]
return yhat
# evaluate a single model
def evaluate_model(train, test, n_input):
# fit model
model = build_model(train, n_input)
# history is a list of weekly data
history = [x for x in train]
# walk-forward validation over each week
predictions = list()
for i in range(len(test)):
# predict the week
yhat_sequence = forecast(model, history, n_input)
# store the predictions
predictions.append(yhat_sequence)
# get real observation and add to history for predicting the next week
history.append(test[i, :])
# evaluate predictions days for each week
predictions = array(predictions)
score, scores = evaluate_forecasts(test[:, :, 0], predictions)
return score, scores
# load the new file
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime'])
# split into train and test
train, test = split_dataset(dataset.values)
# evaluate model and get scores
n_input = 14
score, scores = evaluate_model(train, test, n_input)
# summarize scores
summarize_scores('lstm', score, scores)
# plot scores
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat']
pyplot.plot(days, scores, marker='o', label='lstm')
pyplot.show()
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime'])
df=pd.read_csv("household_power_consumption.csv")
df
df = pd.read_csv('household_power_consumption.txt',sep = ';',
parse_dates={'dt':['Date','Time']},
infer_datetime_format=True,
low_memory=False, na_values=['nan','?'],
index_col='dt')
df=pd.DataFrame()
# multivariate multi-step encoder-decoder lstm
from math import sqrt
from numpy import split
from numpy import array
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
# split a univariate dataset into train/test sets
def split_dataset(data):
# split into standard weeks
train, test = data[1:-328], data[-328:-6]
# restructure into windows of weekly data
train = array(split(train, len(train)/7))
test = array(split(test, len(test)/7))
return train, test
# evaluate one or more weekly forecasts against expected values
def evaluate_forecasts(actual, predicted):
scores = list()
# calculate an RMSE score for each day
for i in range(actual.shape[1]):
# calculate mse
mse = mean_squared_error(actual[:, i], predicted[:, i])
# calculate rmse
rmse = sqrt(mse)
# store
scores.append(rmse)
# calculate overall RMSE
s = 0
for row in range(actual.shape[0]):
for col in range(actual.shape[1]):
s += (actual[row, col] - predicted[row, col])**2
score = sqrt(s / (actual.shape[0] * actual.shape[1]))
return score, scores
# summarize scores
def summarize_scores(name, score, scores):
s_scores = ', '.join(['%.1f' % s for s in scores])
print('%s: [%.3f] %s' % (name, score, s_scores))
# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=7):
# flatten data
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2]))
X, y = list(), list()
in_start = 0
# step over the entire history one time step at a time
for _ in range(len(data)):
# define the end of the input sequence
in_end = in_start + n_input
out_end = in_end + n_out
# ensure we have enough data for this instance
if out_end <= len(data):
X.append(data[in_start:in_end, :])
y.append(data[in_end:out_end, 0])
# move along one time step
in_start += 1
return array(X), array(y)
# train the model
def build_model(train, n_input):
# prepare data
train_x, train_y = to_supervised(train, n_input)
# define parameters
verbose, epochs, batch_size = 0, 50, 16
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
# define model
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(RepeatVector(n_outputs))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(100, activation='relu')))
model.add(TimeDistributed(Dense(1)))
model.compile(loss='mse', optimizer='adam')
# fit network
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)
return model
# make a forecast
def forecast(model, history, n_input):
# flatten data
data = array(history)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))
# retrieve last observations for input data
input_x = data[-n_input:, :]
# reshape into [1, n_input, n]
input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1]))
# forecast the next week
yhat = model.predict(input_x, verbose=0)
# we only want the vector forecast
yhat = yhat[0]
return yhat
# evaluate a single model
def evaluate_model(train, test, n_input):
# fit model
model = build_model(train, n_input)
# history is a list of weekly data
history = [x for x in train]
# walk-forward validation over each week
predictions = list()
for i in range(len(test)):
# predict the week
yhat_sequence = forecast(model, history, n_input)
# store the predictions
predictions.append(yhat_sequence)
# get real observation and add to history for predicting the next week
history.append(test[i, :])
# evaluate predictions days for each week
predictions = array(predictions)
score, scores = evaluate_forecasts(test[:, :, 0], predictions)
return score, scores
# load the new file
dataset = pd.read_csv('household_power_consumption.txt',sep = ';',
header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']
)
# dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime'])
# split into train and test
train, test = split_dataset(dataset.values)
# evaluate model and get scores
n_input = 14
score, scores = evaluate_model(train, test, n_input)
# summarize scores
summarize_scores('lstm', score, scores)
# plot scores
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat']
pyplot.plot(days, scores, marker='o', label='lstm')
pyplot.show()
# load and clean-up data
from numpy import nan
from numpy import isnan
from pandas import read_csv
from pandas import to_numeric
from numpy import split
from numpy import array
from math import sqrt
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import ConvLSTM2D
# fill missing values with a value at the same time one day ago
def fill_missing(values):
one_day = 60 * 24
for row in range(values.shape[0]):
for col in range(values.shape[1]):
if isnan(values[row, col]):
values[row, col] = values[row - one_day, col]
# load all data
dataset = read_csv('household_power_consumption.txt', sep=';', header=0, low_memory=False, infer_datetime_format=True, parse_dates={'datetime':[0,1]}, index_col=['datetime'])
# mark all missing values
dataset.replace('?', nan, inplace=True)
# make dataset numeric
dataset = dataset.astype('float32')
# fill missing
fill_missing(dataset.values)
# add a column for for the remainder of sub metering
values = dataset.values
dataset['sub_metering_4'] = (values[:,0] * 1000 / 60) - (values[:,4] + values[:,5] + values[:,6])
# save updated dataset
dataset.to_csv('household_power_consumption.csv')
dataset
# multivariate multi-step encoder-decoder lstm
from math import sqrt
from numpy import split
from numpy import array
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
# split a univariate dataset into train/test sets
def split_dataset(data):
# split into standard weeks
train, test = data[1:-328], data[-328:-6]
# restructure into windows of weekly data
train = array(split(train, len(train)/7))
test = array(split(test, len(test)/7))
return train, test
# evaluate one or more weekly forecasts against expected values
def evaluate_forecasts(actual, predicted):
scores = list()
# calculate an RMSE score for each day
for i in range(actual.shape[1]):
# calculate mse
mse = mean_squared_error(actual[:, i], predicted[:, i])
# calculate rmse
rmse = sqrt(mse)
# store
scores.append(rmse)
# calculate overall RMSE
s = 0
for row in range(actual.shape[0]):
for col in range(actual.shape[1]):
s += (actual[row, col] - predicted[row, col])**2
score = sqrt(s / (actual.shape[0] * actual.shape[1]))
return score, scores
# summarize scores
def summarize_scores(name, score, scores):
s_scores = ', '.join(['%.1f' % s for s in scores])
print('%s: [%.3f] %s' % (name, score, s_scores))
# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=7):
# flatten data
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2]))
X, y = list(), list()
in_start = 0
# step over the entire history one time step at a time
for _ in range(len(data)):
# define the end of the input sequence
in_end = in_start + n_input
out_end = in_end + n_out
# ensure we have enough data for this instance
if out_end <= len(data):
X.append(data[in_start:in_end, :])
y.append(data[in_end:out_end, 0])
# move along one time step
in_start += 1
return array(X), array(y)
# train the model
def build_model(train, n_input):
# prepare data
train_x, train_y = to_supervised(train, n_input)
# define parameters
verbose, epochs, batch_size = 0, 50, 16
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
# define model
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(RepeatVector(n_outputs))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(100, activation='relu')))
model.add(TimeDistributed(Dense(1)))
model.compile(loss='mse', optimizer='adam')
# fit network
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)
return model
# make a forecast
def forecast(model, history, n_input):
# flatten data
data = array(history)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))
# retrieve last observations for input data
input_x = data[-n_input:, :]
# reshape into [1, n_input, n]
input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1]))
# forecast the next week
yhat = model.predict(input_x, verbose=0)
# we only want the vector forecast
yhat = yhat[0]
return yhat
# evaluate a single model
def evaluate_model(train, test, n_input):
# fit model
model = build_model(train, n_input)
# history is a list of weekly data
history = [x for x in train]
# walk-forward validation over each week
predictions = list()
for i in range(len(test)):
# predict the week
yhat_sequence = forecast(model, history, n_input)
# store the predictions
predictions.append(yhat_sequence)
# get real observation and add to history for predicting the next week
history.append(test[i, :])
# evaluate predictions days for each week
predictions = array(predictions)
score, scores = evaluate_forecasts(test[:, :, 0], predictions)
return score, scores
# load the new file
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime'])
# split into train and test
train, test = split_dataset(dataset.values)
# evaluate model and get scores
n_input = 14
score, scores = evaluate_model(train, test, n_input)
# summarize scores
summarize_scores('lstm', score, scores)
# plot scores
days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
pyplot.plot(days, scores, marker='o', label='lstm')
pyplot.show()
# Name
Data preparation using SparkSQL on YARN with Cloud Dataproc
# Label
Cloud Dataproc, GCP, Cloud Storage, YARN, SparkSQL, Kubeflow, pipelines, components
# Summary
A Kubeflow Pipeline component to prepare data by submitting a SparkSql job on YARN to Cloud Dataproc.
# Details
## Intended use
Use the component to run an Apache SparkSql job as one preprocessing step in a Kubeflow Pipeline.
## Runtime arguments
Argument | Description | Optional | Data type | Accepted values | Default
:--- | :---------- | :--- | :------- | :------ | :------
project_id | The ID of the Google Cloud Platform (GCP) project that the cluster belongs to. | No | GCPProjectID | |
region | The Cloud Dataproc region to handle the request. | No | GCPRegion | |
cluster_name | The name of the cluster to run the job. | No | String | |
queries | The queries to execute in the SparkSQL job. Specify multiple queries in one string by separating them with semicolons. You do not need to terminate queries with semicolons. | Yes | List | | None
query_file_uri | The HCFS URI of the script that contains the SparkSQL queries. | Yes | GCSPath | | None
script_variables | Mapping of the query's variable names to their values (equivalent to the SparkSQL command: SET name="value";). | Yes | Dict | | None
sparksql_job | The payload of a [SparkSqlJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob). | Yes | Dict | | None
job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | Dict | | None
wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | | 30
## Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created job. | String
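Inside a pipeline, a downstream step can consume this output through the task's `outputs` dictionary. The snippet below is only a sketch; `downstream_op` is a hypothetical component used for illustration:
```
# Inside a pipeline function: submit the job, then pass its ID to a later (hypothetical) step.
dataproc_task = dataproc_submit_sparksql_job_op(
    project_id=project_id,
    region=region,
    cluster_name=cluster_name,
    queries=queries)
downstream_op(job_id=dataproc_task.outputs['job_id'])
```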
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
* Ensure that the component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project (one way to do this with the `gcloud` CLI is sketched below).
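The following is a minimal sketch of that grant, run from a notebook cell; the project ID and service-account email are placeholders you would replace with your own values:
```
# Placeholder project ID and service-account email -- substitute your own values.
!gcloud projects add-iam-policy-binding <your-project-id> \
    --member="serviceAccount:<kubeflow-user-sa>@<your-project-id>.iam.gserviceaccount.com" \
    --role="roles/dataproc.editor"
```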
## Detailed Description
This component submits a SparkSQL job to Cloud Dataproc through the [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipeline SDK:
```
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using KFP SDK
```
import kfp.components as comp
dataproc_submit_sparksql_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.0.4/components/gcp/dataproc/submit_sparksql_job/component.yaml')
help(dataproc_submit_sparksql_job_op)
```
### Sample
Note: The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the component.
#### Setup a Dataproc cluster
[Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.
#### Prepare a SparkSQL job
Either put your SparkSQL queries in the `queries` list, or upload your SparkSQL queries into a file in a Cloud Storage bucket and then enter the file's Cloud Storage path in `query_file_uri`. In this sample, we use a hard-coded query in the `queries` list to select data from a public CSV file in Cloud Storage.
For more details about Spark SQL, see [Spark SQL, DataFrames and Datasets Guide](https://spark.apache.org/docs/latest/sql-programming-guide.html).
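If you prefer the `query_file_uri` route instead, one possible approach is sketched below. It assumes `QUERY` has been defined as in the next cell and uses a placeholder bucket name that you would replace with a bucket your cluster can read:
```
# Placeholder bucket name -- replace with your own Cloud Storage bucket.
QUERY_FILE_URI = 'gs://<your-bucket>/queries/natality.sql'
with open('natality.sql', 'w') as f:
    f.write(QUERY)
!gsutil cp natality.sql $QUERY_FILE_URI
```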
#### Set sample parameters
```
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
QUERY = '''
DROP TABLE IF EXISTS natality_csv;
CREATE EXTERNAL TABLE natality_csv (
source_year BIGINT, year BIGINT, month BIGINT, day BIGINT, wday BIGINT,
state STRING, is_male BOOLEAN, child_race BIGINT, weight_pounds FLOAT,
plurality BIGINT, apgar_1min BIGINT, apgar_5min BIGINT,
mother_residence_state STRING, mother_race BIGINT, mother_age BIGINT,
gestation_weeks BIGINT, lmp STRING, mother_married BOOLEAN,
mother_birth_state STRING, cigarette_use BOOLEAN, cigarettes_per_day BIGINT,
alcohol_use BOOLEAN, drinks_per_week BIGINT, weight_gain_pounds BIGINT,
born_alive_alive BIGINT, born_alive_dead BIGINT, born_dead BIGINT,
ever_born BIGINT, father_race BIGINT, father_age BIGINT,
record_weight BIGINT
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
LOCATION 'gs://public-datasets/natality/csv';
SELECT * FROM natality_csv LIMIT 10;'''
EXPERIMENT_NAME = 'Dataproc - Submit SparkSQL Job'
```
#### Example pipeline that uses the component
```
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit SparkSQL job pipeline',
description='Dataproc submit SparkSQL job pipeline'
)
def dataproc_submit_sparksql_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
queries = json.dumps([QUERY]),
query_file_uri = '',
script_variables = '',
sparksql_job='',
job='',
wait_interval='30'
):
dataproc_submit_sparksql_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
queries=queries,
query_file_uri=query_file_uri,
script_variables=script_variables,
sparksql_job=sparksql_job,
job=job,
wait_interval=wait_interval)
```
#### Compile the pipeline
```
pipeline_func = dataproc_submit_sparksql_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
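Optionally, you can block until the run finishes. This sketch assumes the object returned by `run_pipeline` exposes an `id` attribute, as the experiment object above does:
```
# Wait up to 30 minutes for the run to complete, then print its final status.
run_detail = client.wait_for_run_completion(run_result.id, timeout=1800)
print(run_detail.run.status)
```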
## References
* [Spark SQL, DataFrames and Datasets Guide](https://spark.apache.org/docs/latest/sql-programming-guide.html)
* [SparkSqlJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob)
* [Cloud Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
```
a = [0, 1, 2, 3]
l = 0
r = len(a) - 1
count = 0
while l < r:
i, j = r, l
t = a[j]
while j <= r:
t = t & a[j]
count += t
j+= 1
t = a[i]
while i > l:
t = t & a[i]
count += t
i -= 1
# inc,dec
l += 1
r -= 1
if l == r:
count += a[l]
print(count)
from collections import defaultdict
import math
math.inf = float('Inf') #avoid crashing with python2
n, m = 7, 5
cost = [2, 1, 1, 2, 1, 1, 0]
path = [[1, 2, 0], [1, 3 ,1], [1, 4 ,0], [2, 5, 2], [2, 6, 0], [5, 7, 1]]
q = [[1, 2], [2, 3], [1, 3], [1, 7], [7, 1]]
dist = defaultdict(lambda:defaultdict(lambda:float('Inf')))
for a, b, c in path:
dist[a][b] = c
dist[b][a] = c
v = n + 1 #number of vertices
for i in range(1, v):
dist[i][i] = 0
def dfs(g, src, dst):
    pass  # left unimplemented in the original notebook
import heapq
a = [2,1,4,5,6,7]
class e:
def __init__(self, x):
self.x = x
def __lt__(self, x):
if self.x < x.x:
return False
return True
def __repr__(self):
return str(self.x)
a = list(map(e,a))
heapq.heapify(a)
heapq.heappop(a)
a
from collections import Counter, defaultdict
def gap(a, b):
t = abs(abs(a) - abs(b))
if a > b:
return -t
else:
return t
class Solution(object):
def gap(self, a, b):
t = abs(abs(a) - abs(b))
if a > b:
return -t
else:
return t
def twoSum(self, nums, target):
n = defaultdict(list)
for i, e in enumerate(nums):
n[e].append(i)
for i in n:
gp = gap(i, target)
if not gp in n:
continue
else:
if gp == i and len(n[gp]) > 1:
return sorted(n[gp][:2])
elif gp != i:
return sorted([n[i][0], n[gp][0]])
x = Solution()
x.twoSum([-3,4,3,90], 0)
x.gap(0,3)
# self.next = None
def toNum(it):
it = it[::-1]
l = len(it)
t = list(map(str, it))
t = ''.join(t)
# print(t)
return int(t)
def toList(num):
t = str(num)
t = list(map(int, t))
# print(t)
return t[::-1]
class Solution(object):
def addTwoNumbers(self, l1, l2):
return toList(toNum(l1) + toNum(l2))
x = Solution()
x.addTwoNumbers([2,4,3], [5,6,4])
class Solution(object):
def lengthOfLongestSubstring(self, s):
prev = {}
m = 0
seq = []
count = 0
last = 0
for i, e in enumerate(s):
print(i, e)
if e in prev:
m = max(m, count - last)
t = prev[e]
for k in seq[last:prev[e]]:
try:
prev.pop(k)
except:
pass
last = t + 1
seq += [e]
prev[e] = count
count += 1
else:
seq += [e]
prev[e] = count
count += 1
m = max(m, len(prev))
return m
x = Solution()
x.lengthOfLongestSubstring('dvdf')
import heapq as hq
class MaxHeap():
def __init__(self, x):
self.val = x
def __lt__(self, x):
if self.val < x.val:
return False
return True
def get(self):
return self.val
def __repr__(self):
return str(self.val)
class MinHeap():
def __init__(self, x):
self.val = x
def __lt__(self, x):
if self.val < x.val:
return True
return False
def get(self):
return self.val
def __repr__(self):
return str(self.val)
class Q:
def __init__(self, wrapper = MinHeap):
self.q = []
self.wrapper = wrapper
def push(self, x):
x = self.wrapper(x)
hq.heappush(self.q, x)
def peek(self):
        return self.q[0].get()  # peek at the root of the heap without popping
def pop(self):
return hq.heappop(self.q).get()
def __repr__(self):
return str(self.q)
def __len__(self):
return len(self.q)
def insert(mnq, mxq, x):
if len(mnq) > len(mxq):
mxq.push(x)
else:
mnq.push(x)
nums1 = [1, 2]
nums2 = [3 , 4, 5]
x = Q(MaxHeap)
y = Q()
for i in nums1 + nums2:
insert(y,x,i)
x, y
class Solution(object):
def longestPalindrome(self, s):
ln = len(s)
st = ""
m = 0
for i in range(ln - 1):
# print(i)
l, r = i, i
while r < ln - 1 and s[r] == s[r + 1]:
r += 1
while l >= 0 and r < ln:
print(l, r)
if s[l] != s[r]:
print(st)
print('broke', l, r)
l += 1
r -= 1
break
l -= 1
r += 1
if l == -1 or r == ln:
l += 1
r -= 1
if m < r - l:
m = r - l
st = s[l:r + 1]
return st if st != '' else s[:1]
x = Solution()
x.longestPalindrome('aaa')
from collections import defaultdict
def nums(rows):
t = 0
while True:
t = 0
while t < rows:
yield t
t += 1
t -= 2
while t > 0:
yield t
t -= 1
class Solution(object):
def convert(self, s, numRows):
rows = defaultdict(list)
it = nums(numRows)
for i in s:
t = next(it)
# print(t)
rows[t].append(i)
tmp = []
# print(rows)
for i in range(numRows):
tmp += rows[i]
return ''.join(tmp)
x = Solution()
x.convert("PAYPALISHIRING", 3)
class Solution(object):
def reverse(self, x):
neg = x < 0
data = str(abs(x))
data = int(data[::-1])
m = 0
for i in range(31):
m = m << 1
m = m | 1
print(m)
if neg:
if -data < ~m:
return 0
return -data
if data > m:
return 0
return data
x = Solution()
x.reverse(-123)
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
t = self
x = []
while t != None:
x += [t.val]
t = t.next
return str(x)
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
tmp = head
count = 0
while tmp != None:
count += 1
tmp = tmp.next
count -= n
if count < 0:
return None
t = 0
tmp = head
prev = None
prev2 = None
while t <= count and tmp != None:
# print(t)
prev2 = prev
prev = tmp
tmp = tmp.next
t += 1
print(prev2, prev, tmp)
if prev2 == None:
del prev
return tmp
else:
prev2.next = tmp
del prev
return head
x = Solution()
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
print(x.removeNthFromEnd(head, 1))
from collections import defaultdict
class Solution(object):
def findSubstring(self, s, words):
"""
:type s: str
:type words: List[str]
:rtype: List[int]
"""
indices = defaultdict()
for k,i in enumerate(words):
index = s.find(i)
while index >= 0:
indices[index] = k
index = s.find(i,index+1)
print(indices)
check = set()
indices
for i in sorted(indices.keys()):
print(i)
if not indices[i] in check:
check.add(indices[i])
            else:
                pass  # unfinished in the original notebook
x = Solution()
x.findSubstring("barfoothefoobarman", ["foo", "bar"])
from collections import Counter
t = int(input())
for _ in range(t):
n = int(input())
tickets = []
for _ in range(n):
tickets += [input().strip()]
tickets += [input().strip()]
c = {}
el = iter(tickets)
s1 = set()
d1 = set()
for i in range(n):
s = next(el)
d = next(el)
s1.add(s)
d1.add(d)
c[s] = d
start = (s1 - d1).pop()
s = []
while start in c:
s += ["{}-{}".format(start, c[start])]
start = c[start]
print(' '.join(s))
t = int(input())
for _ in range(t):
l, r = list(map(int, input().strip().split()))
nums = set()
tot = 0
for n in range(l, r+1):
# print(n)
while n > 1:
for i in [7, 5, 3, 2]:
if n % i == 0:
nums.add(i)
n //= i
continue
if n == 1:
break
nums.add(sum(map(int, str(n))))
break
# print(nums)
tot += sum(nums)
nums.clear()
print(tot)
from collections import defaultdict
s = input().strip()
graph = defaultdict(set)
exist = defaultdict(list)
for i, e in enumerate(s):
for j in exist[e]:
graph[j].add(i)
graph[i -1].add(i)
graph[i].add(i-1)
exist[e].append(i)
dp = {}
def dfs(g, src, dst, weight = 0):
global dp
if src == dst:
return weight
else:
weight += 1
key = (src,dst,weight)
if key in dp:
return dp[key]
m = float('Inf')
for i in g[src]:
m = min(m,dfs(g, i, dst, weight))
dp[key] = m
return m
print(dfs(graph, 0, len(s) -1, 0))
t = int(input())
for _ in range(t):
input()
s = input().strip()
vals = list(map(int, input().strip().split()))
stack = []
seq = []
match = {i: j for i, j in zip('>]})','<[{(') }
opening = {i for i in '<[{('}
m = -float('Inf')
for i, e in enumerate(s):
if e in opening:
# print("open")
stack.append(i)
elif len(stack) > 0:
# print(s[stack[-1]], match[e])
if s[stack[-1]] == match[e]:
seq.append(stack.pop())
for k in seq:
t = vals[k]+ vals[i]
if m < t:
m = t
else:
seq = []
print(m if m != -float('Inf') else 0)
s = "[[))"
vals = [300,2,-1,3,-5,6,9, 10]
stack = []
seq = []
match = {i: j for i, j in zip('>]})','<[{(') }
opening = {i for i in '<[{('}
m = -float('Inf')
for i, e in enumerate(s):
if e in opening:
# print("open")
stack.append(i)
elif len(stack) > 0:
# print(s[stack[-1]], match[e])
if s[stack[-1]] == match[e]:
seq.append(stack.pop())
for k in seq:
t = vals[k]+ vals[i]
if m < t:
m = t
else:
seq = []
print(m if m != -float('Inf') else 0)
a, b = 3, 5
def getRMB(n):
if n & 1 == 1:
return True
return False
def bitInverse(n, bitlen = 31):
out = 0
for _ in range(bitlen):
b = getRMB(n)
out = out << 1
if not b:
out = out | 1
n = n >> 1
return bitReverse(out, bitlen)
def bitReverse(n, bitlen = 31):
out = 0
for _ in range(bitlen):
b = getRMB(n)
out = out << 1
out |= b
n = n >> 1
return out
def add(a, b):
pos = True
if a < 0 and b < 0:
pos =False
out = 0
carry = False
for _ in range(31):
b1 = getRMB(a)
b2 = getRMB(b)
print(b1, b2, end = " ")
if b1 and b2:
print(' both')
if carry:
out = out << 1
out |= 1
else:
out = out << 1
carry = True
elif b1 or b2:
print(' one of em')
out = out << 1
if not carry:
out = out | 1
elif carry:
print(' carry')
out = out << 1
out = out | 1
carry = False
else:
print('nothing')
out = out << 1
print(out)
a = a >> 1
b = b >> 1
out = bitReverse(out, 31)
if not pos:
return -out
return out
add(-3, -5)
a = 999
b = 1
s = a ^ b
c = (a & b) << 1
while c != 0:
t = s ^ c
c = (s ^ c )<< 1
s = t
print("{:011b}".format(a))
print("{:011b}".format(b))
print("-"*11)
print("{:011b} current".format(s))
print("-"*11)
print("{:011b} expected".format(1000))
import psycopg
```

# Custom Python Transforms
There will be scenarios when the easiest thing for you to do is just to write some Python code. This SDK provides three extension points that you can use.
1. New Script Column
2. New Script Filter
3. Transform Partition
Each of these is supported in both the scale-up and the scale-out runtime. A key advantage of using these extension points is that you don't need to pull all of the data in order to create a dataframe. Your custom Python code will be run just like other transforms, at scale, by partition, and typically in parallel.
## Initial data prep
We start by loading crime data.
```
import azureml.dataprep as dprep
col = dprep.col
dflow = dprep.read_csv(path='../data/crime-spring.csv')
dflow.head(5)
```
We trim the dataset down and keep only the columns we are interested in.
```
dflow = dflow.keep_columns(['Case Number','Primary Type', 'Description', 'Latitude', 'Longitude'])
dflow = dflow.replace_na(columns=['Latitude', 'Longitude'], custom_na_list='')
dflow.head(5)
```
We look for null values using a filter. There are some, so next we'll look at a way to fill these missing values.
```
dflow.filter(col('Latitude').is_null()).head(5)
```
## Transform Partition
We want to replace all null values with 0, so we use a handy pandas function. This code is run per partition, not on the entire dataset at once, which means that on a large dataset it may run in parallel as the runtime processes the data partition by partition.
```
pt_dflow = dflow
dflow = pt_dflow.transform_partition("""
def transform(df, index):
df['Latitude'].fillna('0',inplace=True)
df['Longitude'].fillna('0',inplace=True)
return df
""")
dflow.head(5)
```
### Transform Partition With File
Being able to use arbitrary Python code to manipulate your data as a pandas DataFrame is extremely useful for complex, specific data operations that Data Prep doesn't handle natively. Unfortunately, code that sits inside a string isn't very testable.
To improve testability and ease of script writing, there is another interface, `transform_partition_with_file`, which takes the path to a Python script that must contain a function matching the 'transform' signature defined above.
The `script_path` argument should be a relative path to ensure Dataflow portability. Here `map_func.py` contains the same code as in the previous example.
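For reference, a minimal sketch of what `../data/map_func.py` would contain, given that it mirrors the inline example above:
```
# map_func.py -- same per-partition logic as the inline transform_partition example
def transform(df, index):
    df['Latitude'].fillna('0', inplace=True)
    df['Longitude'].fillna('0', inplace=True)
    return df
```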
```
dflow = pt_dflow.transform_partition_with_file('../data/map_func.py')
dflow.head(5)
```
## New Script Column
We want to create a new column that holds both the latitude and longitude. We could achieve this easily using a [Data Prep expression](./add-column-using-expression.ipynb), which executes faster. Alternatively, we can do it in Python with the `new_script_column()` method on the dataflow. Note that we use custom Python code here for demo purposes only; in practice, you should prefer Data Prep's native functions and fall back to custom Python code only when the functionality is not available in Data Prep.
```
dflow = dflow.new_script_column(new_column_name='coordinates', insert_after='Longitude', script="""
def newvalue(row):
return '(' + row['Latitude'] + ', ' + row['Longitude'] + ')'
""")
dflow.head(5)
```
## New Script Filter
Now we want to filter the dataset down to only the crimes that incurred over $300 in loss. We can build a Python expression that returns True if we want to keep the row, and False to drop the row.
```
dflow = dflow.new_script_filter("""
def includerow(row):
val = row['Description']
return 'OVER $ 300' in val
""")
dflow.head(5)
```
<a href="https://colab.research.google.com/github/KSY1526/myblog/blob/master/_notebooks/handssu5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# "[SSUDA] 간단한 오토인코더 실습해보기"
- author: Seong Yeon Kim
- categories: [SSUDA, book, jupyter, Deep Learning, Pytorch, AutoEncoder]
- image: images/220211.png
The supervised learning models I have studied so far naturally required label information.
The goal here, however, is to learn useful representations without any label information. If we use only linear activation functions and an MSE cost function, this is equivalent to PCA.
An autoencoder can be seen as a slight extension of this idea. It consists of a layer that maps the input to a compressed, lower-dimensional representation (the encoder) and a layer that reconstructs the input, with its original dimensionality, from that compressed representation (the decoder).
In the process, the decoder drives the model to learn a low-dimensional representation that is useful for reconstructing the input.
```
import numpy as np
import pandas as pd
import torch
from torch import Tensor
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.optim import Optimizer
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
```
# Implementing the Layers
```
class DenseLayer(nn.Module):
def __init__(self, input_size: int, neurons: int,
dropout: float = 1.0, activation: nn.Module = None) -> None:
super().__init__()
self.linear = nn.Linear(input_size, neurons)
self.activation = activation
if dropout < 1.0:
self.dropout = nn.Dropout(1 - dropout)
def forward(self, x: Tensor) -> Tensor:
        # Every PyTorch operation inherits from nn.Module, so backpropagation is handled automatically.
        x = self.linear(x) # multiply by the weights and add the bias
if self.activation:
x = self.activation(x)
if hasattr(self, 'dropout'):
x = self.dropout(x)
return x
```
I implemented a dense (hidden) layer using PyTorch. As expected, PyTorch makes the code much simpler.
In particular, because backpropagation is handled automatically, we only need to implement the forward function properly.
I wrote the code this way to stay close to the approach I studied previously; in practice, PyTorch offers even more convenient ways to do this.
In the next post I plan to study how to use PyTorch itself.
```
class ConvLayer(nn.Module):
def __init__(self, in_channels : int, out_channels : int,
filter_size: int, activation = None,
dropout: float = 1.0, flatten : bool = False) -> None:
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, filter_size,
padding = filter_size // 2)
self.activation = activation
self.flatten = flatten
if dropout < 1.0:
self.dropout = nn.Dropout(1 - dropout)
    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x) # perform the convolution
        if self.activation: # apply the activation function
            x = self.activation(x)
        if self.flatten: # flatten to 1-D if requested
            x = x.view(x.shape[0], x.shape[1] * x.shape[2] * x.shape[3])
        if hasattr(self, 'dropout'): # apply dropout if configured
            x = self.dropout(x)
        return x
```
This is a convolutional layer with a structure similar to the dense layer; it uses nn.Conv2d to perform the actual convolution.
# Implementing the Encoder and Decoder
```
class Encoder(nn.Module):
def __init__(self, hidden_dim: int = 28):
super(Encoder, self).__init__()
self.conv1 = ConvLayer(1, 14, 5, activation = nn.Tanh())
self.conv2 = ConvLayer(14, 7, 5, activation = nn.Tanh(), flatten = True)
self.dense1 = DenseLayer(7 * 28 * 28, hidden_dim, activation = nn.Tanh())
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = self.conv2(x)
x = self.dense1(x)
return x
```
This is the Encoder class, which acts as the encoder.
A convolutional layer converts the input from 1 channel to 14 channels, a second converts those 14 channels to 7 channels (each channel being 28*28 neurons), and the data is then flattened to 1-D.
The result is fed into a dense layer that finally outputs 28 features; every layer uses the hyperbolic tangent as its activation function.
```
class Decoder(nn.Module):
def __init__(self, hidden_dim: int = 28):
super(Decoder, self).__init__()
self.dense1 = DenseLayer(hidden_dim, 7 * 28 * 28, activation = nn.Tanh())
self.conv1 = ConvLayer(7, 14, 5, activation = nn.Tanh())
self.conv2 = ConvLayer(14, 1, 5, activation = nn.Tanh())
def forward(self, x: Tensor) -> Tensor:
x = self.dense1(x)
        x = x.view(-1, 7, 28, 28) # -1 tells PyTorch to infer the appropriate size
x = self.conv1(x)
x = self.conv2(x)
return x
```
This is the Decoder class, which acts as the decoder. As its structure shows, it mirrors the encoder symmetrically.
A dense layer takes the 28 features as input and outputs 7 * 28 * 28 features, which are then reshaped into 7 channels with a 2-D structure.
The data then passes through two convolutional layers, first expanding to 14 channels and then reducing back to 1 channel for the output.
As a result, the encoder's input and the decoder's output have the same shape.
```
class Autoencoder(nn.Module):
def __init__(self, hidden_dim: int = 28):
super(Autoencoder, self).__init__()
self.encoder = Encoder(hidden_dim)
self.decoder = Decoder(hidden_dim)
def forward(self, x: Tensor) -> Tensor:
encoding = self.encoder(x)
x = self.decoder(encoding)
return x, encoding
```
Finally, I created a class that runs the encoder and decoder implemented above together.
# Implementing the Trainer
```
from typing import Optional, Tuple
def permute_data(X: Tensor, y: Tensor):
    perm = torch.randperm(X.shape[0]) # shuffle the data
return X[perm], y[perm]
class PyTorchTrainer(object):
def __init__(self, model, optim, criterion):
self.model = model
self.optim = optim
self.loss = criterion
def _generate_batches(self, x: Tensor, y: Tensor, size: int = 32):
N = x.shape[0]
for ii in range(0, N, size):
x_batch, y_batch = x[ii:ii+size], y[ii:ii+size]
            yield x_batch, y_batch # yield batches lazily (generator)
def fit(self, x_train, y_train, x_test, y_test,
epochs: int = 100, eval_every: int = 10, batch_size: int = 32):
for e in range(epochs):
x_train, y_train = permute_data(x_train, y_train)
            # split the data into batches of the given size
            batch_generator = self._generate_batches(x_train, y_train, batch_size)
            for ii, (x_batch, y_batch) in enumerate(batch_generator):
                self.optim.zero_grad() # reset the gradients
                output = self.model(x_batch)[0] # feed the batch to the model
                loss = self.loss(output, y_batch) # compute the loss
                loss.backward() # backpropagate
                self.optim.step() # update the parameters
            # after each epoch, report the loss on the test set
            output = self.model(x_test)[0]
            loss = self.loss(output, y_test)
            print(e, loss)
```
I also implemented the trainer myself on top of PyTorch. Since I had previously built a trainer from scratch, this was not difficult.
The permute_data function shuffles the order of the data, and the _generate_batches function splits the data into batches of the given size.
Using yield inside a for loop in Python creates a generator; I don't know the details precisely, but I understand it to be a technique that helps with memory use and speed (see the short sketch below).
For each batch, zero_grad must be called before the forward pass to reset the gradients.
We feed the batch to the deep learning model, compute the loss, and update the parameters through backpropagation, gradually producing a better model.
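As an aside, here is a tiny sketch (not from the original post) of why a generator helps: batches are produced one at a time on demand instead of being built up front in memory.
```
def gen_batches(n, size):
    # nothing below runs until the caller asks for the next batch
    for i in range(0, n, size):
        yield i, min(i + size, n)

for start, end in gen_batches(10, 4):
    print(start, end)  # prints (0, 4), (4, 8), (8, 10) one pair at a time
```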
# A Simple Hands-On Experiment
```
import torchvision
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
img_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1305,), (0.3081,))
])
train_dataset = MNIST(root='../mnist_data/',
train=True,
download=True,
transform=img_transforms)
test_dataset = MNIST(root='../mnist_data/',
train=False,
download=True,
transform=img_transforms)
mnist_train = ((train_dataset.data.type(torch.float32).unsqueeze(3).permute(0, 3, 1, 2) / 255.0) - 0.1305) / 0.3081
mnist_test = ((test_dataset.data.type(torch.float32).unsqueeze(3).permute(0, 3, 1, 2) / 255.0) - 0.1305) / 0.3081
X_train = mnist_train
X_test = mnist_test
# rescale all data to the range -1 to 1
X_train_auto = (X_train - X_train.min()) / (X_train.max() - X_train.min()) * 2 - 1
X_test_auto = (X_test - X_train.min()) / (X_train.max() - X_train.min()) * 2 - 1
```
I loaded the well-known MNIST dataset and preprocessed it for the experiment.
```
model = Autoencoder(hidden_dim = 28)
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum = 0.9)
trainer = PyTorchTrainer(model, optimizer, criterion)
trainer.fit(X_train_auto, X_train_auto,
X_test_auto, X_test_auto,
epochs = 1, batch_size = 60)
```
There are many possible applications of autoencoders, but here we simply train the model to reconstruct its input.
To do that, we pass the same data as both the input and the target. After training, the loss indeed reaches a fairly low value.
An autoencoder can therefore be viewed as performing unsupervised learning, and it can also be used as a way of compressing the original data.
```
reconstructed_images, image_representations = model(X_test_auto)
def display_image(ax, t: Tensor):
n = t.detach().numpy()
ax.imshow(n.reshape(28, 28))
a = np.random.randint(0, 10000)
f, axarr = plt.subplots(1,2)
display_image(axarr[0], X_test[a])
display_image(axarr[1], reconstructed_images[a])
axarr[0].set_title("Originally")
axarr[1].set_title("AutoEncoder")
axarr[0].axis('off')
axarr[1].axis('off')
```
The reconstructed image stays quite similar to the original! The 28 features produced by the encoder apparently retain the important information well.
# Visualization with t-SNE
```
from sklearn.manifold import TSNE
tsne_result = TSNE(n_components=2, random_state=20190405).fit_transform(image_representations.detach().numpy())
```
Let's reduce the dimensionality to two dimensions using t-SNE.
More precisely, the original images are first compressed into 28 features by the autoencoder, and t-SNE is then applied to that result to reduce the features to two dimensions.
```
tsne_df = pd.DataFrame({'tsne_dim_1': tsne_result[:,0],
'tsne_dim_2': tsne_result[:,1],
'category': test_dataset.targets})
groups = tsne_df.groupby('category')
# Plot
fig, ax = plt.subplots(figsize=(25,25))
ax.margins(0.05) # add 5% padding for autoscaling
for name, group in groups:
ax.scatter(group['tsne_dim_1'], group['tsne_dim_2'], marker='o', label=name)
ax.legend()
```
Once reduced to two dimensions, the images can be visualized on a 2-D plot as shown above.
The colors in the plot correspond to the true digit labels, which were never used when training the autoencoder.
Even with only two compressed features the classes separate fairly well by color, so the 28 features should separate the labels even better.
Another takeaway is that the labels are separated quite well even though no labels were used during training. It feels like a deep learning version of PCA.
# Takeaways
I had only heard of autoencoders in passing, but after working through them directly I think I could now confidently explain to others what an autoencoder is.
It is fascinating to see deep learning work well for unsupervised learning too, even if this was only a first taste.
My theoretical understanding of deep learning has come along quite a bit; next it would be good to study the tools themselves, PyTorch and TensorFlow.
```
import hashlib
import numpy as np
import pybryt
sort = lambda l: sorted(l)
```
Complexity annotations can be used to assert that a block of student code runs within a certain level of complexity. PyBryt determines the complexity of a block of student code by comparing the number of execution steps for various input lengths and using least squares to determine which complexity class best represents the relationship.
Making use of complexity annotations is a two-part endeavor: you must create an annotation that tells PyBryt to look for time complexity in the memory footprint, and you must also mark the block of code in the student's submission on which PyBryt should run the complexity check.
Creating the annotation is simple: just instantiate the `pybryt.TimeComplexity` class. This annotation takes as its argument a complexity class supplied by the module `pybryt.complexities`:
```
import pybryt.complexities as cplx
cplx.complexity_classes
```
The `TimeComplexity` constructor also requires the `name` option to be supplied, as this is how the data from the student's submission will be tied to the annotation.
```
pybryt.TimeComplexity(cplx.linear, name="foo")
```
And that's all that's required on the reference implementation end. The real work of checking the time complexity of students' code comes in writing the scaffold provided to students, which must use PyBryt's `check_time_complexity` context manager to mark a block of code as a block that should be checked for time complexity. This context manager accepts as arguments the name of the block (which should be the same as the `name` provided to the annotation) and the size of input being run in that context.
For example, consider a simple exponentiation algorithm where the size of the input is the power that the base is being raised to.
```
def power(b, p):
if p == 0:
return 1
return b * power(b, p - 1)
with pybryt.check_time_complexity("foo", 10):
assert power(2, 10) == 2 ** 10
```
One data point, however, isn't enough. To collect data for multiple input sizes, you can use the context manager with the same name and vary the input length:
```
for p in [2, 5, 10, 15, 20]:
with pybryt.check_time_complexity("foo", p):
assert power(2, p) == 2 ** p
```
For simplicity, if the input you're running is an object that supports `len` for determining its size, you can also just pass the input itself as the second argument:
```
l = [1, 2, 3]
with pybryt.check_time_complexity("bar", l):
sort(l)
```
When used in the student's code (or in any context where the notebook isn't being executed by PyBryt to generate a memory footprint), the `check_time_complexity` context does nothing. However, when PyBryt is running the code, it tracks the number of steps it takes to execute the block. Because the input lengths needed to accurately measure time complexity can get very high, PyBryt doesn't trace for values inside these contexts; this means that any calls needed to satisfy value annotations must occur **outside** a `check_time_complexity` context, otherwise PyBryt won't see the value in the student's memory footprint.
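To make that last point concrete, here is a minimal sketch (using the `power` function above) of keeping value-producing calls outside the timed block so they still show up in the memory footprint:
```
# Computed outside the timed block, so PyBryt traces this value for value annotations.
result = power(2, 20)

with pybryt.check_time_complexity("foo", 20):
    # Only the step count matters here; values produced inside are not traced.
    power(2, 20)

assert result == 2 ** 20
```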
```
import torch
import pandas as pd
import numpy as np
import sklearn
from collections import Counter
from sklearn.utils import Bunch
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from itertools import combinations
import re
import os
import torch.nn as nn
import matplotlib.pyplot as plt
```
# Data Loading
```
path = r"E:\github\movie_hatespeech_detection\data\twitter\twitter.csv"
df = pd.read_csv(path, index_col=0)
df = df.rename(columns={'class': 'label'})
# df['label'] = df['label'].replace({0: 2, 2: 0})
df.head()
df.label.value_counts(normalize=True)
df.duplicated(subset='tweet').value_counts()
path = r'E:\github\movie_hatespeech_detection\data\movies_for_training\all_movies.csv'
movie_data = pd.read_csv(path, index_col=0)
movie_data.head()
print(df.label.value_counts())
df.label.value_counts().plot(kind='pie', subplots=True, autopct='%1.0f%%', title='Hate Speech Distribution')
```
## Data Splitting
```
def split_dataset(df, seed, test_size):
train, test = train_test_split(df, test_size=test_size, random_state=seed, shuffle=True)
return train.tweet.values, train.label.values, test.tweet.values, test.label.values
categories = [0,1,2]
seed = 11
test_size = 0.2
train, train_targets, test, test_targets = split_dataset(df, seed=seed, test_size=test_size)
train_size = len(train)
test_size = len(test)
def calculate_dataset_class_distribution(targets, categories):
df = pd.DataFrame({'category':targets})
s = df.category.value_counts(normalize=True)
s = s.reindex(categories)
return [s.index[0], s[0]], [s.index[1], s[1]], [s.index[2], s[2]]
train_class_distribution = calculate_dataset_class_distribution(train_targets, categories)
test_class_distribution = calculate_dataset_class_distribution(test_targets, categories)
print(train_class_distribution)
print(test_class_distribution)
train_ds = Bunch(data=train, target=train_targets)
test_ds = Bunch(data=test, target=test_targets)
```
## Building the Model
```
# Build the full vocabulary and map each word to a unique index
vocab = Counter()
#Indexing words from the training data
for text in train_ds.data:
for word in text.split(' '):
vocab[word.lower()]+=1
#Indexing words from the test data
for text in test_ds.data:
for word in text.split(' '):
vocab[word.lower()]+=1
for text in movie_data.text.values:
for word in text.split(' '):
vocab[word.lower()]+=1
total_words = len(vocab)
def get_word_2_index(vocab):
word2index = {}
for i,word in enumerate(vocab):
word2index[word.lower()] = i
return word2index
word2index = get_word_2_index(vocab)
print(len(word2index))
print(word2index["the"]) # Showing the index of 'the'
print (total_words)
# define the network
class News_20_Net(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(News_20_Net, self).__init__()
self.layer_1 = nn.Linear(input_size,hidden_size, bias=True).cuda()
self.relu = nn.ReLU().cuda()
self.layer_2 = nn.Linear(hidden_size, hidden_size, bias=True).cuda()
self.output_layer = nn.Linear(hidden_size, num_classes, bias=True).cuda()
# accept input and return an output
def forward(self, x):
out = self.layer_1(x)
out = self.relu(out)
out = self.layer_2(out)
out = self.relu(out)
out = self.output_layer(out)
return out
def get_batch(df,i,batch_size):
batches = []
results = []
    # Split into different batches, get the next batch
texts = df.data[i*batch_size:i*batch_size+batch_size]
# get the targets
categories = df.target[i*batch_size:i*batch_size+batch_size]
#print(categories)
for text in texts:
# Dimension, 196609
layer = np.zeros(total_words,dtype=float)
for word in text.split(' '):
layer[word2index[word.lower()]] += 1
batches.append(layer)
    # We have 3 categories
for category in categories:
#print(category)
index_y = -1
if category == 0:
index_y = 0
elif category == 1:
index_y = 1
elif category == 2:
index_y = 2
results.append(index_y)
# the training and the targets
return np.array(batches),np.array(results)
# Parameters
learning_rate = 0.001
num_epochs = 8
batch_size = 32
display_step = 10 # loss is recorded every display_step steps and printed every display_step*10 steps
# Network Parameters
hidden_size = 100 # 1st layer and 2nd layer number of features
input_size = total_words # Words in vocab
num_classes = len(categories) # the three tweet classes (0, 1, 2)
```
## Training
```
results = []
news_net = News_20_Net(input_size, hidden_size, num_classes)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss() # This includes the Softmax loss function
optimizer = torch.optim.Adam(news_net.parameters(), lr=learning_rate)
# Train the Model
for epoch in range(num_epochs):
# determine the number of min-batches based on the batch size and size of training data
total_batch = int(len(train_ds.data)/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_x,batch_y = get_batch(train_ds,i,batch_size)
articles = torch.cuda.FloatTensor(batch_x, device='cuda')
labels = torch.cuda.LongTensor(batch_y, device='cuda')
# Forward + Backward + Optimize
optimizer.zero_grad() # zero the gradient buffer
outputs = news_net(articles)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % display_step == 0:
result = 'Epoch [%d/%d], Step [%d/%d], Loss: %.4f'%(epoch+1, num_epochs, i+1, len(train_ds.data)/batch_size, loss.data)
results.append({'Epoch': epoch+1, 'Step': i+1, 'Loss': loss.data.item()})
if (i+1) % (display_step*10) == 0:
print({'Epoch': epoch+1, 'Step': i+1, 'Loss': loss.data.item()})
```
## Validation
```
# Test the Model
correct = 0
total = 0
total_test_data = len(test_ds.target)
iterates = total_test_data/batch_size # ignore last (<batch_size) batch
all_total = []
all_correct = []
labels_all = []
predicted_all = []
for i in range(int(iterates)):
batch_x_test,batch_y_test = get_batch(test_ds,i,batch_size)
articles = torch.FloatTensor(batch_x_test).to('cuda')
labels = torch.LongTensor(batch_y_test).to('cuda')
outputs = news_net(articles)
_, predicted = torch.max(outputs.data, 1)
labels_all.extend([x.item() for x in labels])
predicted_all.extend([x.item() for x in predicted])
report = classification_report(labels_all, predicted_all, output_dict=True)
df_report = pd.DataFrame(report).transpose()
df_report.round(2)
```
----
## Classification of Movies
### Load Movies
```
def annotate_df(movie_df):
utterances = movie_df.text.values
predictions = []
batch = []
for text in utterances:
# Bag-of-words vector with one count per vocabulary word (length = total_words)
layer = np.zeros(total_words,dtype=float)
for word in text.split(' '):
layer[word2index[word.lower()]] += 1
batch.append(layer)
texts = torch.FloatTensor(batch).to('cuda')
outputs = news_net(texts)
_, predicted = torch.max(outputs.data, 1)
predictions.extend([x.item() for x in predicted])
result = []
for i, pred in enumerate(predictions):
result.append({'index': i, 'label_bow_twitter': pred})
result_df = pd.DataFrame(result)
movie_df = movie_df.merge(result_df, right_index=True, left_index=True)
return movie_df
result_df = annotate_df(movie_data)
result_df.label_bow_twitter.unique()
result_df.label_bow_twitter.value_counts()
result_df.majority_answer.value_counts()
def get_classifications_results(df):
df = df.copy()
labels_all = df.majority_answer.values
predicted_all = df.label_bow_twitter.values
results_classification = classification_report(labels_all, predicted_all, output_dict=True)
df_report = pd.DataFrame(results_classification).transpose()
return df_report
get_classifications_results(result_df).round(2)
```
# Starbucks Capstone Project solution for ML Engineer Nanodegree
## Data Processing
```
# Importing the required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
# Read the data
portfolio_df = pd.read_json('C:/Users/user/Desktop/capstone_project_starbucks/data/portfolio.json', orient='records', lines=True)
profile_df = pd.read_json('C:/Users/user/Desktop/capstone_project_starbucks/data/profile.json', orient='records', lines=True)
transcript_df = pd.read_json('C:/Users/user/Desktop/capstone_project_starbucks/data/transcript.json', orient='records', lines=True)
```
### Data Exploration
#### Portfolio DataFrame
```
# Describe the Portfolio dataframe
print('The shape of Portfolio dataframe is {}'.format(portfolio_df.shape))
```
This dataframe contains information about the different offers, with details about each of them.
```
# Show the Portfolio dataframe
display(portfolio_df)
```
#### Profile DataFrame
```
# Describe the Profile dataframe
print('The shape of Profile dataframe is {}'.format(profile_df.shape))
```
This dataframe contains information about the customers, along with their demographic data.
```
# Show the Profile dataframe
display(profile_df)
```
We see missing values in gender and income, so this dataframe needs some processing. In addition, it is useful to convert the string dates into datetime values.
```
# There are no duplicated customers in dataframe
set(profile_df.duplicated(subset=['id']))
# The NaN values for income and gender coincide, so we can drop those rows
display(profile_df.loc[profile_df['income'].isnull()].describe())
display(profile_df.loc[profile_df['gender'].isnull()].describe())
profile_df = profile_df.loc[~profile_df['income'].isnull()]
print('After that, the shape of Profile dataframe is {}'.format(profile_df.shape))
display(profile_df)
# Let's change string date to datetime
profile_df['became_member_on'] = pd.to_datetime(profile_df['became_member_on'].astype(str)).dt.date
# # We see that the Other gender is not so frequent in the data
# pd.DataFrame(profile_df.groupby('gender').describe()['age']['count'])
# We can see the age distribution looks bell-shaped
sns.distplot(profile_df['age'])
plt.title('Age distribution')
plt.show()
# While income distribution is not bell-shaped
sns.distplot(profile_df['income'])
plt.title('Income distribution')
plt.show()
# The majority of customers joined after 2017
profile_df['became_member_on'].hist()
plt.show()
```
#### Transcript DataFrame
This dataframe contains the information about different transactions with details.
```
# Describe the Transcript dataframe
print('The shape of Transcript dataframe is {}'.format(transcript_df.shape))
# Show the Transcript dataframe
display(transcript_df)
# Here are the descriptive statistics for each event count
pd.DataFrame(transcript_df.groupby('event').describe()['time']['count'])
# Let's delve more into the Value feature
# and check the cross-intersection between the event and value
values_parsed = transcript_df['value'].apply(lambda x: str(list(x.keys())))
pd.crosstab(values_parsed, transcript_df['event'])
# We can parse these values and replace value feature with the more
# detailed ones
transcript_df['offer_id'] = transcript_df['value'].apply(lambda x: \
x['offer_id'] if 'offer_id' in x \
else (x['offer id'] if 'offer id' \
in x else None))
for key in ['amount', 'reward']:
transcript_df[key] = transcript_df['value'].apply(lambda x: \
x[key] if key in x else None)
# Therefore, we can drop the old feature
transcript_df = transcript_df.drop('value', axis=1)
# Let's analyze the behavior of individual clients and check
# the maximum number of recorded events per customer
purchases_per_client = transcript_df.groupby('person')['time'].count().sort_values(ascending=False)
# Here is Top-5
purchases_per_client.head(5)
# Let's check the first client
transcript_df.loc[transcript_df['person'] == \
purchases_per_client.index[0]].sort_values('time')
```
We see that there seems to be a connection between 'transaction' and 'offer completed' events recorded at the same time. Let's check whether this is true.
```
print('There are {} matches'.format(\
len(pd.merge(transcript_df.loc[transcript_df['event'] == \
'offer completed'],
transcript_df.loc[transcript_df['event'] == 'transaction'],
on=['person', 'time']))))
# Let's also check the connection between offer received and offer viewed
print('There are {} matches'.format(\
len(pd.merge(transcript_df.loc[transcript_df['event'] == \
'offer received'],
transcript_df.loc[transcript_df['event'] == 'offer viewed'],
on=['person', 'offer_id']))))
```
#### Customer's Journey
In order to analyze conversion, we have to reconstruct the customer's journey from the data. We have to:
- Analyze the data about offer views
- Check the conversion into a purchase
- Analyze the data about transactions
```
# Merge the offer receives and offer views
offer_view_df = pd.merge(\
transcript_df.loc[transcript_df['event'] == 'offer received', \
['person', 'offer_id', 'time']],
transcript_df.loc[transcript_df['event'] == 'offer viewed', \
['person', 'offer_id', 'time']],
on=['person', 'offer_id'], how='left', \
suffixes=['_received', '_viewed'])
# Remove the broken data: the view has to come after the receive; also handle null views
offer_view_df = offer_view_df.loc[(offer_view_df['time_viewed'] >= \
offer_view_df['time_received']) | \
~(offer_view_df['time_viewed'].isnull())]
# Take the nearest receive before the view
offer_view_df = pd.concat((offer_view_df.groupby(['person', 'offer_id',
'time_viewed']).agg({'time_received': 'max'}).reset_index(),
offer_view_df.loc[offer_view_df['time_viewed'].isnull()]))
offer_view_df.head()
```
Let's apply the same reasoning to the offer completion
```
# Merge the DataFrames
offer_complete_df = pd.merge(offer_view_df,
transcript_df.loc[transcript_df['event'] == 'offer completed', \
['person', 'offer_id', 'time', 'reward']],
on=['person', 'offer_id'], how='left')
# Rename the column
offer_complete_df.rename(columns={'time': 'time_completed'}, inplace=True)
# Keep only completions that occur at or after a recorded view; otherwise invalidate them
offer_complete_df.loc[(offer_complete_df['time_viewed'].isnull()) | \
(offer_complete_df['time_viewed'] > \
offer_complete_df['time_completed']), \
['reward', 'time_completed']] = (np.nan, np.nan)
offer_complete_df = offer_complete_df.drop_duplicates()
# Concatenate the nearest completion to the view and receive
offer_complete_df = pd.concat(
(offer_complete_df.groupby(['person', 'offer_id',
'time_completed', 'reward']).agg({'time_viewed': 'max',
'time_received': 'max'}).reset_index(),
offer_complete_df.loc[offer_complete_df['time_completed'].isnull()]))
offer_complete_df.head()
```
Now let's add the information about the transactions
```
# Merge the DataFrames
offer_transaction_df = pd.merge(offer_complete_df,
transcript_df.loc[transcript_df['event'] == 'transaction', \
['person', 'time', 'amount']],
left_on=['person', 'time_completed'],
right_on=['person', 'time'], how='outer')
# Rename the column
offer_transaction_df.rename(columns={'time': 'time_transaction'}, inplace=True)
# Add a column with time equal to received offer,
# and transaction time otherwise
offer_transaction_df['time'] = offer_transaction_df['time_received']
offer_transaction_df.loc[offer_transaction_df['time'].isnull(),
'time'] = offer_transaction_df['time_transaction']
# Drop the duplicates
offer_transaction_df.sort_values(['person', 'offer_id', 'time',
'time_completed'], inplace=True)
offer_transaction_df = offer_transaction_df.drop_duplicates(['person',
'offer_id', 'time'])
print("The final data size is ", offer_transaction_df.shape)
```
Let's finally merge all the data into a single DataFrame.
```
# Add offer type information
offer_type_df = pd.merge(offer_transaction_df,
portfolio_df.rename(columns={'id': 'offer_id',
'reward': 'portfolio_reward'}),
on='offer_id', how='left')
offer_type_df.head()
# Add demographic information
offer_all_df = pd.merge(offer_type_df,
profile_df.rename(columns={'id': 'person'}),
how='inner', on='person')
offer_all_df.head()
# Sort the data
offer_all_df.sort_values(['person', 'time', 'offer_id'], inplace=True)
# Let's fill the values for transactions' offer type
offer_all_df['offer_type'].fillna('transaction', inplace=True)
offer_all_df.head()
print('The final shape of the data is ', offer_all_df.shape)
# Save the data
offer_all_df.to_csv('./data/customer_journey.csv', index=False)
```
### New Features Creation
```
# Let's test that the file we saved is loading correctly
customer_journey_df = pd.read_csv('./data/customer_journey.csv',
parse_dates=['became_member_on'])
# Let's drop the data when the offer was never viewed
customer_journey_df = customer_journey_df.loc[\
(customer_journey_df['offer_type'] == 'transaction') \
|(customer_journey_df['time_viewed'].isnull() == False)]
# Keep the time variable equal to time viewed, transaction time otherwise
customer_journey_df['time'] = customer_journey_df['time_viewed']
customer_journey_df.loc[customer_journey_df['offer_type'] == \
'transaction', 'time'] = customer_journey_df['time_transaction']
print('The current shape of data is {}'.format(customer_journey_df.shape))
customer_journey_df.head()
```
Our aim is to maximize the conversion rate for each offer type.
In order to evaluate the model, we first calculate a benchmark based on the historical data.
```
# Keep only relevant features
conversion_df = customer_journey_df.loc[:, ['offer_type',
'time_viewed', 'time_completed']]
# Mark the offers viewed if they are non-informational and viewed
conversion_df['viewed'] = 0
conversion_df.loc[(conversion_df['offer_type'].isin(['bogo', 'discount'])) & \
(conversion_df['time_viewed'].isnull() == False),
'viewed'] = 1
# Mark conversion
conversion_df['conversion'] = 0
conversion_df.loc[(conversion_df['viewed'] == 1.0) & \
(conversion_df['time_completed'].isnull() == False),
'conversion'] = 1
viewed_num = np.sum(conversion_df['viewed'])
conversion_num = np.sum(conversion_df['conversion'])
print('{} users viewed the offer and {} completed it'.format(
viewed_num, conversion_num))
print('Therefore, the conversion is {} %'.format(\
round(conversion_num/viewed_num*100, 2)))
# We can also divide it by the offer type
conversion_df.loc[conversion_df['viewed'] == 1\
].groupby('offer_type').agg({'conversion': 'mean'})
```
Furthermore, we can analyze conversion for informational offers. This can be evaluated as a transaction occurring shortly after the informational offer is viewed.
```
# Copy the dataset and keep rows with a non-null view or a non-null transaction
informational_offer_df = customer_journey_df.loc[
(customer_journey_df['time_viewed'].isnull() == False) | \
(customer_journey_df['time_transaction'].isnull() == False),
['person', 'offer_id', 'offer_type', 'time_viewed', 'time_transaction']]
# Replace time with time viewed. Otherwise - transaction time
informational_offer_df['time'] = informational_offer_df['time_viewed']
informational_offer_df.loc[informational_offer_df['time'].isnull(),
'time'] = informational_offer_df['time_transaction']
# To analyze this, we check the subsequent event for each user
informational_offer_df['next_offer_type'] = \
informational_offer_df['offer_type'].shift(-1)
informational_offer_df['next_time'] = informational_offer_df['time'].shift(-1)
# If the next row belongs to a different person, skip it
informational_offer_df.loc[
informational_offer_df['person'].shift(-1) != \
informational_offer_df['person'],
['next_offer_type', 'next_time']] = ['', np.nan]
# Get the information about the difference in time for the offer types
informational_offer_df['time_diff'] = \
informational_offer_df['next_time'] - informational_offer_df['time_viewed']
# Let's check the time distribution between informational offer and transaction
informational_offer_df.loc[
(informational_offer_df['offer_type'] == 'informational') & \
(informational_offer_df['next_offer_type'] == 'transaction') &
(informational_offer_df['time_diff'] >=0),
'time_diff'].describe()
```
We see that the median difference is 24 hours.
```
informational_offer_df.loc[
(informational_offer_df['offer_type'] == 'informational') & \
(informational_offer_df['next_offer_type'] == 'transaction')&
(informational_offer_df['time_diff'] >=0),
'time_diff'].hist()
# Let's check the conversion if we check the transaction within 24 hours
# after the informational offer
time_diff_threshold = 24.0
viewed_info_num = np.sum(informational_offer_df['offer_type'] == \
'informational')
conversion_info_num = np.sum((informational_offer_df['offer_type'] == \
'informational') \
& (informational_offer_df['next_offer_type'] == 'transaction') & \
(informational_offer_df['time_diff'] <= time_diff_threshold))
print('{} users viewed the offer and {} completed it'.format(
viewed_info_num, conversion_info_num))
print('Therefore, the conversion is {} %'.format(\
round(conversion_info_num/viewed_info_num*100, 2)))
```
Now let's create features for each offer type
```
# Mark viewed BOGO offers: 0 by default, 1 if completed
customer_journey_df.loc[
(customer_journey_df['time_viewed'].isnull() == False) & \
(customer_journey_df['offer_type'] == 'bogo'),
'bogo'] = 0
customer_journey_df.loc[
(customer_journey_df['time_viewed'].isnull() == False) & \
(customer_journey_df['offer_type'] == 'bogo') & \
(customer_journey_df['time_completed'].isnull() == False), 'bogo'] = 1
# Mark viewed discount offers: 0 by default, 1 if completed
customer_journey_df.loc[
(customer_journey_df['time_viewed'].isnull() == False) & \
(customer_journey_df['offer_type'] == 'discount'),
'discount'] = 0
customer_journey_df.loc[
(customer_journey_df['time_viewed'].isnull() == False) & \
(customer_journey_df['offer_type'] == 'discount') & \
(customer_journey_df['time_completed'].isnull() == False), 'discount'] = 1
```
Now let's work a bit on the informational offer DataFrame
```
informational_offer_df.loc[
informational_offer_df['offer_type'] == 'informational', 'info'] = 0
informational_offer_df.loc[
(informational_offer_df['offer_type'] == 'informational') & \
(informational_offer_df['next_offer_type'] == 'transaction') & \
(informational_offer_df['time_diff'] <= time_diff_threshold), 'info'] = 1
customer_journey_df = pd.merge(customer_journey_df,
informational_offer_df.loc[
informational_offer_df['info'].isnull() == False,
['person', 'offer_id', 'time_viewed', 'info', 'next_time']],
how='left', on=['person', 'offer_id', 'time_viewed'])
# Override time completed with the following time of transaction
customer_journey_df.loc[customer_journey_df['info'] == 1,
'time_completed'] = customer_journey_df['next_time']
customer_journey_df.loc[customer_journey_df['info'] == 1,
'time_transaction'] = customer_journey_df['next_time']
customer_journey_df = customer_journey_df.drop('next_time', axis=1)
bogo_num = np.sum(customer_journey_df['bogo'].isnull() == False)
disc_num = np.sum(customer_journey_df['discount'].isnull() == False)
info_num = np.sum(customer_journey_df['info'].isnull() == False)
print('The current DataFrame contains: {} BOGO, {} Discount and {} \
Informational events of conversion.'.format(bogo_num, disc_num, info_num))
```
Now we can work more on the features for the customers
```
customer_df = customer_journey_df[['person', 'gender',
'age', 'income', 'became_member_on']].drop_duplicates()
customer_df.describe(include='all').T
```
Now let's create a feature that captures how long each customer has been a member of the service.
```
def months_difference(date_start, date_end):
''' This function is used to calculate the difference
in months between two dates
Args:
date_start (timestamp/datetime) - start date of the period
date_end (timestamp/datetime) - end date of the period
Outputs:
difference(int) - difference in months between the dates
'''
difference = (date_end.year - date_start.year) * 12 + \
(date_end.month - date_start.month)
return difference
customer_journey_df['day'] = np.floor(
customer_journey_df['time_viewed'] / 24.0)
customer_journey_df['weekday'] = customer_journey_df['day'] % 7.0
customer_journey_df['became_member_from'] = customer_journey_df.apply(
lambda x: months_difference(
x['became_member_on'], datetime(2018, 8, 1)), 1)
customer_journey_df.head()
```
Let's check the distribution of these values
```
sns.distplot(customer_journey_df['day'].dropna())
plt.title('Offer Day Distribution')
plt.show()
sns.distplot(customer_journey_df['weekday'].dropna())
plt.title('Offer Weekday Distribution')
plt.show()
sns.distplot(customer_journey_df['became_member_from'].dropna())
plt.title('Months from the initial Membership')
plt.show()
```
In order to analyze the data correctly, it is important to take each client's past behavior into account. I propose to create new features describing it:
- Individual transactions
- Average transaction amount per client
- Cumulative reward received
- Number of offers that were completed or viewed
- The time from offer receipt to view or completion
```
# Check whether there was a transaction
customer_journey_df['transaction'] = 0
customer_journey_df.loc[
customer_journey_df['time_transaction'].isnull() == False,
'transaction'] = 1
# Check whether the offer was completed
customer_journey_df['completed'] = 0
customer_journey_df.loc[
customer_journey_df['time_completed'].isnull() == False,
'completed'] = 1
# Create new features
customer_journey_df['number_of_offers_viewed'] = 0
customer_journey_df['number_of_offers_completed'] = 0
customer_journey_df['receival_to_view_avg'] = 0
customer_journey_df['view_to_completion_avg'] = 0
customer_journey_df['number_of_transactions'] = 0
customer_journey_df['avg_number_of_transctions'] = 0
customer_journey_df['avg_reward'] = 0
customer_journey_df['receival_to_view'] = \
customer_journey_df['time_viewed'] - customer_journey_df['time_received']
customer_journey_df['time_from_view_to_completion'] = \
customer_journey_df['time_completed'] - customer_journey_df['time_viewed']
# Track whether the previous row belongs to the same person
customer_journey_df['prev_person'] = customer_journey_df['person'].shift(1)
# Fill the features via loop
for i, row in customer_journey_df.iterrows():
# We fill the features if rows are attributed to the same person
if row['person'] == row['prev_person']:
# If the previous offer was viewed
customer_journey_df.loc[i, 'number_of_offers_viewed'] = \
customer_journey_df.loc[i-1, 'number_of_offers_viewed'] + \
(0 if customer_journey_df.loc[i-1, 'offer_type'] == \
'transaction' else 1)
# If the previous offer was completed
customer_journey_df.loc[i, 'number_of_offers_completed'] = \
customer_journey_df.loc[i-1, 'number_of_offers_completed'] + \
customer_journey_df.loc[i-1, 'completed']
# Running total of time from receipt to view (accumulate receival_to_view)
customer_journey_df.loc[i, 'receival_to_view_avg'] = \
np.nansum((customer_journey_df.loc[i-1, 'receival_to_view_avg'],
customer_journey_df.loc[i-1, 'receival_to_view']))
# Previous time from View to Completion
customer_journey_df.loc[i, 'view_to_completion_avg'] = \
np.nansum((customer_journey_df.loc[i-1,
'view_to_completion_avg'],
customer_journey_df.loc[i-1,
'time_from_view_to_completion']))
# If the previous row was a Transaction
customer_journey_df.loc[i, 'number_of_transactions'] = \
customer_journey_df.loc[i-1, 'number_of_transactions'] + \
customer_journey_df.loc[i-1, 'transaction']
# If the previous row was a Transaction, add amount
customer_journey_df.loc[i, 'avg_number_of_transctions'] = \
customer_journey_df.loc[i-1, 'avg_number_of_transctions'] + \
(0 if customer_journey_df.loc[i-1, 'transaction'] == \
0 else customer_journey_df.loc[i-1, 'amount'])
# If the previous row was a Reward, add reward
customer_journey_df.loc[i, 'avg_reward'] = \
np.nansum((customer_journey_df.loc[i-1, 'avg_reward'],
customer_journey_df.loc[i-1, 'reward']))
# Get the average values
customer_journey_df['receival_to_view_avg'] = \
customer_journey_df['receival_to_view_avg'] / \
customer_journey_df['number_of_offers_viewed']
customer_journey_df['view_to_completion_avg'] = \
customer_journey_df['view_to_completion_avg'] / \
customer_journey_df['number_of_offers_completed']
customer_journey_df['avg_number_of_transctions'] = \
customer_journey_df['avg_number_of_transctions'] / \
customer_journey_df['number_of_transactions']
customer_journey_df['receival_to_view_avg'].fillna(0, inplace=True)
customer_journey_df['view_to_completion_avg'].fillna(0, inplace=True)
customer_journey_df['avg_number_of_transctions'].fillna(0, inplace=True)
customer_journey_df.tail()
# Save the data to CSV so it can later be uploaded to SageMaker
customer_journey_df.to_csv('customer_journey_updated.csv')
```
Now let's upload the data to SageMaker.
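The upload step itself is not shown here. As a minimal sketch (not from the original notebook), one way to stage the CSV in S3 so a SageMaker job can read it is with boto3; the bucket name and key prefix below are placeholders.
```
# Minimal sketch (not from the original notebook): copy the CSV to S3
# so it can be read from SageMaker. Bucket name and key are placeholders.
import boto3

s3 = boto3.client('s3')
bucket = 'my-sagemaker-bucket'  # hypothetical bucket name
key = 'starbucks/customer_journey_updated.csv'
s3.upload_file('customer_journey_updated.csv', bucket, key)
print('Uploaded to s3://{}/{}'.format(bucket, key))
```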
# T1563 - Remote Service Session Hijacking
Adversaries may take control of preexisting sessions with remote services to move laterally in an environment. Users may use valid credentials to log into a service specifically designed to accept remote connections, such as telnet, SSH, and RDP. When a user logs into a service, a session will be established that will allow them to maintain a continuous interaction with that service.
Adversaries may commandeer these sessions to carry out actions on remote systems. [Remote Service Session Hijacking](https://attack.mitre.org/techniques/T1563) differs from use of [Remote Services](https://attack.mitre.org/techniques/T1021) because it hijacks an existing session rather than creating a new session using [Valid Accounts](https://attack.mitre.org/techniques/T1078).(Citation: RDP Hijacking Medium)(Citation: Breach Post-mortem SSH Hijack)
## Atomic Tests:
Currently, no tests are available for this technique.
## Detection
Use of these services may be legitimate, depending upon the network environment and how it is used. Other factors, such as access patterns and activity that occurs after a remote login, may indicate suspicious or malicious behavior with that service. Monitor for user accounts logged into systems they would not normally access or access patterns to multiple systems over a relatively short period of time.
Monitor for processes and command-line arguments associated with hijacking service sessions.
## Shield Active Defense
### Behavioral Analytics
Deploy tools that detect unusual system or user behavior.
Instrument a system to collect detailed information about process execution and user activity, develop a sense of normal or expected behaviors, and alert on abnormal or unexpected activity. This can be accomplished either onboard the target system or by shipping data to a centralized analysis and alerting system.
#### Opportunity
There is an opportunity to detect the presence of an adversary by identifying and alerting on anomalous behaviors.
#### Use Case
A defender can look for anomalies in accounts being active with other services/systems during hours they are normally not active. This can indicate malicious activity.
#### Procedures
Use behavioral analytics to detect Living Off The Land Binaries (LOLBins) being used to download and execute a file.
Use behavioral analytics to identify a system that is running development tools but is not used by someone who does development.
Use behavioral analytics to identify abnormal system processes being used to launch a different process.
# <center><u><u>Bayesian Modeling for the Busy and the Confused - Part I</u></u></center>
## <center><i>Basic Principles of Bayesian Computation and the Grid Approximation</i><center>
Currently, the capacity to gather data is far ahead of the ability to generate meaningful insight using conventional approaches. Hopes of alleviating this bottleneck have come through the application of machine learning tools. Among these tools, one that is increasingly gaining traction is probabilistic programming, particularly Bayesian modeling. In this paradigm, the variables that define a model carry a probability distribution rather than a scalar value. "Fitting" a model to data can then, simplistically, be construed as finding the appropriate parameterization for these distributions, given the model structure and the data. This offers a number of advantages over other methods, not the least of which is the estimation of uncertainty around model results. This in turn can better inform subsequent processes, such as decision-making and scientific discovery.
<br><br>
<u>Part-I overview</u>:
The present is the first of a two-notebook series, the subject of which is a brief, basic, but hands-on programmatic introduction to Bayesian modeling. This first notebook begins with an overview of a few key probability principles relevant to Bayesian inference. An illustration of how to put these into practice follows. In particular, I will demonstrate one of the more intuitive approaches to Bayesian computation: Grid Approximation (GA). With this framework I will show how to create simple models that can be used to interpret and predict real world data. <br>
<u>Part-II overview</u>:
GA is computationally intensive and runs into problems quickly when the data set is large and/or the model increases in complexity. One of the more popular solutions to this problem is the Markov Chain Monte-Carlo (MCMC) algorithm. The implementation of MCMC in Bayesian models will be the subject of the [second notebook of this series]().
<br>
<u>Hands-on approach with Python</u>:
Bayesian modeling cannot be understood without practice. To that end, this notebook uses code snippets that should be iteratively modified and run for better insight.
As of this writing, the most popular programming language in machine learning is Python. Python is an easy language to pick up. Python is free, open source, and a large number of very useful libraries have been written over the years that have propelled it to its current place of prominence in a number of fields, in addition to machine learning.
<br><br>
I use Python (3.6+) code to illustrate the mechanics of Bayesian inference in lieu of lengthy explanations. I also use a number of dedicated Python libraries that shorten the code considerably. A solid understanding of Bayesian modeling cannot be spoon-fed and can only come from getting one's hands dirty. Emphasis is therefore on readable, reproducible code. This should ease the work the interested reader has to do to get some practice re-running the notebook and experimenting with some of the coding and Bayesian modeling patterns presented. Some know-how is required regarding installing and running a Python distribution, the required libraries, and jupyter notebooks; this is easily gleaned from the internet. A popular option in the machine learning community is [Anaconda](https://www.anaconda.com/distribution).
<a id='TOP'></a>
## Notebook Contents
1. [Basics: Joint probability, Inverse probability and Bayes' Theorem](#BASIC)
2. [Example: Inferring the Statistical Distribution of Chlorophyll from Data](#JustCHL)
    1. [Grid Approximation](#GRID)
        1. [Impact of priors](#PriorImpact)
        2. [Impact of data set size](#DataImpact)
    2. [MCMC](#MCMC)
    3. [PyMC3](#PyMC3)
3. [Regression](#Reg)
    1. [Data Preparation](#DataPrep)
    2. [Regression in PyMC3](#RegPyMC3)
    3. [Checking Priors](#PriorCheck)
    4. [Model Fitting](#Mining)
    5. [Flavors of Uncertainty](#UNC)
4. [Final Comments](#Conclusion)
```
import pickle
import warnings
import sys
import pandas as pd
import numpy as np
from scipy.stats import norm, uniform
gaussian = norm  # alias: both `gaussian` and `norm` are used in the cells below
import seaborn as sb
import matplotlib.pyplot as pl
from matplotlib import rcParams
from matplotlib import ticker as mtick
print('Versions:')
print('---------')
print(f'python: {sys.version.split("|")[0]}')
print(f'numpy: {np.__version__}')
print(f'pandas: {pd.__version__}')
print(f'seaborn: {sb.__version__}')
%matplotlib inline
warnings.filterwarnings('ignore', category=FutureWarning)
```
<a id='BASIC'></a>
[Back to Contents](#TOP)
## 1. <u>Basics</u>:
#### $\Rightarrow$Joint probability, Inverse probability and Bayes' rule
<br>
Here's a short list of basic concepts that will help in understanding what is going on:
* Joint probability of two events $A$, $B$:
$$P(A, B)=P(A|B)\times P(B)=P(B|A)\times P(A)$$
* If A and B are independent: $$P(A|B) = P(A)\ \leftrightarrow P(A,B) = P(A)\times P(B)$$
* Inverse probability:$$\boxed{P(A|B) = \frac{P(B|A) \times P(A)}{P(B)}}$$
$\rightarrow$Inverse probability is handy when $P(A|B)$ is desired but hard to compute, while its counterpart, $P(B|A)$, is easy to compute. The result above, which is derived directly from the joint probability formulation, is referred to as Bayes' theorem/rule. One might ask next how this is used to build a "Bayesian model."
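As a quick numerical illustration of the rule (all numbers here are made up purely for the example), suppose \\(P(B|A)=0.9\\), \\(P(A)=0.01\\) and \\(P(B)=0.05\\); Bayes' rule then gives \\(P(A|B)\\) directly:
```
p_B_given_A, p_A, p_B = 0.9, 0.01, 0.05   # made-up numbers, for illustration only
p_A_given_B = p_B_given_A * p_A / p_B     # Bayes' rule
print(p_A_given_B)                        # 0.18
```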
#### $\Rightarrow$Extending Bayes' theorem to model building
<br>
Given a model:
* Hypotheses (\\(H\\)): values that model parameters can take
* \\( P(H) \\): probability of each value in H
* Data (\\( D \\))
* \\( P(D) \\): probability of the data, commonly referred to as "Evidence."
Approach
* formulate initial opinion on what $H$ might include and with what probability, $P(H)$
* collect data ($D$)
* update $P(H)$ using $D$ and Bayes' theorem
$$\frac{P(H)\times P(D|H)}{P(D)} = P(H|D)$$
Computing the "Evidence", P(D), can yield intractable integrals to solve. Fortunately, it turns out that we can approximate the posterior, and give those integrals a wide berth. Hereafter, P(D), will be considered a normalization constant and will therefore be dropped; without prejudice, as it turns out.<br><br>
$$\boxed{P(H) \times P(D|H) \propto P(H|D)}$$
Note that what we care about is updating H, model parameters, after evaluating some observations.
Let's go over each of the elements of this proportionality statement.
#### The prior
$$\underline{P(H)}\times P(D|H) \propto P(H|D)$$
* $H$: set of values that model parameters might take with corresponding probability $P(H)$.
* Priors should encompass justifiable assumptions/context information and nothing more.
* We can use probability distributions to express $P(H)$ as shown below.
#### The likelihood
$$P(H)\times \underline{P(D|H)} \propto P(H|D)$$
* probability of the data, \\(D\\), *given* \\(H\\).
* in the frequentist framework, this quantity is maximized to find the "best" fit \\(\rightarrow\\) Likelihood Maximization.
* maximizing the likelihood means finding a particular value for H, \\(\hat{H}\\).
* for simple models and uninformative priors, \\(\hat{H}\\) often corresponds to the mode of the Bayesian posterior (see below).
* likelihood maximization discards a lot of potentially valuable information (the posterior).
#### The posterior:
$$P(H)\times P(D|H) \propto \underline{P(H|D)}$$
* it's what Bayesians are after!!!
* updated probability of \\(H\\) after exposing the model to \\(D\\).
* used as prior for next iteration \\(P(H|D)\rightarrow P(H)\\), when new data become available.
* $P(H|D)$ naturally yields uncertainty around the estimate via propagation.
In the next section I will attempt to illustrate the mechanics of Bayesian inference on real-world data.
[Back to Contents](#TOP)
<a id='JustCHL'></a>
## 2. <u>Bayesian "Hello World": Inferring the Statistical Distribution of Chlorophyll</u>
<p>
The goal of Bayesian modeling is to approximate the process that generated a set of observed outcomes. Often, a set of input observations can be used to modify the expected outcome via a deterministic model expression. In this first instance, neither input observations nor a deterministic expression are included. Only the set of outcomes is of concern here, and the model is reduced to a probability assignment using a simple statistical distribution. <br>
For the present example the outcome of interest is a set of chlorophyll measurements. I assume that the process generating these observations can be approximated, <u>after log-transformation of the data</u>, by a Gaussian distribution whose scalar parameters are not expected to vary. The goal is to infer the range of values that these parameters - a constant central tendency, \\(\mu\\), and a constant spread, \\(\sigma\\) - could take. Note that this example, while not realistic, is intended to help build intuition. Further down the road, the use of inputs and deterministic models will be introduced with linear regression as an example.</p>
<p>I will contrast two major approaches: <u>Grid computation</u> and <u>Markov Chain Monte-Carlo</u>. Note that in both methods, as mentioned earlier, the evidence \\(P(D)\\) is ignored. In both cases, relative probabilities are computed and subsequently normalized.</p>
### A. Grid Computation
In grid-based inference, all the possible parameter combinations to infer upon are fixed beforehand through the building of a grid. This grid has as many dimensions as the model of interest has parameters. The user needs to define a range and a resolution for each dimension; this choice depends on the computing power available and the requirements of the problem at hand. I will illustrate that as model complexity increases, along with the number of parameters featured, the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality) can quickly take hold and limit the usefulness of this approach.
Given a range and a resolution for each of the grid's dimensions, each grid point "stores" the joint probability of the corresponding parameter values. Initially the grid is populated by the stipulation of prior probabilities that should encode what is deemed "reasonable" by the practitioner. These priors can diverge between individual users. This is not a problem, however, as it makes assumptions - and therefore grounds for disagreement - explicit and specific. As these priors are confronted with an amount of data that is large relative to the model's complexity, initially diverging priors tend to converge.
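Regarding the curse of dimensionality mentioned above, a quick back-of-the-envelope calculation shows how fast the number of grid points grows with the number of model parameters (assuming, for illustration, the 200-point-per-dimension resolution used below):
```
# number of grid points for a 200-point resolution per parameter
for n_params in (1, 2, 3, 4, 5):
    print(f'{n_params} parameter(s): {200 ** n_params:.1e} grid points')
```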
Given that our model is a Gaussian distribution, our set of hypotheses (\\(H\\) in the previous section) includes 2 vectors: a mean \\(\mu\\) and a standard deviation \\(\sigma\\). The next couple of lines of code define the corresponding two axes of a \\(200 \times 200\\) grid, including the range of each axis and, by extension, its resolution.
```
μ = np.linspace(-2, 2, num=200) # μ-axis
σ = np.linspace(0, 2, num=200) # σ-axis
```
For ease of manipulation I will use a [pandas DataFrame](), which at first sight looks deceivingly like a 'lame' spreadsheet, to store the grid coordinates. I use this dataframe to subsequently store the prior definitions and the results of the likelihood and posterior computations at each grid point. Here's the code that defines the DataFrame, named *df_grid*, and populates its first two columns, \\(\mu\\) and \\(\sigma\\).
```
df_grid = pd.DataFrame([[μ_i, σ_i]
for σ_i in σ for μ_i in μ], columns=['μ', 'σ'])
```
Accessing, say, the column **\\(\mu\\)** is as simple as typing ***df\_grid.\\(\mu\\)***.
#### Priors
The next step is to define priors for both **\\(\mu\\)** and **\\(\sigma\\)** that encode the user's knowledge or, more commonly, her or his lack thereof. Principles guiding the choice of priors are beyond the scope of this post; here I simply pick what seems to make sense. In this case, chlorophyll is log-transformed, so \\(\mu\\) should range within a few digits north and south of '0', and \\(\sigma\\) should be positive and is not expected to range beyond a few orders of magnitude. Thus a normal distribution for \\(\mu\\) and a uniform distribution for \\(\sigma\\), parameterized as below, seem to make sense: <br>
\\(\rightarrow \mu \sim \mathcal{N}(mean=1, st.dev.=1)\\); a gaussian (normal) distribution centered at 1, with a standard deviation of 1<br>
\\(\rightarrow \sigma \sim \mathcal{U}(lo=0, high=2)\\); a uniform distribution bounded at 0 and 2<br>
Note that these are specified independently because \\(\mu\\) and \\(\sigma\\) are assumed independent.
The lines below pass the grid defined above to the scipy.stats distribution functions, computing the (log) prior probability for each \\(\mu\\) and \\(\sigma\\) value at each grid point.
```
μ_log_prior = gaussian.logpdf(df_grid.μ, 1, 1)
σ_log_prior = uniform.logpdf(df_grid.σ, 0, 2)
```
Note that the code above computes the log (prior) probability of each parameter at each grid point. Because the parameters \\(\mu\\) and \\(\sigma\\) are assumed independent, the joint prior probability at each grid point is just the product of the individual prior probabilities. Products of many probabilities can, however, result in underflow errors, so instead the joint probabilities over the entire grid are computed by summing the log-probabilities and then taking the exponent of the result. I store both the joint log-probability and the corresponding (relative) probability at each grid point in the pandas dataframe with the code snippet below:
```
# log prior probability
df_grid['log_prior_prob'] = μ_log_prior + σ_log_prior
# straight prior probability from exponentiation of log_prior_prob
df_grid['prior_prob'] = np.exp(df_grid.log_prior_prob
- df_grid.log_prior_prob.max())
```
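The underflow problem that motivates the log-space detour is easy to demonstrate. The snippet below is illustrative only; note how subtracting the maximum log-probability before exponentiating (the same trick used with `log_prior_prob.max()` above) keeps the resulting *relative* probabilities well behaved:
```
import numpy as np

p = np.full(500, 1e-10)      # 500 small probabilities
print(np.prod(p))            # 0.0 -> underflow

joint_log = np.log(p).sum()  # about -11513, perfectly finite
print(joint_log)

# exponentiate only after subtracting the maximum joint log-probability
joint_logs = np.array([joint_log, joint_log - 2, joint_log - 5])
print(np.exp(joint_logs - joint_logs.max()))   # [1.0, 0.135, 0.0067]
```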
Since there are only two parameters, visualizing the joint prior probability is straightforward:
```
f, ax = pl.subplots(figsize=(6, 6))
df_grid.plot.hexbin(x='μ', y='σ', C='prior_prob', figsize=(7,6),
cmap='plasma', sharex=False, ax=ax);
ax.set_title('Prior')
f.savefig('./resources/f1_grid_prior.svg')
```
In the figure above, looking across the \\(\sigma\\)-axis reveals the 'wall' of uniform probability, where none of the positive values, bounded here between 0 and 2.0, is expected to be more likely than any other. Looking down the \\(\mu\\)-axis, on the other hand, reveals the Gaussian peak around 1, within a grid of floats extending from -2.0 to 2.0.
<a id='GRID'></a>
#### Building the Grid
For this example I simply want to approximate the distribution of *chl_l* following these steps:
* Define a model to approximate the process that generates the observations
* Theory: data generation is well approximated by a Gaussian.
* Hypotheses (\\(H\\)) therefore include 2 vectors; mean \\(\mu\\) and standard deviation \\(\sigma\\).
* Both parameters are expected to vary within a certain range.
* Build the grid of model parameters
* 2D grid of \\((\mu, \sigma)\\) pair
* Propose priors
* define priors for both \\(\mu\\) and \\(\sigma\\)
* Compute likelihood
* Compute posterior
First, I load data stored in a pandas dataframe that contains, among other things, log-transformed phytoplankton chlorophyll (*chl_l*) values measured during oceanographic cruises around the world.
```
df_data = pd.read_pickle('./pickleJar/df_logMxBlues.pkl')
df_data[['MxBl-Gr', 'chl_l']].info()
```
There are two columns. *MxBl-Gr* is a blue-to-green ratio that will serve as a predictor of chlorophyll when I address regression. For now, *MxBl-Gr* is ignored; only *chl_l* is of interest. Here is what the distribution of *chl_l*, smoothed by kernel density estimation, looks like:
```
f, ax = pl.subplots(figsize=(4,4))
sb.kdeplot(df_data.chl_l, ax=ax, legend=False);
ax.set_xlabel('chl_l');
f.tight_layout()
f.savefig('./figJar/Presentation/fig1_chl.svg', dpi=300, format='svg')
```
... and here is what the \\(200 \times 200\\) grid DataFrame built earlier looks like:
```
print(df_grid.shape)
df_grid.head(7)
```
Once priors have been defined, the model is ready to be fed some data. The *chl_l* data loaded earlier has several thousand observations. Because grid approximation is computationally intensive, I'll only pick a handful of data points. For reasons discussed further below, this will enable the comparison of the effects different priors can have on the final result.
I'll start by selecting 10 observations.
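The plotting cell below calls a small helper, `make_lower_triangle()`, that is not defined in this excerpt. A minimal sketch of what it is assumed to do - hide the redundant upper-triangle panels of the seaborn `PairGrid` - is given here so the cells can run end to end:
```
def make_lower_triangle(grid):
    """Hide the upper-triangle panels of a seaborn PairGrid (assumed behavior)."""
    n_rows, n_cols = grid.axes.shape
    for i in range(n_rows):
        for j in range(i + 1, n_cols):
            grid.axes[i, j].set_visible(False)
```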
```
sample_N = 10
df_data_s = df_data.dropna().sample(n=sample_N)
g = sb.PairGrid(df_data_s.loc[:,['MxBl-Gr', 'chl_l']],
diag_sharey=False)
g.map_diag(sb.kdeplot, )
g.map_offdiag(sb.scatterplot, alpha=0.75, edgecolor='k');
make_lower_triangle(g)
g.axes[1,0].set_ylabel(r'$log_{10}(chl)$');
g.axes[1,1].set_xlabel(r'$log_{10}(chl)$');
```
Next, compute the log-likelihood of the data given every \\((\mu, \sigma)\\) pair. This is done by summing the log-probability of each datapoint given each grid point, i.e. each \\((\mu, \sigma)\\) pair.
```
df_grid['LL'] = np.sum(norm.logpdf(df_data_s.chl_l.values.reshape(1, -1),
loc=df_grid.μ.values.reshape(-1, 1),
scale=df_grid.σ.values.reshape(-1, 1)
), axis=1)
```
#### Compute Posterior $P(\mu,\sigma\ | data) \propto P(data | \mu, \sigma) \times P(\mu, \sigma)$
```
# compute log-probability
df_grid['log_post_prob'] = df_grid.LL + df_grid.log_prior_prob
# convert to straight prob.
df_grid['post_prob'] = np.exp(df_grid.log_post_prob
- df_grid.log_post_prob.max())
# Plot Multi-Dimensional Prior and Posterior
f, ax = pl.subplots(ncols=2, figsize=(12, 5), sharey=True)
df_grid.plot.hexbin(x='μ', y='σ', C='prior_prob',
cmap='plasma', sharex=False, ax=ax[0])
df_grid.plot.hexbin(x='μ', y='σ', C='post_prob',
cmap='plasma', sharex=False, ax=ax[1]);
ax[0].set_title('Prior Probability Distribution')
ax[1].set_title('Posterior Probability Distribution')
f.tight_layout()
f.savefig('./figJar/Presentation/grid1.svg')
```
<img src='./resources/grid1.svg'/>
```
# Compute Marginal Priors and Posteriors for each Parameter
df_μ = df_grid.groupby(['μ']).sum().drop('σ', axis=1)[['prior_prob',
'post_prob']
].reset_index()
df_σ = df_grid.groupby(['σ']).sum().drop('μ', axis=1)[['prior_prob',
'post_prob']
].reset_index()
# Normalize Probability Distributions
df_μ.prior_prob /= df_μ.prior_prob.max()
df_μ.post_prob /= df_μ.post_prob.max()
df_σ.prior_prob /= df_σ.prior_prob.max()
df_σ.post_prob /= df_σ.post_prob.max()
#Plot Marginal Priors and Posteriors
f, ax = pl.subplots(ncols=2, figsize=(12, 4))
df_μ.plot(x='μ', y='prior_prob', ax=ax[0], label='prior');
df_μ.plot(x='μ', y='post_prob', ax=ax[0], label='posterior')
df_σ.plot(x='σ', y='prior_prob', ax=ax[1], label='prior')
df_σ.plot(x='σ', y='post_prob', ax=ax[1], label='posterior');
f.suptitle('Marginal Probability Distributions', fontsize=16);
f.tight_layout(pad=2)
f.savefig('./figJar/Presentation/grid2.svg')
```
[Back to Contents](#TOP)
<a id='PriorImpact'></a>
### Impact of Priors
```
def compute_bayes_framework(data, priors_dict):
# build grid:
μ = np.linspace(-2, 2, num=200)
σ = np.linspace(0, 2, num=200)
df_b = pd.DataFrame([[μ_i, σ_i] for σ_i in σ for μ_i in μ],
columns=['μ', 'σ'])
# compute/store distributions
μ_prior = norm.logpdf(df_b.μ, priors_dict['μ_mean'],
priors_dict['μ_sd'])
σ_prior = uniform.logpdf(df_b.σ, priors_dict['σ_lo'],
priors_dict['σ_hi'])
# compute joint prior
df_b['log_prior_prob'] = μ_prior + σ_prior
df_b['prior_prob'] = np.exp(df_b.log_prior_prob
- df_b.log_prior_prob.max())
# compute log likelihood
df_b['LL'] = np.sum(norm.logpdf(data.chl_l.values.reshape(1, -1),
loc=df_b.μ.values.reshape(-1, 1),
scale=df_b.σ.values.reshape(-1, 1)
), axis=1)
# compute joint posterior
df_b['log_post_prob'] = df_b.LL + df_b.log_prior_prob
df_b['post_prob'] = np.exp(df_b.log_post_prob
- df_b.log_post_prob.max())
return df_b
def plot_posterior(df_, ax1, ax2):
df_.plot.hexbin(x='μ', y='σ', C='prior_prob',
cmap='plasma', sharex=False, ax=ax1)
df_.plot.hexbin(x='μ', y='σ', C='post_prob',
cmap='plasma', sharex=False, ax=ax2);
ax1.set_title('Prior Probability Distribution')
ax2.set_title('Posterior Probability Distribution')
def plot_marginals(df_, ax1, ax2, plot_prior=True):
"""Compute marginal posterior distributions."""
df_μ = df_.groupby(['μ']).sum().drop('σ',
axis=1)[['prior_prob',
'post_prob']
].reset_index()
df_σ = df_.groupby(['σ']).sum().drop('μ',
axis=1)[['prior_prob',
'post_prob']
].reset_index()
# Normalize Probability Distributions
df_μ.prior_prob /= df_μ.prior_prob.max()
df_μ.post_prob /= df_μ.post_prob.max()
df_σ.prior_prob /= df_σ.prior_prob.max()
df_σ.post_prob /= df_σ.post_prob.max()
#Plot Marginal Priors and Posteriors
if plot_prior:
df_μ.plot(x='μ', y='prior_prob', ax=ax1, label='prior');
df_σ.plot(x='σ', y='prior_prob', ax=ax2, label='prior')
df_μ.plot(x='μ', y='post_prob', ax=ax1, label='posterior')
df_σ.plot(x='σ', y='post_prob', ax=ax2, label='posterior');
```
Try two priors:
1. $\mu \sim \mathcal{N}(1, 1)$, $\sigma \sim \mathcal{U}(0, 2)$ - a weakly informative set of priors
```
weak_prior=dict(μ_mean=1, μ_sd=1, σ_lo=0, σ_hi=2)
df_grid_1 = compute_bayes_framework(df_data_s, priors_dict=weak_prior)
f , axp = pl.subplots(ncols=2, nrows=2, figsize=(12, 9))
axp = axp.ravel()
plot_posterior(df_grid_1, axp[0], axp[1])
plot_marginals(df_grid_1, axp[2], axp[3])
axp[2].legend(['weak prior', 'posterior'])
axp[3].legend(['flat prior', 'posterior'])
f.tight_layout()
f.savefig('./figJar/Presentation/grid3.svg')
```
<img src="./resources/grid3.svg?modified=3"/>
2. $\mu \sim \mathcal{N}(-1.5, 0.1)$, $\sigma \sim \mathcal{U}(0, 2)$ - a strongly informative prior
```
strong_prior=dict(μ_mean=-1.5, μ_sd=.1, σ_lo=0, σ_hi=2)
df_grid_2 = compute_bayes_framework(df_data_s, priors_dict=strong_prior)
f , axp = pl.subplots(ncols=2, nrows=2, figsize=(12, 9))
axp = axp.ravel()
plot_posterior(df_grid_2, axp[0], axp[1])
plot_marginals(df_grid_2, axp[2], axp[3])
axp[2].legend(['strong prior', 'posterior'])
axp[3].legend(['flat prior', 'posterior'])
f.tight_layout()
f.savefig('./figJar/Presentation/grid4.svg')
```
[Back to Contents](#TOP)
<a id='DataImpact'></a>
### Impact of data set size
* sub-sample size is now 500 samples,
* same two priors used
```
sample_N = 500
# compute the inference dataframe
df_data_s = df_data.dropna().sample(n=sample_N)
# display the new sub-sample
g = sb.PairGrid(df_data_s.loc[:,['MxBl-Gr', 'chl_l']],
diag_sharey=False)
g.map_diag(sb.kdeplot, )
g.map_offdiag(sb.scatterplot, alpha=0.75, edgecolor='k');
make_lower_triangle(g)
g.axes[1,0].set_ylabel(r'$log_{10}(chl)$');
g.axes[1,1].set_xlabel(r'$log_{10}(chl)$');
%%time
df_grid_3 = compute_bayes_framework(df_data_s, priors_dict=weak_prior)
f , axp = pl.subplots(ncols=2, nrows=2, figsize=(12, 9))
axp = axp.ravel()
plot_posterior(df_grid_3, axp[0], axp[1])
plot_marginals(df_grid_3, axp[2], axp[3])
axp[2].legend(['weak prior', 'posterior'])
axp[3].legend(['flat prior', 'posterior'])
f.tight_layout()
f.savefig('./figJar/Presentation/grid5.svg')
```
<img src='./resources/grid5.svg'/>
```
df_grid_4 = compute_bayes_framework(df_data_s, priors_dict=strong_prior)
f , axp = pl.subplots(ncols=2, nrows=2, figsize=(12, 9))
axp = axp.ravel()
plot_posterior(df_grid_4, axp[0], axp[1])
plot_marginals(df_grid_4, axp[2], axp[3])
axp[2].legend(['strong prior', 'posterior'])
axp[3].legend(['flat prior', 'posterior'])
f.tight_layout()
f.savefig('./figJar/Presentation/grid6.svg')
```
<img src='./resources/grid6.svg'/>
```
f , axp = pl.subplots(ncols=2, nrows=2, figsize=(12, 8), sharey=True)
axp = axp.ravel()
plot_marginals(df_grid_3, axp[0], axp[1])
plot_marginals(df_grid_4, axp[2], axp[3])
axp[0].legend(['weak prior', 'posterior'])
axp[1].legend(['flat prior', 'posterior'])
axp[2].legend(['strong prior', 'posterior'])
axp[3].legend(['flat prior', 'posterior'])
f.tight_layout()
f.savefig('./figJar/Presentation/grid7.svg')
```
***And using all the data?***
```
%%time
priors=dict(μ_mean=-1.5, μ_sd=.1, σ_lo=0, σ_hi=2)
try:
df_grid_all_data= compute_bayes_framework(df_data, priors_dict=priors)
except MemoryError:
print("OUT OF MEMORY!")
print("--------------")
```
[Back to Contents](#TOP)
<a id="Next"></a>
|
github_jupyter
|
| 0.494385 | 0.990578 |
<img src='http://www.puc-rio.br/sobrepuc/admin/vrd/brasao/download/ass_vertpb_reduz4.jpg' align='left'/>
## Demonstration Class 03
# Using genetic algorithms to solve the traveling salesperson problem
### Luis Martí, LIRA/[DEE](http://www.ele.puc-rio.br)/[PUC-Rio](http://www.puc-rio.br)
[http://lmarti.com](http://lmarti.com); [lmarti@ele.puc-rio.br](mailto:lmarti@ele.puc-rio.br)
[Advanced Evolutionary Computation: Theory and Practice](http://lmarti.com/aec-2014)
The notebook is better viewed rendered as slides. You can convert it to slides and view them by:
- using [nbconvert](http://ipython.org/ipython-doc/1/interactive/nbconvert.html) with a command like:
```bash
$ ipython nbconvert --to slides --post serve <this-notebook-name.ipynb>
```
- installing [Reveal.js - Jupyter/IPython Slideshow Extension](https://github.com/damianavila/live_reveal)
- using the online [IPython notebook slide viewer](https://slideviewer.herokuapp.com/) (some slides of the notebook might not be properly rendered).
This and other related IPython notebooks can be found at the course github repository:
* [https://github.com/lmarti/evolutionary-computation-course](https://github.com/lmarti/evolutionary-computation-course)
# [*Traveling Salesperson Problem*](http://en.wikipedia.org/wiki/Traveling_salesman_problem) (TSP):
> *Given a set of cities, and the distances between each pair of cities, find a **tour** of the cities with the minimum total distance. A **tour** means you start at one city, visit every other city exactly once, and then return to the starting city.*
- This notebook relies on [Peter Norvig](http://norvig.com/)'s [IPython notebook on the traveling salesperson problem](http://nbviewer.ipython.org/url/norvig.com/ipython/TSPv3.ipynb).
- I will be showing how to apply evolutionary algorithms to solve the TSP.
- This is a well-known [*intractable*](http://en.wikipedia.org/wiki/Intractability_(complexity)) problem, meaning that there are no efficient solutions that work for a large number of cities.
- We can create an inefficient algorithm that works fine for a small number of cities (about a dozen).
- We can also find a *nearly*-shortest tour over thousands of cities.
- Actually, the fact there is no efficient algorithm is liberating:
> **This means that we can use a very simple, inefficient algorithm and not feel too bad about it.**
### The *vocabulary* of the problem:
- **City**: For the purpose of this exercise, a city is "atomic" in the sense that we don't have to know anything about the components or attributes of a city, just how far it is from other cities.
- **Cities**: We will need to represent a set of cities; Python's `set` datatype might be appropriate for that.
- **Distance**: We will need the distance between two cities. If `A` and `B` are cities, this could be done with a function, `distance(A, B)`, or with a dict, `distance[A][B]` or `distance[A, B]`, or with an array if `A` and `B` are integer indexes. The resulting distance will be a real number (which Python calls a `float`).
- **Tour**: A tour is an ordered list of cities; Python's `list` or `tuple` datatypes would work.
- **Total distance**: The sum of the distances of adjacent cities in the tour. We will probably have a function, `total_distance(tour)`.
We are doing this demonstration as an IPython notebook. Therefore, we need to perform some initialization.
```
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import random, operator
import time
import itertools
import numpy
import math
%matplotlib inline
random.seed(time.time()) # planting a random seed
```
### First algorithm: find the tour with the shortest total distance among all possible tours
> *Generate all the possible tours of the cities, and choose the shortest one (the tour with the minimum total distance).*
We can implement this as the Python function `exact_TSP` (TSP is the standard abbreviation for Traveling Salesperson Problem, and "exact" means that it finds the shortest tour, exactly, not just an approximation to the shortest tour). Here's the design philosophy we will use:
> *Write Python code that closely mirrors the English description of the algorithm. This will probably require
some auxiliary functions and data structures; just assume we will be able to define them as well, using the same design philosophy.*
```
def exact_TSP(cities):
"Generate all possible tours of the cities and choose the shortest one."
return shortest(alltours(cities))
def shortest(tours):
"Return the tour with the minimum total distance."
return min(tours, key=total_distance)
```
_Note 1_: We have not yet defined the function `total_distance`, nor `alltours`.
_Note 2_: In Python `min(`*collection*`,key=`*function*`)` means to find the element *x* that is a member of *collection* such that *function(x)* is minimized. So `shortest` finds the tour whose `total_distance` is minimal among the tours. So our Python code implements (and closely mimics) our English description of the algorithm. Now we need to define what a tour is, and how to measure total distance.
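A tiny, stand-alone illustration of the `min(..., key=...)` idiom (unrelated to cities, just to show the mechanics):
```
words = ['salesperson', 'tours', 'city']
print(min(words, key=len))   # 'city' -> the element for which len() is smallest
```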
### Representing Tours
- A tour starts in one city, and then visits each of the other cities in order, before finally returning to the start.
- A natural representation of the set of available cities is a Python `set`, and a natural representation of a tour is a sequence that is a *permutation* of the set.
- The tuple `(1, 2, 3)`, for example, represents a tour that starts in city 1, moves to 2, then 3, and then returns to 1 to finish the tour.
```
alltours = itertools.permutations # The permutation function is already defined in the itertools module
cities = {1, 2, 3}
list(alltours(cities))
```
### Representing Cities and Distance
Now for the notion of *distance*. We define `total_distance(tour)` as the sum of the distances between consecutive cities in the tour; that part is shown below and is easy (with one Python-specific trick: when `i` is 0, then `distance(tour[0], tour[-1])` gives us the wrap-around distance between the first and last cities, because `tour[-1]` is the last element of `tour`).
```
def total_distance(tour):
"The total distance between each pair of consecutive cities in the tour."
return sum(distance(tour[i], tour[i-1])
for i in range(len(tour)))
```
### Distance between cities
Before we can define `distance(A, B)`, the distance between two cities, we have to make a choice. In the fully general version of the TSP problem, the distance between two cities could be anything: it could be the amount of time it takes to travel between cities, the number of dollars it costs, or anything else. Here we will stick with the straight-line (Euclidean) distance between points in the plane.
How will we represent a two-dimensional point? Here are some choices, with their pros and cons:
* **Tuple:** A point (or city) is a two-tuple of (*x*, *y*) coordinates, for example, `(300, 0)`.
* **Pro:** Very simple, easy to break a point down into components. Reasonably efficient.
* **Con:** doesn't distinguish points from other two-tuples. If `p` is a point, can't do `p.x` or `p.y`.
* **class:** Define `City` as a custom class with *x* and *y* fields.
* **Pro:** explicit, gives us `p.x` accessors.
* **Con:** less efficient because of the overhead of creating user-defined objects.
### Distance between cities (contd)
* **complex:** Python already has the two-dimensional point as a built-in numeric data type, but in a non-obvious way: as *complex numbers*, which inhabit the two-dimensional (real × imaginary) plane. We can make this use more explicit by defining "`City = complex`", meaning that we can construct the representation of a city using the same constructor that makes complex numbers.
* **Pro:** most efficient, because it uses a builtin type that is already a pair of numbers. The distance between two points is simple: the absolute value of their difference.
* **Con:** it may seem confusing to bring complex numbers into play; can't say `p.x`.
* **subclass:** Define "`class Point(complex): pass`", meaning that points are a subclass of complex numbers.
* **Pro:** All the pros of using `complex` directly, with the added protection of making it more explicit that these are treated as points, not as complex numbers.
* **Con:** less efficient than using `complex` directly; still can't do `p.x` or `p.y`.
* **subclass with properties:** Define "`class Point(complex): x, y = property(lambda p: p.real), property(lambda p: p.imag)`".
* **Pro:** All the pros of previous approach, and we can finally say `p.x`.
* **Con:** less efficient than using `complex` directly.
From possible alternatives Peter chose to go with `complex` numbers:
```
City = complex # Constructor for new cities, e.g. City(300, 400)
def distance(A, B):
"The Euclidean distance between two cities."
return abs(A - B)
A = City(300, 0)
B = City(0, 400)
distance(A, B)
def generate_cities(n):
"Make a set of n cities, each with random coordinates."
return set(City(random.randrange(10, 890),
random.randrange(10, 590))
for c in range(n))
cities8, cities10, cities100, cities1000 = generate_cities(8), generate_cities(10), generate_cities(100), generate_cities(1000)
cities8
```
A cool thing is to be able to plot a tour
```
def plot_tour(tour, alpha=1, color=None):
# Plot the tour as blue lines between blue circles, and the starting city as a red square.
plotline(list(tour) + [tour[0]], alpha=alpha, color=color)
plotline([tour[0]], 'rs', alpha=alpha)
# plt.show()
def plotline(points, style='bo-', alpha=1, color=None):
"Plot a list of points (complex numbers) in the 2-D plane."
X, Y = XY(points)
if color:
plt.plot(X, Y, style, alpha=alpha, color=color)
else:
plt.plot(X, Y, style, alpha=alpha)
def XY(points):
"Given a list of points, return two lists: X coordinates, and Y coordinates."
return [p.real for p in points], [p.imag for p in points]
```
We are ready to test our algorithm
```
tour = exact_TSP(cities8)
plot_tour(tour)
```
### Improving the algorithm: Try All Non-Redundant Tours
The permutation `(1, 2, 3)` represents the tour that goes from 1 to 2 to 3 and back to 1. You may have noticed that there aren't really six different tours of three cities: the cities 1, 2, and 3 form a triangle; any tour must connect the three points of the triangle; and there are really only two ways to do this: clockwise or counterclockwise. In general, with $n$ cities, there are $n!$ (that is, $n$ factorial) permutations, but only $(n-1)!$, tours that are *distinct*: the tours `123`, `231`, and `312` are three ways of representing the *same* tour.
So we can make our `TSP` program $n$ times faster by never considering redundant tours. Arbitrarily, we will say that all tours must start with the "first" city in the set of cities. We don't have to change the definition of `TSP`—just by making `alltours` return only nonredundant tours, the whole program gets faster.
(While we're at it, we'll make tours be represented as lists, rather than the tuples that are returned by `permutations`. It doesn't matter now, but later on we will want to represent *partial* tours, to which we will want to append cities one by one; that can only be done to lists, not tuples.)
```
def all_non_redundant_tours(cities):
"Return a list of tours, each a permutation of cities, but each one starting with the same city."
start = first(cities)
return [[start] + list(tour)
for tour in itertools.permutations(cities - {start})]
def first(collection):
"Start iterating over collection, and return the first element."
for x in collection: return x
def exact_non_redundant_TSP(cities):
"Generate all possible tours of the cities and choose the shortest one."
return shortest(all_non_redundant_tours(cities))
all_non_redundant_tours({1, 2, 3})
```
### Results of the improvement
```
%timeit exact_TSP(cities8)
%timeit exact_non_redundant_TSP(cities8)
%timeit exact_non_redundant_TSP(cities10)
```
It takes a few seconds on my machine to solve this problem. In general, the function `exact_non_redundant_TSP()` looks at $(n-1)!$ tours for an $n$-city problem, and each tour has $n$ cities, so the time for $n$ cities should be roughly proportional to $n!$. This means that the time grows rapidly with the number of cities; we'd need longer than the **[age of the Universe](http://en.wikipedia.org/wiki/Age_of_the_universe)** to run `exact_non_redundant_TSP()` on just 24 cities:
<table>
<tr><th>n cities<th>time
<tr><td>10<td>3 secs
<tr><td>12<td>3 secs × 12 × 11 = 6.6 mins
<tr><td>14<td>6.6 mins × 13 × 14 = 20 hours
<tr><td>24<td>3 secs × 24! / 10! = <a href="https://www.google.com/search?q=3+seconds+*+24!+%2F+10!+in+years">16 billion years</a>
</table>
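The entries in this table come from a rough extrapolation of the 3-second, 10-city timing above, assuming the running time grows like $n!$. Here is the back-of-the-envelope version in code (the numbers on your machine will of course differ):
```
import math

base_n, base_secs = 10, 3   # ~3 seconds for 10 cities, measured above
for n in (12, 14, 24):
    secs = base_secs * math.factorial(n) / math.factorial(base_n)
    print(f'{n:2d} cities: ~{secs:.3g} seconds (~{secs / 3.156e7:.3g} years)')
```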
> There must be a better way... or at least we need to look for it until quantum computing comes around.
# Approximate (Heuristic) Algorithms
- The *general, exact* Traveling Salesperson Problem is intractable;
- there is no efficient algorithm to find the tour with minimum total distance.
- But if we restrict ourselves to Euclidean distance and if we are willing to settle for a tour that is *reasonably* short but not the shortest, then the news is much better.
We will consider several *approximate* algorithms, which find tours that are usually within 10 or 20% of the shortest possible and can handle thousands of cities in a few seconds.
### Greedy Nearest Neighbor (greedy_TSP)
Here is our first approximate algorithm:
> *Start at any city; at each step extend the tour by moving from the previous city to its nearest neighbor that has not yet been visited.*
This is called a *greedy algorithm*, because it greedily takes what looks best in the short term (the nearest neighbor) even when that won't always be the best in the long term.
To implement the algorithm I need to represent all the noun phrases in the English description:
* **start**: a city which is arbitrarily the first city;
* **the tour**: a list of cities, initially just the start city;
* **previous city**: the last element of the tour, that is, `tour[-1]`;
* **nearest neighbor**: a function that, when given a city, A, and a list of other cities, finds the one with minimal distance from A; and
* **not yet visited**: we will keep a set of unvisited cities; initially all cities but the start city are unvisited.
Once these are initialized, we repeatedly find the nearest unvisited neighbor, `C`, and add it to the tour and remove it from `unvisited`.
```
def greedy_TSP(cities):
"At each step, visit the nearest neighbor that is still unvisited."
start = first(cities)
tour = [start]
unvisited = cities - {start}
while unvisited:
C = nearest_neighbor(tour[-1], unvisited)
tour.append(C)
unvisited.remove(C)
return tour
def nearest_neighbor(A, cities):
"Find the city in cities that is nearest to city A."
return min(cities, key=lambda x: distance(x, A))
```
(In Python, as in the formal mathematical theory of computability, `lambda` is the symbol for *function*, so "`lambda x: distance(x, A)`" means the function of `x` that computes the distance from `x` to the city `A`. The name `lambda` comes from the Greek letter λ.)
We can compare the fast approximate `greedy_TSP` algorithm to the slow `exact_TSP` algorithm on a small map, as shown below. (If you have this page in an IPython notebook you can repeatedly `run` the cell, and see how the algorithms compare. `generate_cities(9)` will return a different set of cities each time. I ran it 20 times, and only once did the greedy algorithm find the optimal solution, but half the time it was within 10% of optimal, and it was never more than 25% worse than optimal.)
```
cities = generate_cities(9)
%timeit exact_non_redundant_TSP(cities)
plot_tour(exact_non_redundant_TSP(cities))
%timeit greedy_TSP(cities)
plot_tour(greedy_TSP(cities))
```
### `greedy_TSP()` can handle bigger problems
```
%timeit greedy_TSP(cities100)
plot_tour(greedy_TSP(cities100))
%timeit greedy_TSP(cities1000)
plot_tour(greedy_TSP(cities1000))
```
### But... don't be greedy!
A [greedy algorithm](http://en.wikipedia.org/wiki/Greedy_algorithm) is an algorithm that follows the problem solving heuristic of making the locally optimal choice at each stage with the hope of finding a global optimum. In many problems, a greedy strategy does not in general produce an optimal solution, but nonetheless a greedy heuristic may yield locally optimal solutions that approximate a global optimal solution in a reasonable time.
For many problems greedy algorithms fail to produce the optimal solution, and may even produce the *unique worst possible solution*. One example is the traveling salesman problem mentioned above: for each number of cities, there is an assignment of distances between the cities for which the nearest neighbor heuristic produces the unique worst possible tour.
### A thought on computational complexity
<img src='http://imgs.xkcd.com/comics/travelling_salesman_problem.png' align='center' width='65%'/>
[from XKCD](http://xkcd.com/399/)
### Check out [Peter Norvig](http://norvig.com/)'s [IPython notebook on the traveling salesperson problem](http://nbviewer.ipython.org/url/norvig.com/ipython/TSPv3.ipynb) on more alternatives for the TSP.
# Nature-inspired metaheuristics
- We have seen in class some examples of nature-inspired metaheuristics.
- They are an option in which we dedicate a little more computational effort in order to produce better solutions than `greedy_TSP()`.
> We will be using the [DEAP](https://github.com/DEAP/deap) library to tackle this problem using a genetic algorithm.
[<img src='https://raw.githubusercontent.com/DEAP/deap/master/doc/_static/deap_long.png' width='29%' align='center'/>](https://github.com/DEAP/deap)
```
from deap import algorithms, base, creator, tools
```
### Elements to take into account solving problems with genetic algorithms
* **Individual representation** (binary, floating-point, etc.);
* **evaluation** and **fitness assignment**;
* **selection**, that establishes a partial order of individuals in the population using their fitness function value as reference and determines the degree to which individuals in the population will take part in the generation of new (offspring) individuals.
* **variation**, that applies a range of evolution-inspired operators, like crossover, mutation, etc., to synthesize offspring individuals from the current (parent) population. This process is supposed to prime the fittest individuals so they play a bigger role in the generation of the offspring.
* **stopping criterion**, that determines when the algorithm should be stopped, either because the optimum was reached or because the optimization process is not progressing.
### Hence a 'general' evolutionary algorithm can be described as
```
def evolutionary_algorithm():
    'Pseudocode of an evolutionary algorithm'
    populations = []  # a list with all the populations
    populations.append(initialize_population(pop_size))
    t = 0
    while not stop_criterion(populations[t]):
        fitnesses = evaluate(populations[t])
        offspring = mating_and_variation(populations[t],
                                         fitnesses)
        populations.append(environmental_selection(populations[t],
                                                   offspring))
        t = t + 1
```
### Some preliminaries for the experiment
We will carry out our tests with a 30-city problem.
```
num_cities = 30
cities = generate_cities(num_cities)
```
The `toolbox` stores the setup of the algorithm. It describes the different elements to take into account.
```
toolbox = base.Toolbox()
```
### Individual representation and evaluation
* Individuals represent possible solutions to the problem.
* In the TSP case, it looks like the tour itself can be a suitable representation.
* For simplicity, an individual can be a list with the indexes corresponding to each city.
* This will simplify the crossover and mutation operators.
* We can rely on the `total_distance()` function for evaluation and set the fitness assignment to minimize it.
```
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
```
Let's now define that our individuals are composed of indexes that refer to elements of `cities` and that, correspondingly, the population is composed of individuals.
```
toolbox.register("indices", numpy.random.permutation, len(cities))
toolbox.register("individual", tools.initIterate, creator.Individual,
toolbox.indices)
toolbox.register("population", tools.initRepeat, list,
toolbox.individual)
```
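Just to make the representation concrete, we can peek at one freshly created individual; it is simply a permutation of the city indexes, with a not-yet-evaluated fitness (illustrative output, it will differ from run to run):
```
example_ind = toolbox.individual()   # uses the toolbox registered above
print(example_ind[:10])              # first ten indexes of the permutation
print(example_ind.fitness.valid)     # False: not evaluated yet
```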
Defining the crossover and mutation operators can be a challenging task.
There are various <a href='http://en.wikipedia.org/wiki/Crossover_(genetic_algorithm)#Crossover_for_Ordered_Chromosomes'>crossover operators</a> that have been devised to deal with ordered individuals like ours.
- We will be using DEAP's `deap.tools.cxOrdered()` crossover.
- For mutation we will swap elements from two points of the individual.
- This is performed by `deap.tools.mutShuffleIndexes()`.
```
toolbox.register("mate", tools.cxOrdered)
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0.05)
```
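To get a feel for what the ordered crossover does, here is a small, purely illustrative run on two hand-made permutations (DEAP operators modify the individuals in place and return them):
```
from deap import tools

parent1 = list(range(8))
parent2 = list(reversed(range(8)))
child1, child2 = tools.cxOrdered(parent1, parent2)
print(child1)   # each child is still a valid permutation of 0..7
print(child2)
```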
Evaluation can be easily defined from the `total_distance()` definition.
```
def create_tour(individual):
return [list(cities)[e] for e in individual]
def evaluation(individual):
'''Evaluates an individual by converting it into
a list of cities and passing that list to total_distance'''
return (total_distance(create_tour(individual)),)
toolbox.register("evaluate", evaluation)
```
We will employ tournament selection with size 3.
```
toolbox.register("select", tools.selTournament, tournsize=3)
```
Let's run the algorithm with a population of 100 individuals and 400 generations.
```
pop = toolbox.population(n=100)
%%time
result, log = algorithms.eaSimple(pop, toolbox,
cxpb=0.8, mutpb=0.2,
ngen=400, verbose=False)
```
### We can now review the results
The best individual of the last population:
```
best_individual = tools.selBest(result, k=1)[0]
print('Fitness of the best individual: ', evaluation(best_individual)[0])
plot_tour(create_tour(best_individual))
```
It is interesting to assess how the fitness of the population changed as the evolution process took place.
We can prepare a `deap.tools.Statistics` instance to specify what data to collect.
```
fit_stats = tools.Statistics(key=operator.attrgetter("fitness.values"))
fit_stats.register('mean', numpy.mean)
fit_stats.register('min', numpy.min)
```
We are all set now, but let's run the genetic algorithm again, this time configured to collect the statistics that we want to gather:
```
result, log = algorithms.eaSimple(toolbox.population(n=100), toolbox,
cxpb=0.5, mutpb=0.2,
ngen=400, verbose=False,
stats=fit_stats)
```
### Plotting mean and minimum fitness as evolution took place.
```
plt.figure(1, figsize=(11, 4), dpi=500)
plots = plt.plot(log.select('min'),'c-', log.select('mean'), 'b-', antialiased=True)
plt.legend(plots, ('Minimum fitness', 'Mean fitness'))
plt.ylabel('Fitness')
plt.xlabel('Iterations')
```
### How has the population evolved?
OK, but how did the population evolve? As TSP solutions are easy to visualize, we can plot the individuals of each population as the evolution progressed. We need a new `Statistics` instance prepared for that.
```
pop_stats = tools.Statistics(key=numpy.copy)
pop_stats.register('pop', numpy.copy) # -- copies the populations themselves
pop_stats.register('fitness', # -- computes and stores the fitnesses
lambda x : [evaluation(a) for a in x])
```
_Note_: I am aware that this could be done in a more efficient way.
```
result, log = algorithms.eaSimple(toolbox.population(n=100), toolbox,
cxpb=0.5, mutpb=0.2,
ngen=400, verbose=False,
stats=pop_stats)
```
### Plotting the individuals and their fitness (color-coded)
```
def plot_population(record, min_fitness, max_fitness):
'''
Plots all individuals in a population.
Darker individuals have a better fitness.
'''
pop = record['pop']
fits = record['fitness']
index = sorted(range(len(fits)), key=lambda k: fits[k])
norm=colors.Normalize(vmin=min_fitness,
vmax=max_fitness)
sm = cmx.ScalarMappable(norm=norm,
cmap=plt.get_cmap('PuBu'))
for i in range(len(index)):
color = sm.to_rgba(max_fitness - fits[index[i]][0])
plot_tour(create_tour(pop[index[i]]), alpha=0.5, color=color)
min_fitness = numpy.min(log.select('fitness'))
max_fitness = numpy.max(log.select('fitness'))
```
We can now plot the population as the evolutionary process progressed. Darker blue colors imply better fitness.
```
plt.figure(1, figsize=(11,11), dpi=500)
for i in range(0, 12):
plt.subplot(4,3,i+1)
it = int(math.ceil((len(log)-1.)/15))
plt.title('t='+str(it*i))
plot_population(log[it*i], min_fitness, max_fitness)
```
### Comparison with `greedy_TSP()`
```
%timeit total_distance(greedy_TSP(cities))
print('greedy_TSP() distance: ', total_distance(greedy_TSP(cities)))
print('Genetic algorithm best distance: ', evaluation(best_individual)[0])
```
The genetic algorithm outperformed the greedy approach at a *viable* computational cost.
- _Note 1_: *Viable* depends on the particular problem, of course.
- _Note 2_: These results depend on the cities that were randomly generated. Your mileage may vary.
Homework
--------
1. We have just performed one run of the experiment, but genetic algorithms are stochastic algorithms and their performance should be assessed in statistical terms. Modify the genetic algorithm code in order to be able to report the comparison with `greedy_TSP()` in statistically sound terms.
2. Population size should have an impact on the performance of the algorithm. Make an experiment regarding that.
3. What is the influence of the mutation and crossover probabilities in the performance of the genetic algorithm?
### Extra credit
The population of the previous experiment can be better appreciated in animated form. We are going to use `matplotlib.animation` and the [JSAnimation](https://github.com/jakevdp/JSAnimation) library (you need to install it if you plan to run this notebook locally). Similarly, this functionality needs an HTML5 capable browser.
Part of this code has also been inspired by [A Simple Animation: The Magic Triangle](http://nbviewer.ipython.org/url/jakevdp.github.io/downloads/notebooks/MagicTriangle.ipynb).
```
from JSAnimation import IPython_display
from matplotlib import animation
def update_plot_tour(plot, points, alpha=1, color='blue'):
'A function for updating a plot with an individual'
X, Y = XY(list(points) + [points[0]])
plot.set_data(X, Y)
plot.set_color(color)
return plot
def init():
'Initialization of all plots to empty data'
for p in list(tour_plots):
p.set_data([], [])
return tour_plots
def animate(i):
'Updates all plots to match frame _i_ of the animation'
pop = log[i]['pop']
fits = log[i]['fitness']
index = sorted(range(len(fits)), key=lambda k: fits[k])
norm=colors.Normalize(vmin=min_fitness,
vmax=max_fitness)
sm = cmx.ScalarMappable(norm=norm,
cmap=plt.get_cmap('PuBu'))
for j in range(len(tour_plots)):
color = sm.to_rgba(max_fitness - fits[index[j]][0])
update_plot_tour(tour_plots[j],
create_tour(pop[index[j]]),
alpha=0.5, color=color)
return tour_plots
```
The next step takes some time to execute. Use the video controls to see the evolution in animated form.
```
fig = plt.figure()
ax = plt.axes(xlim=(0, 900), ylim=(0, 600))
tour_plots = [ax.plot([], [], 'bo-', alpha=0.1) for i in range(len(log[0]['pop']))]
tour_plots = [p[0] for p in tour_plots]
animation.FuncAnimation(fig, animate, init_func=init,
frames=200, interval=60, blit=True)
```
Embedding the previous animation in the online notebook makes it really big. I have removed the result of the previous cell and created a `.gif` version of the animation for online viewing.
```
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=200, interval=60, blit=True)
anim.save('tsp-populations.gif', writer='imagemagick')
```

|
github_jupyter
|
```
import pandas as pd
BlendDF = pd.read_csv('BlendedReviews.csv')
import numpy as np
import pandas as pd
import nltk
import matplotlib.pyplot as plt
import multiprocessing
from sklearn import utils
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.ensemble import StackingClassifier
"""
VADER Score
First group of models are binary models predicting positive or negative rating
"""
#Split data into training and test sets with an 80/20 split for all binary models
X = BlendDF[['VaderCompound','Short','Verified','Long','IsImage']] #set independent variables for regression
Y = BlendDF['BinaryRating'] #set dependent variable for regression
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=1) #Split into 80/20 train and test sets
#Run binary logistic regression
LR = linear_model.LogisticRegression(solver='lbfgs',max_iter=10000)
LR.fit(X_train, Y_train)
print('Binary Logistic Intercept is:', LR.intercept_, '\n')
print('Binary Logistic Coefficients are:', LR.coef_, '\n')
#Look at ability of model to predict test set
LRScore = round((LR.score(X_test, Y_test))*100,2)
print('Binary Logistic Model Score for VADER Score:',LRScore,'%','\n')
Y_pred = LR.predict(X_test)
print(classification_report(Y_test, Y_pred, zero_division=0), '\n')
#Run Binary SVM
svclassifier = SVC(kernel='linear')
svclassifier.fit(X_train, Y_train)
#Look at ability of model to predict test set
SVMScore = round((svclassifier.score(X_test, Y_test))*100,2)
print('Binary SVM Score for VADER Score:',SVMScore,'%','\n')
Y_pred = svclassifier.predict(X_test)
print(classification_report(Y_test, Y_pred, zero_division=0), '\n')
#Run Naive Bayes Classifier
NB = GaussianNB()
NB.fit(X_train, Y_train)
#Look at ability of model to predict test set
NBScore = round((NB.score(X_test, Y_test))*100,2)
print('Binary Naive Bayes Classifier Score for VADER Score:',NBScore,'%','\n')
Y_pred = NB.predict(X_test)
print(classification_report(Y_test, Y_pred, zero_division=0), '\n')
#Implement stacked ensemble model
Estimators = [('NB',NB), ('SVM',svclassifier)]
StackedModel = StackingClassifier (estimators = Estimators, final_estimator = linear_model.LogisticRegression(solver='lbfgs',max_iter=10000))
StackedModel.fit(X_train, Y_train)
#Look at ability of stacked ensemble model to predict test set
StackScore = round((StackedModel.score(X_test, Y_test))*100,2)
print('Stacked ensemble model score for VADER Score: ',StackScore,'%','\n')
Y_pred = StackedModel.predict(X_test)
print(classification_report(Y_test, Y_pred, zero_division=0), '\n')
from yellowbrick.features import Manifold
viz = Manifold(manifold="tsne")
viz.fit_transform(X, Y)
viz.show()
from sklearn.model_selection import TimeSeriesSplit
from yellowbrick.target import ClassBalance
tscv = TimeSeriesSplit()
for train_index, test_index in tscv.split(X):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]
visualizer = ClassBalance()
visualizer.fit(Y_train, Y_test)
visualizer.show()
from sklearn.model_selection import TimeSeriesSplit
from sklearn.naive_bayes import GaussianNB
from yellowbrick.classifier import ClassificationReport
tscv = TimeSeriesSplit()
for train_index, test_index in tscv.split(X):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]
model = GaussianNB()
visualizer = ClassificationReport(model, support=False)
visualizer.fit(X_train, Y_train)
visualizer.score(X_test, Y_test)
visualizer.show()
from yellowbrick.classifier import ClassificationReport
from sklearn.linear_model import LogisticRegression
viz = ClassificationReport(LogisticRegression())
viz.fit(X_train, Y_train)
viz.score(X_test, Y_test)
viz.show()
from yellowbrick.classifier import ClassificationReport
viz = ClassificationReport(SVC())
viz.fit(X_train, Y_train)
viz.score(X_test, Y_test)
viz.show()
"""
VADER Score
Second group of models are multiclass models for 1-5 rating
"""
#Split data into training and test sets with an 80/20 split for multiclass models
X = BlendDF[['VaderCompound','Short','Verified','Long','IsImage']] #set independent variables for regression
Y = BlendDF['overall'] #set dependent variable for regression
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=1) #Split into 80/20 train and test sets
#Run multinomial logistic regression
MLR = linear_model.LogisticRegression(multi_class='multinomial', solver='lbfgs',max_iter=10000)
MLR.fit(X_train, Y_train)
#Look at ability of model to predict test set
MLRScore = round((MLR.score(X_test, Y_test))*100,2)
print('Multinomial Logistic Model Score for VADER Score: ',MLRScore,'%','\n')
Y_pred = MLR.predict(X_test)
print(classification_report(Y_test, Y_pred, zero_division=0), '\n')
#Run Multiclass SVM
msvclassifier = SVC(kernel='linear')
msvclassifier.fit(X_train, Y_train)
#Look at ability of model to predict test set
MSVMScore = round((msvclassifier.score(X_test, Y_test))*100,2)
print('Multiclass SVM Score is for VADER Score: ',MSVMScore,'%','\n')
Y_pred = msvclassifier.predict(X_test)
print(classification_report(Y_test, Y_pred, zero_division=0), '\n')
#Run K Nearest Neighbors Algorithm
KNN = KNeighborsClassifier(n_neighbors = 15)
KNN.fit(X_train, Y_train)
#Look at ability of model to predict test set
KNNScore = round((KNN.score(X_test, Y_test))*100,2)
print('K Nearest Neighbors Algorithm Model Score for VADER Score: ',KNNScore,'%','\n')
Y_pred = KNN.predict(X_test)
print(classification_report(Y_test, Y_pred, zero_division=0), '\n')
#Run Random Forest Algorithm
RF = RandomForestClassifier(n_estimators=5, random_state=0)
RF.fit(X_train, Y_train)
#Look at ability of model to predict test set
RFScore = round((RF.score(X_test, Y_test))*100,2)
print('Random Forest Classifier Model Score for VADER Score: ',RFScore,'%','\n')
Y_pred = RF.predict(X_test)
print(classification_report(Y_test, Y_pred, zero_division=0), '\n')
#Implement stacked ensemble model
Estimators = [('KNN',KNN), ('SVM',msvclassifier)]
StackedModel = StackingClassifier (estimators = Estimators, final_estimator = linear_model.LogisticRegression(multi_class='multinomial', solver='lbfgs',max_iter=10000))
StackedModel.fit(X_train, Y_train)
#Look at ability of stacked ensemble model to predict test set
StackScore = round((StackedModel.score(X_test, Y_test))*100,2)
print('Stacked ensemble model score for VADER Score: ',StackScore,'%','\n')
Y_pred = StackedModel.predict(X_test)
print(classification_report(Y_test, Y_pred, zero_division=0), '\n')
from yellowbrick.classifier import ClassificationReport
from sklearn.linear_model import LogisticRegression
viz = ClassificationReport(LogisticRegression())
viz.fit(X_train, Y_train)
viz.score(X_test, Y_test)
viz.show()
from yellowbrick.classifier import ClassificationReport
viz = ClassificationReport(KNeighborsClassifier(n_neighbors = 15))
viz.fit(X_train, Y_train)
viz.score(X_test, Y_test)
viz.show()
from yellowbrick.classifier import ClassificationReport
viz = ClassificationReport(SVC(kernel='linear'))
viz.fit(X_train, Y_train)
viz.score(X_test, Y_test)
viz.show()
from yellowbrick.classifier import ClassificationReport
viz = ClassificationReport(RandomForestClassifier(n_estimators=5, random_state=0))
viz.fit(X_train, Y_train)
viz.score(X_test, Y_test)
viz.show()
from sklearn.model_selection import TimeSeriesSplit
from yellowbrick.target import ClassBalance
# Create the training and test data
tscv = TimeSeriesSplit()
for train_index, test_index in tscv.split(X):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]
# Instantiate the visualizer
visualizer = ClassBalance()
visualizer.fit(Y_train, Y_test) # Fit the data to the visualizer
visualizer.show()
```
# Mining Input Grammars
So far, the grammars we have seen have been mostly specified manually – that is, you (or the person knowing the input format) had to design and write a grammar in the first place. While the grammars we have seen so far have been rather simple, creating a grammar for complex inputs can involve quite some effort. In this chapter, we therefore introduce techniques that _automatically mine grammars from programs_ – by executing the programs and observing how they process which parts of the input. In conjunction with a grammar fuzzer, this allows us to
1. take a program,
2. extract its input grammar, and
3. fuzz it with high efficiency and effectiveness, using the concepts in this book.
**Prerequisites**
* You should have read the [chapter on grammars](Grammars.ipynb).
* The [chapter on configuration fuzzing](ConfigurationFuzzer.ipynb) introduces grammar mining for configuration options, as well as observing variables and values during execution.
* We use the tracer from the [chapter on coverage](Coverage.ipynb).
* The concept of parsing from the [chapter on parsers](Parser.ipynb) is also useful.
## Synopsis
<!-- Automatically generated. Do not edit. -->
To [use the code provided in this chapter](Importing.ipynb), write
```python
>>> from fuzzingbook.GrammarMiner import <identifier>
```
and then make use of the following features.
This chapter provides a number of classes to mine input grammars from existing programs. The function `recover_grammar()` could be the easiest to use. It takes a function and a set of inputs, and returns a grammar that describes its input language.
We apply `recover_grammar()` on a `url_parse()` function that takes and decomposes URLs:
```python
>>> url_parse('https://www.fuzzingbook.org/')
>>> URLS
['http://user:pass@www.google.com:80/?q=path#ref',
'https://www.cispa.saarland:80/',
'http://www.fuzzingbook.org/#News']
```
We extract the input grammar for `url_parse()` using `recover_grammar()`:
```python
>>> grammar = recover_grammar(url_parse, URLS)
>>> grammar
{'<start>': ['<urlsplit@394:url>'],
'<urlsplit@394:url>': ['<__new__@12:scheme>:<_splitnetloc@386:url>'],
'<__new__@12:scheme>': ['https', 'http', '<__new__@12:scheme>'],
'<_splitnetloc@386:url>': ['//<__new__@12:netloc><urlsplit@415:url>',
'//<__new__@12:netloc>/'],
'<__new__@12:netloc>': ['www.cispa.saarland:80',
'user:pass@www.google.com:80',
'<__new__@12:netloc>',
'www.fuzzingbook.org'],
'<urlsplit@415:url>': ['/#<__new__@12:fragment>',
'<urlsplit@420:url>#<__new__@12:fragment>'],
'<urlsplit@420:url>': ['/?<__new__@12:query>'],
'<__new__@12:query>': ['<__new__@12:query>', 'q=path'],
'<__new__@12:fragment>': ['News', '<__new__@12:fragment>', 'ref']}
```
The names of nonterminals are a bit technical, but the grammar nicely represents the structure of the input; for instance, the different schemes (`"http"`, `"https"`) are all identified.
The grammar can be immediately used for fuzzing, producing arbitrary combinations of input elements, which are all syntactically valid.
```python
>>> from GrammarCoverageFuzzer import GrammarCoverageFuzzer
>>> fuzzer = GrammarCoverageFuzzer(grammar)
>>> [fuzzer.fuzz() for i in range(5)]
['http://www.cispa.saarland:80/',
'https://www.fuzzingbook.org/?q=path#ref',
'http://user:pass@www.google.com:80/#News',
'http://www.fuzzingbook.org/#News',
'http://www.cispa.saarland:80/?q=path#ref']
```
Being able to automatically extract a grammar and to use this grammar for fuzzing makes for very effective test generation with a minimum of manual work.
## A Grammar Challenge
Consider the `process_inventory()` method from the [chapter on parsers](Parser.ipynb):
```
import fuzzingbook_utils
from Parser import process_inventory, process_vehicle, process_car, process_van, lr_graph # minor dependency
```
It takes inputs of the following form.
```
INVENTORY = """\
1997,van,Ford,E350
2000,car,Mercury,Cougar
1999,car,Chevy,Venture\
"""
print(process_inventory(INVENTORY))
```
We found from the [chapter on parsers](Parser.ipynb) that coarse grammars do not work well for fuzzing when the input format includes details expressed only in code. That is, even though we have the formal specification of CSV files ([RFC 4180](https://tools.ietf.org/html/rfc4180)), the inventory system includes further rules as to what is expected at each index of the CSV file. The solution of simply recombining existing inputs, while practical, is incomplete. In particular, it relies on a formal input specification being available in the first place. However, we have no assurance that the program obeys the input specification given.
One of the ways out of this predicament is to interrogate the program under test as to what its input specification is. That is, if the program under test is written in a style such that specific methods are responsible for handling specific parts of the input, one can recover the parse tree by observing the process of parsing. Further, one can recover a reasonable approximation of the grammar by abstraction from multiple input trees.
_We start with the assumption (1) that the program is written in such a fashion that specific methods are responsible for parsing specific fragments of the input -- this includes almost all ad hoc parsers._
The idea is as follows (a minimal end-to-end sketch follows this list):
* Hook into the Python execution and observe the fragments of input string as they are produced and named in different methods.
* Stitch the input fragments together in a tree structure to retrieve the **Parse Tree**.
* Abstract common elements from multiple parse trees to produce the **Context Free Grammar** of the input.
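Before building the individual pieces, here is a minimal end-to-end sketch of how they will fit together. The classes used (`Tracer`, `DefineTracker`, `TreeMiner`, `GrammarMiner`) are the ones developed step by step below; the wrapper function `mine_grammar()` itself is only illustrative and not part of the chapter's final implementation.
```python
# Illustrative preview only -- the classes referenced here are defined below.
def mine_grammar(function, inputs):
    miner = GrammarMiner()
    for inp in inputs:
        with Tracer(inp) as tracer:               # 1. observe input fragments
            function(tracer.my_input)
        assignments = DefineTracker(tracer.my_input,
                                    tracer.trace).assignments()
        tree = TreeMiner(tracer.my_input, assignments)  # 2. assemble a derivation tree
        miner.add_tree(tree)                            # 3. abstract into a grammar
    return miner.grammar
```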
## A Simple Grammar Miner
Say we want to obtain the input grammar for the function `process_vehicle()`. We first collect the sample inputs for this function.
```
VEHICLES = INVENTORY.split('\n')
```
The methods responsible for processing the inventory are the following.
```
INVENTORY_METHODS = {
'process_inventory',
'process_vehicle',
'process_van',
'process_car'}
```
We have seen from the chapter on [configuration fuzzing](ConfigurationFuzzer.ipynb) that one can hook into the Python runtime to observe the arguments to a function and any local variables created. We have also seen that one can obtain the context of execution by inspecting the `frame` argument. Here is a simple tracer that can return the local variables and other contextual information in a traced function. We reuse the `Coverage` tracing class.
### Tracer
```
from Coverage import Coverage
import inspect
class Tracer(Coverage):
def traceit(self, frame, event, arg):
method_name = inspect.getframeinfo(frame).function
if method_name not in INVENTORY_METHODS:
return
file_name = inspect.getframeinfo(frame).filename
param_names = inspect.getargvalues(frame).args
lineno = inspect.getframeinfo(frame).lineno
local_vars = inspect.getargvalues(frame).locals
print(event, file_name, lineno, method_name, param_names, local_vars)
return self.traceit
```
We run the code under trace context.
```
with Tracer() as tracer:
process_vehicle(VEHICLES[0])
```
The main thing that we want out of tracing is a list of assignments of input fragments to different variables. We can use the tracing facility `settrace()` to get that as we showed above.
However, the `settrace()` function hooks into the Python debugging facility. When it is in operation, no debugger can hook into the program. That is, if there is a problem with our grammar miner, we will not be able to attach a debugger to it to understand what is happening. This is not ideal. Hence, we limit the tracer to the simplest implementation possible, and implement the core of grammar mining in later stages.
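For readers unfamiliar with the underlying mechanism, here is a minimal, self-contained sketch of `sys.settrace()`, independent of the `Tracer` class used in this chapter (the names `show_calls` and `demo` are for illustration only):
```
import sys

def show_calls(frame, event, arg):
    # Print the name of every function being called; returning the trace
    # function keeps tracing active inside the called function as well.
    if event == 'call':
        print('call to', frame.f_code.co_name)
    return show_calls

def demo(x):
    return x * 2

sys.settrace(show_calls)
demo(21)
sys.settrace(None)  # always switch tracing off again
```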
The `traceit()` function relies on information from the `frame` variable which exposes Python internals. We define a `context` class that encapsulates the information that we need from the `frame`.
### Context
The `Context` class provides easy access to the information such as the current module, and parameter names.
```
class Context:
def __init__(self, frame, track_caller=True):
self.method = inspect.getframeinfo(frame).function
self.parameter_names = inspect.getargvalues(frame).args
self.file_name = inspect.getframeinfo(frame).filename
self.line_no = inspect.getframeinfo(frame).lineno
def _t(self):
return (self.file_name, self.line_no, self.method,
','.join(self.parameter_names))
def __repr__(self):
return "%s:%d:%s(%s)" % self._t()
```
Here are a few convenience methods of `Context` that extract variables from the `frame` and process them.
```
class Context(Context):
def extract_vars(self, frame):
return inspect.getargvalues(frame).locals
def parameters(self, all_vars):
return {k: v for k, v in all_vars.items() if k in self.parameter_names}
def qualified(self, all_vars):
return {"%s:%s" % (self.method, k): v for k, v in all_vars.items()}
```
We hook a printout of the context into our `traceit()` to see it in action. First, we define a `log_event()` function for displaying events.
```
def log_event(event, var):
print({'call': '->', 'return': '<-'}.get(event, ' '), var)
```
And use the `log_event()` in the `traceit()` function.
```
class Tracer(Tracer):
def traceit(self, frame, event, arg):
log_event(event, Context(frame))
return self.traceit
```
Running `process_vehicle()` under trace prints the contexts encountered.
```
with Tracer() as tracer:
process_vehicle(VEHICLES[0])
```
The trace produced by executing any function can get overwhelmingly large. Hence, we need to restrict our attention to specific modules. Further, we restrict our attention exclusively to `str` variables, since these are more likely to contain input fragments. (We will show how to deal with complex objects later, in the exercises.)
The `Context` class we developed earlier is used to decide which modules to monitor, and which variables to trace.
We store the current *input string* so that it can be used to determine if any particular string fragments came from the current input string. Any optional arguments are processed separately.
```
class Tracer(Tracer):
def __init__(self, my_input, **kwargs):
self.options(kwargs)
self.my_input, self.trace = my_input, []
```
We use an optional argument `files` to indicate the specific source files we are interested in, and `methods` to indicate which specific methods that are of interest. Further, we also use `log` to specify whether verbose logging should be enabled during trace. We use the `log_event()` method we defined earlier for logging.
The options processing is as below.
```
class Tracer(Tracer):
def options(self, kwargs):
self.files = kwargs.get('files', [])
self.methods = kwargs.get('methods', [])
self.log = log_event if kwargs.get('log') else lambda _evt, _var: None
```
The `files` and `methods` are checked to determine whether a particular event should be traced or not.
```
class Tracer(Tracer):
def tracing_context(self, cxt, event, arg):
fres = not self.files or any(
cxt.file_name.endswith(f) for f in self.files)
mres = not self.methods or any(cxt.method == m for m in self.methods)
return fres and mres
```
Similar to the context of events, we also want to restrict our attention to specific variables. For now, we want to focus only on strings. (See the Exercises at the end of the chapter on how to extend it to other kinds of objects).
```
class Tracer(Tracer):
def tracing_var(self, k, v):
return isinstance(v, str)
```
We modify the `traceit()` to call an `on_event()` function with the context information only on the specific events we are interested in.
```
class Tracer(Tracer):
def on_event(self, event, arg, cxt, my_vars):
self.trace.append((event, arg, cxt, my_vars))
def create_context(self, frame):
return Context(frame)
def traceit(self, frame, event, arg):
cxt = self.create_context(frame)
if not self.tracing_context(cxt, event, arg):
return self.traceit
self.log(event, cxt)
my_vars = {
k: v
for k, v in cxt.extract_vars(frame).items()
if self.tracing_var(k, v)
}
self.on_event(event, arg, cxt, my_vars)
return self.traceit
```
The `Tracer` class can now focus on specific kinds of events in specific files. Further, it provides a first-level filter for the variables that we find interesting. For example, we want to focus specifically on variables from `process_*` methods that contain input fragments. Here is how our updated `Tracer` can be used:
```
with Tracer(VEHICLES[0], methods=INVENTORY_METHODS, log=True) as tracer:
process_vehicle(VEHICLES[0])
```
The execution produced the following trace.
```
for t in tracer.trace:
print(t[0], t[2].method, dict(t[3]))
```
Since we are saving the input already in Tracer, it is redundant to specify it separately again as an argument.
```
with Tracer(VEHICLES[0], methods=INVENTORY_METHODS, log=True) as tracer:
process_vehicle(tracer.my_input)
```
### DefineTracker
We define a `DefineTracker` class that processes the trace from the `Tracer`. The idea is to store different variable definitions which are input fragments.
The tracker identifies string fragments that are part of the input string, and stores them in a dictionary `my_assignments`. It saves the trace, and the corresponding input for processing. Finally it calls `process()` to process the `trace` it was given. We will start with a simple tracker that relies on certain assumptions, and later see how these assumptions can be relaxed.
```
class DefineTracker:
def __init__(self, my_input, trace, **kwargs):
self.options(kwargs)
self.my_input = my_input
self.trace = trace
self.my_assignments = {}
self.process()
```
One of the problems with using substring search is that short string sequences tend to be included in other string sequences even though they may not have come from the original string. That is, say the input fragment is `v`: it could have equally come from either `van` or `Chevy`. We rely on being able to predict the exact place in the input where a given fragment occurred. Hence, we define a constant `FRAGMENT_LEN` and ignore strings shorter than that length. We also incorporate a logging facility as before.
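A quick check on the inventory illustrates the ambiguity: the single character `'v'` occurs in more than one place (in `van` as well as in `Chevy`), whereas the longer fragment `'van'` pins down a unique origin.
```
# 'v' occurs both in 'van' and in 'Chevy'; 'van' occurs exactly once.
print(INVENTORY.count('v'), INVENTORY.count('van'))
```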
```
FRAGMENT_LEN = 3
class DefineTracker(DefineTracker):
def options(self, kwargs):
self.log = log_event if kwargs.get('log') else lambda _evt, _var: None
self.fragment_len = kwargs.get('fragment_len', FRAGMENT_LEN)
```
Our tracer simply records the variable values as they occur. We next need to check if the variables contain values from the **input string**. Common ways to do this are to rely on symbolic execution or at least dynamic tainting, which are powerful, but also complex. However, one can obtain a reasonable approximation by simply relying on substring search. That is, we consider any value produced that is a substring of the original input string to have come from the original input.
We define `is_input_fragment()` method that relies on string inclusion to detect if the string came from the input.
```
class DefineTracker(DefineTracker):
def is_input_fragment(self, var, value):
return len(value) >= self.fragment_len and value in self.my_input
```
We can use `is_input_fragment()` to select only a subset of variables defined, as implemented below in `fragments()`.
```
class DefineTracker(DefineTracker):
def fragments(self, variables):
return {k: v for k, v in variables.items(
) if self.is_input_fragment(k, v)}
```
The tracker processes each event, and at each event, it updates the dictionary `my_assignments` with the current local variables that contain strings that are part of the input. Note that there is a choice here with respect to what happens during reassignment. We can either discard all the reassignments, or keep only the last assignment. Here, we choose the latter. If you want the former behavior, check whether the key already exists in `my_assignments` before storing a fragment (a sketch of this variant follows the next code cell).
```
class DefineTracker(DefineTracker):
def track_event(self, event, arg, cxt, my_vars):
self.log(event, (cxt.method, my_vars))
self.my_assignments.update(self.fragments(my_vars))
def process(self):
for event, arg, cxt, my_vars in self.trace:
self.track_event(event, arg, cxt, my_vars)
```
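As noted above, one could keep the first assignment instead. A minimal variant (a hypothetical `FirstDefineTracker`, not used in the rest of this chapter) would only have to override `track_event()`:
```
class FirstDefineTracker(DefineTracker):
    def track_event(self, event, arg, cxt, my_vars):
        # Keep only the first assignment of each variable; ignore reassignments.
        self.log(event, (cxt.method, my_vars))
        for k, v in self.fragments(my_vars).items():
            if k not in self.my_assignments:
                self.my_assignments[k] = v
```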
Using the tracker, we can obtain the input fragments. For example, say we are only interested in strings that are at least `5` characters long.
```
tracker = DefineTracker(tracer.my_input, tracer.trace, fragment_len=5)
for k, v in tracker.my_assignments.items():
print(k, '=', repr(v))
```
Or strings of the default minimum length (`FRAGMENT_LEN`, i.e., at least 3 characters long).
```
tracker = DefineTracker(tracer.my_input, tracer.trace)
for k, v in tracker.my_assignments.items():
print(k, '=', repr(v))
class DefineTracker(DefineTracker):
def assignments(self):
return self.my_assignments.items()
```
### Assembling a Derivation Tree
```
from Grammars import START_SYMBOL, syntax_diagram, is_nonterminal
from GrammarFuzzer import GrammarFuzzer, FasterGrammarFuzzer, display_tree, tree_to_string
```
The input fragments from the `DefineTracker` only tell half the story. The fragments may be created at different stages of parsing. Hence, we need to assemble the fragments to a derivation tree of the input. The basic idea is as follows:
Our input from the previous step was:
```python
"1997,van,Ford,E350"
```
We start a derivation tree, and associate it with the start symbol in the grammar.
```
derivation_tree = (START_SYMBOL, [("1997,van,Ford,E350", [])])
display_tree(derivation_tree)
```
The next input was:
```python
vehicle = "1997,van,Ford,E350"
```
Since vehicle covers the `<start>` node's value completely, we replace the value with the vehicle node.
```
derivation_tree = (START_SYMBOL, [('<vehicle>', [("1997,van,Ford,E350", [])],
[])])
display_tree(derivation_tree)
```
The next input was:
```python
year = '1997'
```
Traversing the derivation tree from `<start>`, we see that it replaces a portion of the `<vehicle>` node's value. Hence we split the `<vehicle>` node's value into two children, where one corresponds to the value `"1997"` and the other to `",van,Ford,E350"`, and replace the first one with the node `<year>`.
```
derivation_tree = (START_SYMBOL, [('<vehicle>', [('<year>', [('1997', [])]),
(",van,Ford,E350", [])], [])])
display_tree(derivation_tree)
```
We perform similar operations for
```python
company = 'Ford'
```
```
derivation_tree = (START_SYMBOL, [('<vehicle>', [('<year>', [('1997', [])]),
(",van,", []),
('<company>', [('Ford', [])]),
(",E350", [])], [])])
display_tree(derivation_tree)
```
Similarly for
```python
kind = 'van'
```
and
```python
model = 'E350'
```
```
derivation_tree = (START_SYMBOL, [('<vehicle>', [('<year>', [('1997', [])]),
(",", []),
("<kind>", [('van', [])]),
(",", []),
('<company>', [('Ford', [])]),
(",", []),
("<model>", [('E350', [])])
], [])])
display_tree(derivation_tree)
```
We now develop the complete algorithm with the above described steps.
The derivation tree `TreeMiner` is initialized with the input string, and the variable assignments, and it converts the assignments to the corresponding derivation tree.
```
class TreeMiner:
def __init__(self, my_input, my_assignments, **kwargs):
self.options(kwargs)
self.my_input = my_input
self.my_assignments = my_assignments
self.tree = self.get_derivation_tree()
def options(self, kwargs):
self.log = log_call if kwargs.get('log') else lambda _i, _v: None
def get_derivation_tree(self):
return (START_SYMBOL, [])
```
The `log_call()` function is defined as follows.
```
def log_call(indent, var):
print('\t' * indent, var)
```
The basic idea is as follows:
* **For now, we assume that the value assigned to a variable is stable. That is, it is never reassigned. In particular, there are no recursive calls, or multiple calls to the same function from different parts.** (We will show how to overcome this limitation later).
* For each pair _var_, _value_ found in `my_assignments`:
1. We search for occurrences of _value_ `val` in the derivation tree recursively.
2. If an occurrence was found as a value `V1` of a node `P1`, we partition the value of the node `P1` into three parts, with the central part matching the _value_ `val`, and the first and last part, the corresponding prefix and suffix in `V1`.
3. Reconstitute the node `P1` with three children, where prefix and suffix mentioned earlier are string values, and the matching value `val` is replaced by a node `var` with a single value `val`.
First, we define a wrapper to generate a nonterminal from a variable name.
```
def to_nonterminal(var):
return "<" + var.lower() + ">"
```
The `string_part_of_value()` method checks whether the given `part` value was part of the whole.
```
class TreeMiner(TreeMiner):
def string_part_of_value(self, part, value):
return (part in value)
```
The `partition_by_part()` splits the `value` by the given part if it matches, and returns a list containing the first part, the part that was replaced, and the last part. This is a format that can be used as a part of the list of children.
```
class TreeMiner(TreeMiner):
def partition(self, part, value):
return value.partition(part)
class TreeMiner(TreeMiner):
def partition_by_part(self, pair, value):
k, part = pair
prefix_k_suffix = [
(k, [[part, []]]) if i == 1 else (e, [])
for i, e in enumerate(self.partition(part, value))
if e]
return prefix_k_suffix
```
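To see the format concretely, here is what `partition_by_part()` yields for one fragment; the expected result is shown as a comment.
```
m = TreeMiner('', {})
m.partition_by_part(('<kind>', 'van'), "1997,van,Ford,E350")
# [('1997,', []), ('<kind>', [['van', []]]), (',Ford,E350', [])]
```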
The `insert_into_tree()` method accepts a given tree `tree` and a `(k,v)` pair. It recursively checks whether the given pair can be applied. If the pair can be applied, it applies the pair and returns `True`.
```
class TreeMiner(TreeMiner):
def insert_into_tree(self, my_tree, pair):
var, values = my_tree
k, v = pair
self.log(1, "- Node: %s\t\t? (%s:%s)" % (var, k, repr(v)))
applied = False
for i, value_ in enumerate(values):
value, arr = value_
self.log(2, "-> [%d] %s" % (i, repr(value)))
if is_nonterminal(value):
applied = self.insert_into_tree(value_, pair)
if applied:
break
elif self.string_part_of_value(v, value):
prefix_k_suffix = self.partition_by_part(pair, value)
del values[i]
for j, rep in enumerate(prefix_k_suffix):
values.insert(j + i, rep)
applied = True
self.log(2, " > %s" % (repr([i[0] for i in prefix_k_suffix])))
break
else:
continue
return applied
```
Here is how `insert_into_tree()` is used.
```
tree = (START_SYMBOL, [("1997,van,Ford,E350", [])])
m = TreeMiner('', {}, log=True)
```
First, we have our input string as the only node.
```
display_tree(tree)
```
Inserting the `<vehicle>` node.
```
v = m.insert_into_tree(tree, ('<vehicle>', "1997,van,Ford,E350"))
display_tree(tree)
```
Inserting `<model>` node.
```
v = m.insert_into_tree(tree, ('<model>', 'E350'))
display_tree((tree))
```
Inserting `<company>`.
```
v = m.insert_into_tree(tree, ('<company>', 'Ford'))
display_tree(tree)
```
Inserting `<kind>`.
```
v = m.insert_into_tree(tree, ('<kind>', 'van'))
display_tree(tree)
```
Inserting `<year>`.
```
v = m.insert_into_tree(tree, ('<year>', '1997'))
display_tree(tree)
```
To make life simple, we define a wrapper method `nt_var()` that converts a token to its corresponding nonterminal symbol.
```
class TreeMiner(TreeMiner):
def nt_var(self, var):
return var if is_nonterminal(var) else to_nonterminal(var)
```
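For instance, `nt_var()` leaves an existing nonterminal untouched and wraps a plain variable name:
```
m = TreeMiner('', {})
m.nt_var('model'), m.nt_var('<model>')
# ('<model>', '<model>')
```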
Now, we need a way to apply a new definition to the derivation tree.
```
class TreeMiner(TreeMiner):
def apply_new_definition(self, tree, var, value):
nt_var = self.nt_var(var)
return self.insert_into_tree(tree, (nt_var, value))
```
This algorithm is implemented as `get_derivation_tree()`.
```
class TreeMiner(TreeMiner):
def get_derivation_tree(self):
tree = (START_SYMBOL, [(self.my_input, [])])
for var, value in self.my_assignments:
self.log(0, "%s=%s" % (var, repr(value)))
self.apply_new_definition(tree, var, value)
return tree
```
The `TreeMiner` is used as follows:
```
with Tracer(VEHICLES[0]) as tracer:
process_vehicle(tracer.my_input)
assignments = DefineTracker(tracer.my_input, tracer.trace).assignments()
dt = TreeMiner(tracer.my_input, assignments, log=True)
dt.tree
```
The obtained derivation tree is as below.
```
display_tree(TreeMiner(tracer.my_input, assignments).tree)
```
Combining all the pieces:
```
trees = []
for vehicle in VEHICLES:
print(vehicle)
with Tracer(vehicle) as tracer:
process_vehicle(tracer.my_input)
assignments = DefineTracker(tracer.my_input, tracer.trace).assignments()
trees.append((tracer.my_input, assignments))
for var, val in assignments:
print(var + " = " + repr(val))
print()
```
The corresponding derivation trees are below.
```
csv_dt = []
for inputstr, assignments in trees:
print(inputstr)
dt = TreeMiner(inputstr, assignments)
csv_dt.append(dt)
display_tree(dt.tree)
```
### Recovering Grammars from Derivation Trees
We define a class `GrammarMiner` that can combine multiple derivation trees to produce the grammar. The initial grammar is empty.
```
class GrammarMiner:
def __init__(self):
self.grammar = {}
```
The `tree_to_grammar()` method converts our derivation tree to a grammar by picking one node at a time, and adding it to a grammar. The node name becomes the key, and any list of children it has becomes another alternative for that key.
```
class GrammarMiner(GrammarMiner):
def tree_to_grammar(self, tree):
node, children = tree
one_alt = [ck for ck, gc in children]
hsh = {node: [one_alt] if one_alt else []}
for child in children:
if not is_nonterminal(child[0]):
continue
chsh = self.tree_to_grammar(child)
for k in chsh:
if k not in hsh:
hsh[k] = chsh[k]
else:
hsh[k].extend(chsh[k])
return hsh
gm = GrammarMiner()
gm.tree_to_grammar(csv_dt[0].tree)
```
The grammar being generated here is in the canonical format, where each rule is a list of tokens. We define a function `readable()` that takes a canonical grammar and returns it in a readable form.
```
def readable(grammar):
def readable_rule(rule):
return ''.join(rule)
return {k: list(set(readable_rule(a) for a in grammar[k]))
for k in grammar}
syntax_diagram(readable(gm.tree_to_grammar(csv_dt[0].tree)))
```
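For instance, a canonical rule is a list of tokens, which `readable()` joins into a single string:
```
readable({'<vehicle>': [['<year>', ',', '<kind>', ',', '<company>', ',', '<model>']]})
# {'<vehicle>': ['<year>,<kind>,<company>,<model>']}
```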
The `add_tree()` method takes a derivation tree, combines its nonterminals with those of the current grammar, and updates the definitions of each nonterminal.
```
import itertools
class GrammarMiner(GrammarMiner):
def add_tree(self, t):
t_grammar = self.tree_to_grammar(t.tree)
self.grammar = {
key: self.grammar.get(key, []) + t_grammar.get(key, [])
for key in itertools.chain(self.grammar.keys(), t_grammar.keys())
}
```
The `add_tree()` is used as follows:
```
inventory_grammar = GrammarMiner()
for dt in csv_dt:
inventory_grammar.add_tree(dt)
syntax_diagram(readable(inventory_grammar.grammar))
```
Given execution traces from various inputs, one can define `update_grammar()` to obtain the complete grammar from the traces.
```
class GrammarMiner(GrammarMiner):
def update_grammar(self, inputstr, trace):
at = self.create_tracker(inputstr, trace)
dt = self.create_tree_miner(inputstr, at.assignments())
self.add_tree(dt)
return self.grammar
def create_tracker(self, *args):
return DefineTracker(*args)
def create_tree_miner(self, *args):
return TreeMiner(*args)
```
The complete grammar recovery is implemented in `recover_grammar()`.
```
def recover_grammar(fn, inputs, **kwargs):
miner = GrammarMiner()
for inputstr in inputs:
with Tracer(inputstr, **kwargs) as tracer:
fn(tracer.my_input)
miner.update_grammar(tracer.my_input, tracer.trace)
return readable(miner.grammar)
```
Note that the grammar could have been retrieved directly from the tracker, without the intermediate derivation tree stage. However, going through the derivation tree allows one to inspect the inputs being fragmented and verify that it happens correctly.
#### Example 1. Recovering the Inventory Grammar
```
inventory_grammar = recover_grammar(process_vehicle, VEHICLES)
inventory_grammar
```
#### Example 2. Recovering URL Grammar
Our algorithm is robust enough to recover grammars from real-world programs. For example, the `urlparse` function in the Python `urllib` module accepts the following sample URLs.
```
URLS = [
'http://user:pass@www.google.com:80/?q=path#ref',
'https://www.cispa.saarland:80/',
'http://www.fuzzingbook.org/#News',
]
```
The `urllib` module caches its intermediate parse results for faster access. Hence, we need to clear this cache using `clear_cache()` before every invocation.
```
from urllib.parse import urlparse, clear_cache
```
We use the sample URLs to recover the grammar as follows. Since `urlparse` caches its previous parse results, we define a new function `url_parse()` that clears the cache before each call.
```
def url_parse(url):
clear_cache()
urlparse(url)
trees = []
for url in URLS:
print(url)
with Tracer(url) as tracer:
url_parse(tracer.my_input)
assignments = DefineTracker(tracer.my_input, tracer.trace).assignments()
trees.append((tracer.my_input, assignments))
for var, val in assignments:
print(var + " = " + repr(val))
print()
url_dt = []
for inputstr, assignments in trees:
print(inputstr)
dt = TreeMiner(inputstr, assignments)
url_dt.append(dt)
display_tree(dt.tree)
```
Using `url_parse()` to recover grammar.
```
url_grammar = recover_grammar(url_parse, URLS, files=['urllib/parse.py'])
syntax_diagram(url_grammar)
```
The recovered grammar describes the URL format reasonably well.
### Fuzzing
We can now use our recovered grammar for fuzzing as follows.
First, the inventory grammar.
```
f = GrammarFuzzer(inventory_grammar)
for _ in range(10):
print(f.fuzz())
```
Next, the URL grammar.
```
f = GrammarFuzzer(url_grammar)
for _ in range(10):
print(f.fuzz())
```
What this means is that we can now take a program and a few samples, extract its grammar, and then use this very grammar for fuzzing. Now that's quite an opportunity!
### Problems with the Simple Miner
One of the problems with our simple grammar miner is the assumption that the values assigned to variables are stable. Unfortunately, that may not hold true in all cases. For example, here is a URL with a slightly different format.
```
URLS_X = URLS + ['ftp://freebsd.org/releases/5.8']
```
The grammar generated from this set of samples is not as nice as the one we got earlier.
```
url_grammar = recover_grammar(url_parse, URLS_X, files=['urllib/parse.py'])
syntax_diagram(url_grammar)
```
Clearly, something has gone wrong.
To investigate why the `url` definition has gone wrong, let us inspect the trace for the URL.
```
clear_cache()
with Tracer(URLS_X[0]) as tracer:
urlparse(tracer.my_input)
for i, t in enumerate(tracer.trace):
if t[0] in {'call', 'line'} and 'parse.py' in str(t[2]) and t[3]:
print(i, t[2]._t()[1], t[3:])
```
Notice how the value of `url` changes as the parsing progresses. This violates our assumption that the value assigned to a variable is stable. We next look at how this limitation can be removed.
## Grammar Miner with Reassignment
One way to uniquely identify different variables is to annotate them with *line numbers*, both where they are defined and where their value changes. Consider the code fragment below.
### Tracking variable assignment locations
```
def C(cp_1):
c_2 = cp_1 + '@2'
c_3 = c_2 + '@3'
return c_3
def B(bp_7):
b_8 = bp_7 + '@8'
return C(b_8)
def A(ap_12):
a_13 = ap_12 + '@13'
a_14 = B(a_13) + '@14'
a_14 = a_14 + '@15'
a_13 = a_14 + '@16'
a_14 = B(a_13) + '@17'
a_14 = B(a_13) + '@18'
```
Notice how each variable is named after the line on which it is defined, and how each value is annotated with the line on which it was assigned or changed.
Let us run this under the trace.
```
with Tracer('____') as tracer:
A(tracer.my_input)
for t in tracer.trace:
print(t[0], "%d:%s" % (t[2].line_no, t[2].method), t[3])
```
Each variable is first referenced as follows:
* `cp_1` -- *call* `1:C`
* `c_2` -- *line* `3:C` (but the previous event was *line* `2:C`)
* `c_3` -- *line* `4:C` (but the previous event was *line* `3:C`)
* `bp_7` -- *call* `7:B`
* `b_8` -- *line* `9:B` (but the previous event was *line* `8:B`)
* `ap_12` -- *call* `12:A`
* `a_13` -- *line* `14:A` (but the previous event was *line* `13:A`)
* `a_14` -- *line* `15:A` (the previous event was *return* `9:B`. However, the previous event in A was *line* `14:A`)
* reassign `a_14` at *15* -- *line* `16:A` (the previous event was *line* `15:A`)
* reassign `a_13` at *16* -- *line* `17:A` (the previous event was *line* `16:A`)
* reassign `a_14` at *17* -- *return* `17:A` (the previous event in A was *line* `17:A`)
* reassign `a_14` at *18* -- *return* `18:A` (the previous event in A was *line* `18:A`)
So, our observation is that for a *call* event, the current location is the right one for any new variables being defined. On the other hand, if a variable is referenced for the first time during a *line* event (or is reassigned a new value), then the right location to consider is the previous location *in the same method invocation*. Next, let us see how we can incorporate this information into variable naming.
Next, we need a way to track the individual method calls as they are being made. For this we define the class `CallStack`. Each method invocation gets a separate identifier, and when the method call is over, the identifier is reset.
### CallStack
```
class CallStack:
def __init__(self, **kwargs):
self.options(kwargs)
self.method_id = (START_SYMBOL, 0)
self.method_register = 0
self.mstack = [self.method_id]
def enter(self, method):
self.method_register += 1
self.method_id = (method, self.method_register)
self.log('call', "%s%s" % (self.indent(), str(self)))
self.mstack.append(self.method_id)
def leave(self):
self.mstack.pop()
self.log('return', "%s%s" % (self.indent(), str(self)))
self.method_id = self.mstack[-1]
```
A few extra methods to make life simpler.
```
class CallStack(CallStack):
def options(self, kwargs):
self.log = log_event if kwargs.get('log') else lambda _evt, _var: None
def indent(self):
return len(self.mstack) * "\t"
def at(self, n):
return self.mstack[n]
def __len__(self):
        return len(self.mstack) - 1
def __str__(self):
return "%s:%d" % self.method_id
def __repr__(self):
return repr(self.method_id)
```
We also define a convenience method to display a given stack.
```
def display_stack(istack):
def stack_to_tree(stack):
current, *rest = stack
if not rest:
return (repr(current), [])
return (repr(current), [stack_to_tree(rest)])
display_tree(stack_to_tree(istack.mstack), graph_attr=lr_graph)
```
Here is how we can use the `CallStack`.
```
cs = CallStack()
display_stack(cs)
cs
cs.enter('hello')
display_stack(cs)
cs
cs.enter('world')
display_stack(cs)
cs
cs.leave()
display_stack(cs)
cs
cs.enter('world')
display_stack(cs)
cs
cs.leave()
display_stack(cs)
cs
```
In order to account for variable reassignments, we need a more intelligent data structure than a plain dictionary for storing variables. We first define a simple interface `Vars`. It acts as a container for variables, and is what we will use for `my_assignments`.
### Vars
The `Vars` stores references to variables as they occur during parsing in its internal dictionary `defs`. We initialize the dictionary with the original string.
```
class Vars:
def __init__(self, original):
self.defs = {}
self.my_input = original
```
The dictionary needs two methods: `update()` that takes a set of key-value pairs to update itself, and `_set_kv()` that updates a particular key-value pair.
```
class Vars(Vars):
def _set_kv(self, k, v):
self.defs[k] = v
def __setitem__(self, k, v):
self._set_kv(k, v)
def update(self, v):
for k, v in v.items():
self._set_kv(k, v)
```
The `Vars` class is a proxy for its internal dictionary. For example, here is how one can use it.
```
v = Vars('')
v.defs
v['x'] = 'X'
v.defs
v.update({'x': 'x', 'y': 'y'})
v.defs
```
### AssignmentVars
We now extend the simple `Vars` to account for variable reassignments. For this, we define `AssignmentVars`.
The idea for detecting reassignments and renaming variables is as follows: we keep track of previous reassignments of each variable using `accessed_seq_var`, which maps a variable to its latest reassignment count. The `new_vars` set contains all new variables that were added in the current iteration.
```
class AssignmentVars(Vars):
def __init__(self, original):
super().__init__(original)
self.accessed_seq_var = {}
self.var_def_lines = {}
self.current_event = None
self.new_vars = set()
self.method_init()
```
The `method_init()` method takes care of keeping track of method invocations using records saved in the `call_stack`. `event_locations` is for keeping track of the locations accessed *within this method*. This is used for line number tracking of variable definitions.
```
class AssignmentVars(AssignmentVars):
def method_init(self):
self.call_stack = CallStack()
self.event_locations = {self.call_stack.method_id: []}
```
The `update()` is now modified to track the changed line numbers if any, using `var_location_register()`. We reinitialize the `new_vars` after use for the next event.
```
class AssignmentVars(AssignmentVars):
def update(self, v):
for k, v in v.items():
self._set_kv(k, v)
self.var_location_register(self.new_vars)
self.new_vars = set()
```
The variable name now incorporates an index of how many reassignments it has gone through, effectively making each reassignment a unique variable.
```
class AssignmentVars(AssignmentVars):
def var_name(self, var):
return (var, self.accessed_seq_var[var])
```
While storing variables, we need to first check whether it was previously known. If it is not, we need to initialize the rename count. This is accomplished by `var_access`.
```
class AssignmentVars(AssignmentVars):
def var_access(self, var):
if var not in self.accessed_seq_var:
self.accessed_seq_var[var] = 0
return self.var_name(var)
```
During a variable reassignment, we update the `accessed_seq_var` to reflect the new count.
```
class AssignmentVars(AssignmentVars):
def var_assign(self, var):
self.accessed_seq_var[var] += 1
self.new_vars.add(self.var_name(var))
return self.var_name(var)
```
These methods can be used as follows
```
sav = AssignmentVars('')
sav.defs
sav.var_access('v1')
sav.var_assign('v1')
```
Assigning to it again increments the counter.
```
sav.var_assign('v1')
```
The core of the logic is in `_set_kv()`. When a variable is being assigned, we get the sequenced variable name `s_var`. If the sequenced variable name was previously unknown in `defs`, then we have no further concerns. We add the sequenced variable to `defs`.
If the variable is previously known, this indicates a possible reassignment. In this case, we look at the value the variable currently holds and check whether it has changed. If it has not, it is not a reassignment, and nothing needs to be done.
If the value has changed, it is a reassignment. We first increment the variable's usage sequence using `var_assign()`, retrieve the new name, and store the value under the new name in `defs`.
```
class AssignmentVars(AssignmentVars):
def _set_kv(self, var, val):
s_var = self.var_access(var)
if s_var in self.defs and self.defs[s_var] == val:
return
self.defs[self.var_assign(var)] = val
```
Here is how it can be used. Assigning a variable the first time initializes its counter.
```
sav = AssignmentVars('')
sav['x'] = 'X'
sav.defs
```
If the variable is assigned again with the same value, it is probably not a reassignment.
```
sav['x'] = 'X'
sav.defs
```
However, if the value changed, it is a reassignment.
```
sav['x'] = 'Y'
sav.defs
```
There is a subtlety here. It is possible for a child method to be called from the middle of a parent method, and for both to use the same variable name with different values. In this case, when the child returns, the parent will have the old variable with its old value in context. With our implementation, we treat this as a reassignment. This is acceptable because adding a spurious reassignment is harmless, but missing a real one is not. We will discuss later how this can be avoided.
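Here is a minimal sketch of that situation (the variable name and values are made up for illustration): both the parent and a child method assign to a variable named `x`, and when the parent's old value is seen again after the child returns, it is recorded as one more reassignment.
```
sav = AssignmentVars('')
sav['x'] = 'parent value'   # the parent assigns x
sav['x'] = 'child value'    # a child called from the parent assigns its own x
sav['x'] = 'parent value'   # back in the parent: the old value appears again
sav.defs                    # three entries for x -- one of them spurious, but harmless
```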
We also define the bookkeeping methods `register_event()`, `method_enter()`, and `method_exit()`, which are responsible for keeping track of the method stack. The basic idea is that each `method_enter()` represents a new method invocation. Hence, it merits a new method id, which is generated from `method_register` and saved in `method_id`. Since this is a new method, the method stack is extended by one element with this id. In the case of `method_exit()`, we pop the method stack and reset the current `method_id` to the one below it.
```
class AssignmentVars(AssignmentVars):
def method_enter(self, cxt, my_vars):
self.current_event = 'call'
self.call_stack.enter(cxt.method)
self.event_locations[self.call_stack.method_id] = []
self.register_event(cxt)
self.update(my_vars)
def method_exit(self, cxt, my_vars):
self.current_event = 'return'
self.register_event(cxt)
self.update(my_vars)
self.call_stack.leave()
def method_statement(self, cxt, my_vars):
self.current_event = 'line'
self.register_event(cxt)
self.update(my_vars)
```
For each of the method events, we also register the event using `register_event()` which keeps track of the line numbers that were referenced in *this* method.
```
class AssignmentVars(AssignmentVars):
def register_event(self, cxt):
self.event_locations[self.call_stack.method_id].append(cxt.line_no)
```
The `var_location_register()` keeps the locations of newly added variables. The definition location of variables in a `call` is the *current* location. However, for a `line`, it would be the previous event in the current method.
```
class AssignmentVars(AssignmentVars):
def var_location_register(self, my_vars):
def loc(mid):
if self.current_event == 'call':
return self.event_locations[mid][-1]
elif self.current_event == 'line':
return self.event_locations[mid][-2]
elif self.current_event == 'return':
return self.event_locations[mid][-2]
else:
assert False
my_loc = loc(self.call_stack.method_id)
for var in my_vars:
self.var_def_lines[var] = my_loc
```
We define `defined_vars()` which returns the names of variables annotated with the line numbers as below.
```
class AssignmentVars(AssignmentVars):
def defined_vars(self, formatted=True):
def fmt(k):
v = (k[0], self.var_def_lines[k])
return "%s@%s" % v if formatted else v
return [(fmt(k), v) for k, v in self.defs.items()]
```
Similar to `defined_vars()`, we define `seq_vars()`, which annotates each variable with the number of times it was assigned.
```
class AssignmentVars(AssignmentVars):
def seq_vars(self, formatted=True):
def fmt(k):
v = (k[0], self.var_def_lines[k], k[1])
return "%s@%s:%s" % v if formatted else v
return {fmt(k): v for k, v in self.defs.items()}
```
### AssignmentTracker
The `AssignmentTracker` keeps the assignment definitions using the `AssignmentVars` we defined previously.
```
class AssignmentTracker(DefineTracker):
def __init__(self, my_input, trace, **kwargs):
self.options(kwargs)
self.my_input = my_input
self.my_assignments = self.create_assignments(my_input)
self.trace = trace
self.process()
def create_assignments(self, *args):
return AssignmentVars(*args)
```
To fine-tune the process, we define an optional parameter called `track_return`. When tracing a method return, Python passes the return value as the event argument. If `track_return` is set, we capture this value as a variable.
* `track_return` -- if true, add a *virtual variable* to the Vars representing the return value
```
class AssignmentTracker(AssignmentTracker):
def options(self, kwargs):
self.track_return = kwargs.get('track_return', False)
super().options(kwargs)
```
There can be different kinds of events during a trace, including `call` when a function is entered, `return` when the function returns, `exception` when an exception is thrown, and `line` when a statement is executed.
The previous `Tracker` was too simplistic in that it did not distinguish between the different events. We rectify that and define `on_call()`, `on_return()`, and `on_line()` respectively that gets called on their corresponding events.
Note that `on_line()` is also called from `on_return()`. The reason is that Python invokes the trace function *before* the corresponding line is executed. Hence, `on_return()` is effectively called with the bindings produced by the execution of the previous statement in the environment; our processing is done on values that were bound by that previous statement. Calling `on_line()` here is therefore appropriate, as it gives the event handler a chance to work on the previous binding.
```
class AssignmentTracker(AssignmentTracker):
def on_call(self, arg, cxt, my_vars):
my_vars = cxt.parameters(my_vars)
self.my_assignments.method_enter(cxt, self.fragments(my_vars))
def on_line(self, arg, cxt, my_vars):
self.my_assignments.method_statement(cxt, self.fragments(my_vars))
def on_return(self, arg, cxt, my_vars):
self.on_line(arg, cxt, my_vars)
my_vars = {'<-%s' % cxt.method: arg} if self.track_return else {}
self.my_assignments.method_exit(cxt, my_vars)
    def on_exception(self, arg, cxt, my_vars):
return
def track_event(self, event, arg, cxt, my_vars):
self.current_event = event
dispatch = {
'call': self.on_call,
'return': self.on_return,
'line': self.on_line,
'exception': self.on_exception
}
dispatch[event](arg, cxt, my_vars)
```
We can now use `AssignmentTracker` to track the different variables. To verify that our variable line number inference works, we recover definitions from the functions A, B and C (with data annotations removed so that the input fragments are correctly identified).
```
def C(cp_1):
c_2 = cp_1
c_3 = c_2
return c_3
def B(bp_7):
b_8 = bp_7
return C(b_8)
def A(ap_12):
a_13 = ap_12
a_14 = B(a_13)
a_14 = a_14
a_13 = a_14
a_14 = B(a_13)
a_14 = B(a_14)[3:]
```
Running `A()` with sufficient input.
```
with Tracer('---xxx') as tracer:
A(tracer.my_input)
tracker = AssignmentTracker(tracer.my_input, tracer.trace, log=True)
for k, v in tracker.my_assignments.seq_vars().items():
print(k, '=', repr(v))
print()
for k, v in tracker.my_assignments.defined_vars(formatted=True):
print(k, '=', repr(v))
```
As can be seen, the line numbers are now correctly identified for each variable.
Let us try retrieving the assignments for a real world example.
```
traces = []
for inputstr in URLS_X:
clear_cache()
with Tracer(inputstr, files=['urllib/parse.py']) as tracer:
urlparse(tracer.my_input)
traces.append((tracer.my_input, tracer.trace))
tracker = AssignmentTracker(tracer.my_input, tracer.trace, log=True)
for k, v in tracker.my_assignments.defined_vars():
print(k, '=', repr(v))
print()
```
The line numbers of variables can be verified from the source code of [urllib/parse.py](https://github.com/python/cpython/blob/3.6/Lib/urllib/parse.py).
### Recovering a Derivation Tree
Does handling variable reassignments help with our URL examples? We look at these next.
```
class TreeMiner(TreeMiner):
def get_derivation_tree(self):
tree = (START_SYMBOL, [(self.my_input, [])])
for var, value in self.my_assignments:
self.log(0, "%s=%s" % (var, repr(value)))
self.apply_new_definition(tree, var, value)
return tree
```
#### Example 1: Recovering URL Derivation Tree
First, we obtain the derivation tree of URL 1.
##### URL 1 derivation tree
```
clear_cache()
with Tracer(URLS_X[0], files=['urllib/parse.py']) as tracer:
urlparse(tracer.my_input)
sm = AssignmentTracker(tracer.my_input, tracer.trace)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars())
display_tree(dt.tree)
```
Next, we obtain the derivation tree of URL 4
##### URL 4 derivation tree
```
clear_cache()
with Tracer(URLS_X[-1], files=['urllib/parse.py']) as tracer:
urlparse(tracer.my_input)
sm = AssignmentTracker(tracer.my_input, tracer.trace)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars())
display_tree(dt.tree)
```
The derivation trees seem to belong to the same grammar. Hence, we obtain the grammar for the complete set. First, we update `update_grammar()` to use the `AssignmentTracker`.
### Recover Grammar
```
class GrammarMiner(GrammarMiner):
def update_grammar(self, inputstr, trace):
at = self.create_tracker(inputstr, trace)
dt = self.create_tree_miner(inputstr, at.my_assignments.defined_vars())
self.add_tree(dt)
return self.grammar
def create_tracker(self, *args):
return AssignmentTracker(*args)
def create_tree_miner(self, *args):
return TreeMiner(*args)
```
Next, we use the modified `recover_grammar()` on derivation trees obtained from URLs.
```
url_grammar = recover_grammar(url_parse, URLS_X, files=['urllib/parse.py'])
```
The recovered grammar is below.
```
syntax_diagram(url_grammar)
```
Let us fuzz a little to see if the produced values are sane.
```
f = GrammarFuzzer(url_grammar)
for _ in range(10):
print(f.fuzz())
```
Our modifications do seem to help. Next, we check whether we can still retrieve the grammar for the inventory.
#### Example 2: Recovering Inventory Grammar
```
inventory_grammar = recover_grammar(process_vehicle, VEHICLES)
syntax_diagram(inventory_grammar)
```
Using fuzzing to produce values from the grammar.
```
f = GrammarFuzzer(inventory_grammar)
for _ in range(10):
print(f.fuzz())
```
### Problems with the Grammar Miner with Reassignment
One of the problems with our grammar miner is that it does not yet account for the current context. That is, during replacement, a variable can replace tokens that it never had access to (and hence is not actually a fragment of). Consider this example.
```
with Tracer(INVENTORY) as tracer:
process_inventory(tracer.my_input)
sm = AssignmentTracker(tracer.my_input, tracer.trace)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars())
display_tree(dt.tree, graph_attr=lr_graph)
```
As can be seen, the derivation tree obtained is not quite what we expected. The issue is easily seen if we enable logging in the `TreeMiner`.
```
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars(), log=True)
```
Look at the last statement. We have a value `1999,car,` where only the `year` got replaced; we no longer have a `'car'` variable to continue the replacement here. This happens because the `'car'` value in `'1999,car,Chevy,Venture'` is not treated as a new value: the value `'car'` had already occurred for the `'vehicle'` variable in the exact same location for a *different* method call (the one for `'2000,car,Mercury,Cougar'`).
## A Grammar Miner with Scope
We need to incorporate inspection of the variables in the current context. We already have a stack of method calls so that we can obtain the current method at any point. We need to do the same for variables.
For that, we extend the `CallStack` to a new class `InputStack` which holds the method invoked as well as the parameters observed. It is essentially the record of activation of the method. We start with the original input at the base of the stack, and for each new method-call, we push the parameters of that call into the stack as a new record.
### Input Stack
```
class InputStack(CallStack):
def __init__(self, i, fragment_len=FRAGMENT_LEN):
self.inputs = [{START_SYMBOL: i}]
self.fragment_len = fragment_len
super().__init__()
```
In order to check whether a particular value should be saved, we define `in_current_record()`, which checks only the variables in the current scope for inclusion (rather than the original input string).
```
class InputStack(InputStack):
def in_current_record(self, val):
return any(val in var for var in self.inputs[-1].values())
my_istack = InputStack('hello my world')
my_istack.in_current_record('hello')
my_istack.in_current_record('bye')
my_istack.inputs.append({'greeting': 'hello', 'location': 'world'})
my_istack.in_current_record('hello')
my_istack.in_current_record('my')
```
We define the method `ignored()`, which returns `True` if either the value is not a string, or its length is less than the defined `fragment_len`.
```
class InputStack(InputStack):
def ignored(self, val):
return not (isinstance(val, str) and len(val) >= self.fragment_len)
my_istack = InputStack('hello world')
my_istack.ignored(1)
my_istack.ignored('a')
my_istack.ignored('help')
```
We can now define the `in_scope()` method that checks whether the variable needs to be ignored, and if it is not to be ignored, whether the variable value is present in the current scope.
```
class InputStack(InputStack):
def in_scope(self, k, val):
if self.ignored(val):
return False
return self.in_current_record(val)
```
Finally, we update `enter()` to push the relevant variables of the current context onto the stack.
```
class InputStack(InputStack):
def enter(self, method, inputs):
my_inputs = {k: v for k, v in inputs.items() if self.in_scope(k, v)}
self.inputs.append(my_inputs)
super().enter(method)
```
When a method returns, we also need a corresponding `leave()` to pop out the inputs and unwind the stack.
```
class InputStack(InputStack):
def leave(self):
self.inputs.pop()
super().leave()
```
### ScopedVars
We need to update our `AssignmentVars` to include information about the scope in which each variable was defined. We start by updating `method_init()`.
```
class ScopedVars(AssignmentVars):
def method_init(self):
self.call_stack = self.create_call_stack(self.my_input)
self.event_locations = {self.call_stack.method_id: []}
def create_call_stack(self, i):
return InputStack(i)
```
Similarly, the `method_enter()` now initializes the `accessed_seq_var` for the current method call.
```
class ScopedVars(ScopedVars):
def method_enter(self, cxt, my_vars):
self.current_event = 'call'
self.call_stack.enter(cxt.method, my_vars)
self.accessed_seq_var[self.call_stack.method_id] = {}
self.event_locations[self.call_stack.method_id] = []
self.register_event(cxt)
self.update(my_vars)
```
The `update()` method now saves the context in which the value is defined. In the case of a parameter to a function, the context should be the context in which the function was called. On the other hand, a value defined during a statement execution would have the current context.
Further, we annotate the value rather than the key because we do not want to duplicate variables when parameters remain in context on the next line: they will have the same value, but a different context, because they now appear during a statement execution.
```
class ScopedVars(ScopedVars):
def update(self, v):
if self.current_event == 'call':
context = -2
elif self.current_event == 'line':
context = -1
else:
context = -1
for k, v in v.items():
self._set_kv(k, (v, self.call_stack.at(context)))
self.var_location_register(self.new_vars)
self.new_vars = set()
```
We also need to save the current method invocation so as to determine which variables are in scope. This information is now incorporated in the variable name as `accessed_seq_var[method_id][var]`.
```
class ScopedVars(ScopedVars):
def var_name(self, var):
return (var, self.call_stack.method_id,
self.accessed_seq_var[self.call_stack.method_id][var])
```
As before, `var_access` simply initializes the corresponding counter, this time in the context of `method_id`.
```
class ScopedVars(ScopedVars):
def var_access(self, var):
if var not in self.accessed_seq_var[self.call_stack.method_id]:
self.accessed_seq_var[self.call_stack.method_id][var] = 0
return self.var_name(var)
```
During a variable reassignment, we update the `accessed_seq_var` to reflect the new count.
```
class ScopedVars(ScopedVars):
def var_assign(self, var):
self.accessed_seq_var[self.call_stack.method_id][var] += 1
self.new_vars.add(self.var_name(var))
return self.var_name(var)
```
We now update `defined_vars()` to account for the new information.
```
class ScopedVars(ScopedVars):
def defined_vars(self, formatted=True):
def fmt(k):
method, i = k[1]
v = (method, i, k[0], self.var_def_lines[k])
return "%s[%d]:%s@%s" % v if formatted else v
return [(fmt(k), v) for k, v in self.defs.items()]
```
Updating `seq_vars()` to account for new information.
```
class ScopedVars(ScopedVars):
def seq_vars(self, formatted=True):
def fmt(k):
method, i = k[1]
v = (method, i, k[0], self.var_def_lines[k], k[2])
return "%s[%d]:%s@%s:%s" % v if formatted else v
return {fmt(k): v for k, v in self.defs.items()}
```
### Scope Tracker
With the `InputStack` and `Vars` defined, we can now define the `ScopeTracker`. The `ScopeTracker` only saves variables if the value is present in the current scope.
```
class ScopeTracker(AssignmentTracker):
def __init__(self, my_input, trace, **kwargs):
self.current_event = None
super().__init__(my_input, trace, **kwargs)
def create_assignments(self, *args):
return ScopedVars(*args)
```
We define a wrapper for checking whether a variable is present in the scope.
```
class ScopeTracker(ScopeTracker):
def is_input_fragment(self, var, value):
return self.my_assignments.call_stack.in_scope(var, value)
```
We can use the `ScopeTracker` as follows
```
vehicle_traces = []
with Tracer(INVENTORY) as tracer:
process_inventory(tracer.my_input)
sm = ScopeTracker(tracer.my_input, tracer.trace)
vehicle_traces.append((tracer.my_input, sm))
for k, v in sm.my_assignments.seq_vars().items():
print(k, '=', repr(v))
```
### Recovering a Derivation Tree
The main difference in `apply_new_definition()` is that we add a second condition that checks for scope. In particular, variables are only allowed to replace portions of string fragments that were in scope.
The variable scope is indicated by `scope`. However, merely accounting for scope is not sufficient. For example, consider the fragment below.
```python
def my_fn(stringval):
partA, partB = stringval.split('/')
return partA, partB
svalue = ...
v1, v2 = my_fn(svalue)
```
Here, `v1` and `v2` get their values from a previous function call, not from their current context. That is, we have to provide an exception for cases where an internal child method call may have generated a large fragment, as shown above. To account for that, we define `mseq()`, which retrieves the method call sequence. In the above case, the `mseq()` of the internal child method call would be larger than the current `mseq()`; if so, we allow the replacement to proceed.
```
class ScopeTreeMiner(TreeMiner):
def mseq(self, key):
method, seq, var, lno = key
return seq
```
The `nt_var()` method needs to take the tuple and generate a non-terminal symbol out of it. We skip the method sequence because it is not relevant for the grammar.
```
class ScopeTreeMiner(ScopeTreeMiner):
def nt_var(self, key):
method, seq, var, lno = key
return to_nonterminal("%s@%d:%s" % (method, lno, var))
```
We now redefine `apply_new_definition()` to account for context and scope. In particular, a variable is allowed to replace a part of a value only if the variable is in *scope* -- that is, its scope (the method sequence number of its calling context if it is a parameter, or of the current context if it is defined in a statement) is the same as the value's method sequence number. An exception is made when the value's method sequence number is greater than the variable's method sequence number: in that case, the value may have come from an internal call, and we allow the replacement to proceed.
```
class ScopeTreeMiner(ScopeTreeMiner):
def partition(self, part, value):
return value.partition(part)
def partition_by_part(self, pair, value):
(nt_var, nt_seq), (v, v_scope) = pair
prefix_k_suffix = [
(nt_var, [(v, [], nt_seq)]) if i == 1 else (e, [])
for i, e in enumerate(self.partition(v, value))
if e]
return prefix_k_suffix
def insert_into_tree(self, my_tree, pair):
var, values, my_scope = my_tree
(nt_var, nt_seq), (v, v_scope) = pair
applied = False
for i, value_ in enumerate(values):
key, arr, scope = value_
self.log(2, "-> [%d] %s" % (i, repr(value_)))
if is_nonterminal(key):
applied = self.insert_into_tree(value_, pair)
if applied:
break
else:
if v_scope != scope:
if nt_seq > scope:
continue
if not v or not self.string_part_of_value(v, key):
continue
prefix_k_suffix = [(k, children, scope) for k, children
in self.partition_by_part(pair, key)]
del values[i]
for j, rep in enumerate(prefix_k_suffix):
values.insert(j + i, rep)
applied = True
self.log(2, " > %s" % (repr([i[0] for i in prefix_k_suffix])))
break
return applied
```
The `apply_new_definition()` is now modified to carry additional contextual information `mseq`.
```
class ScopeTreeMiner(ScopeTreeMiner):
def apply_new_definition(self, tree, var, value_):
nt_var = self.nt_var(var)
seq = self.mseq(var)
val, (smethod, mseq) = value_
return self.insert_into_tree(tree, ((nt_var, seq), (val, mseq)))
```
We also modify `get_derivation_tree()` so that the initial node carries the context.
```
class ScopeTreeMiner(ScopeTreeMiner):
def get_derivation_tree(self):
tree = (START_SYMBOL, [(self.my_input, [], 0)], 0)
for var, value in self.my_assignments:
self.log(0, "%s=%s" % (var, repr(value)))
self.apply_new_definition(tree, var, value)
return tree
```
#### Example 1: Recovering URL Parse Tree
We verify that our URL parse tree recovery still works as expected.
```
url_dts = []
for inputstr in URLS_X:
clear_cache()
with Tracer(inputstr, files=['urllib/parse.py']) as tracer:
urlparse(tracer.my_input)
sm = ScopeTracker(tracer.my_input, tracer.trace)
for k, v in sm.my_assignments.defined_vars(formatted=False):
print(k, '=', repr(v))
dt = ScopeTreeMiner(
tracer.my_input,
sm.my_assignments.defined_vars(
formatted=False))
display_tree(dt.tree, graph_attr=lr_graph)
url_dts.append(dt)
```
#### Example 2: Recovering Inventory Parse Tree
Next, we look at recovering the parse tree from `process_inventory()` which failed last time.
```
with Tracer(INVENTORY) as tracer:
process_inventory(tracer.my_input)
sm = ScopeTracker(tracer.my_input, tracer.trace)
for k, v in sm.my_assignments.defined_vars():
print(k, '=', repr(v))
inventory_dt = ScopeTreeMiner(
tracer.my_input,
sm.my_assignments.defined_vars(
formatted=False))
display_tree(inventory_dt.tree, graph_attr=lr_graph)
```
The recovered parse tree seems reasonable.
One thing to notice from Example 2 is that the three subtrees -- `vehicle[2:1]`, `vehicle[4:1]` and `vehicle[6:1]` -- are quite alike. Next, we examine how this can be exploited to generate a grammar directly.
### Grammar Mining
The `tree_to_grammar()` is now redefined as follows, to account for the extra scope in nodes.
```
class ScopedGrammarMiner(GrammarMiner):
def tree_to_grammar(self, tree):
key, children, scope = tree
one_alt = [ckey for ckey, gchildren, cscope in children if ckey != key]
hsh = {key: [one_alt] if one_alt else []}
for child in children:
(ckey, _gc, _cscope) = child
if not is_nonterminal(ckey):
continue
chsh = self.tree_to_grammar(child)
for k in chsh:
if k not in hsh:
hsh[k] = chsh[k]
else:
hsh[k].extend(chsh[k])
return hsh
```
The grammar is in canonical form, which needs to be massaged to display. First, the recovered grammar for inventory.
```
si = ScopedGrammarMiner()
si.add_tree(inventory_dt)
syntax_diagram(readable(si.grammar))
```
The recovered grammar for URLs.
```
su = ScopedGrammarMiner()
for url_dt in url_dts:
su.add_tree(url_dt)
syntax_diagram(readable(su.grammar))
```
One might notice that the grammar is not entirely human readable, with a number of single token definitions.
Hence, the last piece of the puzzle is the cleanup method `clean_grammar()`, which cleans up such definitions. The idea is to look for single token definitions such that a key is defined exactly by another key (single alternative, single token, nonterminal).
```
class ScopedGrammarMiner(ScopedGrammarMiner):
def get_replacements(self, grammar):
replacements = {}
for k in grammar:
if k == START_SYMBOL:
continue
alts = grammar[k]
if len(set([str(i) for i in alts])) != 1:
continue
rule = alts[0]
if len(rule) != 1:
continue
tok = rule[0]
if not is_nonterminal(tok):
continue
replacements[k] = tok
return replacements
```
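To illustrate, here is `get_replacements()` applied to a small hand-written canonical grammar (hypothetical, for illustration only). The key `<b>` is defined exactly as `<c>`, so it becomes a candidate for replacement:
```
toy = ScopedGrammarMiner()
toy.grammar = {
    '<start>': [['<a>']],
    '<a>': [['<b>', 'x']],
    '<b>': [['<c>']],
    '<c>': [['y']]
}
toy.get_replacements(toy.grammar)
# {'<b>': '<c>'}
```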
Once we have such a list, we iteratively replace the original key, wherever it is used, with the token we found earlier, and repeat until no such key is left.
```
class ScopedGrammarMiner(ScopedGrammarMiner):
def clean_grammar(self):
replacements = self.get_replacements(self.grammar)
while True:
changed = set()
for k in self.grammar:
if k in replacements:
continue
new_alts = []
for alt in self.grammar[k]:
new_alt = []
for t in alt:
if t in replacements:
new_alt.append(replacements[t])
changed.add(t)
else:
new_alt.append(t)
new_alts.append(new_alt)
self.grammar[k] = new_alts
if not changed:
break
for k in changed:
self.grammar.pop(k, None)
return readable(self.grammar)
```
The `clean_grammar()` is used as follows:
```
si = ScopedGrammarMiner()
si.add_tree(inventory_dt)
syntax_diagram(readable(si.clean_grammar()))
```
We update the `update_grammar()` to use the right tracker and miner.
```
class ScopedGrammarMiner(ScopedGrammarMiner):
def update_grammar(self, inputstr, trace):
at = self.create_tracker(inputstr, trace)
dt = self.create_tree_miner(
inputstr, at.my_assignments.defined_vars(
formatted=False))
self.add_tree(dt)
return self.grammar
def create_tracker(self, *args):
return ScopeTracker(*args)
def create_tree_miner(self, *args):
return ScopeTreeMiner(*args)
```
The `recover_grammar()` uses the right miner, and returns a cleaned grammar.
```
def recover_grammar(fn, inputs, **kwargs):
miner = ScopedGrammarMiner()
for inputstr in inputs:
with Tracer(inputstr, **kwargs) as tracer:
fn(tracer.my_input)
miner.update_grammar(tracer.my_input, tracer.trace)
return readable(miner.clean_grammar())
url_grammar = recover_grammar(url_parse, URLS_X, files=['urllib/parse.py'])
syntax_diagram(url_grammar)
f = GrammarFuzzer(url_grammar)
for _ in range(10):
print(f.fuzz())
inventory_grammar = recover_grammar(process_inventory, [INVENTORY])
syntax_diagram(inventory_grammar)
f = GrammarFuzzer(inventory_grammar)
for _ in range(10):
print(f.fuzz())
```
We see how tracking scope helps us to extract an even more precise grammar.
Notice that we use *string* inclusion testing as a way of determining whether a particular string fragment came from the original input string. While this may seem rather error-prone compared to dynamic tainting, we note that numerous tracing tools such as `dtrace()` and `ptrace()` allow one to obtain the information we seek directly from the execution of binaries on different platforms. However, methods for obtaining dynamic taints almost always involve instrumenting the binaries before they can be used. Hence, this method of string inclusion can be applied more generally than dynamic tainting approaches. Further, dynamic taints are often lost due to implicit transmission, or at the boundary between *Python* and *C* code. String inclusion has no such problems. Hence, our approach can often obtain better results than relying on dynamic tainting.
## Synopsis
This chapter provides a number of classes to mine input grammars from existing programs. The function `recover_grammar()` could be the easiest to use. It takes a function and a set of inputs, and returns a grammar that describes its input language.
We apply `recover_grammar()` on a `url_parse()` function that takes and decomposes URLs:
```
url_parse('https://www.fuzzingbook.org/')
URLS
```
We extract the input grammar for `url_parse()` using `recover_grammar()`:
```
grammar = recover_grammar(url_parse, URLS)
grammar
```
The names of nonterminals are a bit technical; but the grammar nicely represents the structure of the input; for instance, the different schemes (`"http"`, `"https"`) are all identified.
The grammar can be immediately used for fuzzing, producing arbitrary combinations of input elements, which are all syntactically valid.
```
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
fuzzer = GrammarCoverageFuzzer(grammar)
[fuzzer.fuzz() for i in range(5)]
```
Being able to automatically extract a grammar and to use this grammar for fuzzing makes for very effective test generation with a minimum of manual work.
## Lessons Learned
* Given a set of sample inputs for a program, we can learn an input grammar by examining variable values during execution, provided the program relies on handwritten parsers.
* Simple string inclusion checks are sufficient to obtain reasonably accurate grammars from real world programs.
* The resulting grammars can be directly used for fuzzing, and can have a multiplier effect on any samples you have.
## Next Steps
* Learn how to use [information flow](InformationFlow.ipynb) to further improve mapping inputs to states.
## Background
Recovering the language from a _set of samples_ (i.e., not taking into account a possible program that might process them) is a well-researched topic. The excellent reference by Higuera \cite{higuera2010grammatical} covers all the classical approaches. The current state of the art in black-box grammar mining is described by Clark \cite{clark2013learning}.
Learning an input language from a _program_, with or without samples, is still an emerging topic, despite its potential for fuzzing. The pioneering work in this area was done by Lin et al. \cite{Lin2008}, who invented a way to retrieve the parse trees from top-down and bottom-up parsers. The approach described in this chapter is based directly on the AUTOGRAM work of Hoschele et al. \cite{Hoschele2017}.
## Exercises
### Exercise 1: Flattening complex objects
Our grammar miners only check for string fragments. However, programs often pass containers or custom objects containing input fragments. For example, consider this plausible modification of our inventory processor, where we use a custom object `Vehicle` to carry fragments.
```
class Vehicle:
def __init__(self, vehicle):
year, kind, company, model, *_ = vehicle.split(',')
self.year, self.kind, self.company, self.model = year, kind, company, model
def process_inventory(inventory):
res = []
for vehicle in inventory.split('\n'):
ret = process_vehicle(vehicle)
res.extend(ret)
return '\n'.join(res)
def process_vehicle(vehicle):
v = Vehicle(vehicle)
if v.kind == 'van':
return process_van(v)
elif v.kind == 'car':
return process_car(v)
else:
raise Exception('Invalid entry')
def process_van(vehicle):
res = [
"We have a %s %s van from %s vintage." % (vehicle.company,
vehicle.model, vehicle.year)
]
iyear = int(vehicle.year)
if iyear > 2010:
res.append("It is a recent model!")
else:
res.append("It is an old but reliable model!")
return res
def process_car(vehicle):
res = [
"We have a %s %s car from %s vintage." % (vehicle.company,
vehicle.model, vehicle.year)
]
iyear = int(vehicle.year)
if iyear > 2016:
res.append("It is a recent model!")
else:
res.append("It is an old but reliable model!")
return res
```
We recover the grammar as before.
```
vehicle_grammar = recover_grammar(
process_inventory,
[INVENTORY],
methods=INVENTORY_METHODS)
```
The new vehicle grammar is missing details, especially the different models and companies for vans and cars.
```
syntax_diagram(vehicle_grammar)
```
The problem is that, during tracing, we look specifically for string objects that contain fragments of the input string. Can you modify our grammar miner to correctly account for complex objects too?
**Solution.**
The problem can be understood if we execute the tracer under verbose logging.
```
with Tracer(INVENTORY, methods=INVENTORY_METHODS, log=True) as tracer:
process_inventory(tracer.my_input)
print()
print('Traced values:')
for t in tracer.trace:
print(t)
```
You can see that we lose track of string fragments as soon as they are incorporated into the `Vehicle` object. The way out is to trace these variables separately.
For that, we develop a `flatten()` function that, given a key and any complex object, returns a list of flattened (*key*, *value*) pairs corresponding to the object passed in.
The `MAX_DEPTH` parameter controls the maximum flattening depth.
```
MAX_DEPTH = 10
def set_flatten_depth(depth):
global MAX_DEPTH
MAX_DEPTH = depth
def flatten(key, val, depth=MAX_DEPTH):
    if depth <= 0:
        return [(key, val)]
    if isinstance(val, (int, float, complex, str, bytes, bytearray)):
        # Scalars (including strings) are returned as-is.
        return [(key, val)]
    elif isinstance(val, (set, frozenset, list, tuple, range)):
        # Flatten each element, prefixing its key with the container's key.
        values = [e for i, elt in enumerate(val)
                  for e in flatten(i, elt, depth - 1)]
        return [("%s.%s" % (key, k), v) for k, v in values]
    elif isinstance(val, dict):
        values = [e for k, elt in val.items() for e in flatten(k, elt, depth - 1)]
        return [("%s.%s" % (key, k), v) for k, v in values]
    elif hasattr(val, '__dict__'):
        # Custom objects: flatten their attributes.
        values = [e for k, elt in val.__dict__.items()
                  for e in flatten(k, elt, depth - 1)]
        return [("%s.%s" % (key, k), v) for k, v in values]
    else:
        return [(key, val)]
```
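A quick check of `flatten()` on a small made-up dictionary shows how the keys are composed:
```
flatten('point', {'x': 3, 'y': 4})
# [('point.x', 3), ('point.y', 4)]
```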
Next, we hook the `flatten()` into the `Context` class so that the parameters we obtain are flattened.
```
class Context(Context):
def extract_vars(self, frame):
vals = inspect.getargvalues(frame).locals
return {k1: v1 for k, v in vals.items() for k1, v1 in flatten(k, v)}
def parameters(self, all_vars):
def check_param(k):
return any(k.startswith(p) for p in self.parameter_names)
return {k: v for k, v in all_vars.items() if check_param(k)}
def qualified(self, all_vars):
return {"%s:%s" % (self.method, k): v for k, v in all_vars.items()}
```
With this change, we have the following trace output.
```
with Tracer(INVENTORY, methods=INVENTORY_METHODS, log=True) as tracer:
process_inventory(tracer.my_input)
print()
print('Traced values:')
for t in tracer.trace:
print(t)
```
Our change seems to have worked. Let us derive the grammar.
```
vehicle_grammar = recover_grammar(
process_inventory,
[INVENTORY],
methods=INVENTORY_METHODS)
syntax_diagram(vehicle_grammar)
```
The recovered grammar contains all the details that we were able to recover before.
### Exercise 2: Incorporating Taints from InformationFlow
We have been using *string inclusion* to check whether a particular fragment came from the input string. This is unsatisfactory, as it requires us to compromise on the size of the strings tracked, which is limited to those of at least `FRAGMENT_LEN` characters. Further, it is possible for a single method to process a string in which a fragment repeats but is part of different tokens. For example, an embedded comma in the CSV file would cause our parser to fail. One way to avoid this is to rely on *dynamic taints*, and check for taint inclusion rather than string inclusion.
The chapter on [information flow](InformationFlow.ipynb) details how to incorporate dynamic taints. Can you update our grammar miner based on scope to use *dynamic taints* instead?
**Solution.**
First, we import `ostr` to track the origins of string fragments.
```
from InformationFlow import ostr
```
Next, we define `is_fragment()` to verify that a fragment is from a given input string.
```
def is_fragment(fragment, original):
assert isinstance(original, ostr)
if not isinstance(fragment, ostr):
return False
return set(fragment.origin) <= set(original.origin)
```
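A quick sanity check of `is_fragment()` -- this assumes that slicing an `ostr` preserves its origin information, as described in the [information flow](InformationFlow.ipynb) chapter:
```
s = ostr('hello world')
is_fragment(s[6:], s)    # True: the slice carries origins from s
is_fragment('world', s)  # False: a plain str carries no origin information
```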
Now, all that remains is to hook the tainted fragment check to our grammar miner. This is accomplished by modifying `in_current_record()` and `ignored()` methods in the `InputStack`.
```
class TaintedInputStack(InputStack):
def in_current_record(self, val):
return any(is_fragment(val, var) for var in self.inputs[-1].values())
class TaintedInputStack(TaintedInputStack):
def ignored(self, val):
return not isinstance(val, ostr)
```
We then hook in the `TaintedInputStack` to the grammar mining infrastructure.
```
class TaintedScopedVars(ScopedVars):
def create_call_stack(self, i):
return TaintedInputStack(i)
class TaintedScopeTracker(ScopeTracker):
def create_assignments(self, *args):
return TaintedScopedVars(*args)
class TaintedScopeTreeMiner(ScopeTreeMiner):
def string_part_of_value(self, part, value):
return str(part.origin).strip('[]') in str(value.origin).strip('[]')
def partition(self, part, value):
begin = value.origin.index(part.origin[0])
end = value.origin.index(part.origin[-1])+1
return value[:begin], value[begin:end], value[end:]
```
<!-- **Advanced.** The *dynamic taint* approach is limited in that it can not observe implicit flows. For example, consider the fragment below.
```python
if my_fragment == 'begin':
return 'begin'
```
In this case, we lose track of the string `begin` that is returned even though it is dependent on the value of `my_fragment`. For such cases, a better (but costly) alternative is to rely on concolic execution and capture the constraints as it relates to input characters on each variable.
The chapter on [concolic fuzzing](ConcolicFuzzer.ipynb) details how to incorporate concolic symbolic execution to program execution. Can you update our grammar miner to use *concolic execution* to track taints instead?
-->
```
class TaintedScopedGrammarMiner(ScopedGrammarMiner):
def create_tracker(self, *args):
return TaintedScopeTracker(*args)
def create_tree_miner(self, *args):
return TaintedScopeTreeMiner(*args)
```
Finally, we define `recover_grammar_with_taints()` to recover the grammar.
```
def recover_grammar_with_taints(fn, inputs, **kwargs):
miner = TaintedScopedGrammarMiner()
for inputstr in inputs:
with Tracer(ostr(inputstr), **kwargs) as tracer:
fn(tracer.my_input)
miner.update_grammar(tracer.my_input, tracer.trace)
return readable(miner.clean_grammar())
```
Here is how one can use it.
```
inventory_grammar = recover_grammar_with_taints(
process_inventory, [INVENTORY],
methods=[
'process_inventory', 'process_vehicle', 'process_car', 'process_van'
])
syntax_diagram(inventory_grammar)
url_grammar = recover_grammar_with_taints(
url_parse, URLS_X + ['ftp://user4:pass1@host4/?key4=value3'],
methods=['urlsplit', 'urlparse', '_splitnetloc'])
syntax_diagram(url_grammar)
```
[])])
display_tree(derivation_tree)
model = 'E350'
derivation_tree = (START_SYMBOL, [('<vehicle>', [('<model>', [('1997', [])]),
(",van,Ford,E350", [])], [])])
display_tree(derivation_tree)
company = 'Ford'
derivation_tree = (START_SYMBOL, [('<vehicle>', [('<model>', [('1997', [])]),
(",van,", []),
('<company>', [('Ford', [])]),
(",E350", [])], [])])
display_tree(derivation_tree)
kind = 'van'
model = 'E350'
derivation_tree = (START_SYMBOL, [('<vehicle>', [('<model>', [('1997', [])]),
(",", []),
("<kind>", [('van', [])]),
(",", []),
('<company>', [('Ford', [])]),
(",", []),
("<model>", [('E350', [])])
], [])])
display_tree(derivation_tree)
class TreeMiner:
def __init__(self, my_input, my_assignments, **kwargs):
self.options(kwargs)
self.my_input = my_input
self.my_assignments = my_assignments
self.tree = self.get_derivation_tree()
def options(self, kwargs):
self.log = log_call if kwargs.get('log') else lambda _i, _v: None
def get_derivation_tree(self):
return (START_SYMBOL, [])
def log_call(indent, var):
print('\t' * indent, var)
def to_nonterminal(var):
return "<" + var.lower() + ">"
class TreeMiner(TreeMiner):
def string_part_of_value(self, part, value):
return (part in value)
class TreeMiner(TreeMiner):
def partition(self, part, value):
return value.partition(part)
class TreeMiner(TreeMiner):
def partition_by_part(self, pair, value):
k, part = pair
prefix_k_suffix = [
(k, [[part, []]]) if i == 1 else (e, [])
for i, e in enumerate(self.partition(part, value))
if e]
return prefix_k_suffix
class TreeMiner(TreeMiner):
def insert_into_tree(self, my_tree, pair):
var, values = my_tree
k, v = pair
self.log(1, "- Node: %s\t\t? (%s:%s)" % (var, k, repr(v)))
applied = False
for i, value_ in enumerate(values):
value, arr = value_
self.log(2, "-> [%d] %s" % (i, repr(value)))
if is_nonterminal(value):
applied = self.insert_into_tree(value_, pair)
if applied:
break
elif self.string_part_of_value(v, value):
prefix_k_suffix = self.partition_by_part(pair, value)
del values[i]
for j, rep in enumerate(prefix_k_suffix):
values.insert(j + i, rep)
applied = True
self.log(2, " > %s" % (repr([i[0] for i in prefix_k_suffix])))
break
else:
continue
return applied
tree = (START_SYMBOL, [("1997,van,Ford,E350", [])])
m = TreeMiner('', {}, log=True)
display_tree(tree)
v = m.insert_into_tree(tree, ('<vehicle>', "1997,van,Ford,E350"))
display_tree(tree)
v = m.insert_into_tree(tree, ('<model>', 'E350'))
display_tree((tree))
v = m.insert_into_tree(tree, ('<company>', 'Ford'))
display_tree(tree)
v = m.insert_into_tree(tree, ('<kind>', 'van'))
display_tree(tree)
v = m.insert_into_tree(tree, ('<year>', '1997'))
display_tree(tree)
class TreeMiner(TreeMiner):
def nt_var(self, var):
return var if is_nonterminal(var) else to_nonterminal(var)
class TreeMiner(TreeMiner):
def apply_new_definition(self, tree, var, value):
nt_var = self.nt_var(var)
return self.insert_into_tree(tree, (nt_var, value))
class TreeMiner(TreeMiner):
def get_derivation_tree(self):
tree = (START_SYMBOL, [(self.my_input, [])])
for var, value in self.my_assignments:
self.log(0, "%s=%s" % (var, repr(value)))
self.apply_new_definition(tree, var, value)
return tree
with Tracer(VEHICLES[0]) as tracer:
process_vehicle(tracer.my_input)
assignments = DefineTracker(tracer.my_input, tracer.trace).assignments()
dt = TreeMiner(tracer.my_input, assignments, log=True)
dt.tree
display_tree(TreeMiner(tracer.my_input, assignments).tree)
trees = []
for vehicle in VEHICLES:
print(vehicle)
with Tracer(vehicle) as tracer:
process_vehicle(tracer.my_input)
assignments = DefineTracker(tracer.my_input, tracer.trace).assignments()
trees.append((tracer.my_input, assignments))
for var, val in assignments:
print(var + " = " + repr(val))
print()
csv_dt = []
for inputstr, assignments in trees:
print(inputstr)
dt = TreeMiner(inputstr, assignments)
csv_dt.append(dt)
display_tree(dt.tree)
class GrammarMiner:
def __init__(self):
self.grammar = {}
class GrammarMiner(GrammarMiner):
def tree_to_grammar(self, tree):
node, children = tree
one_alt = [ck for ck, gc in children]
hsh = {node: [one_alt] if one_alt else []}
for child in children:
if not is_nonterminal(child[0]):
continue
chsh = self.tree_to_grammar(child)
for k in chsh:
if k not in hsh:
hsh[k] = chsh[k]
else:
hsh[k].extend(chsh[k])
return hsh
gm = GrammarMiner()
gm.tree_to_grammar(csv_dt[0].tree)
def readable(grammar):
def readable_rule(rule):
return ''.join(rule)
return {k: list(set(readable_rule(a) for a in grammar[k]))
for k in grammar}
syntax_diagram(readable(gm.tree_to_grammar(csv_dt[0].tree)))
import itertools
class GrammarMiner(GrammarMiner):
def add_tree(self, t):
t_grammar = self.tree_to_grammar(t.tree)
self.grammar = {
key: self.grammar.get(key, []) + t_grammar.get(key, [])
for key in itertools.chain(self.grammar.keys(), t_grammar.keys())
}
inventory_grammar = GrammarMiner()
for dt in csv_dt:
inventory_grammar.add_tree(dt)
syntax_diagram(readable(inventory_grammar.grammar))
class GrammarMiner(GrammarMiner):
def update_grammar(self, inputstr, trace):
at = self.create_tracker(inputstr, trace)
dt = self.create_tree_miner(inputstr, at.assignments())
self.add_tree(dt)
return self.grammar
def create_tracker(self, *args):
return DefineTracker(*args)
def create_tree_miner(self, *args):
return TreeMiner(*args)
def recover_grammar(fn, inputs, **kwargs):
miner = GrammarMiner()
for inputstr in inputs:
with Tracer(inputstr, **kwargs) as tracer:
fn(tracer.my_input)
miner.update_grammar(tracer.my_input, tracer.trace)
return readable(miner.grammar)
inventory_grammar = recover_grammar(process_vehicle, VEHICLES)
inventory_grammar
URLS = [
'http://user:pass@www.google.com:80/?q=path#ref',
'https://www.cispa.saarland:80/',
'http://www.fuzzingbook.org/#News',
]
from urllib.parse import urlparse, clear_cache
def url_parse(url):
clear_cache()
urlparse(url)
trees = []
for url in URLS:
print(url)
with Tracer(url) as tracer:
url_parse(tracer.my_input)
assignments = DefineTracker(tracer.my_input, tracer.trace).assignments()
trees.append((tracer.my_input, assignments))
for var, val in assignments:
print(var + " = " + repr(val))
print()
url_dt = []
for inputstr, assignments in trees:
print(inputstr)
dt = TreeMiner(inputstr, assignments)
url_dt.append(dt)
display_tree(dt.tree)
url_grammar = recover_grammar(url_parse, URLS, files=['urllib/parse.py'])
syntax_diagram(url_grammar)
f = GrammarFuzzer(inventory_grammar)
for _ in range(10):
print(f.fuzz())
f = GrammarFuzzer(url_grammar)
for _ in range(10):
print(f.fuzz())
URLS_X = URLS + ['ftp://freebsd.org/releases/5.8']
url_grammar = recover_grammar(url_parse, URLS_X, files=['urllib/parse.py'])
syntax_diagram(url_grammar)
clear_cache()
with Tracer(URLS_X[0]) as tracer:
urlparse(tracer.my_input)
for i, t in enumerate(tracer.trace):
if t[0] in {'call', 'line'} and 'parse.py' in str(t[2]) and t[3]:
print(i, t[2]._t()[1], t[3:])
def C(cp_1):
c_2 = cp_1 + '@2'
c_3 = c_2 + '@3'
return c_3
def B(bp_7):
b_8 = bp_7 + '@8'
return C(b_8)
def A(ap_12):
a_13 = ap_12 + '@13'
a_14 = B(a_13) + '@14'
a_14 = a_14 + '@15'
a_13 = a_14 + '@16'
a_14 = B(a_13) + '@17'
a_14 = B(a_13) + '@18'
with Tracer('____') as tracer:
A(tracer.my_input)
for t in tracer.trace:
print(t[0], "%d:%s" % (t[2].line_no, t[2].method), t[3])
class CallStack:
def __init__(self, **kwargs):
self.options(kwargs)
self.method_id = (START_SYMBOL, 0)
self.method_register = 0
self.mstack = [self.method_id]
def enter(self, method):
self.method_register += 1
self.method_id = (method, self.method_register)
self.log('call', "%s%s" % (self.indent(), str(self)))
self.mstack.append(self.method_id)
def leave(self):
self.mstack.pop()
self.log('return', "%s%s" % (self.indent(), str(self)))
self.method_id = self.mstack[-1]
class CallStack(CallStack):
def options(self, kwargs):
self.log = log_event if kwargs.get('log') else lambda _evt, _var: None
def indent(self):
return len(self.mstack) * "\t"
def at(self, n):
return self.mstack[n]
def __len__(self):
        return len(self.mstack) - 1
def __str__(self):
return "%s:%d" % self.method_id
def __repr__(self):
return repr(self.method_id)
def display_stack(istack):
def stack_to_tree(stack):
current, *rest = stack
if not rest:
return (repr(current), [])
return (repr(current), [stack_to_tree(rest)])
display_tree(stack_to_tree(istack.mstack), graph_attr=lr_graph)
cs = CallStack()
display_stack(cs)
cs
cs.enter('hello')
display_stack(cs)
cs
cs.enter('world')
display_stack(cs)
cs
cs.leave()
display_stack(cs)
cs
cs.enter('world')
display_stack(cs)
cs
cs.leave()
display_stack(cs)
cs
class Vars:
def __init__(self, original):
self.defs = {}
self.my_input = original
class Vars(Vars):
def _set_kv(self, k, v):
self.defs[k] = v
def __setitem__(self, k, v):
self._set_kv(k, v)
def update(self, v):
for k, v in v.items():
self._set_kv(k, v)
v = Vars('')
v.defs
v['x'] = 'X'
v.defs
v.update({'x': 'x', 'y': 'y'})
v.defs
class AssignmentVars(Vars):
def __init__(self, original):
super().__init__(original)
self.accessed_seq_var = {}
self.var_def_lines = {}
self.current_event = None
self.new_vars = set()
self.method_init()
class AssignmentVars(AssignmentVars):
def method_init(self):
self.call_stack = CallStack()
self.event_locations = {self.call_stack.method_id: []}
class AssignmentVars(AssignmentVars):
def update(self, v):
for k, v in v.items():
self._set_kv(k, v)
self.var_location_register(self.new_vars)
self.new_vars = set()
class AssignmentVars(AssignmentVars):
def var_name(self, var):
return (var, self.accessed_seq_var[var])
class AssignmentVars(AssignmentVars):
def var_access(self, var):
if var not in self.accessed_seq_var:
self.accessed_seq_var[var] = 0
return self.var_name(var)
class AssignmentVars(AssignmentVars):
def var_assign(self, var):
self.accessed_seq_var[var] += 1
self.new_vars.add(self.var_name(var))
return self.var_name(var)
sav = AssignmentVars('')
sav.defs
sav.var_access('v1')
sav.var_assign('v1')
sav.var_assign('v1')
class AssignmentVars(AssignmentVars):
def _set_kv(self, var, val):
s_var = self.var_access(var)
if s_var in self.defs and self.defs[s_var] == val:
return
self.defs[self.var_assign(var)] = val
sav = AssignmentVars('')
sav['x'] = 'X'
sav.defs
sav['x'] = 'X'
sav.defs
sav['x'] = 'Y'
sav.defs
class AssignmentVars(AssignmentVars):
def method_enter(self, cxt, my_vars):
self.current_event = 'call'
self.call_stack.enter(cxt.method)
self.event_locations[self.call_stack.method_id] = []
self.register_event(cxt)
self.update(my_vars)
def method_exit(self, cxt, my_vars):
self.current_event = 'return'
self.register_event(cxt)
self.update(my_vars)
self.call_stack.leave()
def method_statement(self, cxt, my_vars):
self.current_event = 'line'
self.register_event(cxt)
self.update(my_vars)
class AssignmentVars(AssignmentVars):
def register_event(self, cxt):
self.event_locations[self.call_stack.method_id].append(cxt.line_no)
class AssignmentVars(AssignmentVars):
def var_location_register(self, my_vars):
def loc(mid):
if self.current_event == 'call':
return self.event_locations[mid][-1]
elif self.current_event == 'line':
return self.event_locations[mid][-2]
elif self.current_event == 'return':
return self.event_locations[mid][-2]
else:
assert False
my_loc = loc(self.call_stack.method_id)
for var in my_vars:
self.var_def_lines[var] = my_loc
class AssignmentVars(AssignmentVars):
def defined_vars(self, formatted=True):
def fmt(k):
v = (k[0], self.var_def_lines[k])
return "%s@%s" % v if formatted else v
return [(fmt(k), v) for k, v in self.defs.items()]
class AssignmentVars(AssignmentVars):
def seq_vars(self, formatted=True):
def fmt(k):
v = (k[0], self.var_def_lines[k], k[1])
return "%s@%s:%s" % v if formatted else v
return {fmt(k): v for k, v in self.defs.items()}
class AssignmentTracker(DefineTracker):
def __init__(self, my_input, trace, **kwargs):
self.options(kwargs)
self.my_input = my_input
self.my_assignments = self.create_assignments(my_input)
self.trace = trace
self.process()
def create_assignments(self, *args):
return AssignmentVars(*args)
class AssignmentTracker(AssignmentTracker):
def options(self, kwargs):
self.track_return = kwargs.get('track_return', False)
super().options(kwargs)
class AssignmentTracker(AssignmentTracker):
def on_call(self, arg, cxt, my_vars):
my_vars = cxt.parameters(my_vars)
self.my_assignments.method_enter(cxt, self.fragments(my_vars))
def on_line(self, arg, cxt, my_vars):
self.my_assignments.method_statement(cxt, self.fragments(my_vars))
def on_return(self, arg, cxt, my_vars):
self.on_line(arg, cxt, my_vars)
my_vars = {'<-%s' % cxt.method: arg} if self.track_return else {}
self.my_assignments.method_exit(cxt, my_vars)
    def on_exception(self, arg, cxt, my_vars):
return
def track_event(self, event, arg, cxt, my_vars):
self.current_event = event
dispatch = {
'call': self.on_call,
'return': self.on_return,
'line': self.on_line,
'exception': self.on_exception
}
dispatch[event](arg, cxt, my_vars)
def C(cp_1):
c_2 = cp_1
c_3 = c_2
return c_3
def B(bp_7):
b_8 = bp_7
return C(b_8)
def A(ap_12):
a_13 = ap_12
a_14 = B(a_13)
a_14 = a_14
a_13 = a_14
a_14 = B(a_13)
a_14 = B(a_14)[3:]
with Tracer('---xxx') as tracer:
A(tracer.my_input)
tracker = AssignmentTracker(tracer.my_input, tracer.trace, log=True)
for k, v in tracker.my_assignments.seq_vars().items():
print(k, '=', repr(v))
print()
for k, v in tracker.my_assignments.defined_vars(formatted=True):
print(k, '=', repr(v))
traces = []
for inputstr in URLS_X:
clear_cache()
with Tracer(inputstr, files=['urllib/parse.py']) as tracer:
urlparse(tracer.my_input)
traces.append((tracer.my_input, tracer.trace))
tracker = AssignmentTracker(tracer.my_input, tracer.trace, log=True)
for k, v in tracker.my_assignments.defined_vars():
print(k, '=', repr(v))
print()
class TreeMiner(TreeMiner):
def get_derivation_tree(self):
tree = (START_SYMBOL, [(self.my_input, [])])
for var, value in self.my_assignments:
self.log(0, "%s=%s" % (var, repr(value)))
self.apply_new_definition(tree, var, value)
return tree
clear_cache()
with Tracer(URLS_X[0], files=['urllib/parse.py']) as tracer:
urlparse(tracer.my_input)
sm = AssignmentTracker(tracer.my_input, tracer.trace)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars())
display_tree(dt.tree)
clear_cache()
with Tracer(URLS_X[-1], files=['urllib/parse.py']) as tracer:
urlparse(tracer.my_input)
sm = AssignmentTracker(tracer.my_input, tracer.trace)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars())
display_tree(dt.tree)
class GrammarMiner(GrammarMiner):
def update_grammar(self, inputstr, trace):
at = self.create_tracker(inputstr, trace)
dt = self.create_tree_miner(inputstr, at.my_assignments.defined_vars())
self.add_tree(dt)
return self.grammar
def create_tracker(self, *args):
return AssignmentTracker(*args)
def create_tree_miner(self, *args):
return TreeMiner(*args)
url_grammar = recover_grammar(url_parse, URLS_X, files=['urllib/parse.py'])
syntax_diagram(url_grammar)
f = GrammarFuzzer(url_grammar)
for _ in range(10):
print(f.fuzz())
inventory_grammar = recover_grammar(process_vehicle, VEHICLES)
syntax_diagram(inventory_grammar)
f = GrammarFuzzer(inventory_grammar)
for _ in range(10):
print(f.fuzz())
with Tracer(INVENTORY) as tracer:
process_inventory(tracer.my_input)
sm = AssignmentTracker(tracer.my_input, tracer.trace)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars())
display_tree(dt.tree, graph_attr=lr_graph)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars(), log=True)
class InputStack(CallStack):
def __init__(self, i, fragment_len=FRAGMENT_LEN):
self.inputs = [{START_SYMBOL: i}]
self.fragment_len = fragment_len
super().__init__()
class InputStack(InputStack):
def in_current_record(self, val):
return any(val in var for var in self.inputs[-1].values())
my_istack = InputStack('hello my world')
my_istack.in_current_record('hello')
my_istack.in_current_record('bye')
my_istack.inputs.append({'greeting': 'hello', 'location': 'world'})
my_istack.in_current_record('hello')
my_istack.in_current_record('my')
class InputStack(InputStack):
def ignored(self, val):
return not (isinstance(val, str) and len(val) >= self.fragment_len)
my_istack = InputStack('hello world')
my_istack.ignored(1)
my_istack.ignored('a')
my_istack.ignored('help')
class InputStack(InputStack):
def in_scope(self, k, val):
if self.ignored(val):
return False
return self.in_current_record(val)
class InputStack(InputStack):
def enter(self, method, inputs):
my_inputs = {k: v for k, v in inputs.items() if self.in_scope(k, v)}
self.inputs.append(my_inputs)
super().enter(method)
class InputStack(InputStack):
def leave(self):
self.inputs.pop()
super().leave()
class ScopedVars(AssignmentVars):
def method_init(self):
self.call_stack = self.create_call_stack(self.my_input)
self.event_locations = {self.call_stack.method_id: []}
def create_call_stack(self, i):
return InputStack(i)
class ScopedVars(ScopedVars):
def method_enter(self, cxt, my_vars):
self.current_event = 'call'
self.call_stack.enter(cxt.method, my_vars)
self.accessed_seq_var[self.call_stack.method_id] = {}
self.event_locations[self.call_stack.method_id] = []
self.register_event(cxt)
self.update(my_vars)
class ScopedVars(ScopedVars):
def update(self, v):
if self.current_event == 'call':
context = -2
elif self.current_event == 'line':
context = -1
else:
context = -1
for k, v in v.items():
self._set_kv(k, (v, self.call_stack.at(context)))
self.var_location_register(self.new_vars)
self.new_vars = set()
class ScopedVars(ScopedVars):
def var_name(self, var):
return (var, self.call_stack.method_id,
self.accessed_seq_var[self.call_stack.method_id][var])
class ScopedVars(ScopedVars):
def var_access(self, var):
if var not in self.accessed_seq_var[self.call_stack.method_id]:
self.accessed_seq_var[self.call_stack.method_id][var] = 0
return self.var_name(var)
class ScopedVars(ScopedVars):
def var_assign(self, var):
self.accessed_seq_var[self.call_stack.method_id][var] += 1
self.new_vars.add(self.var_name(var))
return self.var_name(var)
class ScopedVars(ScopedVars):
def defined_vars(self, formatted=True):
def fmt(k):
method, i = k[1]
v = (method, i, k[0], self.var_def_lines[k])
return "%s[%d]:%s@%s" % v if formatted else v
return [(fmt(k), v) for k, v in self.defs.items()]
class ScopedVars(ScopedVars):
def seq_vars(self, formatted=True):
def fmt(k):
method, i = k[1]
v = (method, i, k[0], self.var_def_lines[k], k[2])
return "%s[%d]:%s@%s:%s" % v if formatted else v
return {fmt(k): v for k, v in self.defs.items()}
class ScopeTracker(AssignmentTracker):
def __init__(self, my_input, trace, **kwargs):
self.current_event = None
super().__init__(my_input, trace, **kwargs)
def create_assignments(self, *args):
return ScopedVars(*args)
class ScopeTracker(ScopeTracker):
def is_input_fragment(self, var, value):
return self.my_assignments.call_stack.in_scope(var, value)
vehicle_traces = []
with Tracer(INVENTORY) as tracer:
process_inventory(tracer.my_input)
sm = ScopeTracker(tracer.my_input, tracer.trace)
vehicle_traces.append((tracer.my_input, sm))
for k, v in sm.my_assignments.seq_vars().items():
print(k, '=', repr(v))
def my_fn(stringval):
partA, partB = stringval.split('/')
return partA, partB
svalue = ...
v1, v2 = my_fn(svalue)
class ScopeTreeMiner(TreeMiner):
def mseq(self, key):
method, seq, var, lno = key
return seq
class ScopeTreeMiner(ScopeTreeMiner):
def nt_var(self, key):
method, seq, var, lno = key
return to_nonterminal("%s@%d:%s" % (method, lno, var))
class ScopeTreeMiner(ScopeTreeMiner):
def partition(self, part, value):
return value.partition(part)
def partition_by_part(self, pair, value):
(nt_var, nt_seq), (v, v_scope) = pair
prefix_k_suffix = [
(nt_var, [(v, [], nt_seq)]) if i == 1 else (e, [])
for i, e in enumerate(self.partition(v, value))
if e]
return prefix_k_suffix
def insert_into_tree(self, my_tree, pair):
var, values, my_scope = my_tree
(nt_var, nt_seq), (v, v_scope) = pair
applied = False
for i, value_ in enumerate(values):
key, arr, scope = value_
self.log(2, "-> [%d] %s" % (i, repr(value_)))
if is_nonterminal(key):
applied = self.insert_into_tree(value_, pair)
if applied:
break
else:
if v_scope != scope:
if nt_seq > scope:
continue
if not v or not self.string_part_of_value(v, key):
continue
prefix_k_suffix = [(k, children, scope) for k, children
in self.partition_by_part(pair, key)]
del values[i]
for j, rep in enumerate(prefix_k_suffix):
values.insert(j + i, rep)
applied = True
self.log(2, " > %s" % (repr([i[0] for i in prefix_k_suffix])))
break
return applied
class ScopeTreeMiner(ScopeTreeMiner):
def apply_new_definition(self, tree, var, value_):
nt_var = self.nt_var(var)
seq = self.mseq(var)
val, (smethod, mseq) = value_
return self.insert_into_tree(tree, ((nt_var, seq), (val, mseq)))
class ScopeTreeMiner(ScopeTreeMiner):
def get_derivation_tree(self):
tree = (START_SYMBOL, [(self.my_input, [], 0)], 0)
for var, value in self.my_assignments:
self.log(0, "%s=%s" % (var, repr(value)))
self.apply_new_definition(tree, var, value)
return tree
url_dts = []
for inputstr in URLS_X:
clear_cache()
with Tracer(inputstr, files=['urllib/parse.py']) as tracer:
urlparse(tracer.my_input)
sm = ScopeTracker(tracer.my_input, tracer.trace)
for k, v in sm.my_assignments.defined_vars(formatted=False):
print(k, '=', repr(v))
dt = ScopeTreeMiner(
tracer.my_input,
sm.my_assignments.defined_vars(
formatted=False))
display_tree(dt.tree, graph_attr=lr_graph)
url_dts.append(dt)
with Tracer(INVENTORY) as tracer:
process_inventory(tracer.my_input)
sm = ScopeTracker(tracer.my_input, tracer.trace)
for k, v in sm.my_assignments.defined_vars():
print(k, '=', repr(v))
inventory_dt = ScopeTreeMiner(
tracer.my_input,
sm.my_assignments.defined_vars(
formatted=False))
display_tree(inventory_dt.tree, graph_attr=lr_graph)
class ScopedGrammarMiner(GrammarMiner):
def tree_to_grammar(self, tree):
key, children, scope = tree
one_alt = [ckey for ckey, gchildren, cscope in children if ckey != key]
hsh = {key: [one_alt] if one_alt else []}
for child in children:
(ckey, _gc, _cscope) = child
if not is_nonterminal(ckey):
continue
chsh = self.tree_to_grammar(child)
for k in chsh:
if k not in hsh:
hsh[k] = chsh[k]
else:
hsh[k].extend(chsh[k])
return hsh
si = ScopedGrammarMiner()
si.add_tree(inventory_dt)
syntax_diagram(readable(si.grammar))
su = ScopedGrammarMiner()
for url_dt in url_dts:
su.add_tree(url_dt)
syntax_diagram(readable(su.grammar))
class ScopedGrammarMiner(ScopedGrammarMiner):
def get_replacements(self, grammar):
replacements = {}
for k in grammar:
if k == START_SYMBOL:
continue
alts = grammar[k]
if len(set([str(i) for i in alts])) != 1:
continue
rule = alts[0]
if len(rule) != 1:
continue
tok = rule[0]
if not is_nonterminal(tok):
continue
replacements[k] = tok
return replacements
class ScopedGrammarMiner(ScopedGrammarMiner):
def clean_grammar(self):
replacements = self.get_replacements(self.grammar)
while True:
changed = set()
for k in self.grammar:
if k in replacements:
continue
new_alts = []
for alt in self.grammar[k]:
new_alt = []
for t in alt:
if t in replacements:
new_alt.append(replacements[t])
changed.add(t)
else:
new_alt.append(t)
new_alts.append(new_alt)
self.grammar[k] = new_alts
if not changed:
break
for k in changed:
self.grammar.pop(k, None)
return readable(self.grammar)
si = ScopedGrammarMiner()
si.add_tree(inventory_dt)
syntax_diagram(readable(si.clean_grammar()))
class ScopedGrammarMiner(ScopedGrammarMiner):
def update_grammar(self, inputstr, trace):
at = self.create_tracker(inputstr, trace)
dt = self.create_tree_miner(
inputstr, at.my_assignments.defined_vars(
formatted=False))
self.add_tree(dt)
return self.grammar
def create_tracker(self, *args):
return ScopeTracker(*args)
def create_tree_miner(self, *args):
return ScopeTreeMiner(*args)
def recover_grammar(fn, inputs, **kwargs):
miner = ScopedGrammarMiner()
for inputstr in inputs:
with Tracer(inputstr, **kwargs) as tracer:
fn(tracer.my_input)
miner.update_grammar(tracer.my_input, tracer.trace)
return readable(miner.clean_grammar())
url_grammar = recover_grammar(url_parse, URLS_X, files=['urllib/parse.py'])
syntax_diagram(url_grammar)
f = GrammarFuzzer(url_grammar)
for _ in range(10):
print(f.fuzz())
inventory_grammar = recover_grammar(process_inventory, [INVENTORY])
syntax_diagram(inventory_grammar)
f = GrammarFuzzer(inventory_grammar)
for _ in range(10):
print(f.fuzz())
url_parse('https://www.fuzzingbook.org/')
URLS
grammar = recover_grammar(url_parse, URLS)
grammar
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
fuzzer = GrammarCoverageFuzzer(grammar)
[fuzzer.fuzz() for i in range(5)]
class Vehicle:
def __init__(self, vehicle):
year, kind, company, model, *_ = vehicle.split(',')
self.year, self.kind, self.company, self.model = year, kind, company, model
def process_inventory(inventory):
res = []
for vehicle in inventory.split('\n'):
ret = process_vehicle(vehicle)
res.extend(ret)
return '\n'.join(res)
def process_vehicle(vehicle):
v = Vehicle(vehicle)
if v.kind == 'van':
return process_van(v)
elif v.kind == 'car':
return process_car(v)
else:
raise Exception('Invalid entry')
def process_van(vehicle):
res = [
"We have a %s %s van from %s vintage." % (vehicle.company,
vehicle.model, vehicle.year)
]
iyear = int(vehicle.year)
if iyear > 2010:
res.append("It is a recent model!")
else:
res.append("It is an old but reliable model!")
return res
def process_car(vehicle):
res = [
"We have a %s %s car from %s vintage." % (vehicle.company,
vehicle.model, vehicle.year)
]
iyear = int(vehicle.year)
if iyear > 2016:
res.append("It is a recent model!")
else:
res.append("It is an old but reliable model!")
return res
vehicle_grammar = recover_grammar(
process_inventory,
[INVENTORY],
methods=INVENTORY_METHODS)
syntax_diagram(vehicle_grammar)
with Tracer(INVENTORY, methods=INVENTORY_METHODS, log=True) as tracer:
process_inventory(tracer.my_input)
print()
print('Traced values:')
for t in tracer.trace:
print(t)
MAX_DEPTH = 10
def set_flatten_depth(depth):
global MAX_DEPTH
MAX_DEPTH = depth
def flatten(key, val, depth=MAX_DEPTH):
tv = type(val)
if depth <= 0:
return [(key, val)]
if isinstance(val, (int, float, complex, str, bytes, bytearray)):
return [(key, val)]
elif isinstance(val, (set, frozenset, list, tuple, range)):
        values = [e for i, elt in enumerate(val) for e in flatten(i, elt, depth-1)]
        return [("%s.%s" % (key, k), v) for k, v in values]
elif isinstance(val, dict):
values = [e for k, elt in val.items() for e in flatten(k, elt, depth-1)]
return [("%s.%s" % (key, k), v) for k, v in values]
elif isinstance(val, str):
return [(key, val)]
elif hasattr(val, '__dict__'):
values = [e for k, elt in val.__dict__.items()
for e in flatten(k, elt, depth-1)]
return [("%s.%s" % (key, k), v) for k, v in values]
else:
return [(key, val)]
class Context(Context):
def extract_vars(self, frame):
vals = inspect.getargvalues(frame).locals
return {k1: v1 for k, v in vals.items() for k1, v1 in flatten(k, v)}
def parameters(self, all_vars):
def check_param(k):
return any(k.startswith(p) for p in self.parameter_names)
return {k: v for k, v in all_vars.items() if check_param(k)}
def qualified(self, all_vars):
return {"%s:%s" % (self.method, k): v for k, v in all_vars.items()}
with Tracer(INVENTORY, methods=INVENTORY_METHODS, log=True) as tracer:
process_inventory(tracer.my_input)
print()
print('Traced values:')
for t in tracer.trace:
print(t)
vehicle_grammar = recover_grammar(
process_inventory,
[INVENTORY],
methods=INVENTORY_METHODS)
syntax_diagram(vehicle_grammar)
from InformationFlow import ostr
def is_fragment(fragment, original):
assert isinstance(original, ostr)
if not isinstance(fragment, ostr):
return False
return set(fragment.origin) <= set(original.origin)
class TaintedInputStack(InputStack):
def in_current_record(self, val):
return any(is_fragment(val, var) for var in self.inputs[-1].values())
class TaintedInputStack(TaintedInputStack):
def ignored(self, val):
return not isinstance(val, ostr)
class TaintedScopedVars(ScopedVars):
def create_call_stack(self, i):
return TaintedInputStack(i)
class TaintedScopeTracker(ScopeTracker):
def create_assignments(self, *args):
return TaintedScopedVars(*args)
class TaintedScopeTreeMiner(ScopeTreeMiner):
def string_part_of_value(self, part, value):
return str(part.origin).strip('[]') in str(value.origin).strip('[]')
def partition(self, part, value):
begin = value.origin.index(part.origin[0])
end = value.origin.index(part.origin[-1])+1
return value[:begin], value[begin:end], value[end:]
if my_fragment == 'begin':
return 'begin'
class TaintedScopedGrammarMiner(ScopedGrammarMiner):
def create_tracker(self, *args):
return TaintedScopeTracker(*args)
def create_tree_miner(self, *args):
return TaintedScopeTreeMiner(*args)
def recover_grammar_with_taints(fn, inputs, **kwargs):
miner = TaintedScopedGrammarMiner()
for inputstr in inputs:
with Tracer(ostr(inputstr), **kwargs) as tracer:
fn(tracer.my_input)
miner.update_grammar(tracer.my_input, tracer.trace)
return readable(miner.clean_grammar())
inventory_grammar = recover_grammar_with_taints(
process_inventory, [INVENTORY],
methods=[
'process_inventory', 'process_vehicle', 'process_car', 'process_van'
])
syntax_diagram(inventory_grammar)
url_grammar = recover_grammar_with_taints(
url_parse, URLS_X + ['ftp://user4:pass1@host4/?key4=value3'],
methods=['urlsplit', 'urlparse', '_splitnetloc'])
syntax_diagram(url_grammar)
| 0.436742 | 0.984139 |
# [experimental] WebDataset integration
This notebook shows how to write a dataloading pipeline for ASR on the mini LibriSpeech dataset, leveraging Lhotse's WebDataset integration.
The WebDataset project helps speed up reading data from disks or the network. The way it's used in Lhotse is that we package the metadata (cuts) together with the binary data (audio, features, etc.) into tar files, which can be read much faster. Instead of the random reads used in typical Lhotse workflows we perform sequential reads, so we open the file handle only once and make it easier for the disk/OS caching and prefetching to anticipate what we're going to read next.
This step requires you to make a full copy of your data, so note that there is a storage size / performance trade-off to it.
Find out more about the WebDataset project here: https://github.com/webdataset/webdataset
```
# Optional auto-formatting
#!pip install nb_black
#%load_ext lab_black
# Get the latest version of Lhotse, if not installed:
#!pip install git+https://github.com/lhotse-speech/lhotse
# Get WebDataset 0.2.5 which is the only version we support at this time:
#!pip install -U webdataset==0.2.5
import os
from pathlib import Path
import torch
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from lhotse import CutSet, Fbank
from lhotse.dataset import (
DynamicBucketingSampler,
K2SpeechRecognitionDataset,
OnTheFlyFeatures,
PerturbSpeed,
PerturbVolume,
SpecAugment,
make_worker_init_fn,
)
from lhotse.recipes import (
download_librispeech,
prepare_librispeech,
)
root_dir = Path("data")
tmp_dir = Path("tmp")
tmp_dir.mkdir(exist_ok=True)
num_jobs = os.cpu_count() - 1
```
# (mini) LibriSpeech
We're downloading the data, preparing recording/supervision manifests, and compiling them into CutSets.
A cut is a basic "example" of data in Lhotse.
Approx. download size 450MB.
```
# libri_variant = "librispeech"
libri_variant = "mini_librispeech"
libri_root = download_librispeech(root_dir, dataset_parts=libri_variant)
libri = prepare_librispeech(
libri_root, dataset_parts=libri_variant, output_dir=root_dir, num_jobs=num_jobs
)
cuts_train = CutSet.from_manifests(**libri["train-clean-5"]).trim_to_supervisions()
cuts_dev = CutSet.from_manifests(**libri["dev-clean-2"]).trim_to_supervisions()
```
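To get a feel for what a single cut holds, we can peek at the first one. This is a minimal inspection sketch that only assumes standard Lhotse cut attributes (`id`, `duration`, `supervisions`):
```
# Peek at the first training cut to see what an "example" looks like.
first_cut = next(iter(cuts_train))
print(first_cut.id)                    # unique identifier of the cut
print(first_cut.duration)              # duration in seconds
print(first_cut.supervisions[0].text)  # transcript of the first supervision
```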
# Export cuts to WebDataset tar file shards
Sharding is a technique used to partition a large dataset into smaller parts that can be split between different GPU nodes and dataloading workers.
In this example, we're working with small data, but we'll treat it like a large dataset to illustrate the typical usage.
```
from lhotse.dataset.webdataset import export_to_webdataset
# We'll keep the audio in SPHERE format to reduce its size.
# We can also use "flac" but it may require setting torchaudio.set_audio_backend("soundfile"),
# as we observed the "sox_io" backend FLAC decoder tends to fail sometimes with in-memory buffers.
export_to_webdataset(
cuts_train,
output_path=f"{tmp_dir}/shard-%d.tar",
shard_size=300,
audio_format="sph",
)
```
# Reading WebDataset-stored CutSet+data
We list the shards and pass the list to `CutSet.from_webdataset`. We can also pass a single shard, a URL, or a bash command.
It's possible to read from cloud storage this way, e.g. `'pipe:aws s3 cp s3://my-bucket/shard-1.tar -'` would spawn an S3-reading subprocess from which we'll read the data (see the sketch after the code cell below).
The meaning of the extra arguments to `from_webdataset` is explained in the next section.
```
shards = [str(path) for path in sorted(tmp_dir.glob("shard-*.tar"))]
print(shards)
cuts_train_webdataset = CutSet.from_webdataset(
shards,
split_by_worker=True,
split_by_node=True,
shuffle_shards=True,
)
print(cuts_train_webdataset)
```
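As mentioned above, shards do not have to be local files. Below is a minimal sketch of streaming shards from cloud storage through pipe commands; the bucket and object names are hypothetical and it assumes the `aws` CLI is available:
```
# Hypothetical example: stream shards straight from S3 via subprocess pipes.
# Replace the bucket/key names with your own.
remote_shards = [
    "pipe:aws s3 cp s3://my-bucket/shard-0.tar -",
    "pipe:aws s3 cp s3://my-bucket/shard-1.tar -",
]
cuts_remote = CutSet.from_webdataset(
    remote_shards,
    split_by_worker=True,
    split_by_node=True,
    shuffle_shards=True,
)
```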
# Training DataLoader with WebDataset
Since the WebDataset approach uses sharding for data de-duplication across multiple DataLoader workers and GPU nodes,
we need to adjust how we create the DataLoader.
We'll extend the "base" approach used in `examples/00-basic-workflow.ipynb` (next to this file).
The code below has the same functionality, just reads the data differently.
The main change is that we create an IterableDataset, which is just a wrapper over the sampler's iteration and a map-style dataset that converts each CutSet mini-batch to tensors.
This moves the sampler into the dataloading worker processes, so WebDataset can "auto-detect" that it's in a multi-worker context and drop some shards in each worker/node. Remember that in a "typical" sampler + map-style dataset scenario, the sampler lives in the same process as the main training loop instead.
To learn more about map-style and iterable-style datasets, see: https://pytorch.org/docs/stable/data.html#dataset-types
```
train_sampler = DynamicBucketingSampler(
cuts_train_webdataset, # <-- note the "_webdataset" variant being used here
shuffle=True,
max_duration=100.0,
num_buckets=10,
)
train_dataset = K2SpeechRecognitionDataset(
cut_transforms=[
PerturbSpeed(factors=[0.9, 1.1], p=2 / 3),
PerturbVolume(scale_low=0.125, scale_high=2.0, p=0.5),
],
input_transforms=[
SpecAugment(), # default configuration is well-tuned
],
input_strategy=OnTheFlyFeatures(Fbank()),
)
# This is the part that's different:
from lhotse.dataset.iterable_dataset import IterableDatasetWrapper
train_iter_dataset = IterableDatasetWrapper(
dataset=train_dataset,
sampler=train_sampler,
)
train_dloader = DataLoader(
train_iter_dataset,
batch_size=None,
# For faster dataloading, use num_workers > 1
num_workers=0,
# Note: Lhotse offers its own "worker_init_fn" that helps properly
# set the random seeds in all workers (also with multi-node training)
# and sets up data de-duplication for multi-node training with WebDataset.
worker_init_fn=make_worker_init_fn(),
)
```
### High-level architecture of the solution
```
┌────────────────────────────────────────────────────────────────────────┐
│┌──────────────────────────────────────────────────────────────────────┐│
││ Training loop ││
│└──────────────────────────────────────────────────────────────────────┘│
│ │ │
│ ▼ │
│ ┌─────────────────────────────┐ │
│ │ torch.utils.data.DataLoader │ │
│ └─────────────────────────────┘ Main process│
└────────────────────────────────────┬───────────────────────────────────┘
┌─────────────────────────────┼───────────────────────────────┐
▼ ┌─────────────────────▼───────────────────────┐ ▼
┌─────────┐ │ ┌─────────┐ Sub-process #i │ ┌─────────┐
│Worker #1│ │ │Worker #i│ │ │Worker #N│
└─────────┘ │ └─────────┘ │ └─────────┘
│ │ │
│ ▼ │
│ ┌────────────────────────┐ │
│ │ IterableDatasetWrapper │ │
│ └────────────────────────┘ │
│ │ │
│ ┌─────────┴──────┐ │
│ ▼ ▼ │
│ ┌─────────────────┐ ┌───────────┐ │
│ │Map-style Dataset│ │ Sampler │ │
│ │ (task-specific) │ │ │ │
│ └─────────────────┘ └───────────┘ │
│ │ │
│ ▼ │
│ ┌───────────┐ │
│ │ CutSet │ │
│ └───────────┘ │
│ │ │
│ ▼ │
│ ┌────────────────────────┐ │
│ │Lazy WebDataset Iterator│ │
│ │(discards shard_idx % N)│ │
│ └────────────────────────┘ │
│ │ │
│ ┌───────────┼───────────┐ │
│ ▼ ▼ ▼ │
│ ┌────────┐ ┌────────┐ ┌────────┐│
│ │Shard #1│ │Shard #j│ │Shard #M││
│ └────────┘ └────────┘ └────────┘│
└─────────────────────────────────────────────┘
```
### Visualisation
We simply iterate the dataloader as usual and see what the first batch looks like.
```
from lhotse.dataset.vis import plot_batch
for batch in train_dloader:
plot_batch(batch)
break
```
# Basic benchmark
## Random read version
```
%%time
for cut in cuts_train:
cut.load_audio()
```
## Sequential read version
Note: this would get even faster with bigger shards; we used a shard size of 300 just for illustration, since mini LibriSpeech has only 1500 training utterances.
```
%%time
for cut in cuts_train_webdataset:
cut.load_audio()
```
# Lower-level exporting API
If you want to do additional filtering, or compute something extra and attach it to the cuts while exporting, you can use `WebdatasetWriter` instead of `export_to_webdataset`:
```
from lhotse.dataset.webdataset import WebdatasetWriter
from lhotse.features.io import MemoryRawWriter
from torchaudio.functional import compute_kaldi_pitch
with WebdatasetWriter(
f"{tmp_dir}/shard-writer-%d.tar", shard_size=300, audio_format="sph"
) as tar_writer:
for cut in tqdm(cuts_train):
if cut.duration > 5.0:
# skip some cuts
continue
# Move audio data to memory so that we avoid loading it twice
cut = cut.move_to_memory(audio_format="sph")
# Compute pitch features with an external library, i.e., torchaudio.
# Note: snip_edges=False makes the number of frames consistent with what Lhotse typically expects,
# but is not strictly required here.
pitch_feats = compute_kaldi_pitch(
torch.from_numpy(cut.load_audio()),
sample_rate=cut.sampling_rate,
snip_edges=False,
).squeeze(0)
# Attach pitch features as a custom field to Cut -- it will be persisted when read again.
# We're using MemoryRawWriter which converts numpy arrays into binary data.
# That data will be stored together with the cut and audio in the tarfile.
# Frame shift is the default for Kaldi pitch features.
cut.pitch = MemoryRawWriter().store_array(
cut.id, pitch_feats.numpy(), frame_shift=0.01, temporal_dim=0
)
# Writes the cutset with in-memory data into tar files.
tar_writer.write(cut)
# Check that writing was successful
shards = [str(path) for path in sorted(tmp_dir.glob("shard-writer-*.tar"))]
print(shards)
# Note lack of extra args -- we're just going to iterate over the cutset, so we don't need
# WebDataset to do de-duplication.
cuts = CutSet.from_webdataset(shards)
for cut in tqdm(cuts):
audio = cut.load_audio()
pitch = cut.load_pitch()
print(audio.shape)
print(pitch.shape)
```
|
github_jupyter
|
# Optional auto-formatting
#!pip install nb_black
#%load_ext lab_black
# Get the latest version of Lhotse, if not installed:
#!pip install git+https://github.com/lhotse-speech/lhotse
# Get WebDataset 0.2.5 which is the only version we support at this time:
#!pip install -U webdataset==0.2.5
import os
from pathlib import Path
import torch
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from lhotse import CutSet, Fbank
from lhotse.dataset import (
DynamicBucketingSampler,
K2SpeechRecognitionDataset,
OnTheFlyFeatures,
PerturbSpeed,
PerturbVolume,
SpecAugment,
make_worker_init_fn,
)
from lhotse.recipes import (
download_librispeech,
prepare_librispeech,
)
root_dir = Path("data")
tmp_dir = Path("tmp")
tmp_dir.mkdir(exist_ok=True)
num_jobs = os.cpu_count() - 1
# libri_variant = "librispeech"
libri_variant = "mini_librispeech"
libri_root = download_librispeech(root_dir, dataset_parts=libri_variant)
libri = prepare_librispeech(
libri_root, dataset_parts=libri_variant, output_dir=root_dir, num_jobs=num_jobs
)
cuts_train = CutSet.from_manifests(**libri["train-clean-5"]).trim_to_supervisions()
cuts_dev = CutSet.from_manifests(**libri["dev-clean-2"]).trim_to_supervisions()
from lhotse.dataset.webdataset import export_to_webdataset
# We'll keep the audio in SPHERE format to reduce its size.
# We can also use "flac" but it may require setting torchaudio.set_audio_backend("soundfile"),
# as we observed the "sox_io" backend FLAC decoder tends to fail sometimes with in-memory buffers.
export_to_webdataset(
cuts_train,
output_path=f"{tmp_dir}/shard-%d.tar",
shard_size=300,
audio_format="sph",
)
shards = [str(path) for path in sorted(tmp_dir.glob("shard-*.tar"))]
print(shards)
cuts_train_webdataset = CutSet.from_webdataset(
shards,
split_by_worker=True,
split_by_node=True,
shuffle_shards=True,
)
print(cuts_train_webdataset)
train_sampler = DynamicBucketingSampler(
cuts_train_webdataset, # <-- note the "_webdataset" variant being used here
shuffle=True,
max_duration=100.0,
num_buckets=10,
)
train_dataset = K2SpeechRecognitionDataset(
cut_transforms=[
PerturbSpeed(factors=[0.9, 1.1], p=2 / 3),
PerturbVolume(scale_low=0.125, scale_high=2.0, p=0.5),
],
input_transforms=[
SpecAugment(), # default configuration is well-tuned
],
input_strategy=OnTheFlyFeatures(Fbank()),
)
# This is the part that's different:
from lhotse.dataset.iterable_dataset import IterableDatasetWrapper
train_iter_dataset = IterableDatasetWrapper(
dataset=train_dataset,
sampler=train_sampler,
)
train_dloader = DataLoader(
train_iter_dataset,
batch_size=None,
# For faster dataloading, use num_workers > 1
num_workers=0,
# Note: Lhotse offers its own "worker_init_fn" that helps properly
# set the random seeds in all workers (also with multi-node training)
# and sets up data de-duplication for multi-node training with WebDataset.
worker_init_fn=make_worker_init_fn(),
)
┌────────────────────────────────────────────────────────────────────────┐
│┌──────────────────────────────────────────────────────────────────────┐│
││ Training loop ││
│└──────────────────────────────────────────────────────────────────────┘│
│ │ │
│ ▼ │
│ ┌─────────────────────────────┐ │
│ │ torch.utils.data.DataLoader │ │
│ └─────────────────────────────┘ Main process│
└────────────────────────────────────┬───────────────────────────────────┘
┌─────────────────────────────┼───────────────────────────────┐
▼ ┌─────────────────────▼───────────────────────┐ ▼
┌─────────┐ │ ┌─────────┐ Sub-process #i │ ┌─────────┐
│Worker #1│ │ │Worker #i│ │ │Worker #N│
└─────────┘ │ └─────────┘ │ └─────────┘
│ │ │
│ ▼ │
│ ┌────────────────────────┐ │
│ │ IterableDatasetWrapper │ │
│ └────────────────────────┘ │
│ │ │
│ ┌─────────┴──────┐ │
│ ▼ ▼ │
│ ┌─────────────────┐ ┌───────────┐ │
│ │Map-style Dataset│ │ Sampler │ │
│ │ (task-specific) │ │ │ │
│ └─────────────────┘ └───────────┘ │
│ │ │
│ ▼ │
│ ┌───────────┐ │
│ │ CutSet │ │
│ └───────────┘ │
│ │ │
│ ▼ │
│ ┌────────────────────────┐ │
│ │Lazy WebDataset Iterator│ │
│ │(discards shard_idx % N)│ │
│ └────────────────────────┘ │
│ │ │
│ ┌───────────┼───────────┐ │
│ ▼ ▼ ▼ │
│ ┌────────┐ ┌────────┐ ┌────────┐│
│ │Shard #1│ │Shard #j│ │Shard #M││
│ └────────┘ └────────┘ └────────┘│
└─────────────────────────────────────────────┘
from lhotse.dataset.vis import plot_batch
for batch in train_dloader:
plot_batch(batch)
break
%%time
for cut in cuts_train:
cut.load_audio()
%%time
for cut in cuts_train_webdataset:
cut.load_audio()
from lhotse.dataset.webdataset import WebdatasetWriter
from lhotse.features.io import MemoryRawWriter
from torchaudio.functional import compute_kaldi_pitch
with WebdatasetWriter(
f"{tmp_dir}/shard-writer-%d.tar", shard_size=300, audio_format="sph"
) as tar_writer:
for cut in tqdm(cuts_train):
if cut.duration > 5.0:
# skip some cuts
continue
# Move audio data to memory so that we avoid loading it twice
cut = cut.move_to_memory(audio_format="sph")
# Compute pitch features with an external library, i.e., torchaudio.
# Note: snip_edges=False makes the number of frames consistent with what Lhotse typically expects,
# but is not strictly required here.
pitch_feats = compute_kaldi_pitch(
torch.from_numpy(cut.load_audio()),
sample_rate=cut.sampling_rate,
snip_edges=False,
).squeeze(0)
# Attach pitch features as a custom field to Cut -- it will be persisted when read again.
# We're using MemoryRawWriter which converts numpy arrays into binary data.
# That data will be stored together with the cut and audio in the tarfile.
# Frame shift is the default for Kaldi pitch features.
cut.pitch = MemoryRawWriter().store_array(
cut.id, pitch_feats.numpy(), frame_shift=0.01, temporal_dim=0
)
# Writes the cutset with in-memory data into tar files.
tar_writer.write(cut)
# Check that writing was successful
shards = [str(path) for path in sorted(tmp_dir.glob("shard-writer-*.tar"))]
print(shards)
# Note lack of extra args -- we're just going to iterate over the cutset, so we don't need
# WebDataset to do de-duplication.
cuts = CutSet.from_webdataset(shards)
for cut in tqdm(cuts):
audio = cut.load_audio()
pitch = cut.load_pitch()
print(audio.shape)
print(pitch.shape)
| 0.569134 | 0.924756 |
<a href="https://colab.research.google.com/github/PyTorchLightning/lightning-flash/blob/master/flash_notebooks/tabular_classification.ipynb" target="_parent">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
In this notebook, we'll go over the basics of Lightning Flash by training a TabularClassifier on the [Titanic Dataset](https://www.kaggle.com/c/titanic).
---
- Give us a ⭐ [on Github](https://www.github.com/PytorchLightning/pytorch-lightning/)
- Check out [Flash documentation](https://lightning-flash.readthedocs.io/en/latest/)
- Check out [Lightning documentation](https://pytorch-lightning.readthedocs.io/en/latest/)
- Join us [on Slack](https://join.slack.com/t/pytorch-lightning/shared_invite/zt-pw5v393p-qRaDgEk24~EjiZNBpSQFgQ)
# Training
```
# %%capture
! pip install git+https://github.com/PyTorchLightning/pytorch-flash.git
from torchmetrics.classification import Accuracy, Precision, Recall
import flash
from flash.data.utils import download_data
from flash.tabular import TabularClassifier, TabularData
```
### 1. Download the data
The data is downloaded from a URL and saved in a 'data' directory.
```
download_data("https://pl-flash-data.s3.amazonaws.com/titanic.zip", 'data/')
```
### 2. Load the data
Flash Tasks have built-in DataModules that you can use to organize your data. Pass in train, validation and test data and Flash will take care of the rest.
Creating a TabularData relies on a [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html).
```
datamodule = TabularData.from_csv(
["Sex", "Age", "SibSp", "Parch", "Ticket", "Cabin", "Embarked"],
["Fare"],
target_fields="Survived",
train_file="./data/titanic/titanic.csv",
test_file="./data/titanic/test.csv",
val_split=0.25,
)
```
### 3. Build the model
Note: categorical columns will be mapped to an embedding space. The embedding space is a set of trainable tensors, one associated with each categorical column.
```
model = TabularClassifier.from_data(datamodule, metrics=[Accuracy(), Precision(), Recall()])
```
### 4. Create the trainer. Train for 10 epochs on the data
```
trainer = flash.Trainer(max_epochs=10)
```
### 5. Train the model
```
trainer.fit(model, datamodule=datamodule)
```
### 6. Test model
```
trainer.test()
```
### 7. Save it!
```
trainer.save_checkpoint("tabular_classification_model.pt")
```
# Predicting
### 8. Load the model from a checkpoint
`TabularClassifier.load_from_checkpoint` accepts either a URL or a local path to a checkpoint. If provided with a URL, the checkpoint will first be downloaded and loaded to re-create the model.
```
model = TabularClassifier.load_from_checkpoint(
"https://flash-weights.s3.amazonaws.com/tabular_classification_model.pt")
```
### 9. Generate predictions from a CSV file! Who would survive?
`TabularClassifier.predict` supports both a DataFrame and a path to a `.csv` file.
```
predictions = model.predict("data/titanic/titanic.csv")
print(predictions)
```
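Since `predict` also accepts a DataFrame, here is a minimal sketch of the same call driven by pandas (pandas is already required by the tabular task):
```
import pandas as pd

# Load the CSV ourselves and pass the DataFrame directly to predict.
df = pd.read_csv("data/titanic/titanic.csv")
predictions = model.predict(df)
print(predictions)
```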
<code style="color:#792ee5;">
<h1> <strong> Congratulations - Time to Join the Community! </strong> </h1>
</code>
Congratulations on completing this notebook tutorial! If you enjoyed it and would like to join the Lightning movement, you can do so in the following ways!
### Help us build Flash by adding support for new data-types and new tasks.
Flash aims to become the first task hub, so anyone can get started creating amazing applications using deep learning.
If you are interested, please open a PR with your contributions!
### Star [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) on GitHub
The easiest way to help our community is just by starring the GitHub repos! This helps raise awareness of the cool tools we're building.
* Please, star [Lightning](https://github.com/PyTorchLightning/pytorch-lightning)
### Join our [Slack](https://join.slack.com/t/pytorch-lightning/shared_invite/zt-pw5v393p-qRaDgEk24~EjiZNBpSQFgQ)!
The best way to keep up to date on the latest advancements is to join our community! Make sure to introduce yourself and share your interests in the `#general` channel.
### Interested in SOTA AI models? Check out [Bolts](https://github.com/PyTorchLightning/lightning-bolts)
Bolts has a collection of state-of-the-art models, all implemented in [Lightning](https://github.com/PyTorchLightning/pytorch-lightning), that can be easily integrated into your own projects.
* Please, star [Bolt](https://github.com/PyTorchLightning/lightning-bolts)
### Contributions!
The best way to contribute to our community is to become a code contributor! At any time you can go to [Lightning](https://github.com/PyTorchLightning/pytorch-lightning) or [Bolt](https://github.com/PyTorchLightning/lightning-bolts) GitHub Issues page and filter for "good first issue".
* [Lightning good first issue](https://github.com/PyTorchLightning/pytorch-lightning/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
* [Bolt good first issue](https://github.com/PyTorchLightning/lightning-bolts/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
* You can also contribute your own notebooks with useful examples!
### Great thanks from the entire PyTorch Lightning team for your interest!
<img src="https://raw.githubusercontent.com/PyTorchLightning/lightning-flash/18c591747e40a0ad862d4f82943d209b8cc25358/docs/source/_static/images/logo.svg" width="800" height="200" />
|
github_jupyter
|
# %%capture
! pip install git+https://github.com/PyTorchLightning/pytorch-flash.git
from torchmetrics.classification import Accuracy, Precision, Recall
import flash
from flash.data.utils import download_data
from flash.tabular import TabularClassifier, TabularData
download_data("https://pl-flash-data.s3.amazonaws.com/titanic.zip", 'data/')
datamodule = TabularData.from_csv(
["Sex", "Age", "SibSp", "Parch", "Ticket", "Cabin", "Embarked"],
["Fare"],
target_fields="Survived",
train_file="./data/titanic/titanic.csv",
test_file="./data/titanic/test.csv",
val_split=0.25,
)
model = TabularClassifier.from_data(datamodule, metrics=[Accuracy(), Precision(), Recall()])
trainer = flash.Trainer(max_epochs=10)
trainer.fit(model, datamodule=datamodule)
trainer.test()
trainer.save_checkpoint("tabular_classification_model.pt")
model = TabularClassifier.load_from_checkpoint(
"https://flash-weights.s3.amazonaws.com/tabular_classification_model.pt")
predictions = model.predict("data/titanic/titanic.csv")
print(predictions)
| 0.538983 | 0.979215 |
# Db2 Jupyter Notebook Extensions Tutorial
The SQL code tutorials for Db2 rely on a Jupyter notebook extension, commonly referred to as a "magic" command. All of the notebooks begin with the following command, which loads the extension and allows the remainder of the notebook to use the %sql magic command.
<pre>
%run db2.ipynb
</pre>
The cell below will load the Db2 extension. Note that it will take a few seconds for the extension to load, so you should generally wait until the "Db2 Extensions Loaded" message is displayed in your notebook.
```
%run db2.ipynb
%run connection.ipynb
```
## Options
There are four options that can be set with the **`%sql`** command, plus a `LIST` command that displays the current settings. The options are shown below with their default values in parentheses.
- **`MAXROWS n (10)`** - The maximum number of rows that will be displayed before summary information is shown. If the answer set is less than this number of rows, it will be completely shown on the screen. If the answer set is larger than this amount, only the first 5 rows and last 5 rows of the answer set will be displayed. If you want to display a very large answer set, you may want to consider using the grid option `-g` to display the results in a scrollable table. If you really want to show all results then setting MAXROWS to -1 will return all output.
- **`MAXGRID n (5)`** - The maximum size of a grid display. When displaying a result set in a grid `-g`, the default size of the display window is 5 rows. You can set this to a larger size so that more rows are shown on the screen. Note that the minimum size always remains at 5 which means that if the system is unable to display your maximum row size it will reduce the table display until it fits.
- **`DISPLAY PANDAS | GRID (PANDAS)`** - Display the results as a PANDAS dataframe (default) or as a scrollable GRID
- **`RUNTIME n (1)`** - When using the timer option on a SQL statement, the statement will execute for **`n`** number of seconds. The result that is returned is the number of times the SQL statement executed rather than the execution time of the statement. The default value for runtime is one second, so if the SQL is very complex you will need to increase the run time.
- **`LIST`** - Display the current settings
To set an option use the following syntax:
```
%sql option option_name value option_name value ....
```
The following example sets all options:
```
%sql option maxrows 100 runtime 2 display grid maxgrid 10
```
The values will **not** be saved between Jupyter notebooks sessions. If you need to retrieve the current options values, use the LIST command as the only argument:
```
%sql option list
```
## Connections to Db2
Before any SQL commands can be issued, a connection needs to be made to the Db2 database that you will be using. The connection can be done manually (through the use of the CONNECT command), or automatically when the first `%sql` command is issued.
The Db2 magic command tracks whether or not a connection has occurred in the past and saves this information between notebooks and sessions. When you start up a notebook and issue a command, the program will reconnect to the database using your credentials from the last session. In the event that you have not connected before, the system will prompt you for all the information it needs to connect. This information includes:
- Database name (SAMPLE)
- Hostname - localhost (enter an IP address if you need to connect to a remote server)
- PORT - 50000 (this is the default but it could be different)
- Userid - DB2INST1
- Password - No password is provided so you have to enter a value
- Maximum Rows - 10 lines of output are displayed when a result set is returned
Default values will be presented in the panels, which you can accept or replace with your own values. All of the information will be stored in the directory that the notebooks are stored in. Once you have entered the information, the system will attempt to connect to the database for you and then you can run all of the SQL scripts. More details on the CONNECT syntax will be found in a section below.
If you have credentials available from Db2 on Cloud or DSX, place the contents of the credentials into a variable and then use the `CONNECT CREDENTIALS <var>` syntax to connect to the database.
```Python
db2blu = { "uid" : "xyz123456", ...}
%sql CONNECT CREDENTIALS db2blu
```
If the connection is successful using the credentials, the variable will be saved to disk so that you can connect from within another notebook using the same syntax.
The next statement will force a CONNECT to occur with the default values. If you have not connected before, it will prompt you for the information.
```
%sql CONNECT
```
## Line versus Cell Command
The Db2 extension is made up of one magic command that works either at the LINE level (`%sql`) or at the CELL level (`%%sql`). If you only want to execute a SQL command on one line in your script, use the `%sql` form of the command. If you want to run a larger block of SQL, then use the `%%sql` form. Note that when you use the `%%sql` form of the command, the entire contents of the cell is considered part of the command, so you cannot mix other commands in the cell.
The following is an example of a line command:
```
%sql VALUES 'HELLO THERE'
```
If you have SQL that requires multiple lines, or if you need to execute many lines of SQL, then you should
be using the CELL version of the `%sql` command. To start a block of SQL, start the cell with `%%sql` and do not place any SQL following the command. Subsequent lines can contain SQL code, with each SQL statement delimited with the semicolon (`;`). You can change the delimiter if required for procedures, etc... More details on this later.
```
%%sql
VALUES
1,
2,
3
```
If you are using a single statement then there is no need to use a delimiter. However, if you are combining a number of commands then you must use the semicolon.
```
%%sql
DROP TABLE STUFF;
CREATE TABLE STUFF (A INT);
INSERT INTO STUFF VALUES
1,2,3;
SELECT * FROM STUFF;
```
The script will generate messages and output as it executes. Each SQL statement that generates results will have a table displayed with the result set. If a command is executed, the results of the execution get listed as well. The script you just ran probably generated an error on the DROP table command.
## Options
Both forms of the `%sql` command have options that can be used to change the behavior of the code. For both forms of the command (`%sql`, `%%sql`), the options must be on the same line as the command:
<pre>
%sql -t ...
%%sql -t
</pre>
The only difference is that the `%sql` command can have SQL following the parameters, while the `%%sql` requires the SQL to be placed on subsequent lines.
There are a number of parameters that you can specify as part of the `%sql` statement.
* `-d` - Use alternative statement delimiter `@`
* `-t,-time` - Time the statement execution
* `-q,-quiet` - Suppress messages
* `-j` - JSON formatting of the first column
* `-json` - Retrieve the result set as a JSON record
* `-a,-all` - Show all output
* `-pb,-bar` - Bar chart of results
* `-pp,-pie` - Pie chart of results
* `-pl,-line` - Line chart of results
* `-sampledata` Load the database with the sample EMPLOYEE and DEPARTMENT tables
* `-r,-array` - Return the results into a variable (list of rows)
* `-e,-echo` - Echo macro substitution
* `-h,-help` - Display help information
* `-grid` - Display results in a scrollable grid
Multiple parameters are allowed on a command line. Each option should be separated by a space:
<pre>
%sql -a -j ...
</pre>
A `SELECT` statement will return the results as a dataframe and display the results as a table in the notebook. If you use the assignment statement, the dataframe will be placed into the variable and the results will not be displayed:
<pre>
r = %sql SELECT * FROM EMPLOYEE
</pre>
The sections below will explain the options in more detail.
## Delimiters
The default delimiter for all SQL statements is the semicolon. However, this becomes a problem when you try to create a trigger, function, or procedure that uses SQLPL (or PL/SQL). Use the `-d` option to turn the SQL delimiter into the at (`@`) sign and `-q` to suppress error messages. The semi-colon is then ignored as a delimiter.
For example, the following SQL will use the `@` sign as the delimiter.
```
%%sql -d -q
DROP TABLE STUFF
@
CREATE TABLE STUFF (A INT)
@
INSERT INTO STUFF VALUES
1,2,3
@
SELECT * FROM STUFF
@
```
The delimiter change will only take place for the statements following the `%%sql` command. Subsequent cells
in the notebook will still use the semicolon. You must use the `-d` option in every cell that needs to use the
`@` sign as the delimiter.
## Limiting Result Sets
The default number of rows displayed for any result set is 10. You have the option of changing this value when initially connecting to the database. If you want to override the number of rows displayed, you can either update
the control variable or use the `-a` option. The `-a` option will display all of the rows in the answer set. For instance, the following SQL will only show 10 rows even though we inserted 15 values:
```
%sql values 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
```
You will notice that the displayed result will split the visible rows to the first 5 rows and the last 5 rows.
Using the `-a` option will display all of the values.
```
%sql -a values 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
```
If you want a scrollable list, use the `-grid` option.
```
%sql -grid values 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
```
To change the default value of rows displayed, you can use the `%sql option maxrows` command to set the value to something else. A value of -1 means unlimited output. Note that `MAXROWS` will display all of the data for answer sets that are less than `MAXROWS` in size. For instance, if you set `MAXROWS` to 20, then any answer set less than or equal to 20 will be shown on the screen. Anything larger than this amount will be summarized with the first `MAXROWS/2` rows displayed followed by the last `MAXROWS/2` rows.
The following example will set the maximum rows to 8. Since our answer set is greater than 8, only the first 4 (8/2) rows will be shown, followed by the last 4.
```
%sql option maxrows 8
%sql values 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
```
For a grid display `-grid -g`, the `MAXGRID` setting will try to display the scrollable table with *at least* `MAXGRID` rows. The minimum display size of a table is 5 rows so if the table can't fit on the screen it will try to force at least 5 rows to be displayed. The size of the table display does not impact your ability to use the scrollbars to see the entire answer set.
```
%sql option maxrows 10
%sql values 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
```
A special note regarding the output from a `SELECT` statement. If the SQL statement is the last line of a block, the results will be displayed by default (unless you assigned the results to a variable). If the SQL is in the middle of a block of statements, the results will not be displayed.
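As a concrete illustration of this rule, the following cell is a minimal sketch (using the sample `EMPLOYEE` table from this tutorial); based on the behaviour described above, only the result set of the final `SELECT` would be displayed:
```
%%sql
SELECT COUNT(*) FROM EMPLOYEE;
SELECT * FROM EMPLOYEE FETCH FIRST 3 ROWS ONLY;
```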
## Quiet Mode
Every SQL statement will result in some output. You will either get an answer set (`SELECT`), or an indication of whether
the command worked. For instance, the following set of SQL will generate some error messages since the tables
will probably not exist:
```
%%sql
DROP TABLE TABLE_NOT_FOUND;
DROP TABLE TABLE_SPELLED_WRONG;
```
If you know that these errors may occur you can silence them with the -q option.
```
%%sql -q
DROP TABLE TABLE_NOT_FOUND;
DROP TABLE TABLE_SPELLED_WRONG;
```
SQL output will not be suppressed, so the following command will still show the results.
```
%%sql -q
DROP TABLE TABLE_NOT_FOUND;
DROP TABLE TABLE_SPELLED_WRONG;
VALUES 1,2,3;
```
## Variables in %sql Blocks
Python variables can be passed to a `%sql` line command, and to a `%%sql` block. For both forms of the `%sql` command you can pass variables by placing a colon in front of the variable name.
```python
%sql SELECT * FROM EMPLOYEE WHERE EMPNO = :empno
```
The following example illustrates the use of a variable in the SQL.
```
empno = '000010'
%sql SELECT * FROM EMPLOYEE WHERE EMPNO = :empno
```
You can doublecheck that the substitution took place by using the `-e` option which echos the SQL command after substitution.
```
%sql -echo SELECT * FROM EMPLOYEE WHERE EMPNO = :empno
```
Note that the variable `:empno` did not have quotes around it, although it is a string value. The `%sql` call will examine the contents of the variable and add quotes around strings so you do not have to supply them in the SQL command.
Variables can also be array types. Arrays are expanded into multiple values, each separated by commas. This is useful when building SQL `IN` lists. The following example searches for 3 employees based on their employee number.
```
empnos = ['000010','000020','000030']
%sql SELECT * FROM EMPLOYEE WHERE EMPNO IN (:empnos)
```
You can reference individual array items using this technique as well. If you wanted to search for only the first value in the `empnos` array, use `:empnos[0]` instead.
```
%sql SELECT * FROM EMPLOYEE WHERE EMPNO IN (:empnos[0])
```
One final type of variable substitution that is allowed is for dictionaries. Python dictionaries resemble JSON objects and can be used to insert JSON values into Db2. For instance, the following variable contains company information in a JSON structure.
```
customer = {
"name" : "Aced Hardware Stores",
"city" : "Rockwood",
"employees" : 14
}
```
Db2 has builtin functions for dealing with JSON objects. There is another Jupyter notebook which goes through this in detail. Rather than using those functions, the following code will create a Db2 table with a string column that will contain the contents of this JSON record.
```
%%sql
DROP TABLE SHOWJSON;
CREATE TABLE SHOWJSON (INJSON VARCHAR(256));
```
To insert the Dictionary (JSON Record) into this Db2 table, you only need to use the variable name as one of the fields being inserted.
```
%sql INSERT INTO SHOWJSON VALUES :customer
```
Selecting from this table will show that the data has been inserted as a string.
```
%sql select * from showjson
```
If you want to retrieve the data from a column that contains JSON records, you must use the `-j` flag to insert the contents back into a variable.
```
v = %sql -j SELECT * FROM SHOWJSON
```
The variable `v` now contains the original JSON record for you to use.
```
v
```
## SQL Character Strings
Character strings require special handling when dealing with Db2. The single quote character `'` is reserved for delimiting string constants, while the double quote `"` is used for naming columns that require special characters. You cannot use the double quote character to delimit strings that happen to contain the single quote character. What Db2 requires is that you place two quotes in a row to have them interpreted as a single quote character. For instance, the next statement will select one employee from the table who has a quote in their last name: `O'CONNELL`.
```
%sql SELECT * FROM EMPLOYEE WHERE LASTNAME = 'O''CONNELL'
```
Python handles quotes differently! You can assign a string to a Python variable using single or double quotes. The following assignment statements are not identical!
```
lastname = "O'CONNELL"
print(lastname)
lastname = 'O''CONNELL'
print(lastname)
```
If you use the same syntax as Db2, Python will remove the quote in the string! It interprets this as two strings (O and CONNELL) being concatenated together. That probably isn't what you want! So the safest approach is to use double quotes around your string when you assign it to a variable. Then you can use the variable in the SQL statement as shown in the following example.
```
lastname = "O'CONNELL"
%sql -e SELECT * FROM EMPLOYEE WHERE LASTNAME = :lastname
```
Notice how the string constant was updated to contain two quotes when inserted into the SQL statement. This is done automatically by the `%sql` magic command, so there is no need to use the two single quotes when assigning a string to a variable. However, you must use the two single quotes when using constants in a SQL statement.
## Builtin Variables
There are 5 predefined variables defined in the program:
- database - The name of the database you are connected to
- uid - The userid that you connected with
- hostname - The IP address of the host system
- port - The port number of the host system
- max - The maximum number of rows to return in an answer set
These variables are all part of a structure called _settings. To retrieve a value, use the syntax:
```python
db = _settings['database']
```
There are also 3 variables that contain information from the last SQL statement that was executed.
- sqlcode - SQLCODE from the last statement executed
- sqlstate - SQLSTATE from the last statement executed
- sqlerror - Full error message returned on last statement executed
You can access these variables directly in your code. The following code segment illustrates the use of the SQLCODE variable.
```
empnos = ['000010','999999']
for empno in empnos:
ans1 = %sql -r SELECT SALARY FROM EMPLOYEE WHERE EMPNO = :empno
if (sqlcode != 0):
print("Employee "+ empno + " left the company!")
else:
print("Employee "+ empno + " salary is " + str(ans1[1][0]))
```
## Timing SQL Statements
Sometimes you want to see how the execution of a statement changes with the addition of indexes or other
optimization changes. The `-t` option will run the statement on the LINE or one SQL statement in the CELL for
exactly one second. The results will be displayed and optionally placed into a variable. The syntax of the
command is:
<pre>
sql_time = %sql -t SELECT * FROM EMPLOYEE
</pre>
For instance, the following SQL will time the VALUES clause.
```
%sql -t VALUES 1,2,3,4,5,6,7,8,9
```
When timing a statement, no output will be displayed. If your SQL statement takes longer than one second you
will need to modify the runtime options. You can use the `%sql option runtime` command to change the duration the statement runs.
```
%sql option runtime 5
%sql -t VALUES 1,2,3,4,5,6,7,8,9
%sql option runtime 1
```
## JSON Formatting
Db2 supports querying JSON that is stored in a column within a table. Standard output would just display the
JSON as a string. For instance, the following statement would just return a large string of output.
```
%%sql
VALUES
'{
"empno":"000010",
"firstnme":"CHRISTINE",
"midinit":"I",
"lastname":"HAAS",
"workdept":"A00",
"phoneno":[3978],
"hiredate":"01/01/1995",
"job":"PRES",
"edlevel":18,
"sex":"F",
"birthdate":"08/24/1963",
"pay" : {
"salary":152750.00,
"bonus":1000.00,
"comm":4220.00}
}'
```
Adding the -j option to the `%sql` (or `%%sql`) command will format the first column of a result set to better
display the structure of the document. Note that if your answer set has additional columns associated with it, they will not be displayed in this format.
```
%%sql -j
VALUES
'{
"empno":"000010",
"firstnme":"CHRISTINE",
"midinit":"I",
"lastname":"HAAS",
"workdept":"A00",
"phoneno":[3978],
"hiredate":"01/01/1995",
"job":"PRES",
"edlevel":18,
"sex":"F",
"birthdate":"08/24/1963",
"pay" : {
"salary":152750.00,
"bonus":1000.00,
"comm":4220.00}
}'
```
JSON fields can be inserted into Db2 columns using Python dictionaries. This makes the input and output of JSON fields much simpler. For instance, the following code will create a Python dictionary which is similar to a JSON record.
```
employee = {
"firstname" : "John",
"lastname" : "Williams",
"age" : 45
}
```
The field can be inserted into a character column (or BSON if you use the JSON functions) by doing a direct variable insert.
```
%%sql -q
DROP TABLE SHOWJSON;
CREATE TABLE SHOWJSON(JSONIN VARCHAR(128));
```
An insert would use a variable parameter (colon in front of the variable) instead of a character string.
```
%sql INSERT INTO SHOWJSON VALUES (:employee)
%sql SELECT * FROM SHOWJSON
```
An assignment statement to a variable will result in an equivalent Python dictionary type being created. Note that we must use the raw `-j` flag to make sure we only get the data and not a data frame.
```
x = %sql -j SELECT * FROM SHOWJSON
print("First Name is " + x[0]["firstname"] + " and the last name is " + x[0]['lastname'])
```
## Plotting
Sometimes it would be useful to display a result set as either a bar, pie, or line chart. The first one or two
columns of the result set need to contain the values needed to plot the information.
The three possible plot options are:
* `-pb` - bar chart (x,y)
* `-pp` - pie chart (y)
* `-pl` - line chart (x,y)
The following data will be used to demonstrate the different charting options.
```
%sql values 1,2,3,4,5
```
Since the results only have one column, the pie, line, and bar charts will not have any labels associated with
them. The first example is a bar chart.
```
%sql -pb values 1,2,3,4,5
```
The same data as a pie chart.
```
%sql -pp values 1,2,3,4,5
```
And finally a line chart.
```
%sql -pl values 1,2,3,4,5
```
If you retrieve two columns of information, the first column is used for the labels (X axis or pie slices) and
the second column contains the data.
```
%sql -pb values ('A',1),('B',2),('C',3),('D',4),('E',5)
```
For a pie chart, the first column is used to label the slices, while the data comes from the second column.
```
%sql -pp values ('A',1),('B',2),('C',3),('D',4),('E',5)
```
Finally, for a line chart, the x contains the labels and the y values are used.
```
%sql -pl values ('A',1),('B',2),('C',3),('D',4),('E',5)
```
The following SQL will plot the number of employees per department.
```
%%sql -pb
SELECT WORKDEPT, COUNT(*)
FROM EMPLOYEE
GROUP BY WORKDEPT
```
## Sample Data
Many of the Db2 notebooks depend on two of the tables that are found in the `SAMPLE` database. Rather than
having to create the entire `SAMPLE` database, this option will create and populate the `EMPLOYEE` and
`DEPARTMENT` tables in your database. Note that if you already have these tables defined, they will not be dropped.
```
%sql -sampledata
```
## Result Sets
By default, any `%sql` block will return the contents of a result set as a table that is displayed in the notebook. The results are displayed using a feature of pandas dataframes. The following select statement demonstrates a simple result set.
```
%sql select * from employee fetch first 3 rows only
```
You can assign the result set directly to a variable.
```
x = %sql select * from employee fetch first 3 rows only
```
The variable x contains the dataframe that was produced by the `%sql` statement, so you can access the result set by using this variable or display the contents by simply referring to it on a command line.
```
x
```
There is an additional way of capturing the data through the use of the `-r` flag.
<pre>
var = %sql -r select * from employee
</pre>
Rather than returning a dataframe result set, this option will produce a list of rows. Each row is a list itself. The column names are found in row zero (0) and the data rows start at 1. For example, to access the first column of the first row, you would use `var[1][0]`.
```
rows = %sql -r select * from employee fetch first 3 rows only
print(rows[1][0])
```
The number of rows in the result set can be determined by using the length function and subtracting one for the header row.
```
print(len(rows)-1)
```
If you want to iterate over all of the rows and columns, you could use the following Python syntax instead of
creating a for loop that goes from 0 to 41.
```
for row in rows:
line = ""
for col in row:
line = line + str(col) + ","
print(line)
```
If you don't want the header row, modify the first line to start at the first row instead of row zero.
```
for row in rows[1:]:
line = ""
for col in row:
line = line + str(col) + ","
print(line)
```
Since the data may be returned in different formats (like integers), you should use the str() function to convert the values to strings. Otherwise, the concatenation function used in the above example might fail. For instance, the 9th field is an education level. If you retrieve it as an individual value and try and concatenate a string to it, you get the following error.
```
try:
print("Education level="+rows[1][8])
except Exception as err:
print("Oops... Something went wrong!")
print(err)
```
You can fix this problem by adding the str function to convert the value.
```
print("Education Level="+str(rows[1][8]))
```
## Development SQL
The previous set of `%sql` and `%%sql` commands deals with SQL statements and commands that are run in an interactive manner. There is a class of SQL commands that are more suited to a development environment where code is iterated or requires changing input. The commands that are associated with this form of SQL are:
- AUTOCOMMIT
- COMMIT/ROLLBACK
- PREPARE
- EXECUTE
In addition, the `sqlcode`, `sqlstate` and `sqlerror` fields are populated after every statement so you can use these variables to test for errors.
Autocommit is the default manner in which SQL statements are executed. At the end of the successful completion of a statement, the results are committed to the database. There is no concept of a transaction where multiple DML/DDL statements are considered one transaction. The `AUTOCOMMIT` command allows you to turn autocommit `OFF` or `ON`. This means that the SQL commands run after the `AUTOCOMMIT OFF` command are not committed to the database until a `COMMIT` or `ROLLBACK` command is issued.
`COMMIT (WORK)` will finalize all of the transactions (`COMMIT`) to the database and `ROLLBACK` will undo all of the changes. If you issue a `SELECT` statement during the execution of your block, the results will reflect all of your changes. If you `ROLLBACK` the transaction, the changes will be lost.
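The following cell is a minimal sketch (not part of the original tutorial) of how these commands fit together, assuming the sample `EMPLOYEE` table and the `AUTOCOMMIT`/`ROLLBACK` syntax described above. Because the transaction is rolled back, the salary update is discarded and a later `SELECT` would show the original values.
```
%sql AUTOCOMMIT OFF
%sql UPDATE EMPLOYEE SET SALARY = SALARY * 1.1 WHERE WORKDEPT = 'A00'
%sql SELECT EMPNO, SALARY FROM EMPLOYEE WHERE WORKDEPT = 'A00'
%sql ROLLBACK
%sql AUTOCOMMIT ON
```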
`PREPARE` is typically used in a situation where you want to repeatedly execute a SQL statement with different variables without incurring the SQL compilation overhead. For instance:
```
x = %sql PREPARE SELECT LASTNAME FROM EMPLOYEE WHERE EMPNO=?
for y in ['000010','000020','000030']:
%sql execute :x using :y
```
`EXECUTE` is used to execute a previously compiled statement.
## Db2 CONNECT Statement
As mentioned at the beginning of this notebook, connecting to Db2 is automatically done when you issue your first
`%sql` statement. Usually the program will prompt you with what options you want when connecting to a database. The other option is to use the `CONNECT` statement directly. The `CONNECT` statement is similar to the native Db2
`CONNECT` command, but includes some options that allow you to connect to databases that have not been
catalogued locally.
The `CONNECT` command has the following format:
<pre>
%sql CONNECT TO database USER userid USING password | HOST ip address PORT port number SSL
</pre>
If you use a "?" for the password field, the system will prompt you for a password. This avoids typing the
password as clear text on the screen. If a connection is not successful, the system will print the error
message associated with the connect request.
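For example, a connection using the default values listed earlier in this notebook might look like the following (a sketch only; substitute your own database name, userid, host, and port):
```
%sql CONNECT TO SAMPLE USER DB2INST1 USING ? HOST localhost PORT 50000
```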
If the connection is successful, the parameters are saved on your system and will be used the next time you
run a SQL statement, or when you issue the `%sql CONNECT` command with no parameters.
If you want to force the program to connect to a different database (with prompting), use the `CONNECT RESET` command. This forces the program to reconnect the next time a SQL statement is executed, and it will prompt you for the connection information at that point.
#### Copyright (C) IBM 2021, George Baklarz [baklarz@ca.ibm.com]
# Create data
```
import pandas as pd
import numpy as np
from scipy.stats import f_oneway
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.graphics.gofplots import qqplot
import warnings
from IPython.display import display, Math, Latex, Markdown
warnings.filterwarnings("ignore")
cotton_weight_percent = [
15,
15,
15,
15,
15,
20,
20,
20,
20,
20,
25,
25,
25,
25,
25,
30,
30,
30,
30,
30,
35,
35,
35,
35,
35,
]
observations = [
7,
7,
15,
11,
9,
12,
16,
12,
18,
18,
14,
19,
19,
18,
18,
19,
25,
22,
19,
23,
7,
10,
11,
15,
11,
]
df = pd.DataFrame(
{"observations": observations, "cotton_weight_percent": cotton_weight_percent}
)
df
```
# One-way ANOVA
```
model = ols("observations ~ C(cotton_weight_percent)", df).fit()
model.summary()
res = anova_lm(model, typ=1)
def model_evaluation(
model,
independent_name: str = "cotton",
dependent_name: str = "tensile strength",
alpha=0.5,
):
p_value = model.f_pvalue
display(
Markdown(
f"""
**Null hypothesis**: All means are equal.<br>
**Alternative hypothesis**: Not all means are equal.<br>
**Significance level**: α = {alpha}
The F-statistic of the model is {round(model.fvalue, 6)}. The p-value of the model is {round(p_value, 6)}."""
)
)
if p_value > alpha:
display(
Markdown(
f"""Since the p-value is greater than the significance level of {alpha}, the differences between the means are not statistically significant."""
)
)
else:
display(
Markdown(
f"""Since the p-value is less than the significance level of {alpha}, there is enough evidence to claim that the differences between some of the means are statistically significant."""
)
)
model_evaluation(model)
```
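As a cross-check (not part of the original analysis), the same F-statistic and p-value can be computed with `scipy.stats.f_oneway`, which is already imported above:
```
# Split the observations into one array per cotton weight percent and run scipy's one-way ANOVA
groups = [g["observations"].values for _, g in df.groupby("cotton_weight_percent")]
f_stat, p_val = f_oneway(*groups)
print(f"F = {f_stat:.4f}, p = {p_val:.6f}")
```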
# Compare Each Pair of Means Using Tukey's HSD
```
comparison = MultiComparison(df["observations"], df["cotton_weight_percent"])
comparison_results = comparison.tukeyhsd()
comparison_results.summary()
fig_15 = comparison_results.plot_simultaneous(comparison_name=15)
fig_20 = comparison_results.plot_simultaneous(comparison_name=20)
fig_25 = comparison_results.plot_simultaneous(comparison_name=25)
fig_30 = comparison_results.plot_simultaneous(comparison_name=30)
fig_35 = comparison_results.plot_simultaneous(comparison_name=35)
```
# Check model assumptions
```
residuals = model.resid
plot = qqplot(residuals, line="s")
```
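In addition to the Q-Q plot, a formal normality test of the residuals can be added. The sketch below uses `scipy.stats.shapiro`, which is not imported in the original notebook:
```
from scipy.stats import shapiro  # extra import, not part of the original notebook
# Shapiro-Wilk test of the ANOVA residuals; a large p-value is consistent with normality
stat, p = shapiro(residuals)
print(f"Shapiro-Wilk W = {stat:.4f}, p-value = {p:.4f}")
```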
# Interpolation with polynomials
This notebook demonstrates the Lagrange polynomial and Newtons divided-difference methods for fitting an $n^\text{th}$-order polynomial to a data set with $n+1$ elements
## Lagrange polynomials
```
import numpy as np
import matplotlib.pyplot as plt
# The below commands make the font and image size bigger
plt.rcParams.update({'font.size': 22})
plt.rcParams["figure.figsize"] = (15,10)
```
Below we enter the data we want to interpolate. The data comes in $(x,y)$ pairs, and does not need to be in any order.
```
data = np.array([[0,0], [1,10], [3,3], [-1,4], [4,10], [5,10]])
print(data.shape)
```
The formula for the Lagrange polynomial is: $$f_n(x) = \sum_{i=1}^{n+1}f(x_i)L_i(x)$$ where $$L_i(x) = \prod_{j=1,\, j\ne i}^{n+1} \frac{x-x_j}{x_i-x_j}$$ Below is a function that implements this.
```
# For the arguments:
# x is the data point (or array of points) to evaluate the interpolating polynomial at.
# data is the data to be interpolated.
def LagrangePoly(x, data):
n = data.shape[0] - 1
i = 1
fn = 0
while i <= n + 1:
j = 1
Li = 1
while j <= n+1:
if(j == i):
j += 1
continue
Li *= (x - data[j-1,0])/(data[i-1,0] - data[j-1,0])
j += 1
fn += data[i-1,1]*Li
i += 1
return fn
```
The function above works for a single value of x and also, by the wonders of NumPy, for an array of values. Let's prepare some x-values over which we want to plot the interpolating polynomial. We then provide this numpy array as an argument to the LagrangePoly( ) function.
```
xmin = np.min(data[:,0])
xmax = np.max(data[:,0])
x = np.linspace(xmin, xmax, 100)
y = LagrangePoly(x, data)
plt.grid(True)
plt.xlabel('x')
plt.ylabel('y')
plt.scatter(data[:,0],data[:,1], color='red', linewidths=10);
plt.plot(x,y);
```
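As a quick sanity check (not in the original notebook), a degree-$n$ polynomial fitted through all $n+1$ points must coincide with the interpolating polynomial, so `np.polyfit`/`np.polyval` should agree with `LagrangePoly` up to rounding error:
```
# Fit a degree-n polynomial through all n+1 points and compare it with LagrangePoly
coeffs = np.polyfit(data[:, 0], data[:, 1], deg=data.shape[0] - 1)
max_diff = np.max(np.abs(np.polyval(coeffs, x) - LagrangePoly(x, data)))
print(max_diff)
```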
## Newtons divided-difference polynomials
Define the same data set we saw in the lectures
```
testdata = np.array([[1,1],[2,8],[3,27],[4,64],[6,216],[7,343]])
```
Define the Finite Difference (FD) function: $$ FD_{j,i} = \frac{f(x_{i+1}) - f(x_i)}{x_{i+j} - x_i}$$
```
def FD(x, fx, j):
n = fx.size
FD = np.zeros(n-1)
i = 0
while i < n-1:
FD[i] = (fx[i+1] - fx[i])/(x[i+j] - x[i])
i += 1
return FD
```
Repeatedly apply the Finite Difference function to get the table we saw in the lectures
```
fn = testdata[:,1]
n = 1
while n < testdata.shape[0]:
print(fn)
fn = FD(testdata[:,0], fn, n)
n += 1
# This interpolates from the first data point onwards
def NewtonsDividedDifference(x, data, nmax):
fn = data[0,1]
n = 0
xi = data[:,0]
FDi = FD(xi, data[:,1],1)
while n < nmax :
coeff = 1
i = 0
while i <= n:
coeff *= (x - data[i,0])
i += 1
fn += coeff*FDi[0]
FDi = FD(xi, FDi, n+2)
n += 1
return fn
```
Compute and plot the linear, quadratic and cubic approximations
```
x = np.linspace(0, 7, 100)
y1 = NewtonsDividedDifference(x, testdata, 1)
y2 = NewtonsDividedDifference(x, testdata, 2)
y3 = NewtonsDividedDifference(x, testdata, 3)
plt.grid(True)
plt.scatter(testdata[:,0], testdata[:,1], color='red', linewidths=10);
plt.plot(x,y1);
plt.plot(x,y2);
plt.plot(x,y3);
```
Use the Newton divided-difference method to fit the same data as we used with the Lagrange polynomials
```
x = np.linspace(-1, 5, 100)
y = NewtonsDividedDifference(x, data, 5)
plt.grid(True)
plt.xlabel('x')
plt.ylabel('y')
plt.scatter(data[:,0],data[:,1], color='red', linewidths=10);
plt.plot(x,y);
```
## Runge's phenomenon
Polynomial interpolation does not always converge as you increase the interpolation order. A classic example which exhibits oscillations at the edges is the Runge Function.
```
def RungeFunction(x):
return 1/(1 + 25*x**2)
```
As you increase $n$ in the code below, the polynomial converges to the Runge Function near $x=0$ but oscillates wildly near the edges at $x=\{-1,1\}$
```
x = np.linspace(-1,1,200)
y = RungeFunction(x)
n = 13
xn = np.linspace(-1,1,n)
yn = RungeFunction(xn)
pn = LagrangePoly(x, np.column_stack((xn, yn)) )
plt.grid(True)
plt.plot(x,y)
plt.ylim([-0.2,1.2])
plt.plot(x,pn);
plt.legend(['Runge Function', 'Interpolating polynomial']);
```
# Advanced databases
## Like and Similar to, group by, aggregation functions, set operations
### dr inż. Waldemar Bauer
## Like and ILike
1. It allows you to find simple patterns in strings.
2. LIKE is case-sensitive, while ILIKE is case-insensitive.
3. You can use it in the following forms:
- ~~ is equivalent to LIKE
- ~~* is equivalent to ILIKE
- !~~ is equivalent to NOT LIKE
- !~~* is equivalent to NOT ILIKE
## Like pattern
- Percent ( %) for matching any sequence of characters.
- Underscore ( _) for matching any single character.
```sql
SELECT
'xyz' LIKE 'xyz', -- true
'xyz' LIKE 'x%', -- true
'xyz' LIKE '_y_', -- true
'xyz' LIKE 'x_', -- false
'XYZ' LIKE 'xyz', -- false
'XYZ' ILIKE 'xyz', -- true
'xyz' ILIKE 'XYZ' -- true
```
## Example Like
```sql
Select first_name, last_name from actor where first_name ~~ 'B%' and last_name ~~* '%S'
```
| first_name | last_name |
|:----------: |:---------: |
| "Burt" | "Dukakis" |
| "Ben" | "Willis" |
| "Ben" | "Harris" |
## Similar to
1. The SIMILAR TO operator succeeds only if its pattern matches the entire string; this is unlike common regular expression behavior, where the pattern can match any part of the string.
2. It uses _ and % as wildcard characters denoting any single character and any string, respectively.
## Similar to pattern
- | denotes alternation (either of two alternatives).
- \* denotes repetition of the previous item zero or more times.
- \+ denotes repetition of the previous item one or more times.
- ? denotes repetition of the previous item zero or one time.
- {m} denotes repetition of the previous item exactly m times.
- {m,} denotes repetition of the previous item m or more times.
- {m,n} denotes repetition of the previous item at least m and not more than n times.
- Parentheses () can be used to group items into a single logical item.
- A bracket expression [...] specifies a character class, just as in POSIX regular expressions.
## Similar to example of use
```sql
Select
'xyz' SIMILAR TO 'xyz', --true
'xyz' SIMILAR TO 'x', --false
'xyz' SIMILAR TO '%(y|a)%', --true
'xyz' SIMILAR TO '(y|z)%' --false
```
## Similar to example
```sql
Select first_name, last_name from actor where first_name similar to 'B%'
and
last_name similar to '%s'
```
| first_name | last_name |
|:----------: |:---------: |
| "Burt" | "Dukakis" |
| "Ben" | "Willis" |
| "Ben" | "Harris" |
## Explain Like and Similar to
Like
```sql
"Seq Scan on actor (cost=0.00..5.00 rows=1 width=13) (actual time=0.023..0.036 rows=3 loops=1)"
" Filter: (((first_name)::text ~~ 'B%'::text) AND ((last_name)::text ~~ '%s'::text))"
" Rows Removed by Filter: 197"
"Planning Time: 0.121 ms"
"Execution Time: 0.044 ms"
```
Similar to:
```sql
"Seq Scan on actor (cost=0.00..5.00 rows=1 width=13) (actual time=0.304..0.859 rows=3 loops=1)"
" Filter: (((first_name)::text ~ '^(?:B.*)$'::text) AND ((last_name)::text ~ '^(?:.*s)$'::text))"
" Rows Removed by Filter: 197"
"Planning Time: 1.621 ms"
"Execution Time: 0.994 ms"
```
Note how the planner rewrites `SIMILAR TO` into a POSIX regular expression (`~ '^(?:B.*)$'`), which in this case makes it noticeably slower than the equivalent `LIKE` filter.
## Group by
- Divides the rows returned from the SELECT statement into groups.
- For each group, you can apply an aggregate function
```sql
SELECT
column_1,
column_2,
aggregate_function(column_3)
FROM
table_name
GROUP BY
column_1,
column_2;
```
## Group by example
```sql
SELECT
actor_id
FROM
film_actor
GROUP BY
actor_id;
```
| actor_id |
|:----: |
|150|
|140|
|139|
|193|
|12|
|164|
|137|
|...|
## Group by example
```sql
SELECT
first_name, last_name, count(title)
FROM
actor a join film_actor fa on a.actor_id = fa.actor_id join film f
on f.film_id = fa.film_id
GROUP BY
first_name, last_name
order by count;
```
| first_name | last_name | count |
|:----------: |:-----------: |:-----: |
| "Emily" | "Dee" | 14 |
| "Julia" | "Fawcett" | 15 |
| "Judy" | "Dean" | 15 |
| "Julia" | "Zellweger" | 16 |
| "Adam" | "Grant" | 18 |
| "Sissy" | "Sobieski" | 18 |
| "Penelope" | "Guiness" | 19 |
| ... | ... | .. |
## Having
- Used in conjunction with the GROUP BY clause.
- Filters out groups that do not satisfy a specified condition.
```sql
SELECT
column_1,
aggregate_function (column_2)
FROM
tbl_name
GROUP BY
column_1
HAVING
condition;
```
## HAVING example
```sql
SELECT
actor_id
FROM
film_actor
GROUP BY
actor_id
HAVING
actor_id< 10 and actor_id > 5
```
| actor_id |
|:----: |
|6|
|9|
|7|
|8|
## HAVING example
```sql
SELECT
first_name, last_name, count(title)
FROM
actor a join film_actor fa on a.actor_id = fa.actor_id join film f on
f.film_id = fa.film_id
GROUP BY
first_name, last_name
HAVING
count(title) > 40
order by count;
```
| first_name | last_name | count |
|:----------: |:-----------: |:-----: |
| "Walter" | "Torn" | 41 |
| "Gina" | "Degeneres" | 42 |
| "Susan" | "Davis" | 54 |
## Equivalent example
```sql
SELECT
first_name, last_name, count(title)
FROM
actor a join film_actor fa on a.actor_id = fa.actor_id join film f on f.film_id = fa.film_id
GROUP BY
first_name, last_name
HAVING
count(title) > 40
order by count;
```
is equivalent to
```sql
SELECT tab.first_name, tab.last_name, tab.count from (SELECT
first_name, last_name, count(title)
FROM
actor a join film_actor fa on a.actor_id = fa.actor_id join film f on f.film_id = fa.film_id
GROUP BY
first_name, last_name) as tab
WHERE tab.count > 40
```
## Explain have
```sql
"Sort (cost=254.04..254.15 rows=43 width=21) (actual time=3.240..3.240 rows=3 loops=1)"
" Sort Key: (count(f.title))"
" Sort Method: quicksort Memory: 25kB"
" -> HashAggregate (cost=251.27..252.87 rows=43 width=21) (actual time=3.223..3.231 rows=3 loops=1)"
" Group Key: a.first_name, a.last_name"
" Filter: (count(f.title) > 40)"
" Rows Removed by Filter: 196"
" -> Hash Join (cost=83.00..196.65 rows=5462 width=28) (actual time=0.291..2.182 rows=5462 loops=1)"
" Hash Cond: (fa.film_id = f.film_id)"
" -> Hash Join (cost=6.50..105.76 rows=5462 width=15) (actual time=0.060..1.208 rows=5462 loops=1)"
" Hash Cond: (fa.actor_id = a.actor_id)"
" -> Seq Scan on film_actor fa (cost=0.00..84.62 rows=5462 width=4) (actual time=0.008..0.312 rows=5462 loops=1)"
" -> Hash (cost=4.00..4.00 rows=200 width=17) (actual time=0.046..0.047 rows=200 loops=1)"
" Buckets: 1024 Batches: 1 Memory Usage: 18kB"
" -> Seq Scan on actor a (cost=0.00..4.00 rows=200 width=17) (actual time=0.007..0.021 rows=200 loops=1)"
" -> Hash (cost=64.00..64.00 rows=1000 width=19) (actual time=0.226..0.226 rows=1000 loops=1)"
" Buckets: 1024 Batches: 1 Memory Usage: 60kB"
" -> Seq Scan on film f (cost=0.00..64.00 rows=1000 width=19) (actual time=0.004..0.122 rows=1000 loops=1)"
"Planning Time: 0.311 ms"
"Execution Time: 6.744 ms"
```
## Explain select with subquery
```sql
"HashAggregate (cost=251.27..252.87 rows=43 width=21) (actual time=11.709..11.737 rows=3 loops=1)"
" Group Key: a.first_name, a.last_name"
" Filter: (count(f.title) > 40)"
" Rows Removed by Filter: 196"
" -> Hash Join (cost=83.00..196.65 rows=5462 width=28) (actual time=1.168..7.836 rows=5462 loops=1)"
" Hash Cond: (fa.film_id = f.film_id)"
" -> Hash Join (cost=6.50..105.76 rows=5462 width=15) (actual time=0.198..4.220 rows=5462 loops=1)"
" Hash Cond: (fa.actor_id = a.actor_id)"
" -> Seq Scan on film_actor fa (cost=0.00..84.62 rows=5462 width=4) (actual time=0.026..1.040 rows=5462 loops=1)"
" -> Hash (cost=4.00..4.00 rows=200 width=17) (actual time=0.155..0.156 rows=200 loops=1)"
" Buckets: 1024 Batches: 1 Memory Usage: 18kB"
" -> Seq Scan on actor a (cost=0.00..4.00 rows=200 width=17) (actual time=0.015..0.063 rows=200 loops=1)"
" -> Hash (cost=64.00..64.00 rows=1000 width=19) (actual time=0.952..0.952 rows=1000 loops=1)"
" Buckets: 1024 Batches: 1 Memory Usage: 60kB"
" -> Seq Scan on film f (cost=0.00..64.00 rows=1000 width=19) (actual time=0.012..0.492 rows=1000 loops=1)"
"Planning Time: 0.989 ms"
"Execution Time: 11.897 ms"
```
## Aggregate Functions
| Aggregate function | Description |
|:------------------: |:------------------------------------------------------------------------------------------------------------------------------------------------------- |
| AVG | The AVG() aggregate function calculates the average of non-NULL values in a set. |
| CHECKSUM_AGG | The CHECKSUM_AGG() function calculates a checksum value based on a group of rows. |
| COUNT | The COUNT() aggregate function returns the number of rows in a group, including rows with NULL values. |
| COUNT_BIG | The COUNT_BIG() aggregate function returns the number of rows (with BIGINT data type) in a group, including rows with NULL values. |
| MAX | The MAX() aggregate function returns the highest value (maximum) in a set of non-NULL values. |
| MIN | The MIN() aggregate function returns the lowest value (minimum) in a set of non-NULL values. |
| STDEV | The STDEV() function returns the statistical standard deviation of all values provided in the expression based on a sample of the data population. |
| STDEVP | The STDEVP() function also returns the standard deviation for all values in the provided expression, but does so based on the entire data population. |
| SUM | The SUM() aggregate function returns the summation of all non-NULL values a set. |
| VAR | The VAR() function returns the statistical variance of values in an expression based on a sample of the specified population. |
| VARP | The VARP() function returns the statistical variance of values in an expression but does so based on the entire data population. |
## Example of use: AVG, MIN, MAX and SUM
```sql
select first_name, last_name, round(avg(length),2), sum(length), min(length), max(length)
from actor a
inner join film_actor fa on a.actor_id = fa.actor_id
inner join film f on f.film_id = fa.actor_id
group by first_name, last_name
Having max(length) >= 180
order by last_name, first_name;
```
| first_name | last_name | avg | sum | min | max |
|:---------: |:-------------: |:------: |:----: |:---: |:---: |
| "Debbie" | "Akroyd" | 185.00 | 4440 | 185 | 185 |
| "Michael" | "Bening" | 180.00 | 4320 | 180 | 180 |
| "Fred" | "Costner" | 180.00 | 4860 | 180 | 180 |
| "Cate" | "Harris" | 185.00 | 5180 | 185 | 185 |
| "Natalie" | "Hopkins" | 182.00 | 5824 | 182 | 182 |
| "Mary" | "Keitel" | 184.00 | 7360 | 184 | 184 |
| "Cate" | "Mcqueen" | 183.00 | 5490 | 183 | 183 |
| "Jeff" | "Silverstone" | 184.00 | 4600 | 184 | 184 |
| "Cameron" | "Streep" | 181.00 | 4344 | 181 | 181 |
## Any with subquery
- The subquery must return exactly one column.
- The ANY operator must be preceded by one of the following comparison operators: =, <, >, <=, >= and <>
- The ANY operator returns true if any value of the subquery meets the condition, otherwise, it returns false.
## Any with subquery example
```sql
SELECT title, length, rating
FROM film
WHERE length >= ANY(
SELECT Count( length )
FROM film
INNER JOIN film_category USING(film_id)
GROUP BY category_id )
order by length;
```
Subquery result:
| count |
|:-----: |
| 57 |
| 61 |
| 60 |
| 61 |
| 62 |
| 63 |
| 73 |
| 64 |
| 58 |
| ... |
## Any with subquery example result
| title | length | rating |
|:---------------------: |:--------: |:-------: |
| "Hall Cassidy" | 51 | "NC-17" |
| "Champion Flatliners" | 51 | "PG" |
| "Deep Crusade" | 51 | "PG-13" |
| "Simon North" | 51 | "NC-17" |
| "English Bulworth" | 51 | "PG-13" |
| "Excitement Eve" | 51 | "G" |
| "Frisco Forrest" | 51 | "PG" |
| "Harper Dying" | 52 | "G" |
| ... | ... | ... |
## All with subquery
- The ALL operator must be followed by a subquery.
- The ALL operator must be preceded by a comparison operator
The ALL operator works as follows:
1. column1 > ALL (subquery) - true if a value is greater than the biggest value returned by the subquery.
1. column1 >= ALL (subquery) - true if a value is greater than or equal to the biggest value returned by the subquery.
1. column1 < ALL (subquery) - true if a value is less than the smallest value returned by the subquery.
1. column1 <= ALL (subquery) - true if a value is less than or equal to the smallest value returned by the subquery.
1. column1 = ALL (subquery) - true if a value is equal to every value returned by the subquery.
1. column1 != ALL (subquery) - true if a value is not equal to any value returned by the subquery.
## All example
```sql
SELECT
title, length
FROM
film
WHERE length > ALL (
SELECT AVG(length)
FROM film GROUP BY rating
)
ORDER BY
length;
```
| title | length |
|:-------------------: |:------: |
| "Dangerous Uptown" | 121 |
| "Boogie Amelie" | 121 |
| "Harry Idaho" | 121 |
| "Brannigan Sunrise" | 121 |
| "Pure Runner" | 121 |
| "Arizona Bang" | 121 |
| "Paris Weekend" | 121 |
| ... | ... |
## Exist
- If the subquery returns at least one row, the result is true.
- Otherwise, the result is false.
- EXISTS is often used with the correlated subquery.
```sql
SELECT
title
FROM
film
WHERE EXISTS( SELECT category_id
FROM
category
WHERE
name = 'Comedy');
```
**Returns all titles. Why?**
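For contrast, a correlated version of this query (a sketch using the same sample schema) returns only films that actually belong to the Comedy category, because the subquery now references the outer row:
```sql
SELECT title
FROM film f
WHERE EXISTS (
    SELECT 1
    FROM film_category fc
        JOIN category c USING (category_id)
    WHERE fc.film_id = f.film_id
      AND c.name = 'Comedy');
```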
## SQL set operation
- Union
- Intersect
- Except
General rules:
- Both queries must return the same number of columns.
- The corresponding columns in the queries must have compatible data types.
## Union
```sql
(select name, title
from film f
join film_category fa using(film_id)
join category c using (category_id)
where name = 'Comedy' order by title limit 5)
Union
(select name, title from film f
join film_category fa using(film_id)
join category c using (category_id)
where name = 'Animation' order by title limit 5)
order by title
```
## Union example result
| category | title |
|:-----------: |:----------------------: |
| "Comedy" | "Airplane Sierra" |
| "Animation" | "Alter Victory" |
| "Animation" | "Anaconda Confessions" |
| "Comedy" | "Anthem Luke" |
| "Animation" | "Argonauts Town" |
| "Animation" | "Bikini Borrowers" |
| "Animation" | "Blackout Private" |
| "Comedy" | "Bringing Hysterical" |
| "Comedy" | "Caper Motions" |
| "Comedy" | "Cat Coneheads" |
## Intersect
```sql
(select name, title
from film f
join film_category fa using(film_id)
join category c using (category_id)
where name = 'Comedy' order by title limit 5)
intersect
(select name, title from film f
join film_category fa using(film_id)
join category c using (category_id)
where name = 'Animation' order by title limit 5)
order by title
```
| category | title |
|:-----------: |:----------------------: |
## Except
```sql
(select name, title
from film f
join film_category fa using(film_id)
join category c using (category_id)
where name = 'Comedy' order by title limit 5)
Except
(select name, title from film f
join film_category fa using(film_id)
join category c using (category_id)
where name = 'Animation' order by title limit 5)
order by title
```
| category | title |
|:--------: |:---------------------: |
| "Comedy" | "Airplane Sierra" |
| "Comedy" | "Anthem Luke" |
| "Comedy" | "Bringing Hysterical" |
| "Comedy" | "Caper Motions" |
| "Comedy" | "Cat Coneheads" |
## Grouping sets
- Define multiple grouping sets in the same query.
- The query generates a single result set with the aggregates for all grouping sets.
```sql
SELECT name, title, round(avg(length),2), SUM (rental_duration)
FROM film join film_category using (film_id) join category using (category_id)
GROUP BY
GROUPING SETS (
(name, title), -- group by name, title
(name), -- or group by name
(title) -- or group by title
)
ORDER BY name, title;
```
## Example results
| category | title | round | sum |
|:--------: |:---------------------: |:------: |:---: |
| "Action" | "Amadeus Holy" | 113.00 | 6 |
| "Action" | "American Circus" | 129.00 | 3 |
| "Action" | "Antitrust Tomatoes" | 168.00 | 5 |
| "Action" | "Ark Ridgemont" | 68.00 | 6 |
| "Action" | "Barefoot Manchurian" | 129.00 | 6 |
| "Action" | "Berets Agent" | 77.00 | 5 |
| ... | ... | ... | ... |
| "Action" | null | 111.6 | 317 |
| ... | ... | ... | ... |
| null | "Zhivago Core" | 105.00 | 6 |
| ... | ... | ... | ... |
## Cube
- Generate multiple grouping sets.
- Generate all possible grouping sets.
```sql
SELECT
c1,
c2,
c3,
aggregate (c4)
FROM
table_name
GROUP BY
CUBE (c1, c2, c3);
```
Explain:
```sql
CUBE(c1,c2,c3) <=> GROUPING SETS (
(c1,c2,c3),
(c1,c2),
(c1,c3),
(c2,c3),
(c1),
(c2),
(c3),
()
)
```
## Cube example
```sql
SELECT name, title, round(avg(length),2), SUM (rental_duration)
FROM
film join film_category using (film_id) join category using (category_id)
GROUP BY
CUBE (name, title)
ORDER BY
name,
title;
```
## Example results
| category | title | round | sum |
|:--------: |:---------------------: |:------: |:---: |
| "Action" | "Amadeus Holy" | 113.00 | 6 |
| "Action" | "American Circus" | 129.00 | 3 |
| "Action" | "Antitrust Tomatoes" | 168.00 | 5 |
| "Action" | "Ark Ridgemont" | 68.00 | 6 |
| "Action" | "Barefoot Manchurian" | 129.00 | 6 |
| "Action" | "Berets Agent" | 77.00 | 5 |
| ... | ... | ... | ... |
| "Action" | null | 111.6 | 317 |
| ... | ... | ... | ... |
| null | "Zhivago Core" | 105.00 | 6 |
| ... | ... | ... | ... |
| null | null | 115.27 | 4985 |
## Roll up
```sql
SELECT
c1,
c2,
c3,
aggregate (c4)
FROM
table_name
GROUP BY
ROLLUP (c1, c2, c3);
```
Explanation: `ROLLUP(c1, c2, c3)` generates a hierarchy of grouping sets:
```sql
ROLLUP(c1,c2,c3) <=> GROUPING SETS (
    (c1, c2, c3),
    (c1, c2),
    (c1),
()
)
```
## ROLLUP example
```sql
SELECT name, title, round(avg(length),2),
SUM (rental_duration)
FROM
film join film_category using (film_id) join category using (category_id)
GROUP BY
ROLLUP (name, title)
ORDER BY
name,
title;
```
## Example results
| category | title | round | sum |
|:--------: |:---------------------: |:------: |:---: |
| "Action" | "Amadeus Holy" | 113.00 | 6 |
| "Action" | "American Circus" | 129.00 | 3 |
| "Action" | "Antitrust Tomatoes" | 168.00 | 5 |
| "Action" | "Ark Ridgemont" | 68.00 | 6 |
| "Action" | "Barefoot Manchurian" | 129.00 | 6 |
| "Action" | "Berets Agent" | 77.00 | 5 |
| ... | ... | ... | ... |
| "Action" | null | 111.6 | 317 |
| ... | ... | ... | ... |
| null | null | 115.27 | 4985 |
```
# default_exp optimizers
```
# Optimizers
> The basics for building and training models are contained in this module.
```
#hide
from nbdev.showdoc import *
%load_ext autoreload
%autoreload 2
%matplotlib inline
# export
from collections.abc import Iterable
from functools import partial
from torch.optim import Adam
from incendio.metrics import batch_size
from incendio.utils import quick_stats, DEVICE
# Used in notebook but not needed in package.
import numpy as np
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from htools import assert_raises
```
## Optimizers
Optimizers like Adam or RMSProp can contain multiple "parameter groups", each with a different learning rate. (Other hyperparameters can vary as well, but we ignore that for now.) The functions below allow us to get a new optimizer or update an existing one. They make it easy to use differential learning rates, but that is not required: they can also use the same LR for every parameter group.
```
# export
def variable_lr_optimizer(model, lr=3e-3, lr_mult=1.0, optimizer=Adam,
eps=1e-3, **kwargs):
"""Get an optimizer that uses different learning rates for different layer
groups. Additional keyword arguments can be used to alter momentum and/or
weight decay, for example, but for the sake of simplicity these values
will be the same across layer groups.
Parameters
-----------
model: nn.Module
A model object. If you intend to use differential learning rates,
the model must have an attribute `groups` containing a ModuleList of
layer groups in the form of Sequential objects. The number of layer
groups must match the number of learning rates passed in.
lr: float, Iterable[float]
        A number or list of numbers containing the learning rates to use for
each layer group. There should generally be one LR for each layer group
in the model. If fewer LR's are provided, lr_mult will be used to
compute additional LRs. See `update_optimizer` for details.
lr_mult: float
If you pass in fewer LRs than layer groups, `lr_mult` will be used to
compute additional learning rates from the one that was passed in.
optimizer: torch optimizer
The Torch optimizer to be created (Adam by default).
eps: float
Hyperparameter used by optimizer. The default of 1e-8 can lead to
exploding gradients, so we typically override this.
Examples
---------
optim = variable_lr_optimizer(model, lrs=[3e-3, 3e-2, 1e-1])
"""
groups = getattr(model, 'groups', [model])
# Placeholder LR used. We update this afterwards.
data = [{'params': group.parameters(), 'lr': 0} for group in groups]
optim = optimizer(data, eps=eps, **kwargs)
update_optimizer(optim, lr, lr_mult)
return optim
# export
def update_optimizer(optim, lrs, lr_mult=1.0):
"""Pass in 1 or more learning rates, 1 for each layer group, and update the
optimizer accordingly. The optimizer is updated in place so nothing is
returned.
Parameters
----------
optim: torch.optim
Optimizer object.
lrs: float, Iterable[float]
One or more learning rates. If using multiple values, usually the
earlier values will be smaller and later values will be larger. This
can be achieved by passing in a list of LRs that is the same length as
the number of layer groups in the optimizer, or by passing in a single
LR and a value for lr_mult.
lr_mult: float
If you pass in fewer LRs than layer groups, `lr_mult` will be used to
compute additional learning rates from the one that was passed in.
Returns
-------
None
Examples
--------
If optim has 3 layer groups, this will result in LRs of [3e-5, 3e-4, 3e-3]
in that order:
update_optimizer(optim, lrs=3e-3, lr_mult=0.1)
Again, optim has 3 layer groups. We leave the default lr_mult of 1.0 so
each LR will be 3e-3.
update_optimizer(optim, lrs=3e-3)
Again, optim has 3 layer groups. 3 LRs are passed in so lr_mult is unused.
update_optimizer(optim, lrs=[1e-3, 1e-3, 3e-3])
"""
if not isinstance(lrs, Iterable): lrs = [lrs]
n_missing = len(optim.param_groups) - len(lrs)
if n_missing < 0:
raise ValueError('Received more learning rates than layer groups.')
while n_missing > 0:
lrs.insert(0, lrs[0] * lr_mult)
n_missing -= 1
for group, lr in zip(optim.param_groups, lrs):
group['lr'] = lr
# export
adam = partial(Adam, eps=1e-3)
```
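As a quick usage sketch (not part of the exported module), here is how `variable_lr_optimizer` and `update_optimizer` could be driven from a toy model; the two-group `ToyModel` and the learning-rate values are made up purely for illustration.
```
# Hypothetical two-group model, only to exercise the API defined above.
import torch.nn as nn

class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # `groups` is the attribute variable_lr_optimizer looks for.
        self.groups = nn.ModuleList([
            nn.Sequential(nn.Linear(10, 16), nn.ReLU()),  # earlier layers
            nn.Sequential(nn.Linear(16, 2)),              # later layers
        ])

    def forward(self, x):
        for group in self.groups:
            x = group(x)
        return x

model = ToyModel()

# One learning rate per layer group.
optim = variable_lr_optimizer(model, lr=[1e-3, 1e-2])
print([g['lr'] for g in optim.param_groups])    # [0.001, 0.01]

# A single LR plus lr_mult fills in smaller LRs for the earlier groups.
update_optimizer(optim, lrs=3e-3, lr_mult=0.1)
print([g['lr'] for g in optim.param_groups])    # ~[0.0003, 0.003]
```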
# Abhishek Sharma
## Data Science and Business Analytics Intern @TSF
### Task #6 : Prediction using Decision tree algorithm
### Dataset : Iris.csv (https://bit.ly/34SRn3b)
**Algorithm**
One of the most important considerations when choosing a machine learning algorithm is how interpretable it is. The ability to explain how an algorithm makes predictions is useful not only to you, but also to potential stakeholders. A very interpretable machine learning algorithm is a decision tree, which you can think of as a series of questions designed to assign a class or predict a continuous value depending on the task. The tree plotted at the end of this notebook is a decision tree designed for classification.
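To make the "series of questions" idea concrete, here is a small self-contained sketch (separate from the notebook code below) that fits a shallow tree on scikit-learn's built-in iris data and prints its decision rules as text:
```
# Print a fitted tree's rules as a series of if/else questions.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, export_text

iris = load_iris()
shallow_tree = DecisionTreeClassifier(max_depth=2, random_state=0).fit(iris.data, iris.target)
print(export_text(shallow_tree, feature_names=list(iris.feature_names)))
```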
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
df=pd.read_csv('Iris.csv')
df
df.info()
features = ['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm']
# Create features matrix
x = df.loc[:, features].values
y=df.Species
x_train,x_test,y_train,y_test=train_test_split(x, y, random_state=0)
clf = DecisionTreeClassifier(max_depth = 2,
random_state = 0)
clf.fit(x_train, y_train)
clf.predict(x_test[0:1])
from sklearn import metrics
import seaborn as sns
score = clf.score(x_test, y_test)
print(score)
print(metrics.classification_report(y_test,clf.predict(x_test)))
cm = metrics.confusion_matrix(y_test, clf.predict(x_test))
plt.figure(figsize=(7,7))
sns.heatmap(cm, annot=True,
fmt=".0f",
linewidths=.5,
square = True,
cmap = 'Blues');
plt.ylabel('Actual label', fontsize = 17);
plt.xlabel('Predicted label', fontsize = 17);
plt.title('Accuracy Score: {}'.format(score), size = 17);
plt.tick_params(labelsize= 15)
# List of values to try for max_depth:
max_depth_range = list(range(1, 6))
# List to store the accuracy score for each value of max_depth:
accuracy = []
for depth in max_depth_range:
clf = DecisionTreeClassifier(max_depth = depth,
random_state = 0)
clf.fit(x_train, y_train)
score = clf.score(x_test, y_test)
accuracy.append(score)
# Plotting accuracy score by tree depth
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10,7));
ax.plot(max_depth_range,
accuracy,
lw=2,
color='k')
ax.set_xlim([1, 5])
ax.set_ylim([.50, 1.00])
ax.grid(True,
axis = 'both',
zorder = 0,
linestyle = ':',
color = 'k')
ax.tick_params(labelsize = 18)
ax.set_xticks([1,2,3,4,5])
ax.set_xlabel('max_depth', fontsize = 24)
ax.set_ylabel('Accuracy', fontsize = 24)
fig.tight_layout()
#fig.savefig('images/max_depth_vs_accuracy.png', dpi = 300)
fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize = (7,4), dpi = 150)
tree.plot_tree(clf);
# Putting the feature names and class names into variables
fn = ['sepal length (cm)','sepal width (cm)','petal length (cm)','petal width (cm)']
cn = ['setosa', 'versicolor', 'virginica']
fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize = (7,4), dpi = 300)
tree.plot_tree(clf,
feature_names = fn,
class_names=cn,
filled = True);
#fig.savefig('images/plottreefncn.png')
```
### Conclusion
- **After importing the data and fitting the model, the test accuracy is 89.47%.**
- **The confusion matrix and classification report give a clear view of the model's performance.**
- **Plotting the accuracy score against tree depth shows that the optimal depth for this model is 3.**
### Thank You!
# Basic Python Containers: Lists, Dictionaries, Sets, Tuples
**Based on lecture materials by Milad Fatenejad, Joshua R. Smith, Will Trimble, and Anthony Scopatz**
Python would be a fairly useless language if it weren't for the compound data types. The main two are **lists** and **dictionaries**, but we'll discuss **sets** and **tuples** as well. I'll also go over reading text data from files.
## Lists
A list is an ordered, indexable collection of data. Lets say you have collected some current and voltage data that looks like this:
voltage:
-2.0
-1.0
0.0
1.0
2.0
current:
-1.0
-0.5
0.0
0.5
1.0
So you could put that data into lists like:
```
voltage = [-2.0, -1.0, 0.0, 1.0, 2.0]
current = [-1.0, -0.5, 0.0, 0.5, 1.0]
```
**voltage** is of type list:
```
type(voltage)
```
Python lists have the feature that they are indexed from zero. Therefore, to find the value of the first item in voltage:
```
voltage[0]
```
And to find the value of the third item
```
voltage[2]
```
Lists can be indexed from the back using a negative index. The last item of current
```
current[-1]
```
and the next-to-last
```
current[-2]
```
You can "slice" items from within a list. Lets say we wanted the second through fourth items from voltage
```
voltage[1:4]
```
Or from the third item to the end
```
voltage[2:]
```
Or everything before the third item (i.e., the first two items)
```
voltage[:2]
```
and so on. Remember that `list[a:b]` includes **a** and excludes **b**.
### Append and Extend
Just like strings have methods, lists do too.
```
dir(list)
```
One useful method is `append()`. Lets say we want to stick the following data on the end of both our lists :
voltage:
3.0
4.0
current:
1.5
2.0
If you want to append items to the end of a list, use the `append()` method.
```
voltage.append(3.0)
voltage.append(4.0)
voltage
```
You can see how that approach might be tedious in certain cases. If you want to concatenate a list onto the end of another one, use `extend()`. The **+** operator also does this.
```
current.extend([1.5, 2.0])
current
```
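The `+` operator mentioned above builds a new list rather than changing `current` in place:
```
longer = current + [2.5, 3.0]
longer
```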
### Length of Lists
Sometimes you want to know how many items are in a list. Use the `len()` command.
```
len(voltage)
```
### Heterogeneous Data
Lists can contain hetergeneous data.
```
data = ["experiment: current vs. voltage",
"run", 47,
"temperature", 372.756,
"current", [-1.0, -0.5, 0.0, 0.5, 1.0],
"voltage", [-2.0, -1.0, 0.0, 1.0, 2.0],
]
print(data)
```
We've got strings, ints, floats, and even other lists in there. Because the list is enclosed in square brackets, it can be split across several lines without any line-continuation characters; the line breaks just make it easier to read.
## Assigning Variables to Other Variables
Something that might cause you headaches in the future is how python deals with assignment of one list to another. When you set a list equal to another, both variables point to the same thing. Changing the first one ends up changing the second. Be careful about this fact.
```
a = [1,2,3]
b = a
a.append(4)
b
b.append(5)
a
```
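If you want an independent copy rather than a second name for the same list, make an explicit copy, for example:
```
c = a[:] # a full slice creates a new list; list(a) does the same
c.append(6)
a # unchanged
c
```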
There's a ton more to know about lists, but lets press on. Check out Dive Into Python or the help documentation for more info.
## Tuples
Tuples are another of Python's basic container data types. They are very similar to lists but with one major difference. Tuples are **immutable**. Once data is placed into a tuple, the tuple cannot be changed. You define a tuple as follows:
```
tup = ("red", "white", "blue")
type(tup)
```
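Trying to change an element shows what immutability means in practice:
```
try:
    tup[0] = "green"
except TypeError as error:
    print(error) # 'tuple' object does not support item assignment
```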
You can slice and index the tuple exactly like you would a list. Tuples are used in the inner workings of python, and a tuple can be used as a key in a dictionary, whereas a list cannot as we will see in a moment.
See if you can retrieve the third element of **tup**:
## Sets
Most introductory python courses do not go over sets this early (or at all), but I've found this data type to be useful. The python set type is similar to the idea of a mathematical set: it is an unordered collection of unique things. Consider:
```
fruit = {"apple", "banana", "pear", "banana"}
```
Note that curly braces create a set literal; you can also build a set from a list with the `set()` constructor.
Since sets contain only unique items, there's only one banana in the set fruit.
You can do things like intersections, unions, etc. on sets just like in math. Here's an example of an intersection of two sets (the common items in both sets).
```
bowl1 = {"apple", "banana", "pear", "peach"}
bowl2 = {"peach", "watermelon", "orange", "apple"}
bowl1 & bowl2
bowl1 | bowl2
```
You can check out more info using the help docs. We won't be returning to sets, but its good for you to know they exist.
## Dictionaries
A Python dictionary is an unordered collection of key-value pairs. Dictionaries are by far the most important data type in Python. The key is a way to name the data, and the value is the data itself. Here's a way to create a dictionary that contains all the data from the list example above in a more sensible way than a list.
```
data = {"experiment": "current vs. voltage",
"run": 47,
"temperature": 372.756,
"current": [-1.0, -0.5, 0.0, 0.5, 1.0],
"voltage": [-2.0, -1.0, 0.0, 1.0, 2.0],
}
print(data)
```
This model is clearly better because you no longer have to remember that the run number is in the second position of the list, you just refer directly to "run":
```
data["run"]
```
If you wanted the voltage data list:
```
data["voltage"]
```
Or perhaps you wanted the last element of the current data list
```
data["current"][-1]
```
Once a dictionary has been created, you can change the values of the data if you like.
```
data["temperature"] = 3275.39
```
You can also add new keys to the dictionary. Note that dictionaries are indexed with square brackets, just like lists--they look the same, even though they're very different.
```
data["user"] = "Johann G. von Ulm"
```
Dictionaries, like strings, lists, and all the rest, have built-in methods. Lets say you wanted all the keys from a particular dictionary.
```
data.keys()
```
also, values
```
data.values()
```
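and key-value pairs together, using `items()`:
```
for key, value in data.items():
    print(key, value)
```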
# Exercise 1
1. Calculate the mean of the numbers 2, 3, and 10, using the given list.
```
a = [2, 3, 10]
```
# Exercise 2
1. Make a list with 5 things in it.
2. Add two more things.
```
```
# Exercise 3
1. Make a dictionary whose keys are the strings "zero" through "nine" and whose values are ints 0 through 9.
```
digits = {}
# Your code goes here
print(digits)
```
# Exercise 4
1. Make a dictionary and experiment using different types as keys. Can containers be keys?
```
```
# Exercise 5
1. Read the file OtherFiles/lines.txt.
2. Call the method `readlines()` on the file to get a list of lines as strings.
3. Print the last two lines. Notice anything strange with line 3?
```
file = None # open the file here
lines = [] # convert file to list of lines
lastTwo = [] # splice list to find last two lines
print(lastTwo)
```
# Static vs Dynamic Neural Networks in NNabla
NNabla allows you to define static and dynamic neural networks. Static neural networks have a fixed layer architecture, i.e., a static computation graph. In contrast, dynamic neural networks use a dynamic computation graph, e.g., randomly dropping layers for each minibatch.
This tutorial compares both computation graphs.
```
!pip install nnabla-ext-cuda100
!git clone https://github.com/sony/nnabla.git
%cd nnabla/tutorial
%matplotlib inline
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import numpy as np
np.random.seed(0)
GPU = 0 # ID of GPU that we will use
batch_size = 64 # Reduce to fit your device memory
```
### Dataset loading
We will first set up the digits dataset from scikit-learn:
```
from tiny_digits import *
digits = load_digits()
data = data_iterator_tiny_digits(digits, batch_size=batch_size, shuffle=True)
```
Each sample in this dataset is a grayscale image of size 8x8 and belongs to one of the ten classes `0`, `1`, ..., `9`.
```
img, label = data.next()
print(img.shape, label.shape)
```
### Network definition
As an example, we define a (unnecessarily) deep CNN:
```
def cnn(x):
"""Unnecessarily Deep CNN.
Args:
x : Variable, shape (B, 1, 8, 8)
Returns:
y : Variable, shape (B, 10)
"""
with nn.parameter_scope("cnn"): # Parameter scope can be nested
with nn.parameter_scope("conv1"):
h = F.tanh(PF.batch_normalization(
PF.convolution(x, 64, (3, 3), pad=(1, 1))))
for i in range(10): # unnecessarily deep
with nn.parameter_scope("conv{}".format(i + 2)):
h = F.tanh(PF.batch_normalization(
PF.convolution(h, 128, (3, 3), pad=(1, 1))))
with nn.parameter_scope("conv_last"):
h = F.tanh(PF.batch_normalization(
PF.convolution(h, 512, (3, 3), pad=(1, 1))))
h = F.average_pooling(h, (2, 2))
with nn.parameter_scope("fc"):
h = F.tanh(PF.affine(h, 1024))
with nn.parameter_scope("classifier"):
y = PF.affine(h, 10)
return y
```
## Static computation graph
First, we will look at the case of a static computation graph where the neural network does not change during training.
```
from nnabla.ext_utils import get_extension_context
# setup cuda extension
ctx_cuda = get_extension_context('cudnn', device_id=GPU) # replace 'cudnn' by 'cpu' if you want to run the example on the CPU
nn.set_default_context(ctx_cuda)
# create variables for network input and label
x = nn.Variable(img.shape)
t = nn.Variable(label.shape)
# create network
static_y = cnn(x)
static_y.persistent = True
# define loss function for training
static_l = F.mean(F.softmax_cross_entropy(static_y, t))
```
Setup solver for training
```
solver = S.Adam(alpha=1e-3)
solver.set_parameters(nn.get_parameters())
```
Create data iterator
```
loss = []
def epoch_end_callback(epoch):
global loss
print("[", epoch, np.mean(loss), itr, "]", end='')
loss = []
data = data_iterator_tiny_digits(digits, batch_size=batch_size, shuffle=True)
data.register_epoch_end_callback(epoch_end_callback)
```
Perform training iterations and output training loss:
```
%%time
for epoch in range(30):
itr = 0
while data.epoch == epoch:
x.d, t.d = data.next()
static_l.forward(clear_no_need_grad=True)
solver.zero_grad()
static_l.backward(clear_buffer=True)
solver.update()
loss.append(static_l.d.copy())
itr += 1
print('')
```
## Dynamic computation graph
Now, we will use a dynamic computation graph, where the neural network is set up each time we want to do a forward/backward pass through it. This allows us to, e.g., randomly drop layers or to have network architectures that depend on the input data. In this example, for simplicity, we will keep the same neural network structure and only create it dynamically. Adding a check such as `if np.random.rand() > dropout_probability:` inside `cnn()` would allow layers to be dropped at random, as sketched below.
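A minimal sketch of what that could look like is shown below; it is illustrative only and is not used in the timing comparison that follows. `drop_prob` is a made-up parameter, and the middle blocks all keep 64 channels so that skipping any of them never changes the tensor shape.
```
def cnn_stochastic_depth(x, drop_prob=0.3):
    """Variant of cnn() whose middle blocks are randomly skipped per minibatch."""
    with nn.parameter_scope("cnn_sd"):
        with nn.parameter_scope("conv1"):
            h = F.tanh(PF.batch_normalization(
                PF.convolution(x, 64, (3, 3), pad=(1, 1))))
        for i in range(10):
            if np.random.rand() > drop_prob:  # randomly drop this block
                with nn.parameter_scope("conv{}".format(i + 2)):
                    h = F.tanh(PF.batch_normalization(
                        PF.convolution(h, 64, (3, 3), pad=(1, 1))))
        with nn.parameter_scope("classifier"):
            y = PF.affine(h, 10)
    return y
```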
First, we set up the solver and the data iterator for training:
```
nn.clear_parameters()
solver = S.Adam(alpha=1e-3)
solver.set_parameters(nn.get_parameters())
loss = []
def epoch_end_callback(epoch):
global loss
print("[", epoch, np.mean(loss), itr, "]", end='')
loss = []
data = data_iterator_tiny_digits(digits, batch_size=batch_size, shuffle=True)
data.register_epoch_end_callback(epoch_end_callback)
%%time
for epoch in range(30):
itr = 0
while data.epoch == epoch:
x.d, t.d = data.next()
with nn.auto_forward():
dynamic_y = cnn(x)
dynamic_l = F.mean(F.softmax_cross_entropy(dynamic_y, t))
solver.set_parameters(nn.get_parameters(), reset=False, retain_state=True) # this can be done dynamically
solver.zero_grad()
dynamic_l.backward(clear_buffer=True)
solver.update()
loss.append(dynamic_l.d.copy())
itr += 1
print('')
```
Comparing the two processing times, we can observe that both schemes ("static" and "dynamic") take roughly the same execution time, i.e., although we created the computation graph dynamically, we did not lose performance.
```
# Import all required libraries
import pandas
from pandas import *
import numpy
from datetime import datetime
# Initialize values
sample_size_train = 0
enable_scaler=True
nb_models = 10
# Number of value on which to train, if null, train on all value
# -1 = 50% up, 50% down
sample_size_train = 200000
sample_internal_test = 10000
# Read training data + test data
df_data = pandas.read_csv("../input/application_train.csv")
df_test = pandas.read_csv("../input/application_test.csv")
df_data['TARGET'].mean()
# Only select a small sample, faster local testing
# Choose randomly 1k value out of those
df_data_1 = df_data.loc[df_data['TARGET'] == 1]
df_data_0 = df_data.loc[df_data['TARGET'] == 0].sample(24825)
df_internal_test = pandas.concat([df_data_0, df_data_1])
df_internal_test = df_internal_test.sample(sample_internal_test)
print("Sample internal test = {0}".format(df_internal_test['TARGET'].count()))
# We should remove them from df_data now
df_train = df_data.sample(sample_size_train)
display(df_internal_test.sample(3))
display(df_train.sample(10))
# Preparing all data
def prepare_data(df, dummies = []):
df = pandas.get_dummies(df, prefix=dummies,
columns=dummies)
# Should we give more information? Like nb sales previous X weeks?
# Previous sales of last 2/3 year at the same date (bank holiday, black friday, etc.)
# Probably yes as this information is available, would be a good try
return df
dummies = ['NAME_CONTRACT_TYPE', 'CODE_GENDER', 'FLAG_OWN_CAR',
'FLAG_OWN_REALTY', 'NAME_TYPE_SUITE', 'NAME_INCOME_TYPE',
'NAME_EDUCATION_TYPE', 'NAME_FAMILY_STATUS', 'NAME_HOUSING_TYPE']
print(dummies)
df_train = prepare_data(df_train, dummies)
df_test = prepare_data(df_test, dummies)
df_internal_test = prepare_data(df_internal_test, dummies)
for column in df_train.columns:
if column not in df_test.columns:
df_test[column] = 0
if column not in df_internal_test.columns:
df_internal_test[column] = 0
for column in df_test.columns:
if column not in df_train.columns:
df_train[column] = 0
if column not in df_internal_test.columns:
df_internal_test[column] = 0
for column in df_internal_test.columns:
if column not in df_train.columns:
df_train[column] = 0
if column not in df_test.columns:
df_test[column] = 0
# Order the columns so that they all have the same
df_test = df_test[df_train.columns]
df_internal_test = df_internal_test[df_train.columns]
# Generate our training/validation datasets
from sklearn import model_selection
# Name of the result column
result_cols = ['TARGET']
result_excl_cols = 'TARGET_'
input_cols = []
for dummy in dummies:
input_cols.append("{0}_".format(dummy))
# Then, we add some columns based on the feeling
final_cols = ["DAYS_BIRTH", "DAYS_EMPLOYED", "DAYS_REGISTRATION", "DAYS_ID_PUBLISH",
#"AMT_GOODS_PRICE", "AMT_ANNUITY",
"AMT_CREDIT", "AMT_INCOME_TOTAL", "CNT_CHILDREN"]
#final_cols = ["DAYS_BIRTH"]
input_cols = input_cols + final_cols
# Get the final values
def get_values(df, cols=[], excl_cols = "doqwidjoqwidjqwoidjqwoidjqwodijqw"):
columns = df.columns.values
# Remove all columns that are not inside the list
for column in columns:
find = False
if column.startswith(excl_cols):
print("Ignoring {0}".format(column))
else:
for col in cols:
if column.startswith(col):
find = True
if not find:
df = df.drop(columns=[column])
else:
df[column] = df[column].fillna(value=0)
new_order = sorted(df.columns.values)
print(new_order)
# Same order for both training and testing set
df = df[new_order]
return df.values
print(input_cols)
X_train = get_values(df_train, input_cols)
y_train = get_values(df_train, result_cols, result_excl_cols).ravel()
X_test = get_values(df_internal_test, input_cols)
y_test = get_values(df_internal_test, result_cols, result_excl_cols).ravel()
X_final_test = get_values(df_test, input_cols)
final_cols = ["DAYS_BIRTH", "DAYS_EMPLOYED", "DAYS_REGISTRATION", "DAYS_ID_PUBLISH",
#"AMT_GOODS_PRICE", "AMT_ANNUITY",
"AMT_CREDIT", "AMT_INCOME_TOTAL", "CNT_CHILDREN"]
for col in final_cols:
print(col)
print(df_train[[col]].isnull().values.any())
# Normalize the data
# Stack the rows of train, test and validation so the scaler sees all of them
X_all = numpy.concatenate([X_train, X_final_test, X_test])
#print(len(X_all))
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# The usual advice is to fit the scaler only on training data ("don't cheat");
# here we deliberately fit it on train + test + validation combined (X_all above).
if enable_scaler:
scaler.fit(X_all)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_final_test = scaler.transform(X_final_test)
from sklearn.model_selection import train_test_split
# Defining all those nice sets
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
def get_comparator3000(Y_validation, Y_validation_predict):
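    # Scan candidate decision thresholds from 0.00 to 0.99; a prediction above the
    # threshold counts as class 1. Return the best fraction of correctly classified
    # validation samples found over all thresholds.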
best_result = 0
for r in range(0, 100):
boundary = r/100.0
result = 0
for i in range(0, len(Y_validation)):
if (Y_validation[i] == 1 and Y_validation_predict[i] > boundary) or (Y_validation[i] == 0 and Y_validation_predict[i] <= boundary):
result += 1
if result > best_result:
best_result = result
print("current best boundary = {0} with score {1}".format(boundary, best_result/ len(Y_validation)))
return best_result / len(Y_validation)
# Import algorithm
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import *
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
models = []
for i in range(5, 5 + nb_models):
models.append(('MLPRegressor_adam_{0}'.format(i), MLPRegressor(hidden_layer_sizes=(8,), activation='relu', solver='adam', alpha=0.001, batch_size='auto',
learning_rate='constant', learning_rate_init=0.01, power_t=0.5, max_iter=1000, shuffle=True,
random_state=i, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,
early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)))
# models.append(('MLPClassifier_adam_{0}'.format(i), MLPClassifier(hidden_layer_sizes=(8,), activation='relu', solver='adam', alpha=0.001, batch_size='auto',
# learning_rate='constant', learning_rate_init=0.01, power_t=0.5, max_iter=1000, shuffle=True,
# random_state=i, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,
# early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)))
#models.append(('LR', LogisticRegression()))
#models.append(('LDA', LinearDiscriminantAnalysis()))
#models.append(('KNN', KNeighborsClassifier()))
#models.append(('CART', DecisionTreeClassifier()))
#models.append(('NB', GaussianNB()))
#models.append(('SVM', SVC()))
# High value until first model get solved
max_score = 10000
best_model = "UNKNOWN"
res = []
submission = []
# Testing all models, one by one
for name, model in models:
print("Executing for model {0}".format(name))
time_start = datetime.now()
# Training the model
model.fit(X_train, y_train)
print("Finish fit for {0}".format(name))
y_test_result = model.predict(X_test)
res.append(y_test_result)
# We can calculate the avg error
score = get_comparator3000(y_test, y_test_result)
print("Model {0} got score of {1}, time: {2}".format(name, score, datetime.now() - time_start))
# Doing as well on final one
submission.append(model.predict(X_final_test))
# For all result in res, if test, display the result, if not, write it to a file
final_res = []
nb_variable = len(res[0])
for variable in range(0, nb_variable):
final_res.append(0.0)
for i in range(0, len(res)):
final_res[variable] += res[i][variable]
final_res[variable] = final_res[variable] / len(res)
# We can calculate the avg error
score = get_comparator3000(y_test, final_res)
print("avg model got score of {0}".format(score))
# Same for the submission
final_submission = []
nb_variable = len(submission[0])
for variable in range(0, nb_variable):
final_submission.append(0.0)
for i in range(0, len(submission)):
final_submission[variable] += submission[i][variable]
final_submission[variable] = final_submission[variable] / len(submission)
if final_submission[variable] < 0.08:
final_submission[variable] = 0.0
else:
final_submission[variable] = 1.0
print("Writing output file merged.csv".format(name))
df_test['TARGET'] = final_submission
result_df = df_test[['SK_ID_CURR', 'TARGET']]
result_df['TARGET'] = final_submission
result_df.to_csv("merged.csv", index=False)
```
# Meteorology-SnowDegreeDay coupling: Grid ptypes
**Goal:** Try to successfully run a coupled `Meteorology-SnowDegreeDay` simulation, with `Meteorology` as the driver, and with Grid inputs for `SnowDegreeDay`.
Import the Babel-wrapped `Meteorology` and `SnowDegreeDay` components and create instances:
```
from cmt.components import Meteorology, SnowDegreeDay
met, sno = Meteorology(), SnowDegreeDay()
```
Initialize the components with cfg files that, for simplicity, use the same time step and run duration:
```
met.initialize('./input/meteorology-2.cfg')
sno.initialize('./input/snow_degree_day-2.cfg')
```
Store initial values of time, snow depth, and air temperature:
```
time = [met.get_current_time()]
snow_depth = [sno.get_value('snowpack__depth').max()]
air_temp = [met.get_value('atmosphere_bottom_air__temperature').max()]
```
Run the coupled models to completion. In each time step, perform the following actions:
1. Get variables from `Meteorology`; set into `SnowDegreeDay`
1. Advance `SnowDegreeDay`
1. Get variables from `SnowDegreeDay`; set into `Meteorology`
1. Advance `Meteorology`
```
count = 1
while met.get_current_time() < met.get_end_time():
T_air = met.get_value('atmosphere_bottom_air__temperature')
P_snow = met.get_value('atmosphere_water__snowfall_leq-volume_flux')
T_surf = met.get_value('land_surface__temperature')
rho_H2O = met.get_value('water-liquid__mass-per-volume_density')
sno.set_value('atmosphere_bottom_air__temperature', T_air)
sno.set_value('atmosphere_water__snowfall_leq-volume_flux', P_snow)
sno.set_value('land_surface__temperature', T_surf)
sno.set_value('water-liquid__mass-per-volume_density', rho_H2O)
sno.update(sno.get_time_step()*count)
rho_snow = sno.get_value('snowpack__z_mean_of_mass-per-volume_density')
h_snow = sno.get_value('snowpack__depth')
h_swe = sno.get_value('snowpack__liquid-equivalent_depth')
SM = sno.get_value('snowpack__melt_volume_flux')
met.set_value('snowpack__z_mean_of_mass-per-volume_density', rho_snow)
met.set_value('snowpack__depth', h_snow)
met.set_value('snowpack__liquid-equivalent_depth', h_swe)
met.set_value('snowpack__melt_volume_flux', SM)
met.update(met.get_time_step()*count)
time.append(met.get_current_time())
snow_depth.append(sno.get_value('snowpack__depth').max())
air_temp.append(met.get_value('atmosphere_bottom_air__temperature').max())
count += 1
print time
print snow_depth
print air_temp
```
Finalize the components:
```
met.finalize(), sno.finalize()
```
Plot snow depth versus time.
```
%matplotlib inline
from matplotlib import pyplot as plt
plt.plot(time[1:], snow_depth[1:])
plt.title('Snow depth versus time')
plt.xlabel('Time [s]')
plt.ylabel('Snow depth [m]')
```
**Result:** Works!
IPython Notebooks
==================
* You can run a cell by pressing ``[shift] + [Enter]`` or by pressing the "play" button in the menu.
* You can get help on a function or object by pressing ``[shift] + [tab]`` after the opening parenthesis ``function(``
* You can also get help by executing ``function?``
## Numpy Arrays
Manipulating `numpy` arrays is an important part of doing machine learning
(or, really, any type of scientific computation) in python. This will likely
be review for most: we'll quickly go through some of the most important features.
```
import numpy as np
# Generating a random array
X = np.random.random((3, 5)) # a 3 x 5 array
print(X)
# Accessing elements
# get a single element
print(X[0, 0])
# get a row
print(X[1])
# get a column
print(X[:, 1])
# Transposing an array
print(X.T)
# Turning a row vector into a column vector
y = np.linspace(0, 12, 5)
print(y)
# make into a column vector
print(y[:, np.newaxis])
# getting the shape or reshaping an array
print(X.shape)
print(X.reshape(5, 3))
# indexing by an array of integers (fancy indexing)
indices = np.array([3, 1, 0])
print(indices)
X[:, indices]
```
There is much, much more to know, but these few operations are fundamental to what we'll
do during this tutorial.
## Scipy Sparse Matrices
We won't make very much use of these in this tutorial, but sparse matrices are very nice
in some situations. In some machine learning tasks, especially those associated
with textual analysis, the data may be mostly zeros. Storing all these zeros is very
inefficient, and representing the data in a way that only stores the non-zero values can be much more efficient. We can create and manipulate sparse matrices as follows:
```
from scipy import sparse
# Create a random array with a lot of zeros
X = np.random.random((10, 5))
print(X)
# set the majority of elements to zero
X[X < 0.7] = 0
print(X)
# turn X into a csr (Compressed-Sparse-Row) matrix
X_csr = sparse.csr_matrix(X)
print(X_csr)
# convert the sparse matrix to a dense array
print(X_csr.toarray())
```
The CSR representation can be very efficient for computations, but it is not
as good for adding elements. For that, the LIL (List of Lists) representation
is better:
```
# Create an empty LIL matrix and add some items
X_lil = sparse.lil_matrix((5, 5))
for i, j in np.random.randint(0, 5, (15, 2)):
X_lil[i, j] = i + j
print(X_lil)
print(X_lil.toarray())
```
Often, once an LIL matrix is created, it is useful to convert it to a CSR format
(many scikit-learn algorithms require CSR or CSC format)
```
print(X_lil.tocsr())
```
The available sparse formats, each useful for different kinds of problems:
- `CSR` (compressed sparse row)
- `CSC` (compressed sparse column)
- `BSR` (block sparse row)
- `COO` (coordinate)
- `DIA` (diagonal)
- `DOK` (dictionary of keys)
- `LIL` (list of lists)
The ``scipy.sparse`` submodule also has a lot of functions for sparse matrices
including linear algebra, sparse solvers, graph algorithms, and much more.
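As a small, hedged illustration of those extras (the matrix below is made up for the example, and it reuses the `numpy` and `scipy.sparse` imports from the cells above), here is how one might solve a sparse linear system with `scipy.sparse.linalg.spsolve`:
```
from scipy.sparse.linalg import spsolve

# A small sparse system A x = b; CSC is a good format for solving
A = sparse.csc_matrix(np.array([[3., 0., 1.],
                                [0., 4., 0.],
                                [2., 0., 5.]]))
b = np.array([1., 2., 3.])

x = spsolve(A, b)  # solves without converting A to a dense array
print(x)
print(A.dot(x))    # should reproduce b (up to floating point error)
```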
## Matplotlib
Another important part of machine learning is visualization of data. The most common
tool for this in Python is `matplotlib`. It is an extremely flexible package, but
we will go over some basics here.
First, something specific to the IPython notebook. We can turn on the "IPython inline" mode,
which will make plots show up inline in the notebook.
```
%matplotlib inline
import matplotlib.pyplot as plt
# plotting a line
x = np.linspace(0, 10, 100)
plt.plot(x, np.sin(x))
# scatter-plot points
x = np.random.normal(size=500)
y = np.random.normal(size=500)
plt.scatter(x, y)
# showing images
x = np.linspace(1, 12, 100)
y = x[:, np.newaxis]
im = y * np.sin(x) * np.cos(y)
print(im.shape)
# imshow - note that origin is at the top-left by default!
plt.imshow(im)
# Contour plot - note that origin here is at the bottom-left by default!
plt.contour(im)
# 3D plotting
from mpl_toolkits.mplot3d import Axes3D
ax = plt.axes(projection='3d')
xgrid, ygrid = np.meshgrid(x, y.ravel())
ax.plot_surface(xgrid, ygrid, im, cmap=plt.cm.jet, cstride=2, rstride=2, linewidth=0)
```
There are many, many more plot types available. One useful way to explore these is by
looking at the matplotlib gallery: http://matplotlib.org/gallery.html
You can test these examples out easily in the notebook: simply copy the ``Source Code``
link on each page, and put it in a notebook using the ``%load`` magic.
For example:
```
# %load http://matplotlib.org/mpl_examples/pylab_examples/ellipse_collection.py
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import EllipseCollection
x = np.arange(10)
y = np.arange(15)
X, Y = np.meshgrid(x, y)
XY = np.hstack((X.ravel()[:,np.newaxis], Y.ravel()[:,np.newaxis]))
ww = X/10.0
hh = Y/15.0
aa = X*9
fig, ax = plt.subplots()
ec = EllipseCollection(ww, hh, aa, units='x', offsets=XY,
transOffset=ax.transData)
ec.set_array((X+Y).ravel())
ax.add_collection(ec)
ax.autoscale_view()
ax.set_xlabel('X')
ax.set_ylabel('y')
cbar = plt.colorbar(ec)
cbar.set_label('X+Y')
plt.show()
```
# Load Packages
```
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
```
# Load Data Points (Do not modify the following block)
```
with open('training_data.npz', 'rb') as f:
data = np.load(f)
x_list = data['x_list']
y_list = data['y_list']
x_data = data['x_data']
y_data = data['y_data']
n_data = len(x_data)
w = data['w']
original_degree = data['order']
# Print information of original function.
print("=================================")
print("We have", n_data, "number of data")
print("=================================")
weight_info_string = ''
for d in range(original_degree):
weight_info_string += 'w'+str(d)+':'+str(round(w[d],ndigits=3))+' '
print("Coefficients of the original polynomial")
print(weight_info_string)
print("=================================")
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
```
# Polynomial Regression (Programming Assignment)
### Variable Explanation (Do not change variable names)
- 'w' is true coefficients of the original polynomial function
- 'original_degree' is the order of the original polynomial function
- 'x_list' is a list of the points at $x$-axis
- 'y_list' is a list of function value $f(x)$ corresponding to 'x_list'. In other words, y_list = $f($x_list$)$
- 'x_data' is the input data
- 'y_data' is the output data
- 'n_data' is the number of data points
### Our goal is to estimate 'w' from data points, 'x_data' and 'y_data'. Answer the following problems.
### 1. Compute a Vandermonde matrix when the degree of polynomial is $4$ (30pt)
- The variable 'degree' is the order of the polynomial. In this problem, we set degree=$4$
- Use the variable 'A' for the Vandermonde matrix. Now, 'A' is initialized as a zero matrix whose elements are all zero. Fill in the elements of the Vandermonde matrix by using the power operator (\*\*), a for loop, and np.concatenate.
```
degree = 4
A = np.zeros((n_data, degree+1))
for i in range(0, degree + 1):
A[ :,i] = x_data ** i
```
### Print results (do not modify the following block)
```
print(A)
```
### 2. Compute the coefficients of polynomial regression using a $4$ degree polynomial (40pt)
- Use the variable 'degree' and the Vandermonde matrix 'A' in Problem 1.
- The variable 'w_est' holds the coefficients of the polynomial regression. Now, 'w_est' is initialized as a zero vector. Compute 'w_est' from 'A' and 'y_data'
- The variable 'y_est' holds the estimated function values corresponding to the input points 'x_list'. It starts as zeros; fill it by computing the estimated function values. In other words, y_est = $\hat{f}($x_list$)$
```
pseudo_A = np.linalg.pinv(A)
w_est = np.dot(pseudo_A, y_data)
# Accumulate the polynomial terms instead of overwriting y_est each iteration
y_est = np.zeros_like(x_list)
for i in range(0, degree + 1):
    y_est = y_est + w_est[i] * x_list ** i
```
### Print results (do not modify the following block)
```
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
plt.plot(x_list, y_est, 'm-', linewidth=2, label="Polynomial Regression (d={})".format(degree))
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
```
### 3. Compute the polynomial regression with $1$ degree polynomials (15pt)
- Repeat Problem 1 and Problem 2 with degree $1$.
- Use the following variables.
> degree1, A1, w_est1, y_est1
```
degree1 = 1
A1 = np.zeros((n_data, degree1+1))
for i in range(0, degree1 + 1):
A1[ :,i] = x_data ** i
pseudo_A1 = np.linalg.pinv(A1)
w_est1 = np.dot(pseudo_A1, y_data)
# Accumulate the polynomial terms
y_est1 = np.zeros_like(x_list)
for i in range(0, degree1 + 1):
    y_est1 = y_est1 + w_est1[i] * x_list ** i
```
### Print results (do not modify the following block)
```
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
plt.plot(x_list, y_est1, 'g-', linewidth=2, label="Polynomial Regression (d={})".format(degree1))
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
```
### 4. Compute the polynomial regression with $10$ degree polynomials (15pt)
- Repeat Problem 1 and Problem 2 with degree $10$.
- Use the following variables.
> degree2, A2, w_est2, y_est2
```
degree2 = 10
A2 = np.zeros((n_data, degree2+1))
for i in range(0, degree2 + 1):
A2[ :,i] = x_data ** i
pseudo_A2 = np.linalg.pinv(A2)
w_est2 = np.dot(pseudo_A2, y_data)
# Accumulate the polynomial terms
y_est2 = np.zeros_like(x_list)
for i in range(0, degree2 + 1):
    y_est2 = y_est2 + w_est2[i] * x_list ** i
```
### Print results (do not modify the following block)
```
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
plt.plot(x_list, y_est2, 'c-', linewidth=2, label="Polynomial Regression (d={})".format(degree2))
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
```
### 5. [Challenging Problem] Explain the effect of degree (20pt)
- By solving the above problems, we can observe the behaviors of polynomial regression with different degrees (1, 4, 10)
- Explain pros and cons of high degree polynomial
- Explain pros and cons of low degree polynomial
- What is this phenomenon called in machine learning?
### Answer
- A moderately high degree (like 4) lets the fit follow the original function closely, but a very high degree (like 10) starts fitting the noise in the data points and deviates from the original function.
- A low degree (like 1) gives a simple, stable fit, but it can be too simple to capture the shape of the underlying function.
- In machine learning these behaviours are called overfitting (degree too high) and underfitting (degree too low).
### The following figure shows all regression results with different degrees.
```
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
plt.plot(x_list, y_est, 'm-', linewidth=2, label="Polynomial Regression (d={})".format(degree))
plt.plot(x_list, y_est1, 'g-', linewidth=2, label="Polynomial Regression (d={})".format(degree1))
plt.plot(x_list, y_est2, 'c-', linewidth=2, label="Polynomial Regression (d={})".format(degree2))
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
```
# Questions
Fill in your name and the link to this file on your github.
* Name: ___
* Link to github URL: ___
```
import pandas as pd
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
import scipy
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
from sklearn import datasets
from sklearn.model_selection import train_test_split
```
# ML: Linear Regression
So this starts with linear regression. If you want a deeper dive than what I cover in class, you can refer to [this page](https://realpython.com/linear-regression-in-python/)
The exercises come from this workbook, which has somewhat helpful explanations too: https://csmastersuh.github.io/data_analysis_with_python_2020/linear_regression.html
# Exercise 10: Linear Regression
You'll need to make up some data for this. Don't spend too much time on this one, it's less interesting compared to the others.
```
n=20
# Linearly increasing x values
x = np.linspace(0, 10, n)
# Wonky line of points
y = x*2 + 1 + 1*np.random.randn(n)
display(x, y)
plt.scatter(x, y)
plt.show()
# Do actual linear regression here
```
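If you want a starting point for the "actual linear regression" step, here is a minimal, hedged sketch using scikit-learn's `LinearRegression` (`np.polyfit` would work just as well):
```
from sklearn.linear_model import LinearRegression

# sklearn expects a 2D feature array, hence the reshape
model = LinearRegression()
model.fit(x[:, np.newaxis], y)
print("slope:", model.coef_[0], "intercept:", model.intercept_)

# Plot the fitted line over the noisy points
plt.scatter(x, y)
plt.plot(x, model.predict(x[:, np.newaxis]), 'r-')
plt.show()
```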
# Exercise 11: Mystery Data
This one is far more interesting. You can download the file from [here](https://raw.githubusercontent.com/AnkS4/hy-data-analysis-with-python-2020/master/part05-e11_mystery_data/src/mystery_data.tsv). Make sure it gets the right filename!
You don't need to define the function the exercise demands, although you might find it helpful to do so.
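If you get stuck, a minimal sketch along these lines should work. I'm assuming here that the last column of `mystery_data.tsv` is the response and the remaining columns are features; check the header to be sure:
```
from sklearn.linear_model import LinearRegression

df = pd.read_csv("mystery_data.tsv", sep="\t")
X = df.iloc[:, :-1]   # assumption: every column except the last is a feature
y = df.iloc[:, -1]    # assumption: the last column is the response

model = LinearRegression()
model.fit(X, y)
for name, coef in zip(X.columns, model.coef_):
    print(name, coef)
```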
## Exercise 12: Coefficient of Determination
Read over this entire problem, parts 1 and 2.
This reuses the same `mystery_data.tsv` file as before.
Again, you do not need to define their function. Just calculate the R2 scores and print them, as they direct.
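For the R2 part, `sklearn.metrics.r2_score` (or the model's own `.score` method) does the heavy lifting. A hedged sketch, assuming `X`, `y`, `model`, and the `LinearRegression` import from the previous sketch are still in scope:
```
from sklearn.metrics import r2_score

# R2 of the model fitted on all features
print("R2 (all features):", r2_score(y, model.predict(X)))

# R2 of a model fitted on each feature alone
for name in X.columns:
    single = LinearRegression().fit(X[[name]], y)
    print("R2 ({}):".format(name), r2_score(y, single.predict(X[[name]])))
```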
## Exercise 13: Cycling Weather
I've already prepared the data that they require for this assignment. You can download it [here](https://gist.githubusercontent.com/acbart/466174a04e9a2505c4c25f91fc6dd4f6/raw/726865070677ec7dede17a08095624e0ea35e7cd/biking.csv).
The first column is the index, you can safely ignore it. The next 7 columns are straightforward. The last few columns are locations in Finland that have measuring stations. I recommend using `Baana` as they say in the instructions for testing.
# ML Naive Bayes Classification
This is the next section of the exercises, from: https://csmastersuh.github.io/data_analysis_with_python_2020/bayes.html
In addition to the reading, I recommend this video: https://www.youtube.com/watch?v=CPqOCI0ahss
## Exercise 1: Blob Classification
(**OPTIONAL**) This one is very vague, and they're actually asking you to generate your own test data using the `make_blobs` function from `sklearn`'s `datasets` submodule. I've already started that work for you. But honestly if you want to skip it, I don't think it's a helpful starting question.
```
def blob_classification(X, y):
# Put ML stuff here
pass
# Create the training data and validation data
X, y = datasets.make_blobs(100, 2, centers=2, random_state=2, cluster_std=2.5)
# Run your ML predictions
print("The accuracy score is", blob_classification(X, y))
# Run this on some new data
a=np.array([[2, 2, 0, 2.5],
[2, 3, 1, 1.5],
[2, 2, 6, 3.5],
[2, 2, 3, 1.2],
[2, 4, 4, 2.7]])
accuracies = []
for row in a:
X,y = datasets.make_blobs(100, int(row[0]), centers=int(row[1]),
random_state=int(row[2]), cluster_std=row[3])
accuracies.append(blob_classification(X, y))
print(repr(np.hstack([a, np.array(accuracies)[:,np.newaxis]])))
# The last column should be the categorizations
```
## Exercise 2: Plant Classification
This is a much better question. The Iris dataset is a classic: https://en.wikipedia.org/wiki/Iris_flower_data_set
The wikipedia page gives an example of how to load the dataset.
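A hedged sketch of the whole pipeline, relying only on the imports at the top of this notebook; the 80/20 split and the fixed `random_state` are my choices, not requirements:
```
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=0)

model = GaussianNB()
model.fit(X_train, y_train)
print("Accuracy:", metrics.accuracy_score(y_test, model.predict(X_test)))
```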
## Exercise 3: Word Classification
(**Skip**)
This one is too much. They give some of the data as an XML file. It's an interesting problem, and you can find the data (and solution) [here](https://github.com/AnkS4/hy-data-analysis-with-python-2020/tree/master/part06-e03_word_classification/src) if you want to tackle it, but I'm skipping it.
## Exercise 4: Spam Detection
Download [ham.txt.gz](https://github.com/AnkS4/hy-data-analysis-with-python-2020/raw/master/part06-e04_spam_detection/src/ham.txt.gz) and [spam.txt.gz](https://github.com/AnkS4/hy-data-analysis-with-python-2020/raw/master/part06-e04_spam_detection/src/spam.txt.gz).
This one is much more interesting and reasonable. It requires processing some large text files, but that's actually the easiest part, as shown in the code below. The idea is that you have spam (bad emails) and ham (good emails), and you want to determine which is which. I've done similar email processing (detecting job ads for a conference) and I was impressed with how easily I could train on a little data and get very good results.
```
import gzip
# Load the spam emails as strings in a list.
with gzip.open('spam.txt.gz', 'rb') as spam_file:
spam = spam_file.readlines()
print("Number of spam emails loaded as strings:", len(spam))
# Now do the same thing with the `ham.txt.gz`
# And then do the actual ML stuff
```
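Once both files are loaded, one reasonable approach (a sketch, not the only answer) is a bag-of-words model with a multinomial Naive Bayes classifier; `CountVectorizer` and `MultinomialNB` are my choices here, not something the exercise mandates:
```
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

with gzip.open('ham.txt.gz', 'rb') as ham_file:
    ham = ham_file.readlines()

emails = spam + ham
labels = [1] * len(spam) + [0] * len(ham)   # 1 = spam, 0 = ham

# Turn each email into a vector of word counts
features = CountVectorizer(decode_error='ignore').fit_transform(emails)

X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.25, random_state=0)

model = MultinomialNB().fit(X_train, y_train)
print("Accuracy:", metrics.accuracy_score(y_test, model.predict(X_test)))
```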
# ML Clustering
This is the last section: https://csmastersuh.github.io/data_analysis_with_python_2020/clustering.html
This section is one of the most interesting in my opinion. K-Means is a pretty straightforward tool, and is really worth learning how to use.
## Exercise 5: Plant Clustering
Same deal as before; use the IRIS dataset. Since this has so many parameters, it can be tricky to make a good visualization.
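A hedged sketch with `KMeans`; plotting only the first two features keeps the visualization manageable, and three clusters is the natural choice for the three iris species:
```
from sklearn.cluster import KMeans

iris = datasets.load_iris()
kmeans = KMeans(n_clusters=3, random_state=0)
clusters = kmeans.fit_predict(iris.data)

# Color each point by the cluster it was assigned to
plt.scatter(iris.data[:, 0], iris.data[:, 1], c=clusters)
plt.xlabel(iris.feature_names[0])
plt.ylabel(iris.feature_names[1])
plt.show()
```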
## Exercise 6: Non-convex Clusters
The data for this question is [here](https://raw.githubusercontent.com/AnkS4/hy-data-analysis-with-python-2020/master/part06-e06_nonconvex_clusters/src/data.tsv).
This one shows off a different clustering algorithm ([`DBSCAN`](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html)), which is "Good for data which contains clusters of similar density". I wasn't very familiar with DBSCAN, but it does seem much better than KMeans. It doesn't require you to figure out the number of clusters, and seems to be tricked less by unusual data. [This page](https://www.kdnuggets.com/2020/04/dbscan-clustering-algorithm-machine-learning.html) was very helpful in breaking that difference down.
The reference answer uses a `for` loop and `np.arange` to try `e` values from 0.05 to 0.2 in 0.05 increments, but I don't mind if you just manually try some different `e` values.
Please do make a visualization with clusters colored, since I think that really highlights what we are doing!
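A hedged sketch for this one; the column selection is an assumption (inspect the file first), and the `eps` values are just the ones I would try, not the "right" answer:
```
from sklearn.cluster import DBSCAN

df = pd.read_csv("data.tsv", sep="\t")
print(df.head())            # check the column layout first
X = df.iloc[:, :2].values   # assumption: the first two columns are the coordinates

for eps in np.arange(0.05, 0.25, 0.05):
    labels = DBSCAN(eps=eps).fit_predict(X)
    plt.figure()
    plt.title("eps = {:.2f}".format(eps))
    plt.scatter(X[:, 0], X[:, 1], c=labels)
plt.show()
```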
## Exercise 7: Binding Sites
Download the [`data.seq` file](https://raw.githubusercontent.com/AnkS4/hy-data-analysis-with-python-2020/master/part06-e07_binding_sites/src/data.seq); note that it is just a plain textual data file, despite the fancy extension.
They ask you to define `get_features_and_labels` to accept a filename, even though there's only one test file. Up to you if you want to hardcode the file path in or make it a flexible function.
There are multiple parts here, and they ask you to compare the euclidean and hamming distance. I think it's worth thinking about - if you don't get what they mean, do ask!
```
# The `find_permutation` function provided in the text, for your convenience
def find_permutation(n_clusters, real_labels, labels):
permutation=[]
for i in range(n_clusters):
idx = labels == i
# Choose the most common label among data points in the cluster
new_label=scipy.stats.mode(real_labels[idx])[0][0]
permutation.append(new_label)
return permutation
```
# Transfer Learning
Most of the time you won't want to train a whole convolutional network yourself. Modern ConvNets training on huge datasets like ImageNet take weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using [VGGNet](https://arxiv.org/pdf/1409.1556.pdf) trained on the [ImageNet dataset](http://www.image-net.org/) as a feature extractor. Below is a diagram of the VGGNet architecture.
<img src="assets/cnnarchitecture.jpg" width=700px>
VGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes.
You can read more about transfer learning from [the CS231n course notes](http://cs231n.github.io/transfer-learning/#tf).
## Pretrained VGGNet
We'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. Make sure to clone this repository to the directory you're working from. You'll also want to rename it so it has an underscore instead of a dash.
```
git clone https://github.com/machrisaa/tensorflow-vgg.git tensorflow_vgg
```
This is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link. **You'll need to clone the repo into the folder containing this notebook.** Then download the parameter file using the next cell.
```
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
vgg_dir = 'tensorflow_vgg/'
# Make sure vgg exists
if not isdir(vgg_dir):
raise Exception("VGG directory doesn't exist!")
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(vgg_dir + "vgg16.npy"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
urlretrieve(
'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
vgg_dir + 'vgg16.npy',
pbar.hook)
else:
print("Parameter file already exists!")
```
## Flower power
Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the [TensorFlow inception tutorial](https://www.tensorflow.org/tutorials/image_retraining).
```
import tarfile
dataset_folder_path = 'flower_photos'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('flower_photos.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
urlretrieve(
'http://download.tensorflow.org/example_images/flower_photos.tgz',
'flower_photos.tar.gz',
pbar.hook)
if not isdir(dataset_folder_path):
with tarfile.open('flower_photos.tar.gz') as tar:
tar.extractall()
tar.close()
```
## ConvNet Codes
Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier.
Here we're using the `vgg16` module from `tensorflow_vgg`. The network takes images of size $224 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. The network implemented here has this structure (copied from [the source code](https://github.com/machrisaa/tensorflow-vgg/blob/master/vgg16.py)):
```
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self.max_pool(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
self.relu6 = tf.nn.relu(self.fc6)
```
So what we want are the values of the first fully connected layer, after being ReLUd (`self.relu6`). To build the network, we use
```
with tf.Session() as sess:
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
```
This creates the `vgg` object, then builds the graph with `vgg.build(input_)`. Then to get the values from the layer,
```
feed_dict = {input_: images}
codes = sess.run(vgg.relu6, feed_dict=feed_dict)
```
```
import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]
```
Below I'm running images through the VGG network in batches.
> **Exercise:** Below, build the VGG network. Also get the codes from the first fully connected layer (make sure you get the ReLUd values).
```
# Set the batch size higher if you can fit it in your GPU memory
batch_size = 32
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
# TODO: Build the vgg network here
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3], name='input')
with tf.name_scope('vgg'):
vgg.build(input_)
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
# Image batch to pass to VGG network
images = np.concatenate(batch)
# TODO: Get the values from the relu6 layer of the VGG network
feed_dict = {input_: images}
codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file
with open('codes', 'w') as f:
codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels)
```
## Building the Classifier
Now that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work.
```
# read codes and labels from file
import csv
with open('labels') as f:
reader = csv.reader(f, delimiter='\n')
labels = np.array([each for each in reader if len(each) > 0]).squeeze()
with open('codes') as f:
codes = np.fromfile(f, dtype=np.float32)
codes = codes.reshape((len(labels), -1))
```
### Data prep
As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!
> **Exercise:** From scikit-learn, use [LabelBinarizer](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelBinarizer.html) to create one-hot encoded vectors from the labels.
```
from sklearn import preprocessing
pre = preprocessing.LabelBinarizer()
pre.fit(labels)
labels_vecs = pre.transform(labels)
labels_vecs[:10]
```
Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same distribution of classes as the whole data set. The easiest way to accomplish both these goals is to use [`StratifiedShuffleSplit`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) from scikit-learn.
You can create the splitter like so:
```
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
```
Then split the data with
```
splitter = ss.split(x, y)
```
`ss.split` returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use `next(splitter)` to get the indices. Be sure to read the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) and the [user guide](http://scikit-learn.org/stable/modules/cross_validation.html#random-permutations-cross-validation-a-k-a-shuffle-split).
> **Exercise:** Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets.
```
from sklearn.model_selection import StratifiedShuffleSplit
splits = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
train_idx, val_idx = next(splits.split(codes, labels))
middle = int(len(val_idx)/2)
val_idx, test_idx = val_idx[:middle], val_idx[middle:]
train_x, train_y = codes[train_idx], labels_vecs[train_idx]
val_x, val_y = codes[val_idx], labels_vecs[val_idx]
test_x, test_y = codes[test_idx], labels_vecs[test_idx]
print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)
```
If you did it right, you should see these sizes for the training sets:
```
Train shapes (x, y): (2936, 4096) (2936, 5)
Validation shapes (x, y): (367, 4096) (367, 5)
Test shapes (x, y): (367, 4096) (367, 5)
```
### Classifier layers
Once you have the convolutional codes, you just need to build a classifier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network.
> **Exercise:** With the codes and labels loaded, build the classifier. Consider the codes as your inputs, each of them is a 4096-dimensional vector. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost.
```
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])
full = tf.contrib.layers.fully_connected(inputs_, 512)
logits = tf.contrib.layers.fully_connected(full, labels_vecs.shape[1], activation_fn=None)# output layer logits
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels_))# cross entropy loss
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
# Operations for validation/test accuracy
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
```
### Batches!
Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data.
```
def get_batches(x, y, n_batches=10):
""" Return a generator that yields batches from arrays x and y. """
batch_size = len(x)//n_batches
for ii in range(0, n_batches*batch_size, batch_size):
# If we're not on the last batch, grab data with size batch_size
if ii != (n_batches-1)*batch_size:
X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size]
# On the last batch, grab the rest of the data
else:
X, Y = x[ii:], y[ii:]
# I love generators
yield X, Y
```
### Training
Here, we'll train the network.
> **Exercise:** So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. Use the `get_batches` function I wrote before to get your batches like `for x, y in get_batches(train_x, train_y)`. Or write your own!
```
epochs = 50
iteration = 0
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
for batch_x, batch_y in get_batches(train_x, train_y):
_, loss = sess.run([optimizer, cost], feed_dict = {inputs_: batch_x, labels_: batch_y})
iteration += 1
print('Epoch: {} Iteration: {} Training loss: {} '.format(epoch, iteration, loss))
if not iteration%10:
valid_acc = sess.run(accuracy, feed_dict = {inputs_: val_x, labels_: val_y})
print('Epoch: {} Iteration: {} Validation Acc : {} '.format(epoch, iteration, valid_acc))
saver.save(sess, "checkpoints/flowers.ckpt")
```
### Testing
Below you see the test accuracy. You can also see the predictions returned for images.
```
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.ndimage import imread
```
Below, feel free to choose images and see how the trained classifier predicts the flowers in them.
```
test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)
# Run this cell if you don't have a vgg graph built
if 'vgg' in globals():
print('"vgg" object already exists. Will not create again.')
else:
#create vgg
with tf.Session() as sess:
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16.Vgg16()
vgg.build(input_)
with tf.Session() as sess:
img = utils.load_image(test_img_path)
img = img.reshape((1, 224, 224, 3))
feed_dict = {input_: img}
code = sess.run(vgg.relu6, feed_dict=feed_dict)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: code}
prediction = sess.run(predicted, feed_dict=feed).squeeze()
plt.imshow(test_img)
plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), pre.classes_)
```
# From Matlab to Numpy
## Comparing Numpy and Matlab
**`Numpy`** and **`Matlab`** have a lot in common, but **`Numpy`** is not a clone of **`Matlab`**; there are many differences between them, for example:
`MATLAB®`|`Numpy`
---|---
Basic type is a double-precision float array, used mainly as 2-D matrices | Basic type is `ndarray`, with a special `matrix` class
1-based indexing | 0-based indexing
Scripts are mainly used for linear-algebra computation | All other **Python** features are available
Pass-by-value semantics<br>slicing returns a copy | Pass-by-reference semantics<br>slicing returns a view
File name must match the function name | Functions can be defined anywhere, in any file
Commercial | Free
Built-in 2D/3D plotting | Relies on third-party libraries such as `matplotlib`
Complete, self-contained environment | Relies on the environment provided by **Python**
## array or matrix?
`Numpy` provides not only the basic `array` type but also a `matrix` class that supports matrix operations, but in general `array` is recommended:
- many `numpy` functions return an `array`, not a `matrix`
- with `array`, element-wise operations and matrix operations are clearly distinguished
- vectors do not have to be treated as matrices
More specifically:
- `*, dot(), multiply()` (see the short sketch below this section)
    - `array`: `*` is element-wise multiplication, `dot()` is matrix multiplication
    - `matrix`: `*` is matrix multiplication, `multiply()` is element-wise multiplication
- handling vectors
    - `array`: shapes `1xN`, `Nx1`, and `N` all mean different things; an operation like `A[:,1]` returns a one-dimensional array of shape `N`, and the transpose of a one-dimensional array is the array itself
    - `matrix`: shapes are `1xN` or `Nx1`; `A[:,1]` returns a two-dimensional `Nx1` matrix
- higher-dimensional arrays
    - `array`: supports more than 2 dimensions
    - `matrix`: always exactly 2 dimensions
- attributes
    - `array`: `.T` is the transpose
    - `matrix`: `.H` is the conjugate transpose, `.I` is the inverse, `.A` converts to an `array`
- constructors
    - `array`: the `array` function takes a (nested) sequence, e.g. `array([[1,2,3],[4,5,6]])`
    - `matrix`: the `matrix` function additionally accepts a string, e.g. `matrix("[1 2 3; 4 5 6]")`
Their respective pros and cons:
- **`array`**
    - `[GOOD]` A one-dimensional array can act as either a column vector or a row vector: `v` is treated as a column vector in `dot(A,v)` and as a row vector in `dot(v,A)`, which saves the transposes
    - `[BAD!]` Matrix multiplication needs the `dot()` function, e.g. `dot(dot(A,B),C)` vs `A*B*C`
    - `[GOOD]` Element-wise multiplication is simply `A*B`
    - `[GOOD]` As the basic type, it is the return type of many third-party functions built on `numpy`
    - `[GOOD]` All the operators `*,/,+,**,...` are element-wise
    - `[GOOD]` Handles data of any dimensionality
    - `[GOOD]` Tensor operations
- **`matrix`**
    - `[GOOD]` Behaves much like **`MATLAB`**
    - `[BAD!]` At most 2 dimensions
    - `[BAD!]` And at least 2 dimensions
    - `[BAD!]` Many functions return an `array` even when the argument is a `matrix`
    - `[GOOD]` `A*B` is matrix multiplication
    - `[BAD!]` Element-wise multiplication requires the `multiply` function
    - `[BAD!]` `/` is element-wise
Of course, in practice the choice between the two depends on the situation.
The two can be converted into each other:
- `asarray`: returns an array
- `asmatrix` (or `mat`): returns a matrix
- `asanyarray`: returns an array or a subclass of array; note that `matrix` is a subclass of `array`, so a matrix input returns a matrix
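A small sketch of the `*` difference described above (the numbers are arbitrary):
```
import numpy

A = numpy.array([[1., 2.], [3., 4.]])
M = numpy.asmatrix(A)        # the same data viewed as a matrix

print(A * A)                 # array:  element-wise product
print(numpy.dot(A, A))       # array:  matrix product
print(M * M)                 # matrix: matrix product
print(numpy.multiply(M, M))  # matrix: element-wise product
```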
## Matlab-like functions
Many functions have close equivalents:
- `ones, zeros, empty, eye, rand, repmat`
These normally return an `array`, but `numpy` provides a `matlib` submodule whose versions of the same functions return a `matrix`:
```
import numpy
import numpy.matlib
a = numpy.ones(7)
print a.shape
print type(a)
a = numpy.matlib.ones(7)
print a.shape
print type(a)
```
The `mat` function converts an array into a matrix:
```
a = numpy.array([1,2,3])
b = numpy.mat(a)
print type(b)
```
Some functions have been moved into submodules; for example, `rand()` must be called as `numpy.random.rand()` (or you can generate a matrix from the `matlib` module):
```
a = numpy.random.rand(10)
print a
```
## Equivalent operations
Suppose we have imported `Numpy` like this:
```
from numpy import *
import scipy.linalg
```
Below, `linalg` refers to `numpy.linalg`, which is different from `scipy.linalg`.
Note: **`MATLAB`** and **`Numpy`** indexing differ in a few ways (a short example follows the list):
- `1-based` vs `0-based`
- `()` vs `[]`
- `MATLAB`: `beg(:step):end`, the end value `end` is included
- `Numpy`: `beg:end(:step)`, the end value `end` is excluded
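A minimal sketch of these indexing differences (the array values are only an example):
```
import numpy

a = numpy.arange(1, 13).reshape(3, 4)   # 3x4 array containing 1..12

# MATLAB: a(1,2)   -> row 1, column 2 (1-based, parentheses)
# Numpy:  a[0,1]   -> the same element (0-based, square brackets)
print(a[0, 1])

# MATLAB: a(1:2,:) -> rows 1 and 2, the end value is included
# Numpy:  a[0:2,:] -> rows 0 and 1, the end value is excluded
print(a[0:2, :])
```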
MATLAB|Numpy|Notes
---|---|---
`help func` | `info(func)`, `help(func)`, `func?`(IPython)| get help on a function
`which func` | | find where a function is defined
`type func` | `source(func)`, `func??`(IPython)| view the source code of a function
`a && b` | `a and b` | logical `AND`
`1*i, 1*j, 1i, 1j` | `1j` | complex number
`eps` | `spacing(1)` | distance from `1` to the nearest floating-point number
`ndims(a)` | `ndim(a), a.ndim` | number of dimensions of `a`
`numel(a)` | `size(a), a.size` | number of elements of `a`
`size(a)` | `shape(a), a.shape` | shape of `a`
`size(a,n)` | `a.shape[n-1]` | size of the n-th dimension
`a(2,5)` | `a[1,4]` | element in row 2, column 5
`a(2,:)` | `a[1], a[1,:]` | row 2
`a(1:5,:)` | `a[0:5]` | rows 1 through 5
`a(end-4:end,:)` | `a[-5:]` | last 5 rows
`a(1:3,5:9)` | `a[0:3][:,4:9]` | specific rows and columns (rows 1-3, columns 5-9)
`a([2,4,5],[1,3])` | `a[ix_([1,3,4],[0,2])]` | specific rows and columns (columns 1 and 3 of rows 2, 4, 5)
`a(3:2:21,:)` | `a[2:21:2,:]` | specific rows (rows 3, 5, ..., 21)
`a(1:2:end,:)` | `a[ ::2,:]` | odd-numbered rows
`a([1:end 1],:)` | `a[r_[:len(a),0]]` | append a copy of the first row at the end
`a.'` | `a.T` | transpose
`a ./ b` | `a/b` | element-wise division
`(a>0.5)` | `(a>0.5)` | whether each element is greater than 0.5
`find(a>0.5)` | `nonzero(a>0.5)` | positions of the elements greater than 0.5
`a(a<0.5)=0` | `a[a<0.5]=0` | set elements smaller than 0.5 to 0
`a(:) = 3` | `a[:] = 3` | set all elements to 3
`y=x` | `y=x.copy()` | assign x to y
`y=x(2,:)` | `y=x[1,:].copy()` | note the difference between pass-by-value and pass-by-reference
`y=x(:)` | `y=x.flatten(1)` | flatten the matrix into a vector; the `1` means column-major (Fortran) order
`max(max(a))` | `a.max()` | maximum value
`max(a)` | `a.max(0)` | maximum of each column
`max(a,[],2)` | `a.max(1)` | maximum of each row
`max(a,b)` | `maximum(a,b)` | element-wise comparison, keeping the larger value
`a & b` | `logical_and(a, b)` | logical AND
`bitand(a, b)` | `a & b` | bitwise AND
`inv(a)` | `linalg.inv(a)` | inverse of a
`pinv(a)` | `linalg.pinv(a)` | pseudo-inverse
`rank(a)` | `linalg.matrix_rank(a)` | rank
`a\b` | `linalg.solve(a,b)` (if `a` is square), otherwise `linalg.lstsq(a,b)` | solve `a x = b`
`b/a` | solve `a.T x.T = b.T` instead | solve `x a = b`
`[U,S,V]=svd(a)` | `U, S, Vh = linalg.svd(a), V = Vh.T` | singular value decomposition
`chol(a)` | `linalg.cholesky(a).T` | Cholesky decomposition
`[V,D]=eig(a)` | `D,V = linalg.eig(a)` | eigenvalue decomposition
`[V,D]=eig(a,b)` | `V,D = scipy.linalg.eig(a,b)` | generalized eigenvalue decomposition
`[V,D]=eigs(a,k)` | | eigenvectors for the k largest eigenvalues
MATLAB|numpy.array|numpy.matrix|Notes
---|---|---|---
`[1,2,3;4,5,6]` | `array([[1.,2.,3.],[4.,5.,6.]])` | `mat([[1.,2.,3.],[4.,5.,6.]]), mat('1,2,3;4,5,6')` | `2x3` matrix
`[a b;c d]` | `vstack([hstack([a,b]), hstack([c,d])])` | `bmat('a b;c d')` | build a block matrix
`a(end)` | `a[-1]` | `a[:,-1][0,0]` | last element
`a'` | `a.conj().T` | `a.H` | conjugate transpose
`a * b` | `dot(a,b)` | `a * b` | matrix multiplication
`a .* b` | `a * b` | `multiply(a,b)` | element-wise multiplication
`a.^3` | `a**3` | `power(a,3)` | element-wise cube
`a(:,find(v>0.5))` | `a[:,nonzero(v>0.5)[0]]` | `a[:,nonzero(v.A>0.5)[0]]` | columns of `a` where the row vector `v` is greater than 0.5
`a(:,find(v>0.5))` | `a[:,v.T>0.5]` | `a[:,v.T>0.5]` | columns of `a` where the column vector `v` is greater than 0.5
`a .* (a>0.5)` | `a * (a>0.5)` | `mat(a.A * (a>0.5).A)` | set all elements smaller than 0.5 to 0
`1:10` | `arange(1.,11.), r_[1.:11.], r_[1:10:10j]` | `mat(arange(1.,11.)), r_[1.:11., 'r']` | the `1.` makes the result a float array
`0:9` | `arange(10.), r_[:10.], r_[:9:10j]` | `mat(arange(10.)), r_[:10., 'r']` |
`[1:10]'` | `arange(1.,11.)[:,newaxis]` | `r_[1.:11.,'c']` | column vector
`zeros, ones, eye, diag, linspace` | `zeros, ones, eye, diag, linspace` | `mat(...)` |
`rand(3,4)` | `random.rand(3,4)` | `mat(...)` | random numbers in [0, 1)
`[x,y]=meshgrid(0:8,0:5)` | `mgrid[0:9., 0:6.], meshgrid(r_[0:9.],r_[0:6.])` | `mat(...)` | grid
| `ogrid[0:9.,0:6.], ix_(r_[0:9.],r_[0:6.])` | `mat()` | preferred in `NumPy`
`[x,y]=meshgrid([1,2,4],[2,4,5])`|`meshgrid([1,2,4],[2,4,5])`|`mat(...)`|
|`ix_([1,2,4],[2,4,5])`|`mat(...)`|
`repmat(a, m, n)`|`tile(a, (m,n))`|`mat(...)`| `m x n` copies of `a`
`[a b]` | `c_[a,b]`|`concatenate((a,b),1)`| concatenate columns (side by side)
`[a; b]` | `r_[a,b]`|`concatenate((a,b))`| concatenate rows (stacked)
`norm(v)` | `sqrt(dot(v,v)), linalg.norm(v)` | `sqrt(dot(v.A,v.A)), linalg.norm(v)` | norm
`[Q,R,P]=qr(a,0)` | `Q,R = scipy.linalg.qr(a)` | `mat(...)` | QR decomposition
`[L,U,P]=lu(a)` | `P,L,U = scipy.linalg.lu(a)` | `mat(...)` | LU decomposition
`fft(a)` | `fft(a)` | `mat(...)` | FFT
`ifft(a)` | `ifft(a)` | `mat(...)` | IFFT
`sort(a)` | `sort(a), a.sort()` | `mat(...)` | sort
Reference: http://wiki.scipy.org/NumPy_for_Matlab_Users#whichNotes
### Q1. Describe the differences between text and binary files in a single paragraph.
Ans :
Binary files typically contain a sequence of bytes, or ordered groupings of eight bits. When creating a custom file format for a program, these bytes are arranged into a layout that stores the necessary information for the application. Binary file formats may include multiple types of data in the same file, such as image, video, and audio data. This data can be interpreted by supporting programs, but will show up as garbled text in a text editor.

Text files are more restrictive than binary files since they can only contain textual data. However, unlike binary files, they are less likely to become corrupted: while a small error in a binary file may make it unreadable, a small error in a text file may simply show up once the file has been opened. Text files may be saved in either a plain text (.TXT) format or a rich text (.RTF) format. A typical plain text file contains several lines of text that are each followed by an End-of-Line (EOL) character, and an End-of-File (EOF) marker is placed after the final character to signal the end of the file. Rich text files use a similar file structure but may also include text styles, such as bold and italics, as well as page formatting information. Both plain text and rich text files include a character-encoding scheme that determines how the characters are interpreted and which characters can be displayed. Since text files use a simple, standard format, many programs are capable of reading and editing them.
### Q2. What are some scenarios where using text files will be the better option? When would you like to use binary files instead of text files?
Ans:
Advantages of text files include their small size and versatility: kilobytes or megabytes smaller than the same data stored in other formats, they can be rapidly and massively exchanged via email or disk, and most can be opened on computers running diverse operating systems using very basic software. The advantage of binary files is that they are more efficient: storing values in numeric formats rather than as text characters tends to use less memory, and binary formats also offer advantages in terms of speed of access.
### Q3. What are some of the issues with using binary operations to read and write a Python integer directly to disc?
Ans : When we read or write a Python integer directly with binary operations:
* binary operations deal with raw bytes, so the byte order (endianness) must be agreed on in advance;
* Python integers are arbitrary-precision, so one needs to decide exactly how many bytes to read or write.
### Q4. Describe a benefit of using the with keyword instead of explicitly opening a file.
Ans : When a file is opened using the `with` keyword, the file is closed automatically when the block ends, even if an exception occurs inside it. The file is therefore never left open, and there is no need to call `close()` explicitly.
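For example (a small sketch with a throwaway file name):
```
# Without 'with': the file must be closed explicitly, and an exception
# raised between open() and close() can leave it open.
f = open('example.txt', 'w')
try:
    f.write('hello\n')
finally:
    f.close()

# With 'with': the file is closed automatically, even if an exception occurs.
with open('example.txt', 'w') as f:
    f.write('hello\n')
```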
### Q5. Does Python have the trailing newline while reading a line of text? Does Python append a newline when you write a line of text?
While reading a line of text from a text file, Python keeps the trailing newline. While writing, Python does not append a newline at the end of the line; it has to be added explicitly.
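A quick illustration (again with a throwaway file):
```
with open('newline_demo.txt', 'w') as f:
    f.write('first line\n')
    f.write('second line')      # no newline is added automatically

with open('newline_demo.txt') as f:
    line = f.readline()
    print(repr(line))           # 'first line\n' -- the trailing newline is kept
```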
### Q6. What file operations enable for random-access operation?
Ans : The file operations `seek(offset, whence)` and `tell()` enable random-access operations.
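For example (a small sketch):
```
with open('random_access_demo.txt', 'w') as f:
    f.write('0123456789')

with open('random_access_demo.txt', 'rb') as f:
    f.seek(4)           # jump to byte offset 4
    print(f.read(3))    # b'456'
    print(f.tell())     # 7 -- current position in the file
```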
### Q7. When do you think you'll use the struct package the most?
```
# Ans : The struct package is mostly used when converting common Python types into 'C' language types. This is done
# by packing Python variables into data fields of specific sizes, so that when we read, the right number of bytes
# is read. This is useful when interacting with existing binary files.
# Reading and writing a single integer using struct package
from struct import pack, unpack, calcsize
def write_file(fname,int_n):
with open(fname,'wb') as f:
bss=pack('h',int_n)
f.write(bss)
def read_file(fname):
with open(fname,'rb') as rf:
bss=rf.read(calcsize('h'))
return unpack('h',bss)
write_file('struct_file1.dat',155)
read_file('struct_file1.dat')
```
### Q8. When is pickling the best option?
Ans : Pickling is the best option when we want to store whole Python objects (lists, dictionaries, class instances) in a file and read them back later from another Python program. The functions in the pickle package take care of how the data is represented in the file.
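A minimal sketch of pickling an object and reading it back:
```
import pickle

record = {'name': 'test', 'scores': [1, 2, 3]}

with open('record.pkl', 'wb') as f:
    pickle.dump(record, f)

with open('record.pkl', 'rb') as f:
    restored = pickle.load(f)

print(restored == record)   # True
```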
### Q9. When will it be best to use the shelve package?
Ans : The shelve package is best when we want to pickle data but treat the whole file as a dictionary: any object is looked up by its key and returned easily.
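For example (a sketch; the file name is arbitrary):
```
import shelve

with shelve.open('mydata') as db:
    db['settings'] = {'threshold': 0.5, 'labels': ['a', 'b']}

with shelve.open('mydata') as db:
    print(db['settings'])    # looked up by its string key
```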
### Q10. What is a special restriction when using the shelve package, as opposed to using other data dictionaries?
Ans : A “shelf” is a persistent, dictionary-like object. The difference with “dbm” databases is that the values (not the keys!) in a shelf can be essentially arbitrary Python objects — anything that the pickle module can handle. This includes most class instances, recursive data types, and objects containing lots of shared sub-objects. The keys are ordinary strings.
## Importing necessary libraries
```
%matplotlib inline
import random
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import ipywidgets as widgets
import warnings
warnings.filterwarnings('ignore')
```
## Helper Functions
```
def colour(n):
ret = {}
for i in range(n):
r = int(random.random() * 256)
g = int(random.random() * 256)
b = int(random.random() * 256)
ret[i] = "#{:02x}{:02x}{:02x}".format(r,g,b)
return ret
colorList = colour(5000)
minDegNode = lambda G: min(G, key=G.degree)
maxDegNode = lambda G: max(G, key=G.degree)
def transformDict(myDict):
for key,value in myDict.items():
myDict[key] = colorList[value]
return myDict
```
## List of Strategies Used:
1. Greedy Graph Coloring Algorithm (Vanilla)
2. Greedy Graph Coloring By Ordering Nodes By Largest Degree First
3. Greedy Graph Coloring By Random Ordering of Nodes
4. Welsh Powell
5. Greedy Graph Colouring Using BFS
6. Greedy Graph Colouring Using DFS
```
def greedyGraphColoring(G):
colors = {}
graphNodes = G.nodes()
for node in graphNodes:
adjColors = set()
for adjNode in G.neighbors(node):
if adjNode in colors:
adjColors.add(colors[adjNode])
for color in itertools.count():
if color not in adjColors:
break
colors[node] = color
return colors
def greedyDegreeSort(G):
colors = {}
graphNodes = list(G.nodes())
graphNodes.sort(key=lambda node: -G.degree(node))
for node in graphNodes:
adjColors = set()
for adjNode in G.neighbors(node):
if adjNode in colors:
adjColors.add(colors[adjNode])
for color in itertools.count():
if color not in adjColors:
break
colors[node] = color
return colors
def greedyRandomShuffling(G):
colors = {}
    graphNodes = list(G.nodes())
    random.shuffle(graphNodes)  # shuffle the node list in place; shuffling a temporary copy would have no effect
for node in graphNodes:
adjColors = set()
for adjNode in G.neighbors(node):
if adjNode in colors:
adjColors.add(colors[adjNode])
for color in itertools.count():
if color not in adjColors:
break
colors[node] = color
return colors
def welsh_powell(G):
    # sort nodes by degree, largest first
    x = sorted(G.degree, key=lambda x: x[1], reverse=True)
    len_g = len(G)
    no_colored = 0
    k = 1
    colors = dict()
    while no_colored < len_g:
        colored = set()
        colorednodes = set()
        for node in x:
            # give colour k only to nodes with no neighbour already coloured k in this round
            y = set(G.neighbors(node[0]))
            if y.intersection(colorednodes) == set():
                colors[node[0]] = k
                no_colored += 1
                colored.add(node)
                colorednodes.add(node[0])
        x = list(set(x) - colored)
        k += 1
    return colors
def strategy_connected_sequential_bfs(G, colors):
return strategy_connected_sequential(G, colors, 'bfs')
def strategy_connected_sequential_dfs(G, colors):
return strategy_connected_sequential(G, colors, 'dfs')
def strategy_connected_sequential(G, colors, traversal='bfs'):
    # iterate over connected components (connected_component_subgraphs was removed in newer networkx)
    for component in nx.connected_components(G):
        component_graph = G.subgraph(component)
        source = next(iter(component_graph.nodes()))
        yield source
        if traversal == 'bfs':
            tree = nx.bfs_edges(component_graph, source)
        elif traversal == 'dfs':
            tree = nx.dfs_edges(component_graph, source)
        else:
            raise nx.NetworkXError('Please specify bfs or dfs for connected sequential ordering')
        for (_, end) in tree:
            yield end
def greedy_color(G, strategy, interchange=False):
colors = {}
if len(G):
if interchange:
raise nx.NetworkXPointlessConcept('Interchange is not applicable for GIS and SLF')
nodes = strategy(G, colors)
if nodes:
if interchange:
return (_interchange.greedy_coloring_with_interchange(G, nodes))
else:
for node in nodes:
neighbour_colors = set()
                    for neighbour in G.neighbors(node):  # neighbors_iter was removed in networkx 2.x
if neighbour in colors:
neighbour_colors.add(colors[neighbour])
for color in itertools.count():
if color not in neighbour_colors:
break
colors[node] = color
return colors
```
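The timing function below also calls networkx's built-in `greedy_color`. Note that in more recent networkx releases the strategy can be passed as a string rather than a function, e.g.:
```
import networkx as nx

# quick check of networkx's own greedy colouring on a small random graph
G_demo = nx.fast_gnp_random_graph(20, 0.3)
demo_colors = nx.coloring.greedy_color(G_demo, strategy="largest_first")
print("colours used:", max(demo_colors.values()) + 1)
```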
## Function to check time taken by various strategies:
```
def printGraphAndAnalyse(G, sizeoffigure):
fig = plt.figure(figsize=(2*sizeoffigure,sizeoffigure))
plt.subplot(1, 2, 1)
nx.draw(G, with_labels=True)
plt.subplot(1, 2, 2)
nodeColors = transformDict(greedyGraphColoring(G))
nx.draw(G, with_labels=True, node_color=list(nodeColors.values()))
plt.show()
print("Time taken by different Algorithms:\n")
print("Basic Greedy Algorithm:")
%timeit -n 10 -r 2 greedyGraphColoring(G)
print("\nGreedy Graph Coloring By Ordering Nodes (By Largest Degree First):")
%timeit -n 10 -r 2 greedyDegreeSort(G)
print("\nGreedy Algorithm With Random Shuffling:")
%timeit -n 10 -r 2 greedyRandomShuffling(G)
print("\nWelsh Powell:")
%timeit -n 10 -r 2 welsh_powell(G)
print("\nGreedy Algorithm using DFS ")
%timeit -n 10 -r 2 nx.coloring.greedy_color(G, strategy=nx.coloring.strategy_connected_sequential_dfs)
print("\nGreedy Algorithm using BFS ")
%timeit -n 10 -r 2 nx.coloring.greedy_color(G, strategy=nx.coloring.strategy_connected_sequential_bfs)
def graph_colouring(Type, noOfNodes, plotSize, edgeProbability, branchingFactor_bt, height_bt):
if Type == "Complete Graph" :
G = nx.complete_graph(noOfNodes)
elif Type == "Balanced Tree":
G = nx.balanced_tree(branchingFactor_bt, height_bt)
elif Type == "Cycle Graph":
G = nx.cycle_graph(noOfNodes)
elif Type == "Random graph":
G = nx.fast_gnp_random_graph(noOfNodes,edgeProbability)
printGraphAndAnalyse(G, plotSize)
style = {'description_align':'left','description_width': '25%'}
layout = widgets.Layout(width='90%')
layout1 = widgets.Layout(width='85%')
cuf = True
interactive_plot = widgets.interactive(graph_colouring,
noOfNodes = widgets.IntSlider(description='Number of nodes in graph', min = 2, max = 100, step = 1, value = 3, style = style, layout = layout, continuous_update = cuf),
Type = widgets.Dropdown(options=['Complete Graph','Balanced Tree', 'Cycle Graph', 'Random graph'], value='Complete Graph', description='Type of Graph:', style = style, layout = layout1),
plotSize = widgets.IntSlider(description='Size of Graph (Inch x Inch)', min = 3, max = 15, step = 1, value = 10, style = style, layout = layout, continuous_update = cuf),
                      edgeProbability = widgets.IntSlider(description='Edge Probability (Erdős–Rényi model)', min = 0, max = 1.0, step = 0.01, value = 0.4, style = style, layout = layout, continuous_update = cuf) if False else widgets.FloatSlider(description='Edge Probability (Erdős–Rényi model)', min = 0, max = 1.0, step = 0.01, value = 0.4, style = style, layout = layout, continuous_update = cuf),
                      branchingFactor_bt = widgets.IntSlider(description='Branching Factor of the Balanced Tree', min = 1, max = 10, step = 1, value = 3, style = style, layout = layout, continuous_update = cuf),
                      height_bt = widgets.IntSlider(description='Height of the Balanced Tree', min = 1, max = 10, step = 1, value = 2, style = style, layout = layout, continuous_update = cuf))
output = interactive_plot.children[-1]
output.layout.height = '1100px'
interactive_plot
```
# XMLRPC interface service
XMLRPC is the RPC framework provided by the Python standard library [xmlrpc](https://docs.python.org/zh-cn/3/library/xmlrpc.html). Depending on the requirements, it is usually combined with `socketserver.ThreadingMixIn` or `socketserver.ForkingMixIn` from the standard library to handle requests with multiple threads or multiple processes (note that forking only works on platforms that support fork). Also note that XMLRPC is not secure by itself; you need to guard against [XML vulnerabilities](https://docs.python.org/zh-cn/3/library/xml.html#xml-vulnerabilities).
Below is a simple example, placed in [C0](https://github.com/TutorialForPython/python-io/tree/master/%E6%8E%A5%E5%8F%A3%E6%9C%8D%E5%8A%A1/RPC%E6%9C%8D%E5%8A%A1/code/XMLRPC%E6%8E%A5%E5%8F%A3%E6%9C%8D%E5%8A%A1/C0). In this example we compute the md5 hash of an input string.
## A minimal server
`logger.py` defines structured logging. By default xmlrpc uses the methods `SimpleXMLRPCRequestHandler.log_error(format, *args)` and `SimpleXMLRPCRequestHandler.log_request(code='-', size="-")` to print request and error information to `stderr` as plain text; such unstructured output is hard to collect and analyse, so we use `structlog` to structure it.
```python
import sys
import logging
import structlog
LOG_LEVEL = logging.INFO
SERVER_NAME = "xmlrpc-server"
structlog.configure(
processors=[
        structlog.stdlib.filter_by_level,  # decide whether a message at this level is accepted
        structlog.stdlib.add_logger_name,  # add a "logger" field
        structlog.stdlib.add_log_level,  # add a "level" field
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="iso"),  # add a "timestamp" field in ISO format
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,  # capture exception traceback information
        structlog.processors.StackInfoRenderer(),  # detailed stack information
        structlog.processors.JSONRenderer()  # JSON output; the first positional argument goes into the "event" field
],
context_class=dict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
handler = logging.StreamHandler(sys.stdout)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(LOG_LEVEL)  # set the minimum log level
log = structlog.get_logger(SERVER_NAME)
```
`server.py` implements the algorithm logic.
```python
import time
from hashlib import md5
from http import HTTPStatus
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
from socketserver import ThreadingMixIn
from logger import log
HOST = "localhost"
PORT = 5000
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/XMLRPC',)
def log_error(self, format, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
log.error("server error",address=self.address_string(),errormsg=format%args)
def log_request(self, code='-', size="-"):
"""Log an accepted request.
This is called by send_response().
"""
if isinstance(code, HTTPStatus):
code = code.value
log.info("request info",address=self.address_string(),requestline=self.requestline, code=str(code), size=str(size))
class ThreadingXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
pass
def md5_func(text: str)->str:
    """md5 hash."""
start = time.time()
result = md5(text.encode("utf-8")).hexdigest()
end = time.time()
log.info("time it",seconds=end-start)
return result
def main():
with ThreadingXMLRPCServer((HOST, PORT), requestHandler=RequestHandler, allow_none=True) as server:
        # register the names of all callable functions with system.listMethods,
        # the docstring of each callable with system.methodHelp(func_name),
        # and the signature of each callable with system.methodSignature(func_name)
        server.register_introspection_functions()
        # allow a client to call several server functions in a single request
        server.register_multicall_functions()
        # register a function so it can be called remotely; the string is the name it is exposed under
        server.register_function(md5_func, md5_func.__name__)
try:
log.info("server start", msg=f"xmlrpc start @ {HOST}:{PORT}!")
server.serve_forever()
except Exception:
log.info("server error", exc_info=True, stack_info=True)
raise
finally:
            log.info("server stopped", msg=f"xmlrpc @ {HOST}:{PORT} stopped!")
if __name__ == "__main__":
main()
```
With xmlrpc you only need to register functions on the server object and then call `serve_forever()`.
## A minimal client
```python
import xmlrpc.client
HOST = "localhost"
PORT = 5000
with xmlrpc.client.ServerProxy(f"http://{HOST}:{PORT}/XMLRPC") as proxy:
result = proxy.md5_func("这是一个测试")
print(result)
```
In this example we use multiple threads to improve the concurrency of the RPC service, but RPC workloads are usually compute-bound, so more concurrency alone does not solve the problem; what matters is making use of every core. We could of course run several instances behind a load-balancing proxy, but a simpler option is to use multiple cores by pushing the heavy computation into worker processes.
## Using multiple cores
In the multiprocessing section we covered how to [use concurrent.futures for high-level multiprocessing](http://blog.hszofficial.site/TutorialForPython/%E8%AF%AD%E6%B3%95%E7%AF%87/%E6%B5%81%E7%A8%8B%E6%8E%A7%E5%88%B6/%E5%A4%9A%E8%BF%9B%E7%A8%8B.html#%E4%BD%BF%E7%94%A8concurrentfutures%E8%BF%9B%E8%A1%8C%E9%AB%98%E5%B1%82%E6%8A%BD%E8%B1%A1%E7%9A%84%E5%A4%9A%E8%BF%9B%E7%A8%8B%E6%93%8D%E4%BD%9C); here we use the same approach. The code in [C1](https://github.com/TutorialForPython/python-io/tree/master/%E6%8E%A5%E5%8F%A3%E6%9C%8D%E5%8A%A1/RPC%E6%9C%8D%E5%8A%A1/code/XMLRPC%E6%8E%A5%E5%8F%A3%E6%9C%8D%E5%8A%A1/C1) shows how to adapt the example above.
+ server.py
```python
...
from concurrent.futures import ProcessPoolExecutor,wait
...
WORKER = 4
...
# note that the function is renamed here
def _md5_func(text: str)->str:
    """md5 hash."""
start = time.time()
result = md5(text.encode("utf-8")).hexdigest()
end = time.time()
log.info("time it",seconds=end-start)
return result
def main():
with ProcessPoolExecutor(WORKER) as executor:
        # wrap the work to be done; the actual execution is delegated to the process pool
def md5_func(text:str)->str:
fut = executor.submit(_md5_func, text)
wait([fut])
return fut.result()
with ThreadingXMLRPCServer((HOST, PORT), requestHandler=RequestHandler, allow_none=True) as server:
            # register the names of all callable functions with system.listMethods,
            # the docstring of each callable with system.methodHelp(func_name),
            # and the signature of each callable with system.methodSignature(func_name)
            server.register_introspection_functions()
            # allow a client to call several server functions in a single request
            server.register_multicall_functions()
            # register a function so it can be called remotely; the string is the name it is exposed under
            server.register_function(md5_func, md5_func.__name__)
try:
log.info("server start", msg=f"xmlrpc start @ {HOST}:{PORT}!")
server.serve_forever()
except Exception:
log.info("server error", exc_info=True, stack_info=True)
raise
finally:
                log.info("server stopped", msg=f"xmlrpc @ {HOST}:{PORT} stopped!")
if __name__ == "__main__":
main()
```
**1. Getting the Dataset**
---
```
import keras
imdb = keras.datasets.imdb
VOCAB_SIZE = 10000
INDEX_FROM = 2
(train_data,train_labels),(test_data,test_labels) = imdb.load_data(num_words=VOCAB_SIZE,
index_from=INDEX_FROM)
```
Sanity check:
```
print('Sample review:', train_data[0])
print('\n Sample label:', test_labels[1])
```
**2. Readying the Inputs for the LSTM**
---
```
from keras.preprocessing.sequence import pad_sequences
MAXIMUM_LENGTH = 500
print('Length of sample train_data before preprocessing:', len(train_data[0]))
preprocessed_train_data=pad_sequences(train_data, maxlen=MAXIMUM_LENGTH,padding='pre', truncating='pre')
print('Length of sample train_data after preprocessing:', len(preprocessed_train_data[0]))
```
**3. Building the Model**
---
```
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense
from keras.optimizers import Adam
EMBED_SIZE = 100
model = Sequential()
model.add(Embedding(input_dim=VOCAB_SIZE,
output_dim=EMBED_SIZE,
input_length=MAXIMUM_LENGTH,
name='layer_embedding'))
model.add(LSTM(100, activation='tanh'))
model.add(Dense(1, activation='sigmoid'))
optimizer = Adam()
model.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
```
Sanity Check:
```
model.summary()
from keras.utils import plot_model
from IPython.display import SVG
from keras.utils import vis_utils
SVG(vis_utils.model_to_dot(model, show_shapes=True, show_layer_names=True, dpi = 70).create(prog='dot', format='svg'))
history = model.fit(preprocessed_train_data, train_labels, validation_split=0.08, epochs=3, batch_size=100)
import matplotlib.pyplot as plt
history_dict = history.history
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
```
Based on your plot, what do you think the optimal stopping point for the model should
have been?
The training should have been stopped after the second epoch; the third epoch results in overfitting.
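One way to stop there automatically is Keras's `EarlyStopping` callback. A minimal sketch, reusing the `model` and arrays defined above (the `restore_best_weights` argument is available in newer Keras releases):
```
from keras.callbacks import EarlyStopping

# stop as soon as the validation loss stops improving and keep the best weights
early_stop = EarlyStopping(monitor='val_loss', patience=1, restore_best_weights=True)
history_es = model.fit(preprocessed_train_data, train_labels, validation_split=0.08,
                       epochs=10, batch_size=100, callbacks=[early_stop])
```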
**5. Evaluating the Model on the Test Data**
---
```
processed_test_data = pad_sequences(test_data, maxlen=MAXIMUM_LENGTH,padding='pre', truncating='pre')
result = model.evaluate(processed_test_data, test_labels)
```
Sanity Check:
```
print('test_loss:', result[0], 'test_accuracy:', result[1])
```
**6. Extracting the Word Embeddings**
---
```
word_embeddings=model.get_layer('layer_embedding').get_weights()[0]
```
Sanity Check:
```
print('Shape of word_embeddings:', word_embeddings.shape)
```
**7. Visualizing the Reviews**
---
```
word2idx = imdb.get_word_index()
word2idx = {k:(v+INDEX_FROM) for k,v in word2idx.items() if v < 9998}
word2idx["<PAD>"] = 0
word2idx["<START>"] = 1
word2idx["<UNK>"] = 2
```
In the next line, create the idx2word map for all the words in the dataset
```
idx2word = {}
for key, value in word2idx.items():
idx2word[value] = key
```
Sanity Check:
```
print(' '.join(idx2word[idx] for idx in train_data[0]))
print(train_data[0])
```
**8. Visualizing the Word_Embeddings**
---
```
from pandas import DataFrame
print(DataFrame(word_embeddings, index=idx2word.values()).head(10))
```
**Plot the word embeddings using TSNE**
---
```
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
tsne = TSNE(perplexity=3, n_components=2, init='pca', n_iter=5000, method='exact')
np.set_printoptions(suppress=True)
start = 3
plot_only = 54
T = tsne.fit_transform(word_embeddings[start:plot_only, :])
labels = [idx2word[i] for i in range(start, plot_only)]
plt.figure(figsize=(14, 8))
plt.scatter(T[:, 0], T[:, 1])
for label, x, y in zip(labels, T[:, 0], T[:, 1]):
plt.annotate(label, xy=(x+1, y+1), xytext=(0, 0), textcoords='offset points', ha='right',va='bottom')
```
**9. Questions**
---
1. Create a new model that is a copy of the model step 3. To this new model, add two dropout
layers, one between the embedding layer and the LSTM layer and another between the
LSTM layer and the output layer. Repeat steps 4 and 5 for this model. What do you observe?
How about if you train this new model for 6 epochs instead?
```
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
from keras.optimizers import Adam
EMBED_SIZE = 100
model2 = Sequential()
model2.add(Embedding(input_dim=VOCAB_SIZE,
output_dim=EMBED_SIZE,
input_length=MAXIMUM_LENGTH,
name='layer_embedding'))
model2.add(Dropout(0.5))
model2.add(LSTM(100, activation='tanh'))
model2.add(Dropout(0.5))
model2.add(Dense(1, activation='sigmoid'))
optimizer = Adam()
model2.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history2 = model2.fit(preprocessed_train_data, train_labels, validation_split=0.08, epochs=3, batch_size=100)
import matplotlib.pyplot as plt
history_dict = history2.history
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
processed_test_data = pad_sequences(test_data, maxlen=MAXIMUM_LENGTH,padding='pre', truncating='pre')
result = model2.evaluate(processed_test_data, test_labels)
print('test_loss:', result[0], 'test_accuracy:', result[1])
```
It can be observed that adding the two dropout layers reduces overfitting: the training accuracy drops from 0.92 for the first model to 0.90 for the second, and the gap between training accuracy and validation accuracy narrows.
```
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
from keras.optimizers import Adam
EMBED_SIZE = 100
model3 = Sequential()
model3.add(Embedding(input_dim=VOCAB_SIZE,
output_dim=EMBED_SIZE,
input_length=MAXIMUM_LENGTH,
name='layer_embedding'))
model3.add(Dropout(0.5))
model3.add(LSTM(100, activation='tanh'))
model3.add(Dropout(0.5))
model3.add(Dense(1, activation='sigmoid'))
optimizer = Adam()
model3.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history3 = model3.fit(preprocessed_train_data, train_labels, validation_split=0.08, epochs=6, batch_size=100)
import matplotlib.pyplot as plt
history_dict = history3.history
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
result = model3.evaluate(processed_test_data, test_labels)
print('test_loss:', result[0], 'test_accuracy:', result[1])
```
Training the model for 6 epochs does not change the test accuracy much, but the training accuracy increases to 95%, which indicates overfitting compared to the previous model.
2. Experiment with compiling the model with batch sizes of 1, 32, len(training_data). What do
you observe?
```
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
from keras.optimizers import Adam
EMBED_SIZE = 100
model4 = Sequential()
model4.add(Embedding(input_dim=VOCAB_SIZE,
output_dim=EMBED_SIZE,
input_length=MAXIMUM_LENGTH,
name='layer_embedding'))
model4.add(LSTM(100, activation='tanh'))
model4.add(Dense(1, activation='sigmoid'))
optimizer = Adam()
model4.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history4 = model4.fit(preprocessed_train_data, train_labels, validation_split=0.08, epochs=3, batch_size=32)
```
Different batch sizes affect the training of the model. A batch size of 1 gives the highest test accuracy but is very slow to train. The test accuracy decreases as the batch size increases, so batch size plays an important role in generalization: the smaller the batch size, the better the model generalizes, but the harder it becomes to train.
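A minimal sketch of how this comparison could be scripted, reusing the variables defined earlier in this notebook (training with a batch size of 1 is very slow, and full-batch training uses a lot of memory):
```
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense

def build_model():
    # same architecture as model4 above
    m = Sequential()
    m.add(Embedding(input_dim=VOCAB_SIZE, output_dim=EMBED_SIZE,
                    input_length=MAXIMUM_LENGTH, name='layer_embedding'))
    m.add(LSTM(100, activation='tanh'))
    m.add(Dense(1, activation='sigmoid'))
    m.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return m

# batch sizes to compare; len(preprocessed_train_data) corresponds to full-batch training
for bs in [1, 32, len(preprocessed_train_data)]:
    m = build_model()
    m.fit(preprocessed_train_data, train_labels, validation_split=0.08,
          epochs=3, batch_size=bs, verbose=0)
    loss, acc = m.evaluate(processed_test_data, test_labels, verbose=0)
    print('batch_size =', bs, 'test_accuracy =', acc)
```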
3. (optional) Can you retrain with a Bidirectional LSTM instead of an LSTM? What do you
observe about the Bi-LSTM model at 3 epochs? What about at 6 epochs?
```
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional
from keras.optimizers import Adam
EMBED_SIZE = 100
model5 = Sequential()
model5.add(Embedding(input_dim=VOCAB_SIZE,
output_dim=EMBED_SIZE,
input_length=MAXIMUM_LENGTH,
name='layer_embedding'))
model5.add(Bidirectional(LSTM(100, activation='tanh')))
model5.add(Dense(1, activation='sigmoid'))
optimizer = Adam()
model5.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history5 = model5.fit(preprocessed_train_data, train_labels, validation_split=0.08, epochs=3, batch_size=100)
result = model5.evaluate(processed_test_data, test_labels)
print('test_loss:', result[0], 'test_accuracy:', result[1])
```
The test accuracy for the bidirectional LSTM is higher than for the plain LSTM, so it can be inferred that bi-LSTMs perform better on this task.
```
import matplotlib.pyplot as plt
history_dict = history5.history
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional
from keras.optimizers import Adam
EMBED_SIZE = 100
model6 = Sequential()
model6.add(Embedding(input_dim=VOCAB_SIZE,
output_dim=EMBED_SIZE,
input_length=MAXIMUM_LENGTH,
name='layer_embedding'))
model6.add(Bidirectional(LSTM(100, activation='tanh')))
model6.add(Dense(1, activation='sigmoid'))
optimizer = Adam()
model6.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history6 = model6.fit(preprocessed_train_data, train_labels, validation_split=0.08, epochs=6, batch_size=100)
```
When the bi-LSTM is run for 6 epochs instead of 3, the validation accuracy starts to drop, which suggests overfitting.
# Benchmarking models of glycolysis
In this notebook, we construct models of glycolysis using a stoichiometric network as a scaffold. Separate rate laws are then used to model the individual reactions, and the simulation results are compared with each other to explore how well simple rate laws approximate complex rate laws.
```
import numpy as np
import BondGraphTools as bgt
import matplotlib as mpl
from matplotlib import pyplot as plt
import glycolysis_models
from glycolysis_models import (GlycolysisGK, GlycolysisMM, GlycolysisMA,
GlycolysisGK_Static, GlycolysisMM_Static, GlycolysisMA_Static)
from bgt_juliasim import simulate
import sys
import bgt_extensions as bgex
```
## Glycolysis model
We consider the model of glycolysis in the figure below, adapted from Mason and Covert (doi: 10.1016/j.jtbi.2018.10.041).
<img src="glycolysis.svg" style="width: 700px;"/>
As shown in the figure above, we benchmark three rate laws for this model: the generalised kinetics rate law (used in Mason and Covert), Michaelis-Menten and mass action.
### Generalised kinetics model
In the code below, we construct the generalised kinetics model and run a simulation. As expected, the steady state is consistent with that used in Mason and Covert.
```
GK_model = GlycolysisGK()
# Define initial conditions
ic = glycolysis_models.default_initial_conditions
# Map onto state space
x0 = np.array(np.zeros(len(GK_model.state_vars)))
for i,v in enumerate(GK_model.state_vars.values()):
species_name = v[0].name
x0[i] = ic[species_name]
tspan = (0.0,300.0)
cv = bgex.gather_cv(GK_model)
sol = simulate(GK_model,tspan,x0,control_vars=cv)
plt.figure()
plt.plot(sol.t,sol.u)
plt.show()
```
### Michaelis-Menten model
The code below constructs the Michaelis-Menten model. Its steady state agrees with the generalised kinetics model.
```
MM_model = GlycolysisMM()
cv = bgex.gather_cv(MM_model)
tspan = (0.0,300.0)
sol = simulate(MM_model,tspan,x0,control_vars=cv)
plt.figure()
plt.plot(sol.t,sol.u)
plt.show()
```
### Mass action model
The code below constructs the mass action model. Its steady state agrees with the other two models.
```
MA_model = GlycolysisMA()
cv = bgex.gather_cv(MA_model)
tspan = (0.0,300.0)
sol = simulate(MA_model,tspan,x0,control_vars=cv)
plt.figure()
plt.plot(sol.t,sol.u)
plt.show()
```
## Perturbation simulations
### Instantaneous perturbations
In the code below, we benchmark the separate rate laws against each other. This is done by performing instantaneous perturbations of the internal species and comparing the results. Specifically, the concentration of each internal species is increased or decreased by 30%. For each perturbation, we record the relaxation time, defined as the last point in time at which the deviation exceeds 5% of its maximum value.
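As a rough illustration of this definition, the relaxation time of a single trajectory could be computed along the following lines (a sketch only, assuming arrays `t` and `x` of time points and perturbed concentrations; this is not the helper actually used in `glycolysis_models`):
```
import numpy as np

def relaxation_time(t, x, x_ref, threshold=0.05):
    """Last time at which |x - x_ref| exceeds `threshold` times the maximum deviation."""
    deviation = np.abs(np.asarray(x) - x_ref)
    max_dev = deviation.max()
    if max_dev == 0:
        return 0.0
    above = np.nonzero(deviation > threshold * max_dev)[0]
    return t[above[-1]] if len(above) else 0.0
```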
The code below defines the models and sets plotting options.
```
from glycolysis_models import internal_perturbation, plot_internal_perturbation
GK_model = GlycolysisGK_Static()
MM_model = GlycolysisMM_Static()
MA_model = GlycolysisMA_Static()
GK_model.update_constitutive_relations()
MM_model.update_constitutive_relations()
MA_model.update_constitutive_relations()
from plot_options import set_color_palette
set_color_palette()
font = {'family' : 'sans-serif',
'weight' : 'normal',
'sans-serif' : ['Helvetica','Arial','DevaVu Sans'],
'size': 13
}
mpl.rc('font', **font)
mpl.rc('xtick', direction='in')
mpl.rc('ytick', direction='in')
mpl.rc('legend', frameon=False)
```
As seen in the plots below, the generalised kinetics and Michaelis-Menten rate laws behave similarly. However, the mass action rate law has a shorter response time and also tends to cause perturbations of greater magnitudes. This pattern is common across all perturbations, and occurs whether the species are increased or reduced in concentration.
```
perturbation = 0.3
sim_results,relax_times = internal_perturbation(GK_model,MM_model,MA_model,perturbation)
fig_in_up,ax_in_up = plot_internal_perturbation(sim_results,relax_times)
ax_in_up[1,2].set_yticks(np.arange(1.1,1.21,0.04)*1e-4)
plt.show()
perturbation = -0.3
sim_results,relax_times = internal_perturbation(GK_model,MM_model,MA_model,perturbation)
fig_in_down,ax_in_down = plot_internal_perturbation(sim_results,relax_times)
ax_in_down[1,2].set_yticks(np.arange(1.0,1.1,0.04)*1e-4)
plt.show()
```
## Prolonged perturbation
In the code below, we compare the models under prolonged perturbations. To model this, we change the concentrations of the external species held at constant concentrations. This causes the model to relax to a different steady state. As with the internal perturbations, we record the response time and the steady state deviation.
In contrast to the internal perturbations, the generalised kinetics and Michaelis-Menten rate laws now behave quite differently from each other. Nonetheless, they exhibit similar qualitative behaviour, whereas the mass action model is qualitatively different for several species.
```
from glycolysis_models import external_perturbations, plot_external_perturbation
perturbation = 0.3
sim_results,relax_times,ss_dev = external_perturbations(GK_model,MM_model,MA_model,perturbation)
fig_ex_up,ax_ex_up = plot_external_perturbation(sim_results,relax_times,ss_dev)
ax_ex_up[2,2].set_yticks(np.arange(1.1,1.17,0.02)*1e-4)
plt.show()
perturbation = -0.3
sim_results,relax_times,ss_dev = external_perturbations(GK_model,MM_model,MA_model,perturbation)
fig_ex_down,ax_ex_down = plot_external_perturbation(sim_results,relax_times,ss_dev)
from general import save_figure
save_figure(fig_in_up,"output/gly_internal_up")
save_figure(fig_in_down,"output/gly_internal_down")
save_figure(fig_ex_up,"output/gly_external_up")
save_figure(fig_ex_down,"output/gly_external_down")
```
## Energetic analysis
A benefit of bond graph modelling is that one can analyse the energetics of the glycolysis pathway. In the code below, we run such an analysis, isolating the glycolysis pathway by switching off the fbp and pps enzymes.
```
GK_model = GlycolysisGK()
GK_model.enzyme_concentrations["fbp"] = 0
GK_model.enzyme_concentrations["pps"] = 0
#GK_model.update_constitutive_relations
x0 = glycolysis_models.x_ss
tspan = (0.0,1000.0)
cv = bgex.gather_cv(GK_model)
sol = simulate(GK_model,tspan,x0,control_vars=cv)
x_ss = sol.u[-1]
V_ss = {r:glycolysis_models.compute_flux(GK_model,r,x_ss) for r,_,_ in GK_model.reactions}
V_ss
```
In the code below, we calculate the affinity of the glycolysis pathway by computing the free energy of the overall reaction
$$
\mathrm{G6P + 3ADP + 2NAD + 2Pi \rightleftharpoons 2PYR + 3ATP + H + 2NADH + 2H2O}
$$
```
from copy import deepcopy
R = glycolysis_models.R
def reaction_affinity(model,reaction,x):
T = 310
exp_Af,exp_Ar = glycolysis_models.split_ma_terms(model,reaction,x)
Af = R*T*np.log(exp_Af)
Ar = R*T*np.log(exp_Ar)
return Af-Ar
def gather_params(model,potentials):
params = deepcopy(model.chemostat_parameters)
for k,v in potentials.items():
params[k] = v
return params
def glycolysis_affinity(model,potentials={},RT=R*310):
params = gather_params(model,potentials)
Af = params["G6P"] + 3*params["ADP"] + 2*params["NAD"] + 2*params["Pi"]
Ar = 2*params["PYR"] + 3*params["ATP"] + params["H"] + 2*params["NADH"] + 2*params["H2O"]
return RT*(Af-Ar)
glycolysis_affinity(GK_model)
```
This pathway affinity can be broken up into contributions from individual reactions, since the pathway consists of
$$
\mathrm{pgi + pfk + fba + tpi + 2gap + 2pgk + 2gpm + 2eno + 2pyk}
$$
Thus, the contribution of each reaction to overall affinity can be calculated as shown below. As expected, these sum to the affinity of the glycolysis pathway.
```
d_pathway = {
"pgi": 1,
"pfk": 1,
"fbp": 0,
"fba": 1,
"tpi": 1,
"gap": 2,
"pgk": 2,
"gpm": 2,
"eno": 2,
"pyk": 2,
"pps": 0,
}
total_A = 0
for r,_,_ in GK_model.reactions:
A = reaction_affinity(GK_model,r,x_ss)
print(f"{r}: {A}")
A_scaled = d_pathway[r]*A
total_A += A_scaled
print(f"Total affinity: {total_A}")
```
```
from IPython.display import Image, SVG
```
# Data Creation
## Accounts
* Create several gzipped files
* Each line in each file is a JSON encoded dictionary with the following keys
```
id: Unique identifier of the customer
name: Name of the customer
transactions: List of transaction-id, amount pairs, one for each transaction for the customer in that file
```
```
from accounts import create_accounts_json
num_files = 25
n = 100000 # number of accounts per file
k = 500 # number of transactions
create_accounts_json(num_files, n, k)
```
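To spot-check the generated data, one can read back the first record of one of the files; the exact file name below (`../data/accounts.0.json.gz`) is an assumption based on the glob pattern used later in this notebook:
```
import gzip
import json

# Read and decode the first JSON-encoded line of one gzipped accounts file.
with gzip.open('../data/accounts.0.json.gz', 'rt') as f:
    record = json.loads(f.readline())
print(record['id'], record['name'], record['transactions'][:2])
```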
## Denormalize NFS Data
* The NFS data is *normalized* to eliminate redundancy
```
from nfs import create_denormalized
create_denormalized()
```
## Random Array
* Create a billion number array of 32-bit floats on disk using HDF5
* HDF5 is an implementation of the Hierarchical Data Format common in scientific applications
* Multiple data formats (tables, nd-arrays, raster images)
* Fast lookups via B-tree indices (like SQL)
* Filesystem-like data format
* Support for meta-information
* The result of this operation is 4 GB
```
from random_array import random_array
random_array()
```
# Dask
## Introduction
Dask is a flexible parallel computing library for analytics. Dask emphasizes the following virtues:
* **Familiar**: Provides parallelized NumPy array and Pandas DataFrame objects
* **Native**: Enables distributed computing in Pure Python with access to the PyData stack.
* **Fast**: Operates with low overhead, low latency, and minimal serialization necessary for fast numerical algorithms
* **Flexible**: Supports complex and messy workloads
* **Scales up**: Runs resiliently on clusters with 100s of nodes
* **Scales down**: Trivial to set up and run on a laptop in a single process
* **Responsive**: Designed with interactive computing in mind, it provides rapid feedback and diagnostics to aid humans
### The Dask Computational Model
* Parallel programming with task scheduling
* Familiar abstractions for executing tasks in parallel on data that doesn't fit into memory
* Arrays, DataFrames
* Task graphs
* Representation of a parallel computation
* Scheduling
* Executes task graphs in parallel on a single machine using threads or processes
* Preliminary support for parallel execution using `dask.distributed`
* Workflows for the distributed scheduler would be quite different than those presented below
### Note
* If you don't have a big data problem, don't use a big data tool
* Many of the below examples could easily be handled in-memory with some better choices
```
Image("http://dask.pydata.org/en/latest/_images/collections-schedulers.png")
```
## Dask Array
* Subset of ndarray interface using blocked algorithms
* Dask array complements large on-disk array stores like HDF5, NetCDF, and BColz
```
SVG("http://dask.pydata.org/en/latest/_images/dask-array-black-text.svg")
```
* Arithmetic and scalar mathematics, `+, *, exp, log, ...`
* Reductions along axes, `sum(), mean(), std(), sum(axis=0), ...`
* Tensor contractions / dot products / matrix multiply, `tensordot`
* Axis reordering / transpose, `transpose`
* Slicing, `x[:100, 500:100:-2]`
* Fancy indexing along single axes with lists or numpy arrays, `x[:, [10, 1, 5]]`
* The array protocol `__array__`
* Some linear algebra `svd, qr, solve, solve_triangular, lstsq`
[Full API Documentation](http://dask.pydata.org/en/latest/array-api.html)
```
import dask.array as da
```
* The idea of the `chunk` is important and has performance implications
```
x = da.arange(25, chunks=5)
y = x ** 2
```
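A quick way to see how dask split the array (nothing is computed at this point):
```
# Chunk sizes along each axis, and the number of blocks in the task graph.
print(x.chunks)     # ((5, 5, 5, 5, 5),)
print(y.numblocks)  # (5,)
```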
* Dask operates on a delayed computation model
* It builds up an expression of the computation in chunks
* Creates a **Task Graph** that you can explore
```
y
y.visualize()
y.dask.keys()
```
* You can execute the graph by using **`compute`**
```
y.compute()
```
* As an example of the `__array__` protocol
```
import numpy as np  # first use of numpy in this notebook

np.array(y)
```
### Scheduling Backends
* You can control the scheduler backend that is used by `compute`
* These choices can be important in a few situations
* Debugging
* Fast tasks
* Cross-task communication
* `dask.get` is an alias for the synchronous backend. Useful for debugging.
#### Synchronous Queue Scheduler
```
import dask  # bind the top-level dask name so dask.get is accessible

y.compute(get=dask.get)
```
#### Threaded Scheduler
* `dask.threaded.get` is the default
* Uses a thread pool backend
* A thread is the smallest unit of work that an OS can schedule
* Threads are "lightweight"
* They execute within the same process and thus share the same memory and file resources ([everything is a file](https://en.wikipedia.org/wiki/Everything_is_a_file) in unix)
* Limitations
* Limited by the Global Interpreter Lock (GIL)
* A GIL means that only one thread can execute at the same time
* Pure python functions likely won't show a speed-up (with a few exceptions)
* C code can release the GIL
* I/O tasks are not blocked by the GIL
```
y.compute(get=dask.threaded.get)
```
* By default, dask will use as many threads as there are logical processors on your machine
```
from multiprocessing import cpu_count
cpu_count()
```
#### Process Scheduler
* Backend that uses multiprocessing
* Uses a process pool backend
* On unix-like systems this is a system call to `fork`
* Calling `fork` creates a new child process which is a *copy*(-on-write) of the parent process
* Owns its own resources. This is "heavy"
* Limitations
* Relies on serializing objects for the workers (slow and error prone)
* Workers must communicate through parent process
```
import dask.multiprocessing  # the multiprocessing scheduler is not imported by default

y.compute(get=dask.multiprocessing.get)
```
#### Distributed Executor
* This is part of the `dask.distributed` library
* Distributes work over the network across machines using web sockets and an asynchronous web framework for Python (tornado)
* Some recent additions make this work for, e.g., distributed DataFrames
### Blocked Algorithms
* Dask works on arrays by executing blocked algorithms on chunks of data
* For example, consider taking the mean of a billion numbers. We might instead break up the array into 1,000 chunks, each of size 1,000,000, take the sum of each chunk, and then take the sum of the intermediate sums and divide this by the total number of observations.
* The result (one mean over one billion numbers) is built from many smaller results (one thousand sums over one million numbers each, followed by another sum of a thousand numbers and a final division).
```
import h5py
import os
f = h5py.File(os.path.join('..', 'data', 'random.hdf5'))
dset = f['/x']
```
* If we were to implement this ourselves, it might look like this:
1. Computing the sum of each 1,000,000 sized chunk of the array
2. Computing the sum of the 1,000 intermediate sums
```
sums = []
for i in range(0, 1000000000, 1000000):
chunk = dset[i: i + 1000000]
sums.append(chunk.sum())
total = np.sum(sums)
print(total / 1e9)
```
* Dask does this for you and uses the backend scheduler to do so in parallel
* Create a dask array from an array-like structure (any object that implements numpy-like slicing)
```
x = da.from_array(dset, chunks=(1000000, ))
```
* x looks and behaves much like a numpy array
* Arithmetic, slicing, reductions
* Use tab-completion to look at the methods of `x`
```
result = x.mean()
result
result.compute()
x[:10].compute()
```
### Exercise
Use `dask.array.random.normal` to create a 20,000 x 20,000 array $X \sim N(10, 0.1)$ with `chunks` set to `(1000, 1000)`
Take the mean of every 100 elements along axis 0.
*Hint*: Recall you can slice with the following syntax [start:end:step]
```
# [Solution here]
%load solutions/dask_array.py
```
## Performance vs. NumPy
Your performance may vary. If you attempt the NumPy version then please ensure that you have more than 4GB of main memory.
```
import numpy as np
%%time
x = np.random.normal(10, 0.1, size=(20000, 20000))
y = x.mean(axis=0)[::100]
y
```
The dask version is faster and needs only megabytes of memory
```
%%time
x = da.random.normal(10, 0.1, size=(20000, 20000), chunks=(1000, 1000))
y = x.mean(axis=0)[::100]
y.compute()
```
## Linear Algebra
* Dask implements a few linear algebra functions that are parallelizable (a small sketch follows the list below)
* `da.linalg.qr`
* `da.linalg.cholesky`
* `da.linalg.svd`
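A minimal sketch of how these are used (the shapes and chunking below are illustrative; `da.linalg.qr` expects a tall-and-skinny array chunked only along the first axis):
```
# QR factorization of a tall-and-skinny dask array.
A = da.random.normal(10, 0.1, size=(10000, 100), chunks=(1000, 100))
q, r = da.linalg.qr(A)
print(q.shape, r.shape)  # (10000, 100) (100, 100) -- still lazy
q.compute()              # triggers the parallel factorization
```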
## Dask Bag
* Parallel lists for semi-structured data
* Nested, variable length, heterogeneously typed, etc.
* E.g., JSON blobs or text data
* Anything that can be represented as a large collection of generic Python objects
* Mainly for cleaning and processing
* I.e., usually the first step in a workflow
* Bag implements a number of useful methods for operation on sequences like `map`, `filter`, `fold`, `frequencies` and `groupby`
* Streaming computation on top of generators
* Bags use the multiprocessing backend by default
### Example
* Using the accounts data we created above
```
import os
import dask.bag as db
bag = db.read_text(os.path.join('..', 'data', 'accounts.*.json.gz'))
bag.take(3)
```
* Using map to process the lines in the text files
```
import json
js = bag.map(json.loads)
js.take(3)
counts = js.pluck('name').frequencies()
counts.compute()
```
### Exercise
* Use `filter` and `take` to get all of the transactions for the first five users named "Alice"
* Define a function `count_transactions` that takes a dictionary from `accounts` and returns a dictionary that holds the `name` and a key `count` that is the number of transactions for that user id.
* Use `filter` to get the accounts where the user is named Alice and `map` the function you just created to get the number of transactions for each user named Alice. `pluck` the count and display the first 5.
```
%load solutions/bag_alice.py
```
### GroupBy / FoldBy
* Groupby collects items in your collection so that all items with the same value under some function are collected together into a key-value pair.
* This requires a full on-disk shuffle and is *very* inefficient
* You almost never want to do this in a real workflow if you can avoid it
```
b = db.from_sequence(['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank'])
b.groupby(len).compute()
```
* Group by evens and odds
```
b = db.from_sequence(list(range(10)))
b.groupby(lambda x: x % 2).compute()
```
Group by evens and odds and take the largest value
```
b.groupby(lambda x: x % 2).map(lambda kv: (kv[0], max(kv[1]))).compute()
```
* FoldBy, while harder to grok, is much more efficient
* This does a streaming combined groupby and reduction
* Familiar to Spark users as the `combineByKey` method on `RDD`
When using foldby you provide
1. A key function on which to group elements
2. A binary operator such as you would pass to reduce that you use to perform reduction per each group
3. A combine binary operator that can combine the results of two reduce calls on different parts of your dataset.
Your reduction must be associative. It will happen in parallel in each of the partitions of your dataset. Then all of these intermediate results will be combined by the combine binary operator.
This is just what we saw in `sum` above
* `functools.reduce` works like so
```
import functools
values = range(10)
def func(acc, y):
print(acc)
print(y)
print()
return acc + y
functools.reduce(func, values)
b.foldby(lambda x: x % 2, binop=max, combine=max).compute()
```
Using the accounts data above, find the number of people with the same name
```
js.take(1)
from dask.diagnostics import ProgressBar
counts = js.foldby(key='name',
binop=lambda total, x: total + 1,
initial=0,
combine=lambda a, b: a + b,
combine_initial=0)
with ProgressBar():
result = counts.compute()
result
```
### Exercise
* Compute the total amounts for each name
* First, create a function that computes the total for each user id
* Change the above example to accumulate the total amount instead of count
```
%load solutions/bag_foldby.py
```
## Dask DataFrame
* subset of the pandas API
* Good for analyzing heterogeneously typed tabular data arranged along an index
<img src="http://dask.pydata.org/en/latest/_images/dask-dataframe.svg" width="30%">
**Trivially parallelizable operations (fast)**:
* Elementwise operations: `df.x + df.y, df * df`
* Row-wise selections: `df[df.x > 0]`
* Loc: `df.loc[4.0:10.5]`
* Common aggregations: `df.x.max(), df.max()`
* Is in: `df[df.x.isin([1, 2, 3])]`
* Datetime/string accessors: `df.timestamp.month`
**Cleverly parallelizable operations (fast)**:
* groupby-aggregate (with common aggregations): `df.groupby(df.x).y.max(), df.groupby('x').max()`
* value_counts: `df.x.value_counts()`
* Drop duplicates: `df.x.drop_duplicates()`
* Join on index: `dd.merge(df1, df2, left_index=True, right_index=True)`
* Join with Pandas DataFrames: `dd.merge(df1, df2, on='id')`
* Elementwise operations with different partitions / divisions: `df1.x + df2.y`
* Datetime resampling: `df.resample(...)`
* Rolling averages: `df.rolling(...)`
* Pearson Correlations: `df[['col1', 'col2']].corr()`
**Operations requiring a shuffle (slow-ish, unless on index)**
* Set index: `df.set_index(df.x)`
* groupby-apply (with anything): `df.groupby(df.x).apply(myfunc)`
* Join not on the index: `dd.merge(df1, df2, on='name')`
[Full DataFrame API](http://dask.pydata.org/en/latest/dataframe-api.html)
### Reading data
```
import dask.dataframe as dd
df = dd.read_csv("../data/NationalFoodSurvey/NFS*.csv")
```
* `DataFrame.head` is one operation that is not lazy
```
df.head(5)
```
### Partitions
* By default the data is partitioned by the file
* In our case, this is good. The files have a natural partition
* When this is not the case, you must do a disk-based shuffle which is slow
```
df.npartitions
df.known_divisions
```
* We are going to set the partition explicitly to `styr` to make some operations more performant
* Partitions are denoted by the left-side of the bins for the partitions.
* The final value is assumed to be the inclusive right-side for the last bin.
So
```
[1974, 1975, 1976]
```
Would be 2 partitions. The first contains 1974. The second contains 1975 and 1976. To get three partitions, one for the final observation, duplicate it.
```
[1974, 1975, 1976, 1976]
```
```
partitions = list(range(1974, 2001)) + [2000]
df = df.set_partition('styr', divisions=partitions)
df.known_divisions
df.divisions
```
* Nothing yet is loaded in to memory
* Meta-information from pandas is available
```
df.info()
```
## DataFrame API
* In addition to the (supported) pandas DataFrame API, dask provides a few more convenient methods
* `DataFrame.categorize`
* `DataFrame.map_partitions`
* `DataFrame.get_division`
* `DataFrame.repartition`
* `DataFrame.set_partition`
* `DataFrame.to_{bag|castra}`
* `DataFrame.visualize`
* A few methods have a slightly different API
* `DataFrame.apply`
* `GroupBy.apply`
### get_division
```
df2000 = df.get_division(26)
type(df2000)
```
What food group was consumed the most times in 2000?
```
df2000.set_index('minfd')
```
* NOTE: We could speed up subsequent operations by setting partitions
```
grp = df2000.groupby('minfd')
size = grp.apply(len, columns='size')
size.head()
```
* There isn't (yet) support for idxmin/idxmax.
* Turn it into a Series first
```
minfd = size.compute().idxmax()
print(minfd)
```
* Get the pre-processed mapping across food grouping variables
```
import pandas as pd

food_mapping = pd.read_csv("../data/NationalFoodSurvey/food_mapping.csv")
```
* Pandas provides the efficient `isin` method
```
food_mapping.ix[food_mapping.minfd.isin([minfd])]
```
### Exercise
* What was the most consumed food group in 1974?
```
# [Solution here]
%load solutions/nfs_most_purchased.py
```
### map_partitions
* Map partitions does what you might expect
* Maps a function across partitions
* Let's calculate the most frequently purchased food group for each year
```
def most_frequent_food(partition):
# partition is a pandas.DataFrame
grpr = partition.groupby('minfd')
size = grpr.size()
minfd = size.idxmax()
idx = food_mapping.minfd.isin([minfd])
description = food_mapping.ix[idx].minfddesc.iloc[0]
year = int(partition.styr.iloc[0])
return year, description
mnfd_year = df.map_partitions(most_frequent_food)
mnfd_year.compute()
zip(mnfd_year.compute(),)
```
### Exercise
* Within each year, group by household `minfd` and calculate daily per capita consumption of each food group. Hint, you want to use `map_partitions`.
```
%load solutions/average_consumption.py
```
## Aside on Storage Formats: Thinking about Computers
* csv is a terrible format (performance-wise)
* compressed csv is not much better
* memory-bound workloads
* [Why Modern CPUs are Starving and What Can Be Done about It](http://www.blosc.org/docs/StarvingCPUs-CISE-2010.pdf)
* [Latency numbers every programmer should know](http://people.eecs.berkeley.edu/~rcs/research/interactive_latency.html)
* trend towards columnar-compressed storage formats
### blosc
* Meta-compression format for (binary) data
* Cache-aware
* Querying data on disk with blosc (bcolz) can be faster than querying pandas data in memory
```
Image('images/bcolz_bench.png')
```
## Dask Resources
[Examples and Tutorials](http://dask.pydata.org/en/latest/examples-tutorials.html)
# Unit 5 - Financial Planning
```
# Initial imports
import os
import requests
import pandas as pd
from dotenv import load_dotenv
import alpaca_trade_api as tradeapi
from MCForecastTools import MCSimulation
import json
%matplotlib inline
# Load .env environment variables
load_dotenv()
```
## Part 1 - Personal Finance Planner
### Collect Crypto Prices Using the `requests` Library
```
# Set current amount of crypto assets
my_btc = 1.2
my_eth = 5.3
# Crypto API URLs
btc_url = "https://api.alternative.me/v2/ticker/Bitcoin/?convert=CAD"
eth_url = "https://api.alternative.me/v2/ticker/Ethereum/?convert=CAD"
# Fetch current BTC price
btc_url = btc_url + "&format=json"
btc_response = requests.get(btc_url)
btc_data = btc_response.json()
btc_price = btc_data['data']['1']['quotes']['USD']['price']
# Fetch current ETH price
eth_url = eth_url + "&format=json"
eth_response = requests.get(eth_url)
eth_data = eth_response.json()
eth_price = eth_data['data']['1027']['quotes']['USD']['price']
# Compute current value of my crypto
my_btc_value = btc_price * my_btc
my_eth_value = eth_price * my_eth
# Print current crypto wallet balance
print(f"The current value of your {my_btc} BTC is ${my_btc_value:0.2f}")
print(f"The current value of your {my_eth} ETH is ${my_eth_value:0.2f}")
```
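Because the API calls above request `convert=CAD`, the same responses should also carry CAD quotes; a sketch of reading them, assuming the nested key layout mirrors the USD path used above:
```
# Read the CAD-converted prices from the same responses (assumed key layout)
btc_price_cad = btc_data['data']['1']['quotes']['CAD']['price']
eth_price_cad = eth_data['data']['1027']['quotes']['CAD']['price']
print(f"BTC: ${btc_price_cad:0.2f} CAD, ETH: ${eth_price_cad:0.2f} CAD")
```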
### Collect Investments Data Using Alpaca: `SPY` (stocks) and `AGG` (bonds)
```
# Current amount of shares
my_agg = 200
my_spy = 50
# Set Alpaca API key and secret
alpaca_api_key = os.getenv("ALPACA_API_KEY")
alpaca_secret_key = os.getenv("ALPACA_SECRET_KEY")
# Create the Alpaca API object
api = tradeapi.REST(
alpaca_api_key,
alpaca_secret_key,
api_version = "v2"
)
# Set the date (fixed here for reproducibility) in ISO format
today = pd.Timestamp("2020-10-16", tz="America/New_York").isoformat()
# Set the tickers
tickers = ["AGG", "SPY"]
# Set timeframe to '1D' for Alpaca API
timeframe = "1D"
# Get current closing prices for SPY and AGG
df_prices = api.get_barset(
tickers,
timeframe,
start = today,
end = today
).df
# Preview DataFrame
df_prices.head()
# Pick AGG and SPY close prices
agg_close_price = df_prices['AGG']['close'].item()
spy_close_price = df_prices['SPY']['close'].item()
# Print AGG and SPY close prices
print(f"Current AGG closing price: ${agg_close_price}")
print(f"Current SPY closing price: ${spy_close_price}")
# Compute the current value of shares
my_agg_value = agg_close_price * my_agg
my_spy_value = spy_close_price * my_spy
# Print current value of shares
print(f"The current value of your {my_spy} SPY shares is ${my_spy_value:0.2f}")
print(f"The current value of your {my_agg} AGG shares is ${my_agg_value:0.2f}")
```
### Savings Health Analysis
```
# Set monthly household income
monthly_income = 12000
# Create savings DataFrame
crypto_value = my_btc_value + my_eth_value
stock_value = my_agg_value + my_spy_value
assets = ['crypto','shares']
df_savings = pd.DataFrame([crypto_value,stock_value],index=assets,columns=['amount'])
# Display savings DataFrame
display(df_savings)
# Plot savings pie chart
df_savings.plot.pie(y='amount')
# Set ideal emergency fund
emergency_fund = monthly_income * 3
# Calculate total amount of savings
savings = df_savings['amount'].sum()
# Validate saving health
if savings > emergency_fund:
print("Congratulations you have more than enough emergency funds! Please consider our premium investment products for your excess cash.")
elif savings == emergency_fund:
print("Congratulations you have enough savings for emergencies!")
else:
deficit = emergency_fund - savings
print(f"You do not have enough savings. You will need ${deficit} to reach financial security")
```
## Part 2 - Retirement Planning
### Monte Carlo Simulation
```
# Set start and end dates of five years back from today.
# Sample results may vary from the solution based on the time frame chosen
start_date = pd.Timestamp('2015-08-07', tz='America/New_York').isoformat()
end_date = pd.Timestamp('2020-08-07', tz='America/New_York').isoformat()
# Get 5 years' worth of historical data for SPY and AGG
df_stock_data = api.get_barset(
tickers,
timeframe,
start = start_date,
end = end_date
).df
# Display sample data
df_stock_data.head()
# Configuring a Monte Carlo simulation to forecast 30 years cumulative returns
# Set number of simulations
num_sims = 500
MC_30_year = MCSimulation(
portfolio_data = df_stock_data,
weights = [.40,.60],
num_simulation = num_sims,
num_trading_days = 252*30
)
# Printing the simulation input data
MC_30_year.portfolio_data.head()
# Running a Monte Carlo simulation to forecast 30 years cumulative returns
MC_30_year.calc_cumulative_return()
# Plot simulation outcomes
MC_30_year.plot_simulation()
# Plot probability distribution and confidence intervals
MC_30_year.plot_distribution()
```
### Retirement Analysis
```
# Fetch summary statistics from the Monte Carlo simulation results
tbl = MC_30_year.summarize_cumulative_return()
# Print summary statistics
print(tbl)
```
### Calculate the expected portfolio return at the 95% lower and upper confidence intervals based on a `$20,000` initial investment.
```
# Set initial investment
initial_investment = 20000
# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $20,000
ci_lower = round(tbl[8]*initial_investment,2)
ci_upper = round(tbl[9]*initial_investment,2)
# Print results
print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio"
f" over the next 30 years will end within in the range of"
f" ${ci_lower} and ${ci_upper}")
```
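If the positional indices `tbl[8]` and `tbl[9]` feel opaque, the same values can usually be looked up by label, assuming `summarize_cumulative_return` returns a pandas Series whose confidence-interval entries are named as below (an assumption about MCForecastTools, not verified here):
```
# Hypothetical label-based lookup of the 95% confidence interval bounds
ci_lower = round(tbl["95% CI Lower"] * initial_investment, 2)
ci_upper = round(tbl["95% CI Upper"] * initial_investment, 2)
```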
### Calculate the expected portfolio return at the `95%` lower and upper confidence intervals based on a `50%` increase in the initial investment.
```
# Set initial investment
initial_investment = 20000 * 1.5
# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $30,000
ci_lower = round(tbl[8]*initial_investment,2)
ci_upper = round(tbl[9]*initial_investment,2)
# Print results
print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio"
f" over the next 30 years will end within in the range of"
f" ${ci_lower} and ${ci_upper}")
```
## Optional Challenge - Early Retirement
### Five Years Retirement Option
```
# Configuring a Monte Carlo simulation to forecast 5 years cumulative returns
num_sims = 500
MC_5_year = MCSimulation(
portfolio_data = df_stock_data,
weights = [.05,.95],
num_simulation = num_sims,
num_trading_days = 252*5
)
# Running a Monte Carlo simulation to forecast 5 years cumulative returns
MC_5_year.calc_cumulative_return()
# Plot simulation outcomes
MC_5_year.plot_simulation()
# Plot probability distribution and confidence intervals
MC_5_year.plot_distribution()
# Fetch summary statistics from the Monte Carlo simulation results
tbl = MC_5_year.summarize_cumulative_return()
# Print summary statistics
print(tbl)
# Set initial investment
initial_investment = 20000
# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $20,000
ci_lower_five = round(tbl[8]*initial_investment,2)
ci_upper_five = round(tbl[9]*initial_investment,2)
# Print results
print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio"
f" over the next 5 years will end within in the range of"
f" ${ci_lower_five} and ${ci_upper_five}")
```
### Ten Years Retirement Option
```
# Configuring a Monte Carlo simulation to forecast 10 years cumulative returns
num_sims = 500
MC_10_year = MCSimulation(
portfolio_data = df_stock_data,
weights = [.05,.95],
num_simulation = num_sims,
num_trading_days = 252*10
)
# Running a Monte Carlo simulation to forecast 10 years cumulative returns
MC_10_year.calc_cumulative_return()
# Plot simulation outcomes
MC_10_year.plot_simulation()
# Plot probability distribution and confidence intervals
MC_10_year.plot_distribution()
# Fetch summary statistics from the Monte Carlo simulation results
tbl = MC_10_year.summarize_cumulative_return()
# Print summary statistics
print(tbl)
# Set initial investment
initial_investment = 20000
# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $20,000
ci_lower_ten = round(tbl[8]*initial_investment,2)
ci_upper_ten = round(tbl[9]*initial_investment,2)
# Print results
print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio"
f" over the next 10 years will end within in the range of"
f" ${ci_lower_ten} and ${ci_upper_ten}")
```
# Zoo Animal Clasification
Use Machine Learning Methods to Correctly Classify Animals Based Upon Attributes.
Dataset by Kaggle. More information can be found [here](https://www.kaggle.com/uciml/zoo-animal-classification).
```
# Get or create a spark session
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.master('local[*]') \
.appName("Intro") \
.getOrCreate()
```
# Provide custom schema for the data
```
from pyspark.sql.types import StructField, StructType, StringType, DoubleType
# notice that although most of the columns hold integer values, the custom schema uses double type.
# This is because the statistics functionality expects a numeric (double) type.
custom_schema = StructType([
StructField("animal_name", StringType(), True),
StructField("hair", DoubleType(), True),
StructField("feathers", DoubleType(), True),
StructField("eggs", DoubleType(), True),
StructField("milk", DoubleType(), True),
StructField("airborne", DoubleType(), True),
StructField("aquatic", DoubleType(), True),
StructField("predator", DoubleType(), True),
StructField("toothed", DoubleType(), True),
StructField("backbone", DoubleType(), True),
StructField("breathes", DoubleType(), True),
StructField("venomous", DoubleType(), True),
StructField("fins", DoubleType(), True),
StructField("legs", DoubleType(), True),
StructField("tail", DoubleType(), True),
StructField("domestic", DoubleType(), True),
StructField("catsize", DoubleType(), True),
StructField("class_type", StringType(), True)])
# load data
zoo_data = spark.read.format("csv")\
.schema(custom_schema) \
.option("header", True) \
.load("../datasets/zoo.csv")
zoo_data.take(1)
zoo_data.printSchema()
```
# Calculate statistics
For this, we will use the Summarizer functionality.
```
# The statistics functionality can only work on a vector of numeric features,
# so we drop the string column we don't need here. Note that class_type is kept
# because it is used later as the label for the chi-square test.
zoo_data_for_statistics = zoo_data.drop('animal_name')
```
## Turn the columns into a vector
Notice that to simplify the example, we are going to examine the following columns:
* feathers
* milk
* fins
* domestic
```
from pyspark.ml.feature import VectorAssembler
# use the vector assembler transformer, as described in the book under transformers in chapter 3
vecAssembler = VectorAssembler(outputCol="features")
# assemble only part of the columns for the example
vecAssembler.setInputCols(["feathers","milk","fins","domestic"])
vector_df = vecAssembler.transform(zoo_data_for_statistics)
vector_df.printSchema()
from pyspark.ml.stat import Summarizer
from pyspark.sql import Row
from pyspark.ml.linalg import Vectors
# create summarizer for multiple metrics "mean","variance","normL1","normL2","std" and "sum".
summarizer = Summarizer.metrics("mean","variance","normL1","normL2","std","sum","numNonZeros","max","min")
# compute statistics for multiple metrics with weight
statistics_df = vector_df.select(summarizer.summary(vector_df.features))
statistics_df.show(truncate=False)
```
Notice that the statistics DataFrame has only one column, named aggregate_metrics; that column holds a struct with one field per metric, and each field is a vector.
```
statistics_df.printSchema()
```
To make the nested metrics easier to access, we can alias the summary struct and select its fields, or compute a single metric directly (as in the `std` example below).
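A small sketch of the first approach (it assumes the summary struct exposes one field per requested metric, which is how `Summarizer.metrics` names them):
```
# Alias the summary struct, then select individual metric fields from it.
stats = vector_df.select(summarizer.summary(vector_df.features).alias("stats"))
stats.select("stats.mean", "stats.std", "stats.sum").show(truncate=False)
```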
```
# compute statistics for single metric "std" without the rest
vector_df.select(Summarizer.std(vector_df.features)).show(truncate=False)
```
From [wikipedia](https://en.wikipedia.org/wiki/Standard_deviation): standard deviation is a measure of the amount of variation or dispersion of a set of values. A low standard deviation indicates that the values tend to be close to the mean (also called the expected value) of the set, while a high standard deviation indicates that the values are spread out over a wider range.
Looking at the vector results, the standard deviation of each individual feature is lower than 0.5.
Our features: "feathers", "milk", "fins", "domestic".
The main reason is that the data is essentially boolean, since each feature is a yes/no feature.
Feathers = 1 means that this animal has feathers, and so on.
Now that we know this, let's take a look at sum, which will tell us how many animals in the dataset have feathers, milk, fins, or are domestic.
```
# compute statistics for single metric "sum" without the rest
vector_df.select(Summarizer.sum(vector_df.features)).show(truncate=False)
```
`sum` provides us with more relatable information that we can use to understand the data.
```
# compute statistics for single metric "variance" without the rest
vector_df.select(Summarizer.variance(vector_df.features)).show(truncate=False)
# compute statistics for single metric "count" without the rest
vector_df.select(Summarizer.count(vector_df.features)).show(truncate=False)
# compute statistics for single metric "numNonZeros" without the rest
vector_df.select(Summarizer.numNonZeros(vector_df.features)).show(truncate=False)
# compute statistics for single metric "max" without the rest
vector_df.select(Summarizer.max(vector_df.features)).show(truncate=False)
# compute statistics for single metric "normL1" without the rest
vector_df.select(Summarizer.normL1(vector_df.features)).show(truncate=False)
# compute statistics for single metric "normL2" without the rest
vector_df.select(Summarizer.normL2(vector_df.features)).show(truncate=False)
```
# Testing feature correlations
As part of understanding each feature's statistics on its own, let's also understand the correlation between the features.
### Notice
This functionality also requires a vector, so we will use the one from the earlier computation, `vector_df`.
```
from pyspark.ml.stat import Correlation
from pyspark.ml.stat import KolmogorovSmirnovTest
r1 = Correlation.corr(vector_df, "features").head()
print("Pearson correlation matrix:\n" + str(r1[0])+ "\n")
r2 = Correlation.corr(vector_df, "features", "spearman").head()
print("Spearman correlation matrix:\n" + str(r2[0]))
```
A breakdown of the correlation matrix is in the book, chapter 3 under statistics.
```
from pyspark.ml.stat import KolmogorovSmirnovTest
```
## ChiSquareTest
Testing the p-value of the columns:
This requires a vector as well, hence we will use the precomputed vector from before.
Notice that the label in this case has to be of numeric type.
To transform the label into a numeric column, we will use the StringIndexer transformer functionality.
```
from pyspark.ml.feature import StringIndexer
indexer = StringIndexer(inputCol="class_type", outputCol="label")
indexed_lable = indexer.fit(vector_df).transform(vector_df)
indexed_lable.printSchema()
indexed_lable.select("features").take(1)
indexed_lable
from pyspark.ml.stat import ChiSquareTest
chiSqResult = ChiSquareTest.test(indexed_lable, 'features', 'label')
chiSqResult.select("degreesOfFreedom").collect()[0]
chiSqResult = ChiSquareTest.test(indexed_lable, 'features', 'label', True)
row = chiSqResult.orderBy("featureIndex").collect()
row[0].statistic
row
```
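Since the flattened result has one row per assembled feature, the per-feature p-values can be read off directly (field names follow the flattened `ChiSquareTest` output schema):
```
# Print the p-value and test statistic for each of the four assembled features
for r in row:
    print(r.featureIndex, r.pValue, r.statistic)
```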
Reminder that for simplifying the example, we used the following columns:
* feathers
* milk
* fins
* domestic
# GradientBoostingClassifier with StandardScaler
This code template is for classification tasks using a GradientBoostingClassifier, based on the gradient boosting ensemble learning technique, together with the StandardScaler feature rescaling technique.
### Required Packages
```
import warnings as wr
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
wr.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features which are required for model training.
```
#x_values
features=[]
```
Target feature for prediction.
```
#y_value
target=''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use the pandas library to read the CSV file from its storage path, and the head function to display the initial rows.
```
df=pd.read_csv(file_path) #reading file
df.head()#displaying initial entries
print('Number of rows are :',df.shape[0], ',and number of columns are :',df.shape[1])
df.columns.tolist()
```
### Data Preprocessing
Since the majority of the machine learning models in the sklearn library don't handle string categorical data or null values, we have to explicitly remove or replace them. The snippet below has functions that fill null values if any exist and convert string class data in the dataset by encoding it to integer classes.
```
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
plt.figure(figsize = (20, 12))
corr = df.corr()
mask = np.triu(np.ones_like(corr, dtype = bool))
sns.heatmap(corr, mask = mask, linewidths = 1, annot = True, fmt = ".2f")
plt.show()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
#spliting data into X(features) and Y(Target)
X=df[features]
Y=df[target]
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
```
#### Distribution Of Target Variable
```
plt.figure(figsize = (10,6))
sns.countplot(Y,palette='pastel')
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
# we can choose random_state and test_size as per our requirement
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123) #performing datasplitting
```
# StandardScaler
* It will transform your data such that its distribution will have a mean of 0 and a standard deviation of 1
* In case of multivariate data, this is done feature-wise
* We will **fit** a StandardScaler object to the training data and transform that same data with the **fit_transform(X_train)** method
```
scaler=StandardScaler() #making an object of StandardScaler
X_train=scaler.fit_transform(X_train) #fitting the scaler on the training set and transforming it
X_test=scaler.transform(X_test) #scaling the testing set with the same parameters
```
* Now that our data is scaled, let's train the model
## Model
**GradientBoostingClassifier**
Gradient Boosting builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage, `n_classes_` regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function.
#### Model Tuning Parameters
1. loss : {‘deviance’, ‘exponential’}, default=’deviance’
> The loss function to be optimized. ‘deviance’ refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss ‘exponential’ gradient boosting recovers the AdaBoost algorithm.
2. learning_rate : float, default=0.1
> Learning rate shrinks the contribution of each tree by learning_rate. There is a trade-off between learning_rate and n_estimators.
3. n_estimators : int, default=100
> The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting, so a large number usually results in better performance.
4. criterion : {‘friedman_mse’, ‘mse’, ‘mae’}, default=’friedman_mse’
> The function to measure the quality of a split. Supported criteria are ‘friedman_mse’ for the mean squared error with improvement score by Friedman, ‘mse’ for mean squared error, and ‘mae’ for the mean absolute error. The default value of ‘friedman_mse’ is generally the best as it can provide a better approximation in some cases.
5. max_depth : int, default=3
> The maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables.
6. max_features : {‘auto’, ‘sqrt’, ‘log2’}, int or float, default=None
> The number of features to consider when looking for the best split.
7. random_state : int, RandomState instance or None, default=None
> Controls the random seed given to each tree estimator at each boosting iteration, the permutation of the features at each split (if `max_features < n_features`), and the splitting of the training data into a validation set when <code>n_iter_no_change</code> is not None.
8. verbose : int, default=0
> Controls the verbosity when fitting and predicting.
9. n_iter_no_change : int, default=None
> n_iter_no_change is used to decide if early stopping will be used to terminate training when validation score is not improving. By default it is set to None to disable early stopping. If set to a number, it will set aside validation_fraction size of the training data as validation and terminate training when validation score is not improving in all of the previous n_iter_no_change numbers of iterations. The split is stratified.
10. tol : float, default=1e-4
> Tolerance for the early stopping. When the loss is not improving by at least tol for <code>n_iter_no_change</code> iterations (if set to a number), the training stops.
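For illustration only (this cell is not part of the original notebook), the estimator could be instantiated with some of the parameters above set explicitly; the values below are arbitrary placeholders, not tuned recommendations.
```
# Illustrative only: arbitrary example values for the parameters described above
example_model = GradientBoostingClassifier(
    learning_rate=0.05,    # smaller shrinkage usually needs more estimators
    n_estimators=200,      # number of boosting stages
    max_depth=3,           # depth of each individual regression tree
    n_iter_no_change=10,   # enable early stopping on a held-out validation fraction
    tol=1e-4,              # minimum improvement required to keep training
    random_state=42)
# example_model.fit(X_train, y_train)  # would be trained the same way as the model below
```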
```
#training the GradientBoostingClassifier
model = GradientBoostingClassifier(random_state = 50)
model.fit(X_train, y_train)
```
#### Model Accuracy
The score() method returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
```
print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100))
#prediction on testing set
prediction=model.predict(X_test)
```
#### Confusion Matrix
A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
```
#plot_confusion_matrix(model,X_test,y_test,cmap=plt.cm.Blues)
cf_matrix=confusion_matrix(y_test,prediction)
plt.figure(figsize=(7,6))
sns.heatmap(cf_matrix,annot=True,fmt="d")
```
#### Classification Report
A classification report is used to measure the quality of predictions from a classification algorithm: how many predictions are correct and how many are not.
* **where**:
- Precision:- Accuracy of positive predictions.
- Recall:- Fraction of positives that were correctly identified.
    - f1-score:- Harmonic mean of precision and recall.
- support:- Support is the number of actual occurrences of the class in the specified dataset.
```
print(classification_report(y_test,model.predict(X_test)))
```
#### Feature Importances
Feature importance refers to techniques that assign a score to each input feature based on how useful it is for making predictions.
```
plt.figure(figsize=(8,6))
n_features = len(X.columns)
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(np.arange(n_features), X.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
plt.ylim(-1, n_features)
```
#### Creator: Vipin Kumar , Github: [Profile](https://github.com/devVipin01)
```
import pandas as pd
import numpy as np
u_data = pd.read_csv('u.data',delimiter='\t',names=['userId','movieId','rating','timestamp'])
u_data_sorted = u_data.sort_values(['userId','movieId'])
matrix = np.zeros((1682,943),dtype=int)
u_data_sorted_array = u_data_sorted.to_numpy()
for i in u_data_sorted_array:
matrix[i[1]-1][i[0]-1]=i[2]
#matrix
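# Build a binary indicator matrix: 1 where a user rated a movie, 0 otherwise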
rated_unrated_mat = np.zeros((1682,943),dtype=int)
row_index = 0
col_index = 0
for i in matrix:
for j in i:
if j!=0:
rated_unrated_mat[row_index][col_index]=1
col_index+=1
row_index+=1
col_index = 0
movies_rated_per_user = rated_unrated_mat.sum(axis=0)
movies_watched = rated_unrated_mat.sum(axis=1)
unrated_movies_per_user = 1682-movies_rated_per_user
movies_unwatched = 943-movies_watched
#unrated_movies_per_user
zeros = unrated_movies_per_user.sum()
#movies_unwatched
```
Count the zeros, work out the total number of possible ratings, and compute the sparsity percentage.
```
#sparcity percentage
total = 1682*943
sparcity = (zeros/total)*100
sparcity
u_item = pd.read_csv('u.item',delimiter='|',names=list('abcdefghijklmnopqrstuvwx'))
u_item = u_item[u_item['c'].notnull()]
u_item_date_array = u_item['c'].to_numpy()
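# Extract the release year from dates formatted like '01-Jan-1995'
# (one movie has a missing release date, leaving 1681 entries)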
release_year = np.zeros((1681),dtype=int)
index = 0
for i in u_item_date_array:
temp = i.split('-')
release_year[index] = int(temp[2])
index+=1
release_year.sort()
release_year_x =np.unique(release_year)
yearcount_y = np.zeros(71,dtype=int)
index=0
for i in release_year_x:
count=0
for j in release_year:
if i == j :
count+=1
yearcount_y[index]=count
index+=1
import matplotlib.pyplot as plt
fig1 = plt.figure()
ax1 = fig1.add_axes([0,0,1,1])
ax1.set_title('number of releases per year')
ax1.set_xlabel("year")
ax1.set_ylabel('number of movies')
ax1.bar(release_year_x,yearcount_y)
u_user = pd.read_csv('u.user',delimiter='|',names=['userId','age','gender','occupation','zipcode'])
u_user_sorted = u_user.sort_values('age')
ages = np.unique(u_user_sorted.age)
u_user_agegrouped_count = u_user_sorted.groupby(['age']).count()['userId']
u_user_agegrouped_count = u_user_agegrouped_count.to_numpy()
u_user_agegrouped_count
fig2 = plt.figure()
ax2 = fig2.add_axes([0,0,1,1])
ax2.set_title('frequency distribution of movies watched per age')
ax2.set_xlabel("age")
ax2.set_ylabel('number of movies')
ax2.bar(ages,u_user_agegrouped_count)
u_user_sorted = u_user.sort_values('occupation')
occupation = np.unique(u_user_sorted.occupation)
occupation_grouped = u_user_sorted.groupby('occupation').count()
occupation_grouped = occupation_grouped.userId.to_numpy()
fig3 = plt.figure()
ax3 = fig3.add_axes([0,0,1,1])
ax3.set_title('occupation grouped data')
ax3.set_ylabel('occupation')
ax3.set_xlabel('movies watched')
ax3.barh(occupation,occupation_grouped,align='center')
u_data_sorted
user_movie = u_data_sorted[['userId','movieId']]
u_user_gender = u_user.gender
u_user_gender = u_user_gender.to_numpy()
user_movie_array = user_movie.to_numpy()
user_movie_array
```
# Scientific Python Basics
Prepared by: Cindee Madison, Thomas Kluyver (Any errors are our own)
Thanks to: Justin Kitzes, Matt Davis
## 1. Individual things
The most basic components of any programming language are "things", also called variables or
(in special cases) objects.
The most common basic "things" in Python are integers, floats, strings, booleans, and
some special objects of various types. We'll meet many of these as we go through the lesson.
__TIP:__ To run the code in a cell quickly, press Ctrl-Enter.
__TIP:__ To quickly create a new cell below an existing one, type Ctrl-m then b.
Other shortcuts for making, deleting, and moving cells are in the menubar at the top of the
screen.
```
# A thing
2
# Use print to show multiple things in the same cell
# Note that you can use single or double quotes for strings
print(2)
print('hello')
# Things can be stored as variables
a = 2
b = 'hello'
c = True # This is case sensitive
print(a, b, c)
# The type function tells us the type of thing we have
print(type(a))
print(type(b))
print(type(c))
# What happens when a new variable points to a previous variable?
a = 1
b = a
a = 2
## What is b?
print(b)
```
## 2. Commands that operate on things
Just storing data in variables isn't much use to us. Right away, we'd like to start performing
operations and manipulations on data and variables.
There are three very common means of performing an operation on a thing.
### 2.1 Use an operator
All of the basic math operators work like you think they should for numbers. They can also
do some useful operations on other things, like strings. There are also boolean operators that
compare quantities and give back a `bool` variable as a result.
```
# Standard math operators work as expected on numbers
a = 2
b = 3
print(a + b)
print(a * b)
print(a ** b) # a to the power of b (a^b does something completely different!)
print(a / b) # Careful with dividing integers if you use Python 2
# There are also operators for strings
print('hello' + 'world')
print('hello' * 3)
#print('hello' / 3) # You can't do this!
# Boolean operators compare two things
a = (1 > 3)
b = (3 == 3)
print(a)
print(b)
print(a or b)
print(a and b)
```
### 2.2 Use a function
These will be very familiar to anyone who has programmed in any language, and work like you
would expect.
```
# There are thousands of functions that operate on things
print(type(3))
print(len('hello'))
print(round(3.3))
```
__TIP:__ To find out what a function does, you can type its name and then a question mark to
get a pop up help window. Or, to see what arguments it takes, you can type its name, an open
parenthesis, and hit tab.
```
round?
#round(
round(3.14159, 2)
```
__TIP:__ Many useful functions are not in the Python built in library, but are in external
scientific packages. These need to be imported into your Python notebook (or program) before
they can be used. Probably the most important of these are numpy and matplotlib.
```
# Many useful functions are in external packages
# Let's meet numpy
import numpy as np
# To see what's in a package, type the name, a period, then hit tab
#np?
np.
# Some examples of numpy functions and "things"
print(np.sqrt(4))
print(np.pi) # Not a function, just a variable
print(np.sin(np.pi))
```
### 2.3 Use a method
Before we get any farther into the Python language, we have to say a word about "objects". We
will not be teaching object oriented programming in this workshop, but you will encounter objects
throughout Python (in fact, even seemingly simple things like ints and strings are actually
objects in Python).
In the simplest terms, you can think of an object as a small bundled "thing" that contains within
itself both data and functions that operate on that data. For example, strings in Python are
objects that contain a set of characters and also various functions that operate on the set of
characters. When bundled in an object, these functions are called "methods".
Instead of the "normal" `function(arguments)` syntax, methods are called using the
syntax `variable.method(arguments)`.
```
# A string is actually an object
a = 'hello, world'
print(type(a))
# Objects have bundled methods
#a.
print(a.capitalize())
print(a.replace('l', 'X'))
```
### Exercise 1 - Conversion
Throughout this lesson, we will successively build towards a program that will calculate the
variance of some measurements, in this case `Height in Metres`. The first thing we want to do is convert from an antiquated measurement system.
To change inches into metres we use the following equation (conversion factor is rounded)
## $metre = \frac{inches}{39}$
1. Create a variable for the conversion factor, called `inches_in_metre`.
1. Create a variable (`inches`) for your height in inches, as inaccurately as you want.
2. Divide `inches` by `inches_in_metre`, and store the result in a new variable, `metres`.
1. Print the result
__Bonus__
Convert from feet and inches to metres.
__TIP:__ A 'gotcha' for all python 2 users (it was changed in python 3) is the result of integer division. To make it work the obvious way, either:
1. `inches_in_metre = 39.` (add the decimal to cast to a float, or use 39.4 to be more accurate)
2. `from __future__ import division` - Put this at the **top** of the code and it will work
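One possible solution sketch (the height below is just an example value):
```
# Possible solution for Exercise 1 (example height; replace with your own)
inches_in_metre = 39.
inches = 70                       # example height in inches
metres = inches / inches_in_metre
print(metres)
```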
## 3. Collections of things
While it is interesting to explore your own height, in science we work with larger, slightly more complex datasets. In this example, we are interested in the characteristics and distribution of heights. Python provides us with a number of objects to handle collections of things.
Probably 99% of your work in scientific Python will use one of four types of collections:
`lists`, `tuples`, `dictionaries`, and `numpy arrays`. We'll look quickly at each of these and what
they can do for you.
### 3.1 Lists
Lists are probably the handiest and most flexible type of container.
Lists are declared with square brackets [].
Individual elements of a list can be selected using the syntax `a[ind]`.
```
# Lists are created with square bracket syntax
a = ['blueberry', 'strawberry', 'pineapple']
print(a, type(a))
# Lists (and all collections) are also indexed with square brackets
# NOTE: The first index is zero, not one
print(a[0])
print(a[1])
## You can also count from the end of the list
print('last item is:', a[-1])
print('second to last item is:', a[-2])
# you can access multiple items from a list by slicing, using a colon between indexes
# NOTE: The end value is not inclusive
print('a =', a)
print('get first two:', a[0:2])
# You can leave off the start or end if desired
print(a[:2])
print(a[2:])
print(a[:])
print(a[:-1])
# Lists are objects, like everything else, and have methods such as append
a.append('banana')
print(a)
a.append([1,2])
print(a)
a.pop()
print(a)
```
__TIP:__ A 'gotcha' for some new Python users is that many collections, including lists,
actually store pointers to data, not the data itself.
Remember when we set `b=a` and then changed `a`?
What happens when we do this in a list?
__HELP:__ look into the `copy` module
```
a = 1
b = a
a = 2
## What is b?
print('What is b?', b)
a = [1, 2, 3]
b = a
print('original b', b)
a[0] = 42
print('What is b after we change a ?', b)
```
### EXERCISE 2 - Store a bunch of heights (in metres) in a list
1. Ask five people around you for their heights (in metres).
2. Store these in a list called `heights`.
3. Append your own height, calculated above in the variable *metres*, to the list.
4. Get the first height from the list and print it.
__Bonus__
1. Extract the last value in two different ways: first, by using the index for
the last item in the list, and second, presuming that you do not know how long the list is.
__HINT:__ **len()** can be used to find the length of a collection
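A possible solution sketch, using made-up heights:
```
# Possible solution for Exercise 2 (made-up heights in metres)
heights = [1.60, 1.75, 1.82, 1.68, 1.70]
heights.append(metres)
print(heights[0])
# Bonus: the last value, two ways
print(heights[-1])
print(heights[len(heights) - 1])
```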
### 3.2 Tuples
We won't say a whole lot about tuples except to mention that they basically work just like lists, with
two major exceptions:
1. You declare tuples using () instead of []
1. Once you make a tuple, you can't change what's in it (referred to as immutable)
You'll see tuples come up throughout the Python language, and over time you'll develop a feel for when
to use them.
In general, they're often used instead of lists:
1. to group items when the position in the collection is critical, such as coord = (x,y)
1. when you want to make prevent accidental modification of the items, e.g. shape = (12,23)
```
xy = (23, 45)
print(xy[0])
xy[0] = "this won't work with a tuple"
```
### Anatomy of a traceback error
Traceback errors are `raised` when you try to do something with code that it isn't meant to do. A traceback is meant to be informative but, like many things, it is not always as informative as we would like.
Looking at our error:
TypeError Traceback (most recent call last)
<ipython-input-25-4d15943dd557> in <module>()
1 xy = (23, 45)
2 xy[0]
----> 3 xy[0] = 'this wont work with a tuple'
TypeError: 'tuple' object does not support item assignment
1. The command you tried to run raised a **TypeError**. This suggests you are using a variable in a way that its **type** doesn't support.
2. The arrow `---->` points to the line where the error occurred, in this case line 3 of the code above.
3. Learning how to **read** a traceback is an important skill to develop, and it helps you ask questions about what has gone wrong in your code.
### 3.3 Dictionaries
Dictionaries are the collection to use when you want to store and retrieve things by their names
(or some other kind of key) instead of by their position in the collection. A good example is a set
of model parameters, each of which has a name and a value. Dictionaries are declared using {}.
```
# Make a dictionary of model parameters
convertors = {'inches_in_feet' : 12,
'inches_in_metre' : 39}
print(convertors)
print(convertors['inches_in_feet'])
## Add a new key:value pair
convertors['metres_in_mile'] = 1609.34
print(convertors)
# Raise a KEY error
print(convertors['blueberry'])
```
### 3.4 Numpy arrays (ndarrays)
Even though numpy arrays (often written as ndarrays, for n-dimensional arrays) are not part of the
core Python libraries, they are so useful in scientific Python that we'll include them here in the
core lesson. Numpy arrays are collections of things, all of which must be the same type, that work
similarly to lists (as we've described them so far). The most important differences are:
1. You can easily perform elementwise operations (and matrix algebra) on arrays
1. Arrays can be n-dimensional
1. There is no equivalent to append, although arrays can be concatenated
Arrays can be created from existing collections such as lists, or instantiated "from scratch" in a
few useful ways.
When getting started with scientific Python, you will probably want to try to use ndarrays whenever
possible, saving the other types of collections for those cases when you have a specific reason to use
them.
```
# We need to import the numpy library to have access to it
# We can also create an alias for a library, this is something you will commonly see with numpy
import numpy as np
# Make an array from a list
alist = [2, 3, 4]
blist = [5, 6, 7]
a = np.array(alist)
b = np.array(blist)
print(a, type(a))
print(b, type(b))
# Do arithmetic on arrays
print(a**2)
print(np.sin(a))
print(a * b)
print(a.dot(b), np.dot(a, b))
# Boolean operators work on arrays too, and they return boolean arrays
print(a > 2)
print(b == 6)
c = a > 2
print(c)
print(type(c))
print(c.dtype)
# Indexing arrays
print(a[0:2])
c = np.random.rand(3,3)
print(c)
print('\n')
print(c[1:3,0:2])
c[0,:] = a
print('\n')
print(c)
# Arrays can also be indexed with other boolean arrays
print(a)
print(b)
print(a > 2)
print(a[a > 2])
print(b[a > 2])
b[a == 3] = 77
print(b)
# ndarrays have attributes in addition to methods
#c.
print(c.shape)
print(c.prod())
# There are handy ways to make arrays full of ones and zeros
print(np.zeros(5), '\n')
print(np.ones(5), '\n')
print(np.identity(5), '\n')
# You can also easily make arrays of number sequences
print(np.arange(0, 10, 2))
```
### EXERCISE 3 - Using Arrays for simple analysis
Revisit your list of heights
1. turn it into an array
2. calculate the mean
3. create a mask of all heights greater than a certain value (your choice)
4. find the mean of the masked heights
__BONUS__
1. find the number of heights greater than your threshold
2. mean() can take an optional argument called axis, which allows you to calculate the mean across different axes, eg across rows or across columns. Create an array with two dimensions (not equal sized) and calculate the mean across rows and mean across columns. Use 'shape' to understand how the means are calculated.
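A possible solution sketch (the threshold is an arbitrary choice):
```
# Possible solution for Exercise 3
heights_array = np.array(heights)
print(heights_array.mean())
mask = heights_array > 1.7        # arbitrary threshold
print(heights_array[mask].mean())
# Bonus: number of heights above the threshold
print(mask.sum())
```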
## 4. Repeating yourself
So far, everything that we've done could, in principle, be done by hand calculation. In this section
and the next, we really start to take advantage of the power of programming languages to do things
for us automatically.
We start here with ways to repeat yourself. The two most common ways of doing this are known as for
loops and while loops. For loops in Python are useful when you want to cycle over all of the items
in a collection (such as all of the elements of an array), and while loops are useful when you want to
cycle for an indefinite amount of time until some condition is met.
The basic examples below will work for looping over lists, tuples, and arrays. Looping over dictionaries
is a bit different, since there is a key and a value for each item in a dictionary. Have a look at the
Python docs for more information.
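For reference, here is a quick sketch of looping over a dictionary's keys and values, using the `convertors` dictionary from above:
```
# Looping over .items() gives key/value pairs
for name, value in convertors.items():
    print(name, '=', value)
```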
```
# A basic for loop - don't forget the white space!
wordlist = ['hi', 'hello', 'bye']
for word in wordlist:
print(word + '!')
```
**Note on indentation**: Notice the indentation once we enter the for loop. Every indented statement after the for loop declaration is part of the for loop. This rule holds true for while loops, if statements, functions, etc. Required indentation is one of the reasons Python is such a beautiful language to read.
If you do not have consistent indentation you will get an `IndentationError`. Fortunately, most code editors will ensure your indentation is correct.
__NOTE__ In Python the convention is to use four (4) spaces for each level of indentation; most editors can be configured to follow this guide.
```
# Indentation error: Fix it!
for word in wordlist:
new_word = word.capitalize()
print(new_word + '!') # Bad indent
# Sum all of the values in a collection using a for loop
numlist = [1, 4, 77, 3]
total = 0
for num in numlist:
total = total + num
print("Sum is", total)
# Often we want to loop over the indexes of a collection, not just the items
print(wordlist)
for i, word in enumerate(wordlist):
print(i, word, wordlist[i])
# While loops are useful when you don't know how many steps you will need,
# and want to stop once a certain condition is met.
step = 0
prod = 1
while prod < 100:
step = step + 1
prod = prod * 2
print(step, prod)
print('Reached a product of', prod, 'at step number', step)
```
__TIP:__ Once we start really generating useful and large collections of data, it becomes unwieldy to
inspect our results manually. The code below shows how to make a very simple plot of an array.
We'll do much more plotting later on, this is just to get started.
```
# Load up matplotlib, a useful plotting library
%matplotlib inline
import matplotlib.pyplot as plt
# Make some x and y data and plot it
y = np.arange(100)**2
plt.plot(y)
```
### EXERCISE 4 - Variance
We can now calculate the variance of the heights we collected before.
As a reminder, **sample variance** is the calculated from the sum of squared differences of each observation from the mean:
### $variance = \frac{\Sigma{(x-mean)^2}}{n-1}$
where **mean** is the mean of our observations, **x** is each individual observation, and **n** is the number of observations.
First, we need to calculate the mean:
1. Create a variable `total` for the sum of the heights.
2. Using a `for` loop, add each height to `total`.
3. Find the mean by dividing this by the number of measurements, and store it as `mean`.
__Note__: To get the number of things in a list, use `len(the_list)`.
Now we'll use another loop to calculate the variance:
1. Create a variable `sum_diffsq` for the sum of squared differences.
2. Make a second `for` loop over `heights`.
- At each step, subtract the height from the mean and call it `diff`.
- Square this and call it `diffsq`.
- Add `diffsq` on to `sum_diffsq`.
3. Divide `sum_diffsq` by `n-1` to get the variance.
4. Display the variance.
__Note__: To square a number in Python, use `**`, eg. `5**2`.
__Bonus__
1. Test whether `variance` is larger than 0.01, and print out a line that says "variance more than 0.01: "
followed by the answer (either True or False).
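A possible solution sketch, following the steps above with the `heights` list:
```
# Possible solution for Exercise 4
total = 0
for height in heights:
    total = total + height
mean = total / len(heights)

sum_diffsq = 0
for height in heights:
    diff = height - mean
    diffsq = diff ** 2
    sum_diffsq = sum_diffsq + diffsq
variance = sum_diffsq / (len(heights) - 1)
print(variance)
# Bonus
print('variance more than 0.01:', variance > 0.01)
```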
## 5. Making choices
Often we want to check if a condition is True and take one action if it is, and another action if the
condition is False. We can achieve this in Python with an if statement.
__TIP:__ You can use any expression that returns a boolean value (True or False) in an if statement.
Common boolean operators are ==, !=, <, <=, >, >=. You can also use `is` and `is not` if you want to
check if two variables are identical in the sense that they are stored in the same location in memory.
```
# A simple if statement
x = 3
if x > 0:
print('x is positive')
elif x < 0:
print('x is negative')
else:
print('x is zero')
# If statements can rely on boolean variables
x = -1
test = (x > 0)
print(type(test)); print(test)
if test:
print('Test was true')
```
## 6. Creating chunks with functions and modules
One way to write a program is to simply string together commands, like the ones described above, in a long
file, and then to run that file to generate your results. This may work, but it can be cognitively difficult
to follow the logic of programs written in this style. Also, it does not allow you to reuse your code
easily - for example, what if we wanted to run our logistic growth model for several different choices of
initial parameters?
The most important ways to "chunk" code into more manageable pieces is to create functions and then
to gather these functions into modules, and eventually packages. Below we will discuss how to create
functions and modules. A third common type of "chunk" in Python is classes, but we will not be covering
object-oriented programming in this workshop.
```
# We've been using functions all day
x = 3.333333
print(round(x, 2))
print(np.sin(x))
# It's very easy to write your own functions
def multiply(x, y):
return x*y
# Once a function is "run" and saved in memory, it's available just like any other function
print(type(multiply))
print(multiply(4, 3))
# It's useful to include docstrings to describe what your function does
def say_hello(time, people):
'''
Function says a greeting. Useful for engendering goodwill
'''
return 'Good ' + time + ', ' + people
```
**Docstrings**: A docstring is a special type of comment that tells you what a function does. You can see them when you ask for help about a function.
```
say_hello('afternoon', 'friends')
# All arguments must be present, or the function will return an error
say_hello('afternoon')
# Keyword arguments can be used to make some arguments optional by giving them a default value
# All mandatory arguments must come first, in order
def say_hello(time, people='friends'):
return 'Good ' + time + ', ' + people
say_hello('afternoon')
say_hello('afternoon', 'students')
```
### EXERCISE 5 - Creating a variance function
Finally, let's turn our variance calculation into a function that we can use over and over again.
Copy your code from Exercise 4 into the box below, and do the following:
1. Turn your code into a function called `calculate_variance` that takes a list of values and returns their variance.
1. Write a nice docstring describing what your function does.
1. In a subsequent cell, call your function with different sets of numbers to make sure it works.
__Bonus__
1. Refactor your function by pulling out the section that calculates the mean into another function, and calling that inside your `calculate_variance` function.
2. Make sure it works properly when all the data are integers as well.
3. Give a better error message when it's passed an empty list. Use the web to find out how to raise exceptions in Python.
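One possible way to wrap the calculation into functions (a sketch, not the only solution):
```
# Possible solution for Exercise 5
def calculate_mean(values):
    '''Return the arithmetic mean of a list of numbers.'''
    total = 0
    for x in values:
        total = total + x
    return total / len(values)

def calculate_variance(values):
    '''Return the sample variance of a list of numbers.'''
    if len(values) < 2:
        raise ValueError('Need at least two values to calculate a variance')
    mean = calculate_mean(values)
    sum_diffsq = 0
    for x in values:
        sum_diffsq = sum_diffsq + (x - mean) ** 2
    return sum_diffsq / (len(values) - 1)

print(calculate_variance([1.60, 1.75, 1.82, 1.68, 1.70]))
```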
### EXERCISE 6 - Putting the `calculate_mean` and `calculate_variance` function(s) in a module
We can make our functions more easily reusable by placing them into modules that we can import, just
like we have been doing with `numpy`. It's pretty simple to do this.
1. Copy your function(s) into a new text file, in the same directory as this notebook,
called `stats.py`.
1. In the cell below, type `import stats` to import the module. Type `stats.` and hit tab to see the available
functions in the module. Try calculating the variance of a number of samples of heights (or other random numbers) using your imported module.
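A sketch of what the module usage might look like once `stats.py` exists (assuming it contains the functions from Exercise 5):
```
# Assumes you have created stats.py with calculate_mean and calculate_variance
import stats
print(stats.calculate_variance([1.60, 1.75, 1.82, 1.68, 1.70]))
```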
<a href="https://colab.research.google.com/github/khavitidala/fuyukai-desu/blob/main/literature/11_midlevel_data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#hide
!pip install -Uqq fastbook
import fastbook
#fastbook.setup_book()
#hide
from fastbook import *
from IPython.display import display,HTML
```
# Data Munging with fastai's Mid-Level API
## Going Deeper into fastai's Layered API
```
from fastai.text.all import *
dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test')
path = untar_data(URLs.IMDB)
dls = DataBlock(
blocks=(TextBlock.from_folder(path),CategoryBlock),
get_y = parent_label,
get_items=partial(get_text_files, folders=['train', 'test']),
splitter=GrandparentSplitter(valid_name='test')
).dataloaders(path)
```
### Transforms
```
files = get_text_files(path, folders = ['train', 'test'])
txts = L(o.open().read() for o in files[:2000])
tok = Tokenizer.from_folder(path)
tok.setup(txts)
toks = txts.map(tok)
toks[0]
num = Numericalize()
num.setup(toks)
nums = toks.map(num)
nums[0][:10]
nums_dec = num.decode(nums[0][:10]); nums_dec
tok.decode(nums_dec)
tok((txts[0], txts[1]))
```
### Writing Your Own Transform
```
def f(x:int): return x+1
tfm = Transform(f)
tfm(2),tfm(2.0)
@Transform
def f(x:int): return x+1
f(2),f(2.0)
class NormalizeMean(Transform):
def setups(self, items): self.mean = sum(items)/len(items)
def encodes(self, x): return x-self.mean
def decodes(self, x): return x+self.mean
tfm = NormalizeMean()
tfm.setup([1,2,3,4,5])
start = 2
y = tfm(start)
z = tfm.decode(y)
tfm.mean,y,z
```
### Pipeline
```
tfms = Pipeline([tok, num])
t = tfms(txts[0]); t[:20]
tfms.decode(t)[:100]
```
## TfmdLists and Datasets: Transformed Collections
### TfmdLists
```
tls = TfmdLists(files, [Tokenizer.from_folder(path), Numericalize])
t = tls[0]; t[:20]
tls.decode(t)[:100]
tls.show(t)
cut = int(len(files)*0.8)
splits = [list(range(cut)), list(range(cut,len(files)))]
tls = TfmdLists(files, [Tokenizer.from_folder(path), Numericalize],
splits=splits)
tls.valid[0][:20]
lbls = files.map(parent_label)
lbls
cat = Categorize()
cat.setup(lbls)
cat.vocab, cat(lbls[0])
tls_y = TfmdLists(files, [parent_label, Categorize()])
tls_y[0]
```
### Datasets
```
x_tfms = [Tokenizer.from_folder(path), Numericalize]
y_tfms = [parent_label, Categorize()]
dsets = Datasets(files, [x_tfms, y_tfms])
x,y = dsets[0]
x[:20],y
x_tfms = [Tokenizer.from_folder(path), Numericalize]
y_tfms = [parent_label, Categorize()]
dsets = Datasets(files, [x_tfms, y_tfms], splits=splits)
x,y = dsets.valid[0]
x[:20],y
t = dsets.valid[0]
dsets.decode(t)
dls = dsets.dataloaders(bs=64, before_batch=pad_input)
pad_input??
tfms = [[Tokenizer.from_folder(path), Numericalize], [parent_label, Categorize]]
files = get_text_files(path, folders = ['train', 'test'])
splits = GrandparentSplitter(valid_name='test')(files)
dsets = Datasets(files, tfms, splits=splits)
dls = dsets.dataloaders(dl_type=SortedDL, before_batch=pad_input)
path = untar_data(URLs.IMDB)
dls = DataBlock(
blocks=(TextBlock.from_folder(path),CategoryBlock),
get_y = parent_label,
get_items=partial(get_text_files, folders=['train', 'test']),
splitter=GrandparentSplitter(valid_name='test')
).dataloaders(path)
```
## Applying the Mid-Level Data API: SiamesePair
```
from fastai.vision.all import *
path = untar_data(URLs.PETS)
files = get_image_files(path/"images")
class SiameseImage(fastuple):
def show(self, ctx=None, **kwargs):
img1,img2,same_breed = self
if not isinstance(img1, Tensor):
if img2.size != img1.size: img2 = img2.resize(img1.size)
t1,t2 = tensor(img1),tensor(img2)
t1,t2 = t1.permute(2,0,1),t2.permute(2,0,1)
else: t1,t2 = img1,img2
line = t1.new_zeros(t1.shape[0], t1.shape[1], 10)
return show_image(torch.cat([t1,line,t2], dim=2),
title=same_breed, ctx=ctx)
img = PILImage.create(files[0])
s = SiameseImage(img, img, True)
s.show();
img1 = PILImage.create(files[1])
s1 = SiameseImage(img, img1, False)
s1.show();
s2 = Resize(224)(s1)
s2.show();
def label_func(fname):
return re.match(r'^(.*)_\d+.jpg$', fname.name).groups()[0]
class SiameseTransform(Transform):
def __init__(self, files, label_func, splits):
self.labels = files.map(label_func).unique()
self.lbl2files = {l: L(f for f in files if label_func(f) == l)
for l in self.labels}
self.label_func = label_func
self.valid = {f: self._draw(f) for f in files[splits[1]]}
def encodes(self, f):
f2,t = self.valid.get(f, self._draw(f))
img1,img2 = PILImage.create(f),PILImage.create(f2)
return SiameseImage(img1, img2, t)
def _draw(self, f):
same = random.random() < 0.5
cls = self.label_func(f)
if not same:
cls = random.choice(L(l for l in self.labels if l != cls))
return random.choice(self.lbl2files[cls]),same
splits = RandomSplitter()(files)
tfm = SiameseTransform(files, label_func, splits)
tfm(files[0]).show();
tls = TfmdLists(files, tfm, splits=splits)
show_at(tls.valid, 0);
dls = tls.dataloaders(after_item=[Resize(224), ToTensor],
after_batch=[IntToFloatTensor, Normalize.from_stats(*imagenet_stats)])
```
```
"""
Update Parameters Here
"""
COLLECTION_NAME = "MekaVerse"
CONTRACT = "0x9a534628b4062e123ce7ee2222ec20b86e16ca8f"
LAST_N_EVENTS = 150
GRIFTER_ADDRESS = ""  # optional overlay of grifter sales on the plot (grifter sales must have occurred in the last 'LAST_N_EVENTS' sales)
"""
@author: mdigi14
"""
import datetime
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from utils import opensea
from utils import constants
from utils import config
RARITY_DB = pd.read_csv(f"{config.RARITY_FOLDER}/{COLLECTION_NAME}_raritytools.csv")
sales = []
"""
Plot params
"""
plt.rcParams.update({"figure.facecolor": "white", "savefig.facecolor": "white"})
"""
Helper Functions
"""
def get_opensea_data(contract, continuous):
data = opensea.get_opensea_events(
contract_address=contract,
event_type="successful",
continuous=continuous,
)
return data
"""
Generate Plot
"""
if LAST_N_EVENTS <= constants.OPENSEA_MAX_LIMIT:
events = get_opensea_data(CONTRACT, continuous=False)
else:
events = get_opensea_data(CONTRACT, continuous=True)
events = events[:LAST_N_EVENTS]
for event in events:
try:
token_id = int(event["asset"]["token_id"])
sale = dict()
sale["TOKEN_ID"] = token_id
sale["USER"] = event["transaction"]["from_account"]["address"]
sale["SELLER"] = event["seller"]["address"]
sale["DATE"] = event["created_date"]
sale["RANK"] = int(RARITY_DB[RARITY_DB["TOKEN_ID"] == token_id]["Rank"])
sale["PRICE"] = float(event["total_price"]) / constants.ETHER_UNITS
except:
continue
sales.append(sale)
df = pd.DataFrame(sales)
df = df[df["RANK"].notna()]
df.to_csv(f"{config.ROOT_DATA_FOLDER}/recent_sales.csv")
X = df["RANK"].values.reshape(-1, 1)
Y = df["PRICE"].values.reshape(-1, 1)
linear_regressor = LinearRegression()
linear_regressor.fit(X, Y)
Y_pred = linear_regressor.predict(X)
df = df.sort_values(by="RANK")
ax = df.plot.scatter(
x="RANK",
y="PRICE",
grid=True,
alpha=0.5,
title=COLLECTION_NAME,
figsize=(14, 7),
)
if GRIFTER_ADDRESS != "":
GRIFTER_DB = df[df["SELLER"] == GRIFTER_ADDRESS]
ranks = GRIFTER_DB["RANK"]
prices = GRIFTER_DB["PRICE"]
plt.scatter(x=ranks, y=prices, color="black", s=25)
plt.plot(X, Y_pred, color="red")
plt.xlabel("Rarity Rank (lower rank is better)")
plt.ylabel("Price (Ether)")
plt.title(
f"{COLLECTION_NAME} - Last {LAST_N_EVENTS} Sales (Before {datetime.datetime.now(datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S %Z')})"
)
plt.savefig(f"{config.FIGURES_FOLDER}/{COLLECTION_NAME}_price_vs_rank.png")
plt.show()
```
<h1><strong><center>FocusedFashion</center><strong></h1>
<h3><center>Mary Gibbs and Jessica Fogerty</center></h3>
<h1><center>Introduction</center></h1>
<h2><center>Problem</center></h2>
<center>We all have our favorite pieces of clothing that we consistently wear. Over time, these clothing items may not fit anymore or degrade in quality. Subsequently, it may be difficult to find the same clothing item or time-intensive to find similar clothing items due to the vast amount of clothing websites and clothing retail stores. This is a common issue for both of us, and it is something we would like to streamline!</center>
<h2><center>Solution</center></h2>
<center>Our solution involves a two-step approach. First, we train a convolutional neural network on fashion images in order to extract the feature maps associated with different clothing items. Second, we use the feature maps as input to a KNN model that will find the five closest neighbors to a given query image that will serve as recommendations.</center>
<h1><center>Overview</center></h1>
<ul>
<li>Related Work</li>
<li>Dataset</li>
<li>Models</li>
<li>Recommendation</li>
<li>Conclusion</li>
<li>Limitations & Future Work</li>
<li>References</li>
</ul>
<h1> <center>Related Work</center></h1>
- <font size="5"> <b> Recommending Similar Fashion Images with Deep Learning (Le, 2019) </b>
- Used the DeepFashion dataset and focused on tops
- Used ResNet to classify the clothing into 6 classes and obtain feature maps
    - Implemented a nearest neighbors-based search on feature maps
- <b>Image Based Fashion Product Recommendation with Deep Learning (Tuinhof, Pirker, & Haltmeier, 2018) </b>
- Used the ImageLab Garment Selection and Color Classification dataset
- Used AlexNet and BN-inception to extract feature maps
- Implemented a KNN with Euclidean distance to return ranked recommendations
- <b>FashionNet: Personalized Outfit Recommendation with Deep Neural Network (He & Hu, 2018)</b>
- Used a personalized, user-based scoring approach based on fashion images that constitute an outfit
- Concatenated all images in an outfit and used CNN models, specifically VGGNet, to obtain a probability that the user likes or dislikes the outfit </font>
<h1><center>Dataset</center></h1>
<ul>
<h3><center>Dataset comes from the Kaggle competition: iMaterialist Challenge (Fashion) at FGVC5</center></h3>
<br>
<center>
<img src="kaggle_fashion_dataset.png" alt="centered image" />
</center>
</ul>
<h1><center>Dataset Preprocessing</center></h1>
<br>
<center>
<img src="dataset_summary.png" alt="centered image" />
</center>
<center>
<img src="labels_summary.png" alt="centered image" />
</center>
<h1><center>Label Distribution</center></h1>
<br>
<center>
<img src="labels_histogram.png" alt="centered image" />
</center>
Label Distribution
- Minimum number of labels for an image: 6
- Maximum number of labels for an image: 142
- Average number of labels for an image: 36
<h1><center>Label Frequency</center></h1>
<br>
<center>
<img src="labels_word_cloud.png" alt="centered image" />
</center>
<h1><center>Models</center></h1>
- <h5>Simple CNNs</h5>
- Our own architecture
<br>
- <h5>Pretrained CNN</h5>
- MobileNetV2
<br>
- <h5>Performance metrics</h5>
- Binary cross-entropy loss with logits
- Micro-averaged F1
<br>
<h1><center> Simple CNN </center></h1>
<h1><center> Architecture </center></h1>
<center>
<img src="simple_cnn_architecture.png" alt="centered image" />
</center>
<h1><center>Model Summary</center></h1>
<h4><center>Input Size: (3,50,50)</center></h4>
<center>
<img src="jessica_model_5_summary.png" alt="centered image"/>
</center>
<center>
<img src="convolution_calculations.png" alt="centered image"/>
</center>
<h1><center>Training Loss</center></h1>
<center>
<img src="jessica_model_5_loss_plot.png" alt="centered image" />
</center>
<h1><center>Test Performance</center></h1>
<br>
<center>
<img src="jessica_model_5_f1_plot.png" alt="centered image" />
</center>
<h1><center>MobileNetV2</center></h1>
<h1><center>Convolutions</center></h1>
<h3><center>Makes use of depthwise convolutions followed by pointwise convolutions</center></h3>
<br>
<center>
<img src="mobilenet_diagram_1.png" alt="centered image" />
</center>
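As a small added illustration (not from the original slides), a depthwise-separable block can be written as a depthwise convolution followed by a 1x1 pointwise convolution:
```
# Sketch of a depthwise-separable convolution block (illustrative only)
import torch.nn as nn

def depthwise_separable(in_ch, out_ch):
    return nn.Sequential(
        nn.Conv2d(in_ch, in_ch, kernel_size=3, padding=1, groups=in_ch),  # depthwise: one 3x3 filter per channel
        nn.Conv2d(in_ch, out_ch, kernel_size=1),                          # pointwise: 1x1 conv mixes channels
    )
```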
<h1><center>Architecture</center></h1>
<br>
<center>
<img src="mobilenet_diagram_2.png" alt="centered image" />
</center>
MobileNetV2
- Expansion layer - input low dimension, increase dimensions
- Depthwise layer - filter on high dimensions
- Projection layer - decrease dimensions, output low dimension
- Residual connection - gradient flow
<h1><center>Training Loss</center></h1>
<br>
<center>
<img src="mobilenet_loss_plot.png" alt="centered image" />
</center>
<h1><center>Test Performance</center></h1>
<br>
<center>
<img src="mobilenet_f1_plot.png" alt="centered image" />
</center>
<h1><center>Recommendation System</center></h1>
<br>
<center>
<img src="recommendation_diagram.png" alt="centered image" />
</center>
<h1><center>Recommendations</center></h1>
<br>
<center>
<img src="jeans_recommendations.png" alt="centered image" />
</center>
<h1><center>Recommendations</center></h1>
<br>
<center>
<img src="skirt_recommendations.png" alt="centered image" />
</center>
<h1><center>Recommendations</center></h1>
<br>
<center>
<img src="jessica_recommendations.png" alt="centered image" />
</center>
<h1><center>Conclusion</center></h1>
<ul>
<li>Built a deep learning-based fashion recommendation system</li>
<li>MobileNetV2 feature extraction w/ KNN ranking > Simple CNN feature extraction w/ KNN ranking > Baseline KNN ranking</li>
<li>Euclidean distance was used as the similarity metric for KNN ranking</li>
</ul>
<h1><center>Limitations</center></h1>
<ul>
<li>Due to memory constraints, we could only use a small image/batch size</li>
<li>With 300,000 images, training models is a time-intensive process</li>
<li>Fashion dataset contained a class imbalance and did not contain bounding boxes or pose estimation</li>
</ul>
<h1><center>Future Work</center></h1>
<ul>
<li>Deal with class imbalance in the fashion dataset</li>
<li>Work with the DeepFashion and DeepFashion2 datasets, which include bounding boxes and pose estimation that we can use to extract feature maps for individual clothing items</li>
<li>Try assessing model performance based on Top-k accuracy since we are only interested in the first k items being correct/relevant </li>
</ul>
<h1><center>References</center></h1>
<font size="4.5">- He, T., & Hu, Y. (2018). FashionNet: Personalized Outfit Recommendation with Deep Neural Network, 1–8.<br>
- Hollemans, M. (n.d.). Retrieved from https://machinethink.net/blog/googles-mobile-net-architecture-on-iphone/.<br>
- Howard, A. G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., … Adam, H. (2017). MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications.<br>
- Huang, G., Liu, Z., & van der Maaten, L. (2018). Densely Connected Convolutional Networks. Densely Connected Convolutional Networks, 1–8. Retrieved from https://arxiv.org/pdf/1608.06993.pdf <br>
- Le, J. (2019, August 16). Recommending Similar Fashion Images with Deep Learning. Retrieved from https://blog.floydhub.com/similar-fashion-images/. <br>
- Nazi, Z. A., & Abir, T. A. (2018). Automatic Skin Lesion Segmentation and Melanoma Detection: Transfer Learning approach with U-Net and DCNN-SVM. International Joint Conference on Computational Intelligence. Retrieved from https://www.researchgate.net/publication/330564744_Automatic_Skin_Lesion_Segmentation_and_Melanoma_Detection_Transfer_Learning_approach_with_U-Net_and_DCNN-SVM <br>
- Tsang, S.-H. (2019, March 20). Review: DenseNet - Dense Convolutional Network (Image Classification). Retrieved from https://towardsdatascience.com/review-densenet-image-classification-b6631a8ef803.<br>
- Tuinhof, H., Pirker, C., & Haltmeier, M. (2018). Image Based Fashion Product Recommendation with Deep Learning, 1–10.</font>
# OLCI spatial plotting, quality control and data interrogation
Version: 2.0
Date: 10/04/2019
Author: Ben Loveday and Hayley Evers-King (Plymouth Marine Laboratory)
Credit: This code was developed for EUMETSAT under contracts for the Copernicus
programme.
License: This code is offered as free-to-use in the public domain, with no warranty.
The aim of this code is to introduce you to Python and to simply import a netCDF file into your Python workspace, conduct some basic operations, and plot an image. In this case, we will be using a level-2 OLCI image, but the script can be easily adapted to plot any netCDF variable.
The first step in any python code is usually to import libraries that you will need. Libraries are usually code modules that perform specific tasks or provide specific capability (e.g. statistical analysis or plotting routines). In this case we will import the xarray library for handling netCDF files, the numpy library which will help to conduct various operations on the data, and the matplotlib plotting library to generate some images. We will also import the os library, which allows python access to some command-line-esque capability like 'list directory', as well as the python library that governs the reporting of warnings (so that we can turn them off here, and make the code run without being so 'noisy').
```
%matplotlib inline
# libraries are imported here, and we can import any library with an alias that allows us easy access to them later.
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import os
import warnings
warnings.filterwarnings('ignore')
```
Usually we also define functions at the top of a Python script. Functions are routines that can be called elsewhere in our script and perform a specific task. Typically we would use a function to take care of any process that we are going to perform more than once. The box below defines a function that will mask our data according to quality flags. We will call this function later on.
```
def flag_data_fast(flags_we_want, flag_names, flag_values, flag_data, flag_type='WQSF'):
flag_bits = np.uint64()
if flag_type == 'SST':
flag_bits = np.uint8()
elif flag_type == 'WQSF_lsb':
flag_bits = np.uint32()
for flag in flags_we_want:
try:
flag_bits = flag_bits | flag_values[flag_names.index(flag)]
except:
print(flag + " not present")
return (flag_data & flag_bits) > 0
```
Now we will start our script, proper.
To run this script, you will need to point it to where your data is. If you keep your scripts and files in the same folder, you will not need to set the full path to the data file. However you may want to store things in different places and so it is good practice to be specific. To help to find your data, please complete the MYPATH variable below with the output generated by the /Configuration_Testing/Data_Path_Checker.ipynb Jupyter notebook in the Configuration_Testing folder.
```
# e.g. MYPATH = os.path.join("C:/","Users","me","Desktop")
MYPATH = "<please insert your path from Data_Path_Checker.ipynb here, removing the quotes and chevrons>"
input_root = os.path.join(MYPATH,'OLCI_test_data')
input_path = 'S3A_OL_2_WRR____20180203T061351_20180203T065737_20180204T113446_2626_027_248______MAR_O_NT_002.SEN3'
file_name_chl = 'chl_nn.nc'
```
We'll quickly check, in the next box, that your data path is ok and that the data file exists.
```
# quick path length check (some windows versions have a problem with long file paths)
if len(os.path.join(input_root,input_path,file_name_chl)) > 259 \
or len(os.path.join(input_root,input_path,file_name_chl)) > 248:
print('Beware, your path name is quite long. Consider moving your data to a new directory')
else:
print('Path length name seems fine')
if os.path.exists(os.path.join(input_root,input_path,file_name_chl)):
print('Found the required data file')
else:
print('Data file missing. Please check your path and file name')
```
We read the file using the xarray library (alias "xr"). Note that to use a library in python you use the imported alias followed by a dot, and then the function you want (e.g. xr.open_dataset).
```
OLCI_file = xr.open_dataset(os.path.join(input_root,input_path,file_name_chl))
```
To access a variable you can use the following command, where the name of the variable you are interested in follows the hash. If you remove the hash in the following box, put the cursor after the dot and hit 'tab', you will be presented with a list of all of the variables and methods associated with the OLCI_file object. Python is an 'object oriented' language, which means that all objects have relevant methods associated with them.
note: If you want to run all this code in one go, remember to put the hash back at the start of this line beforehand.
```
#OLCI_file.
```
So, let's load in some data, and then close our data file
```
CHL = OLCI_file.CHL_NN.data
OLCI_file.close()
```
You can look at the variables in your workspace in interactive python
environments (like this, or ipython) by typing 'whos'. This will tell you the name of the variable, its type, and then information on it, such as its size and shape.
```
#whos
```
Let's take a look at our data...
```
plt.imshow(CHL);
```
This is not the prettiest plot - Python can do much better. For a start, we may wish to look at a smaller area. We'll do this now, using the relevant indexes for the area of data you wish to use.
```
row1=4000
row2=8000
col1=0
col2=3000
CHL_subset = CHL[row1:row2, col1:col2]
plt.imshow(CHL_subset);
```
You will notice a few problems with displaying plots like this. Firstly - they don't look very pretty (the colour scheme is not ideal, it is hard to see the coastline, and you can't differentiate the land from cloud), and secondly - the axes don't provide any information on the location (other than within the array) and there is no colour bar.
To make a better plot we will need to add a few more tools to the libraries/modules we've currently imported. Below are a few lines of code to import cartopy (which will help us make a better, map based plot) and a few other tools to tweak how the data is displayed.
The Cartopy module allows us to use map projections to display data in a geographically relevant way. For those that are familiar with Python, Cartopy has largely replaced the Basemap library.
```
import cartopy.crs as ccrs
import cartopy.feature as cfeature
land_resolution = '50m'
land_poly = cfeature.NaturalEarthFeature('physical', 'land', land_resolution,
edgecolor='k',
facecolor=cfeature.COLORS['land'])
```
We will also need to load other data to make the plot - the longitude and latitude data associated with each pixel of the chlorophyll data. This data can be found in the geo_coordinates.nc file, within each S3 OLCI L2 folder. We load this in a very similar way to how we loaded the chlorophyll data, just with different file and variable names. The data path remains the same, referring to the folder that contains all the netcdf files.
```
file_name_geo = 'geo_coordinates.nc'
GEO_file = xr.open_dataset(os.path.join(input_root,input_path,file_name_geo))
LAT = GEO_file.variables['latitude'][:]
LON = GEO_file.variables['longitude'][:]
LAT_subset = LAT[row1:row2, col1:col2]
LON_subset = LON[row1:row2, col1:col2]
GEO_file.close()
```
Then we need to initialise the map we will use for plotting. The important things to choose here are:
1. the projection you wish to use (this may depend on your region of interest, particularly if you are looking at polar data, more information about the different projects is available here: https://scitools.org.uk/cartopy/docs/v0.15/crs/projections.html)
2. The limits of your map (by default, this will be set to your data limits)
3. The resolution of the map coastline.
See more information about the options for Cartopy here: https://scitools.org.uk/cartopy/docs/latest/
(If you don't have Cartopy installed, you can type "conda install -c scitools/label/archive cartopy" in your command prompt, with the Anaconda distribution).
We start by defining a figure (line 1 below) and then defining a map projection (line 2). All mapping instructions are taken care of using our map object 'm'. Now we make the plot (this may take some time to draw!)
```
fig1 = plt.figure(figsize=(20, 20), dpi=300)
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
f1 = plt.pcolormesh(LON, LAT, np.ma.masked_invalid(CHL), shading='flat', vmin=np.log10(0.01), vmax=np.log10(50), cmap=plt.cm.viridis)
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07, ticks=[np.log10(0.01), np.log10(0.1),np.log10(0.5), np.log10(1),np.log10(3),np.log10(10),np.log10(50)])
cbar.ax.set_xticklabels(['0.01','0.1','0.5','1','3','10','50'], fontsize=20)
cbar.set_label('Chlorophyll, mg.m$^{-3}$', fontsize=20)
plt.title('OLCI [CHL_NN] mg.m$^{-3}$', fontsize=20);
plt.show()
```
You can also save the figure using the code below (this will save in the folder where you are running the code, if you want to save elsewhere you need to specify the path).
```
fig2 = plt.figure(figsize=(20, 20), dpi=300)
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
f1 = plt.pcolormesh(LON_subset,LAT_subset,np.ma.masked_invalid(CHL_subset), shading='flat', vmin=np.log10(0.01), vmax=np.log10(50), cmap=plt.cm.viridis)
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07, ticks=[np.log10(0.01), np.log10(0.1),np.log10(0.5), np.log10(1),np.log10(3),np.log10(10),np.log10(50)])
cbar.ax.set_xticklabels(['0.01','0.1','0.5','1','3','10','50'], fontsize=20)
cbar.set_label('Chlorophyll, mg.m$^{-3}$', fontsize=20)
plt.title('OLCI [CHL_NN] mg.m$^{-3}$', fontsize=20);
plt.show()
fig2.savefig('OLCI_CHL_spatial_demo_no_flags.png', bbox_inches='tight')
```
However, this data is not flag masked. This means that we may have data that is subject to glint, or cloud, or a variety of other conditions that variously undermine quality. So, let's apply some flags. We are going to flag extensively, removing all data that corresponds to the following conditions...
```
flags_we_want = ['CLOUD', 'CLOUD_AMBIGUOUS', 'CLOUD_MARGIN', 'INVALID', 'COSMETIC', 'SATURATED', 'SUSPECT',
'HISOLZEN', 'HIGHGLINT', 'SNOW_ICE', 'AC_FAIL', 'WHITECAPS', 'ANNOT_ABSO_D', 'ANNOT_MIXR1',
'ANNOT_DROUT', 'ANNOT_TAU06', 'RWNEG_O2', 'RWNEG_O3', 'RWNEG_O4', 'RWNEG_O5', 'RWNEG_O6',
'RWNEG_O7', 'RWNEG_O8']
file_name_flags = 'wqsf.nc'
FLAG_file = xr.open_dataset(os.path.join(input_root,input_path,file_name_flags))
# get all the flag names
flag_names = FLAG_file['WQSF'].flag_meanings.split(' ')
# get all the flag bit values
flag_vals = FLAG_file['WQSF'].flag_masks
# get the flag field itself
FLAGS = FLAG_file.variables['WQSF'].data
FLAG_file.close()
# make the flag mask using the function we defined above "flag_data_fast"
flag_mask = flag_data_fast(flags_we_want, flag_names, flag_vals, FLAGS, flag_type='WQSF')
flag_mask = flag_mask.astype(float)
flag_mask[flag_mask == 0.0] = np.nan
# subset the flag mask
FLAG_subset = flag_mask[row1:row2, col1:col2]
```
And now we apply the flag data to our data and plot again...
```
CHL_subset[np.isfinite(FLAG_subset)] = np.nan
fig3 = plt.figure(figsize=(20, 20), dpi=300)
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
f1 = plt.pcolormesh(LON_subset,LAT_subset,np.ma.masked_invalid(CHL_subset), shading='flat', vmin=np.log10(0.01), vmax=np.log10(50), cmap=plt.cm.viridis)
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07, ticks=[np.log10(0.01), np.log10(0.1),np.log10(0.5), np.log10(1),np.log10(3),np.log10(10),np.log10(50)])
cbar.ax.set_xticklabels(['0.01','0.1','0.5','1','3','10','50'], fontsize=20)
cbar.set_label('Chlorophyll, mg.m$^{-3}$', fontsize=20)
plt.title('OLCI [CHL_NN] mg.m$^{-3}$', fontsize=20);
plt.show()
fig3.savefig('OLCI_CHL_spatial_demo_flags.png', bbox_inches='tight')
```
The flags_we_want variable can be customised to any flag combination required, and, if you wish, the box above can be adapted to plot flags instead of the CHL field. You could also run the flags_we_want routine with each flag individually, to get a mask for every flag (see the sketch below). That can then be used in more advanced plotting. But that's up to you...
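For example, a minimal sketch of that per-flag idea (an addition to this notebook, reusing the variables defined above; the dictionary name is arbitrary) might look like this:
```
# Build one mask per flag using the flag_data_fast function defined earlier
per_flag_masks = {}
for flag in flags_we_want:
    single_mask = flag_data_fast([flag], flag_names, flag_vals, FLAGS, flag_type='WQSF')
    per_flag_masks[flag] = single_mask[row1:row2, col1:col2]

# e.g. report how many pixels each flag affects in the subset region
for flag, mask in per_flag_masks.items():
    print(flag, int(mask.sum()), 'flagged pixels')
```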
<br> <a href="./index_ocean.ipynb"><< OLCI information</a><span style="float:right;"><a href="./13_OLCI_spectral_interrogation.ipynb">13 - Ocean and Land Colour Instrument - spectral interrogation >></a> <hr> <p style="text-align:left;">This project is licensed under the <a href="./LICENSE">MIT License</a> <span style="float:right;"><a href="https://gitlab.eumetsat.int/eo-lab-usc-open/ocean">View on GitLab</a> | <a href="https://training.eumetsat.int/">EUMETSAT Training</a> | <a href=mailto:training@eumetsat.int>Contact</a></span></p>
## Add weights to edges, remove excess nodes and compute shortest path
```
from math import inf
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import oot_graph_builder as ogb
canvas_size = 12
font_color = 'gray'
font_size = 20
font_weight = 'bold'
all_actors_df = pd.read_csv("resources/VerboseOcarina/actors.csv", sep=';')
all_transition_actors_df = pd.read_csv("resources/VerboseOcarina/transition_actors.csv", sep=';')
all_spawns_df = pd.read_csv("resources/VerboseOcarina/spawns.csv", sep=';')
G_scene = ogb.build_scene_graph(all_spawns_df, all_actors_df, all_transition_actors_df, 85, 0, False)
pos_dict = ogb.get_pos_dict(G_scene, True)
nx.set_edge_attributes(G_scene, inf, 'weight')
s85setup0_fastest_edge_weights = [
('s-459', '4846', 5.17),
('4846', 't-509', 9.94),
('t-509', '5138', 11.23),
('5138', '5148', 3.57),
('5148', '5137', 19.16),
('5137', 't-509', 7.98),
('t-509', '4885', 10.53),
('4885', '4876', 3.25),
('4876', '4850', 8.78),
('4850', '4881', 3.65),
('4881', '4918', 8.86),
('4918', '4861', 19.42),
# ('4881', '4861', 15.00),
]
G_scene.add_weighted_edges_from(s85setup0_fastest_edge_weights)
s85setup0_nodes_to_remove = [
'4844', # 4844,003B:0001,Ambient Sound Effects, Sound: Stream,,398,-29,-483,0,0,0,85,0,0,False,0
'4851', # 4851,0097:0000,Environmental Effects, Flying Fairies, Dustmotes,,355,1,-150,0,0,0,85,0,0,False,7
'5057', #,0097:0000,Environmental Effects, Flying Fairies, Dustmotes,,3409,-143,-818,0,0,0,85,1,0,False,0
'4897',
# Dummy Signpost in Room 0
# 4897,0141:0340,Square Signpost, Message ID: 0340,,-784,120,1675,0,32768,0,85,0,0,False,53
'5146',
# Dummy Bush in Room 2
# 5146,0125:FF00,Single Bush/Grass, Normal shrub. Random drops., Spawns set of 3 bugs: False, Random Drop Table: No Table,,-757,120,708,0,0,0,85,2,0,False,9
's-463',
    # Weird Spawn on top of Link's house
# 85,False,0,0,7,0000:0DFF,"Link, Spawn Type: Stand, Camera Initial Focus: No Special Positioning,",-40,344,1244,0,26396,0
]
G_scene.remove_nodes_from(s85setup0_nodes_to_remove)
print(G_scene)
plt.figure(figsize=(canvas_size, canvas_size))
nx.draw(G_scene,
pos_dict,
arrows=False,
with_labels=True,
font_color=font_color,
font_size=font_size,
)
```
## Highlight route
```
route = nx.shortest_path(G_scene,
source='s-459',
target='4861',
weight='weight'
)
print(route)
print(nx.path_weight(G_scene, route, 'weight'))
print(G_scene)
route_edges = list(zip(route, route[1:]))
plt.figure(figsize=(canvas_size, canvas_size))
nx.draw(G_scene,
pos=pos_dict,
arrows=False,
alpha=0.5,
node_size=50)
nx.draw_networkx_nodes(G_scene,
pos_dict,
nodelist=route,
node_color='r',
node_size=100
)
nx.draw_networkx_edges(G_scene,
pos_dict,
edgelist=route_edges,
arrows=True,
edge_color='r',
width=2
)
nx.draw_networkx_edge_labels(G_scene,
pos_dict,
edge_labels={(u, v): w for u, v, w in G_scene.edges.data('weight') if
(u, v) in route_edges},
)
nx.draw_networkx_labels(G_scene,
pos_dict,
labels={k: v.split(',')[0] for k, v in G_scene.nodes(data='description') if k in route},
font_color=font_color,
# font_size=font_size,
font_weight=font_weight
)
```
## Experimental GraphML export
```
# nx.write_graphml(G_scene, f"output/scene{85}.graphml")
```
The purpose of this notebook is to download the assembled contigs, blast them against nt and nr, then return those contigs to AWS.
1. Set up an m5a.12xlarge instance, with keys and ports for jupyter from the `czbiohub-miniconda` AMI. Storage is at `/mnt/data`.
`aegea launch --iam-role S3fromEC2 --ami-tags Name=czbiohub-jupyter -t m5a.12xlarge batson-blast`
Setup jupyter on the remote
`aegea ssh batson@batson-blast`
`tmux`
`jupyter notebook`
Port forwarding from laptop
`aegea ssh batson@batson-blast -NL localhost:8899:localhost:8888`
2. Download contigs
`mkdir /mnt/data/contigs`
`aws s3 sync s3://czbiohub-mosquito/contigs/ /mnt/data/contigs --exclude "*" --include "*.fasta" --dryrun`
3. Install requirements
conda install -c bioconda -c conda-forge blast
mkdir /mnt/data/blast
cd /mnt/data/blast
update_blastdb.pl --decompress nt nr taxdb
4. Run BLAST
Loop over all contigs, and run for each sample
`BLASTDB=/mnt/data/blast blastn -db nt -num_threads 48 -query /mnt/data/contigs/{SAMPLE}/contigs.fasta -outfmt 7 -out /mnt/data/contigs/{SAMPLE}/blastn_nt.m9 -evalue 1e-1`
`BLASTDB=/mnt/data/blast blastx -db nr -num_threads 48 -query /mnt/data/contigs/{SAMPLE}/contigs.fasta -outfmt 7 -out /mnt/data/contigs/{SAMPLE}/blastx_nr.m9 -evalue 1e-1`
5. Upload samples
`aws s3 sync /mnt/data/contigs/ s3://czbiohub-mosquito/contigs/ --exclude "*" --include "*.m9" --dryrun`
```
!mkdir /mnt/data/contigs
!aws s3 sync s3://czbiohub-mosquito/contigs/ /mnt/data/contigs --exclude "*" --include "*.fasta" --dryrun | wc -l
!aws s3 sync s3://czbiohub-mosquito/contigs/ /mnt/data/contigs --exclude "*" --include "*.fasta"
```
To download the contigs, we will sync to
`s3://czbiohub-mosquito/contigs/SAMPLE/contigs.fasta`
To set up the BLAST db, follow https://czbiohub.atlassian.net/wiki/spaces/DS/pages/903905690/nt+nr+BLAST+etc+on+EC2
```
samples = !ls /mnt/data/contigs
test_samples = ['CMS002_045c_Rb_S185_L004']
for sample in samples:
print("NT: Beginning sample ", sample)
!BLASTDB=/mnt/data/blast blastn -db nt -num_threads 48 \
-query /mnt/data/contigs/{sample}/contigs.fasta -outfmt "7 std staxid ssciname scomname stitle" \
-out /mnt/data/contigs/{sample}/blast_nt.m9 -evalue 1e-1
for sample in samples:
print("NR: Beginning sample ", sample)
!BLASTDB=/mnt/data/blast blastx -db nr -num_threads 48 \
-query /mnt/data/contigs/{sample}/contigs.fasta -outfmt "7 std staxid ssciname scomname stitle" \
-out /mnt/data/contigs/{sample}/blast_nr.m9 -evalue 1e-1
!BLASTDB=/mnt/data/blast blastn -task dc-megablast -db nt -num_threads 48 \
    -query /mnt/data/contigs/test.fasta -outfmt "7 std staxid ssciname scomname stitle" \
    -out /mnt/data/test_dc_megablast.m9 -evalue 1e-2
# BLAST output format used throughout: -outfmt "7 std staxid ssciname scomname stitle"
!blastx -help
!aws s3 sync /mnt/data/contigs/ s3://czbiohub-mosquito/contigs/ --exclude "*" --include "*_nt.m9" --dryrun
for sample in samples:
    !cat /mnt/data/contigs/{sample}/contigs.fasta | sed 's/>N/>{sample}~N/g' > /mnt/data/contigs/prefixed/{sample}_contigs.fasta
!ls /mnt/data/contigs/prefixed | wc -l
!cat /mnt/data/contigs/prefixed/*.fasta > /mnt/data/contigs/all.fasta
!head /mnt/data/contigs/all.fasta
!/home/ubuntu/plastbinary_linux_20160121/plast -p plastx \
-i /mnt/data/contigs/all.fasta \
-d /mnt/data/blast/nr.pal \
-o /mnt/data/plast_output.tab \
-e 1e-2 -a 48 -max-hit-per-query 30 -outfmt 1 \
-bargraph -verbose \
-max-database-size 200000000
!/home/ubuntu/plastbinary_linux_20160121/plast -h
# plast for nt
!/home/ubuntu/plastbinary_linux_20160121/plast -p plastn \
-i /mnt/data/contigs/all.fasta \
    -d /mnt/data/blast/nt.pal \
-o /mnt/data/plast_output.tab \
-e 1e-2 -a 48 -max-hit-per-query 30 -outfmt 1 \
-G 5 -E 2 -r 2 -q 3 \
-bargraph -verbose \
-max-database-size 200000000
!/home/ubuntu/plastbinary_linux_20160121/plast -p plastn \
-i /mnt/data/contigs/test.fasta \
-d /mnt/data/blast/nt.pal \
-o /mnt/data/plastn_test.tab \
-e 1e-2 -a 48 -max-hit-per-query 30 -outfmt 1 \
-G 5 -E 2 -r 2 -q 3 \
-bargraph -verbose \
-max-database-size 200000000
```
# Measurement Error Mitigation
## Introduction
The measurement calibration is used to mitigate measurement errors.
The main idea is to prepare all $2^n$ basis input states and compute the probability of measuring counts in the other basis states.
From these calibrations, it is possible to correct the average results of another experiment of interest. This notebook gives examples for how to use the ``ignis.mitigation.measurement`` module.
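Before diving into the module, here is a small, self-contained numerical illustration (an addition to this notebook, with made-up numbers) of the idea: the calibration matrix maps ideal outcome probabilities to noisy ones, so inverting it (exactly, or in a least-squares sense) recovers an estimate of the ideal counts.
```
# Toy single-qubit illustration of calibration-matrix correction (made-up numbers)
import numpy as np

# Column j holds the probabilities of each measured outcome when basis state j is
# prepared, so that counts_noisy ~= A @ counts_ideal.
A = np.array([[0.9, 0.25],
              [0.1, 0.75]])

counts_noisy = np.array([650.0, 350.0])        # observed counts for some experiment
counts_est = np.linalg.inv(A) @ counts_noisy   # simple inverse-style correction
print(counts_est)
```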
```
# Import general libraries (needed for functions)
import numpy as np
import time
# Import Qiskit classes
import qiskit
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer
from qiskit.providers.aer import noise
from qiskit.tools.visualization import plot_histogram
# Import measurement calibration functions
from qiskit.ignis.mitigation.measurement import (complete_meas_cal, tensored_meas_cal,
CompleteMeasFitter, TensoredMeasFitter)
```
## 3 Qubit Example of the Calibration Matrices
Assume that we would like to generate a calibration matrix for the 3 qubits Q2, Q3 and Q4 in a 5-qubit Quantum Register [Q0,Q1,Q2,Q3,Q4].
Since we have 3 qubits, there are $2^3=8$ possible quantum states.
## Generating Measurement Calibration Circuits
First, we generate a list of measurement calibration circuits for the full Hilbert space.
Each circuit creates a basis state.
If there are $n=3$ qubits, then we get $2^3=8$ calibration circuits.
The following function **complete_meas_cal** returns a list **meas_calibs** of `QuantumCircuit` objects containing the calibration circuits,
and a list **state_labels** of the calibration state labels.
The input to this function can be given in one of the following three forms:
- **qubit_list:** A list of qubits to perform the measurement correction on, or:
- **qr (QuantumRegister):** A quantum register, or:
- **cr (ClassicalRegister):** A classical register.
In addition, one can provide a string **circlabel**, which is added at the beginning of the circuit names for unique identification.
For example, in our case, the input is a 5-qubit `QuantumRegister` containing the qubits Q2,Q3,Q4:
```
# Generate the calibration circuits
qr = qiskit.QuantumRegister(5)
qubit_list = [2,3,4]
meas_calibs, state_labels = complete_meas_cal(qubit_list=qubit_list, qr=qr, circlabel='mcal')
```
Print the $2^3=8$ state labels (for the 3 qubits Q2,Q3,Q4):
```
state_labels
```
## Computing the Calibration Matrix
If we do not apply any noise, then the calibration matrix is expected to be the $8 \times 8$ identity matrix.
```
# Execute the calibration circuits without noise
backend = qiskit.Aer.get_backend('qasm_simulator')
job = qiskit.execute(meas_calibs, backend=backend, shots=1000)
cal_results = job.result()
# The calibration matrix without noise is the identity matrix
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
print(meas_fitter.cal_matrix)
```
Assume that we apply some noise model from Qiskit Aer to the 5 qubits,
then the calibration matrix will have most of its mass on the main diagonal, with some additional 'noise'.
Alternatively, we can execute the calibration circuits using an IBMQ provider.
```
# Generate a noise model for the 5 qubits
noise_model = noise.NoiseModel()
for qi in range(5):
read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1],[0.25,0.75]])
noise_model.add_readout_error(read_err, [qi])
# Execute the calibration circuits
backend = qiskit.Aer.get_backend('qasm_simulator')
job = qiskit.execute(meas_calibs, backend=backend, shots=1000, noise_model=noise_model)
cal_results = job.result()
# Calculate the calibration matrix with the noise model
meas_fitter = CompleteMeasFitter(cal_results, state_labels, qubit_list=qubit_list, circlabel='mcal')
print(meas_fitter.cal_matrix)
# Plot the calibration matrix
meas_fitter.plot_calibration()
```
## Analyzing the Results
We would like to compute the total measurement fidelity, and the measurement fidelity for a specific qubit, for example, Q0.
Since the on-diagonal elements of the calibration matrix are the probabilities of measuring state 'x' given preparation of state 'x',
then the normalized trace of this matrix (its trace divided by the number of basis states, $2^n$) is the average assignment fidelity.
```
# What is the measurement fidelity?
print("Average Measurement Fidelity: %f" % meas_fitter.readout_fidelity())
# What is the measurement fidelity of Q0?
print("Average Measurement Fidelity of Q0: %f" % meas_fitter.readout_fidelity(
label_list = [['000','001','010','011'],['100','101','110','111']]))
```
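As a quick sanity check (an addition, not part of the original notebook), the value reported by `readout_fidelity()` should match the normalized trace of the calibration matrix computed directly:
```
# Average assignment fidelity computed by hand: mean of the diagonal elements
print(np.trace(meas_fitter.cal_matrix) / len(state_labels))
```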
## Applying the Calibration
We now perform another experiment and correct the measured results.
## Correct Measurement Noise on a 3Q GHZ State
As an example, we start with the 3-qubit GHZ state on the qubits Q2,Q3,Q4:
$$ \mid GHZ \rangle = \frac{\mid{000} \rangle + \mid{111} \rangle}{\sqrt{2}}$$
```
# Make a 3Q GHZ state
cr = ClassicalRegister(3)
ghz = QuantumCircuit(qr, cr)
ghz.h(qr[2])
ghz.cx(qr[2], qr[3])
ghz.cx(qr[3], qr[4])
ghz.measure(qr[2],cr[0])
ghz.measure(qr[3],cr[1])
ghz.measure(qr[4],cr[2])
```
We now execute the GHZ circuit (with the noise model above):
```
job = qiskit.execute([ghz], backend=backend, shots=5000, noise_model=noise_model)
results = job.result()
```
We now compute the results without any error mitigation and with the mitigation, namely after applying the calibration matrix to the results.
There are two fitting methods for applying the calibration (if no method is defined, then 'least_squares' is used).
- **'pseudo_inverse'**, which is a direct inversion of the calibration matrix,
- **'least_squares'**, which constrains to have physical probabilities.
The raw data to be corrected can be given in a number of forms:
- Form1: A counts dictionary from results.get_counts,
- Form2: A list of counts of length=len(state_labels),
- Form3: A list of counts of length=M*len(state_labels) where M is an integer (e.g. for use with the tomography data),
- Form4: A qiskit Result (e.g. results as above).
```
# Results without mitigation
raw_counts = results.get_counts()
# Get the filter object
meas_filter = meas_fitter.filter
# Results with mitigation
mitigated_results = meas_filter.apply(results)
mitigated_counts = mitigated_results.get_counts(0)
```
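The cell above uses the default fitting method; the following optional cell (an addition to the notebook) applies the same filter with each of the two methods named earlier, so their corrected counts can be compared directly:
```
# Compare the two fitting methods on the same raw counts
mitigated_pinv = meas_filter.apply(raw_counts, method='pseudo_inverse')
mitigated_lsq = meas_filter.apply(raw_counts, method='least_squares')
print('pseudo_inverse:', mitigated_pinv)
print('least_squares :', mitigated_lsq)
```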
We can now plot the results with and without error mitigation:
```
from qiskit.tools.visualization import *
plot_histogram([raw_counts, mitigated_counts], legend=['raw', 'mitigated'])
```
### Applying to a reduced subset of qubits
Consider now that we want to correct a 2Q Bell state, but we have the 3Q calibration matrix. We can reduce the matrix and build a new mitigation object.
```
# Make a 2Q Bell state between Q2 and Q4
cr = ClassicalRegister(2)
bell = QuantumCircuit(qr, cr)
bell.h(qr[2])
bell.cx(qr[2], qr[4])
bell.measure(qr[2],cr[0])
bell.measure(qr[4],cr[1])
job = qiskit.execute([bell], backend=backend, shots=5000, noise_model=noise_model)
results = job.result()
#build a fitter from the subset
meas_fitter_sub = meas_fitter.subset_fitter(qubit_sublist=[2,4])
#The calibration matrix is now in the space Q2/Q4
meas_fitter_sub.cal_matrix
# Results without mitigation
raw_counts = results.get_counts()
# Get the filter object
meas_filter_sub = meas_fitter_sub.filter
# Results with mitigation
mitigated_results = meas_filter_sub.apply(results)
mitigated_counts = mitigated_results.get_counts(0)
from qiskit.tools.visualization import *
plot_histogram([raw_counts, mitigated_counts], legend=['raw', 'mitigated'])
```
## Tensored mitigation
The calibration can be simplified if the error is known to be local. By "local error" we mean that the error can be tensored to subsets of qubits. In this case, less than $2^n$ states are needed for the computation of the calibration matrix.
Assume that the error acts locally on qubit 2 and the pair of qubits 3 and 4. Construct the calibration circuits by using the function `tensored_meas_cal`. Unlike before we need to explicitly divide the qubit list up into subset regions.
```
# Generate the calibration circuits
qr = qiskit.QuantumRegister(5)
mit_pattern = [[2],[3,4]]
meas_calibs, state_labels = tensored_meas_cal(mit_pattern=mit_pattern, qr=qr, circlabel='mcal')
```
We now retrieve the names of the generated circuits. Note that in each label (of length 3), the least significant bit corresponds to qubit 2, the middle bit corresponds to qubit 3, and the most significant bit corresponds to qubit 4.
```
for circ in meas_calibs:
print(circ.name)
```
Let us elaborate on the circuit names. We see that there are only four circuits, instead of eight. The total number of required circuits is $2^m$, where $m$ is the number of qubits in the largest subset (here $m=2$).
Each basis state of qubits 3 and 4 appears exactly once. Only two basis states are required for qubit 2, so these are split equally across the four experiments. For example, state '0' of qubit 2 appears in state labels '000' and '010'.
We now execute the calibration circuits on an Aer simulator, using the same noise model as before. This noise is in fact local to each of qubits 3 and 4 separately, but assume that we don't know that, and that we only know the error is local to qubit 2 and to the pair of qubits 3 and 4 as a block.
```
# Generate a noise model for the 5 qubits
noise_model = noise.NoiseModel()
for qi in range(5):
read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1],[0.25,0.75]])
noise_model.add_readout_error(read_err, [qi])
# Execute the calibration circuits
backend = qiskit.Aer.get_backend('qasm_simulator')
job = qiskit.execute(meas_calibs, backend=backend, shots=5000, noise_model=noise_model)
cal_results = job.result()
meas_fitter = TensoredMeasFitter(cal_results, mit_pattern=mit_pattern)
```
The fitter provides two calibration matrices. One matrix is for qubit 2, and the other matrix is for qubits 3 and 4.
```
print(meas_fitter.cal_matrices)
```
We can look at the readout fidelities of the individual tensored components or qubits within a set:
```
#readout fidelity of Q2
print('Readout fidelity of Q2: %f'%meas_fitter.readout_fidelity(0))
#readout fidelity of Q3/Q4
print('Readout fidelity of Q3/4 space (e.g. mean assignment '
'\nfidelity of 00,10,01 and 11): %f'%meas_fitter.readout_fidelity(1))
#readout fidelity of Q3
print('Readout fidelity of Q3: %f'%meas_fitter.readout_fidelity(1,[['00','10'],['01','11']]))
```
Plot the individual calibration matrices:
```
# Plot the calibration matrix
print('Q2 Calibration Matrix')
meas_fitter.plot_calibration(0)
print('Q3/Q4 Calibration Matrix')
meas_fitter.plot_calibration(1)
# Make a 3Q GHZ state
cr = ClassicalRegister(3)
ghz = QuantumCircuit(qr, cr)
ghz.h(qr[2])
ghz.cx(qr[2], qr[3])
ghz.cx(qr[3], qr[4])
ghz.measure(qr[2],cr[0])
ghz.measure(qr[3],cr[1])
ghz.measure(qr[4],cr[2])
```
We now execute the GHZ circuit (with the noise model above):
```
job = qiskit.execute([ghz], backend=backend, shots=5000, noise_model=noise_model)
results = job.result()
# Results without mitigation
raw_counts = results.get_counts()
# Get the filter object
meas_filter = meas_fitter.filter
# Results with mitigation
mitigated_results = meas_filter.apply(results)
mitigated_counts = mitigated_results.get_counts(0)
```
Plot the raw vs corrected state:
```
meas_filter = meas_fitter.filter
mitigated_results = meas_filter.apply(results)
mitigated_counts = mitigated_results.get_counts(0)
plot_histogram([raw_counts, mitigated_counts], legend=['raw', 'mitigated'])
```
As a check we should get the same answer if we build the full correction matrix from a tensor product of the subspace calibration matrices:
```
meas_calibs2, state_labels2 = complete_meas_cal([2,3,4])
meas_fitter2 = CompleteMeasFitter(None, state_labels2)
meas_fitter2.cal_matrix = np.kron(meas_fitter.cal_matrices[1],meas_fitter.cal_matrices[0])
meas_filter2 = meas_fitter2.filter
mitigated_results2 = meas_filter2.apply(results)
mitigated_counts2 = mitigated_results2.get_counts(0)
plot_histogram([raw_counts, mitigated_counts2], legend=['raw', 'mitigated'])
```
## Running Qiskit Algorithms with Measurement Error Mitigation
To use measurement error mitigation when running quantum circuits as part of a Qiskit algorithm, we need to include the respective measurement error fitting instance in the QuantumInstance. This object also holds the specifications for the chosen backend.
In the following, we illustrate measurement error mitigation for Qiskit algorithms using the example of finding the ground state of a Hamiltonian with VQE.
First, we need to import the libraries that provide backends as well as the classes that are needed to run the algorithm.
```
# Import qiskit functions and libraries
from qiskit import Aer, IBMQ
from qiskit.circuit.library import TwoLocal
from qiskit.utils import QuantumInstance
from qiskit.algorithms import VQE
from qiskit.algorithms.optimizers import COBYLA
from qiskit.opflow import X, Y, Z, I, CX, T, H, S, PrimitiveOp
from qiskit.providers.aer import noise
# Import error mitigation functions
from qiskit.ignis.mitigation.measurement import CompleteMeasFitter
```
Then, we initialize the instances that are required to execute the algorithm.
```
# Initialize Hamiltonian
h_op = (-1.0523732 * I^I) + \
(0.39793742 * I^Z) + \
(-0.3979374 * Z^I) + \
(-0.0112801 * Z^Z) + \
(0.18093119 * X^X)
# Initialize trial state
ansatz = TwoLocal(h_op.num_qubits, ['ry', 'rz'], 'cz', reps=3, entanglement='full')
# Initialize optimizer
optimizer = COBYLA(maxiter=350)
```
Here, we choose the Aer `qasm_simulator` as backend and also add a custom noise model.
The application of an actual quantum backend provided by IBMQ is outlined in the commented code.
```
# Generate a noise model
noise_model = noise.NoiseModel()
for qi in range(h_op.num_qubits):
read_err = noise.errors.readout_error.ReadoutError([[0.8, 0.2],[0.1,0.9]])
noise_model.add_readout_error(read_err, [qi])
# Initialize the backend configuration using measurement error mitigation with a QuantumInstance
qi_noise_model_qasm = QuantumInstance(backend=Aer.get_backend('qasm_simulator'), noise_model=noise_model, shots=1000,
measurement_error_mitigation_cls=CompleteMeasFitter,
measurement_error_mitigation_shots=1000)
# Initialize your TOKEN and provider with
# provider = IBMQ.get_provider(...)
# qi_noise_model_ibmq = QuantumInstance(backend=provider.get_backend(backend_name), shots=8000,
#                                       measurement_error_mitigation_cls=CompleteMeasFitter, measurement_error_mitigation_shots=8000)
# Initialize algorithm to find the ground state
vqe = VQE(ansatz, optimizer, quantum_instance=qi_noise_model_qasm)
```
Finally, we can run the algorithm and check the results.
```
# Run the algorithm
result = vqe.compute_minimum_eigenvalue(h_op)
print(result)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
```
# Plotting
There are many Python plotting libraries depending on your purpose. However, the standard general-purpose library is `matplotlib`. This is often used through its `pyplot` interface.
```
from matplotlib import pyplot
# NO CODE
%matplotlib inline
from matplotlib import rcParams
rcParams['figure.figsize']=(12,9)
from math import sin, pi
x = []
y = []
for i in range(201):
x_point = 0.01*i
x.append(x_point)
y.append(sin(pi*x_point)**2)
pyplot.plot(x, y)
pyplot.show()
```
We have defined two sequences - in this case lists, but tuples would also work. One contains the $x$-axis coordinates, the other the data points to appear on the $y$-axis. A basic plot is produced using the `plot` command of `pyplot`. However, this plot will not automatically appear on the screen, as after plotting the data you may wish to add additional information. Nothing will actually happen until you either save the figure to a file (using `pyplot.savefig(<filename>)`) or explicitly ask for it to be displayed (with the `show` command). When the plot is displayed the program will typically pause until you dismiss the plot.
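As a quick, minimal sketch of the save-to-file route mentioned above (the filename here is just an example):
```
from math import sin, pi
from matplotlib import pyplot

x = [0.01*i for i in range(201)]
y = [sin(pi*x_point)**2 for x_point in x]

pyplot.plot(x, y)
pyplot.savefig('sine_squared.png')   # write the figure to disk instead of displaying it
pyplot.close()                       # free the figure once it has been saved
```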
If using the notebook you can include the command `%matplotlib inline` or `%matplotlib notebook` before plotting to make the plots appear automatically inside the notebook. If code is included in a program which is run inside `spyder` through an IPython console, the figures may appear in the console automatically. Either way, it is good practice to always include the `show` command to explicitly display the plot.
This plotting interface is straightforward, but the results are not particularly nice. The following commands illustrate some of the ways of improving the plot:
```
from math import sin, pi
x = []
y = []
for i in range(201):
x_point = 0.01*i
x.append(x_point)
y.append(sin(pi*x_point)**2)
pyplot.plot(x, y, marker='+', markersize=8, linestyle=':',
linewidth=3, color='b', label=r'$\sin^2(\pi x)$')
pyplot.legend(loc='lower right')
pyplot.xlabel(r'$x$')
pyplot.ylabel(r'$y$')
pyplot.title('A basic plot')
pyplot.show()
```
Whilst most of the commands are self-explanatory, a note should be made of strings like `r'$x$'`. These strings are in LaTeX format, which is *the* standard typesetting method for professional-level mathematics. The `$` symbols surround the mathematics. The `r` before the definition of the string is Python notation, not LaTeX: it says that the following string is "raw", so backslash characters are left alone. Special LaTeX commands then have a backslash in front of them: here we use `\pi` and `\sin`. Most basic symbols can be easily guessed (e.g. `\theta` or `\int`), but there are [useful lists of symbols](http://www.artofproblemsolving.com/wiki/index.php/LaTeX:Symbols), and a [reverse search site](http://detexify.kirelabs.org/classify.html) available. We can also use `^` to denote superscripts (used here), `_` to denote subscripts, and `{}` to group terms.
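For instance, a title combining several of these pieces of notation might look like the following (a small illustrative example, not one of the main plots):
```
from matplotlib import pyplot

pyplot.plot([0, 1], [0, 1])
# Superscripts, subscripts, grouping and a Greek letter in one raw LaTeX string
pyplot.title(r'$y_1 = \int_0^1 \sin^2(\pi x)\, dx$')
pyplot.show()
```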
By combining these basic commands with other plotting types (`semilogx` and `loglog`, for example), most simple plots can be produced quickly.
Here are some more examples:
```
from math import sin, pi, exp, log
x = []
y1 = []
y2 = []
for i in range(201):
x_point = 1.0 + 0.01*i
x.append(x_point)
y1.append(exp(sin(pi*x_point)))
y2.append(log(pi+x_point*sin(x_point)))
pyplot.loglog(x, y1, linestyle='--', linewidth=4,
color='k', label=r'$y_1=e^{\sin(\pi x)}$')
pyplot.loglog(x, y2, linestyle='-.', linewidth=4,
color='r', label=r'$y_2=\log(\pi+x\sin(x))$')
pyplot.legend(loc='lower right')
pyplot.xlabel(r'$x$')
pyplot.ylabel(r'$y$')
pyplot.title('A basic logarithmic plot')
pyplot.show()
from math import sin, pi, exp, log
x = []
y1 = []
y2 = []
for i in range(201):
x_point = 1.0 + 0.01*i
x.append(x_point)
y1.append(exp(sin(pi*x_point)))
y2.append(log(pi+x_point*sin(x_point)))
pyplot.semilogy(x, y1, linestyle='None', marker='o',
color='g', label=r'$y_1=e^{\sin(\pi x)}$')
pyplot.semilogy(x, y2, linestyle='None', marker='^',
color='r', label=r'$y_2=\log(\pi+x\sin(x))$')
pyplot.legend(loc='lower right')
pyplot.xlabel(r'$x$')
pyplot.ylabel(r'$y$')
pyplot.title('A different logarithmic plot')
pyplot.show()
```
We will look at more complex plots later, but the [matplotlib documentation](http://matplotlib.org/api/pyplot_summary.html) contains a lot of details, and the [gallery](http://matplotlib.org/gallery.html) contains a lot of examples that can be adapted to fit. There is also an [extremely useful document](http://nbviewer.ipython.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-4-Matplotlib.ipynb) as part of [Johansson's lectures on scientific Python](https://github.com/jrjohansson/scientific-python-lectures), and an [introduction by Nicolas Rougier](http://www.labri.fr/perso/nrougier/teaching/matplotlib/matplotlib.html).
## Exercise: Logistic map
The logistic map builds a sequence of numbers $\{ x_n \}$ using the relation
$$ x_{n+1} = r x_n \left( 1 - x_n \right), $$
where $0 \le x_0 \le 1$.
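A minimal sketch of iterating this map (this is essentially Exercise 1 below, so you may prefer to attempt that before reading it):
```
def logistic_map(x0, r, N):
    """Return the first N members of the logistic map sequence."""
    sequence = [x0]
    for _ in range(N - 1):
        sequence.append(r * sequence[-1] * (1.0 - sequence[-1]))
    return sequence

print(logistic_map(0.5, 1.5, 10))
```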
### Exercise 1
Write a program that calculates the first $N$ members of the sequence, given as input $x_0$ and $r$ (and, of course, $N$).
### Exercise 2
Fix $x_0=0.5$. Calculate the first 2,000 members of the sequence for $r=1.5$ and $r=3.5$. Plot the last 100 members of the sequence in both cases.
What does this suggest about the long-term behaviour of the sequence?
### Exercise 3
Fix $x_0 = 0.5$. For each value of $r$ between $1$ and $4$, in steps of $0.01$, calculate the first 2,000 members of the sequence. Plot the last 1,000 members of the sequence on a plot where the $x$-axis is the value of $r$ and the $y$-axis is the values in the sequence. Do not plot lines - just plot markers (e.g., use the `'k.'` plotting style).
### Exercise 4
For iterative maps such as the logistic map, one of three things can occur:
1. The sequence settles down to a *fixed point*.
2. The sequence rotates through a finite number of values. This is called a *limit cycle*.
3. The sequence generates an infinite number of values. This is called *deterministic chaos*.
Using just your plot, or new plots from this data, work out approximate values of $r$ for which there is a transition from fixed points to limit cycles, from limit cycles of a given number of values to more values, and the transition to chaos.
Python for Bioinformatics
-----------------------------

This Jupyter notebook is intended to be used alongside the book [Python for Bioinformatics](http://py3.us/)
Chapter 20: Inferring Splicing Sites
-----------------------------
**Note:** These scripts require external files to be accessible. The following commands download these files from GitHub and from Amazon.
```
!curl https://s3.amazonaws.com/py4bio/TAIR.tar.bz2 -o TAIR.tar.bz2
!mkdir samples
!tar xvfj TAIR.tar.bz2 -C samples
!curl https://s3.amazonaws.com/py4bio/ncbi-blast-2.6.0.tar.bz2 -o ncbi-blast-2.6.0.tar.bz2
!tar xvfj ncbi-blast-2.6.0.tar.bz2
!curl https://s3.amazonaws.com/py4bio/clustalw2 -o clustalw2
!chmod a+x clustalw2
!ls
!conda install biopython -y
```
**Listing 20.1:** makedb.py: Convert data for entering into an SQLite database
**Note** The following program had to be adapted to work on Jupyter Notebook.
```
import sqlite3
from Bio import SeqIO
SEQ_FILE = open('samples/TAIR10_seq_20101214_updated')
CDS_FILE = open('samples/TAIR10_cds_20101214_updated')
AT_DB_FILE = 'AT.db'
at_d = {}
# Get all sequences from TAIR sequences file.
for record in SeqIO.parse(SEQ_FILE, 'fasta'):
sid = record.id
seq = str(record.seq)
at_d[sid] = [seq]
# Get all sequences from TAIR CDS file.
for record in SeqIO.parse(CDS_FILE, 'fasta'):
sid = record.id
seq = str(record.seq)
at_d[sid].append(seq)
# Write to the SQLite database only the entries of the dictionary
# that have data from both sources
conn = sqlite3.connect(AT_DB_FILE)
c = conn.cursor()
c.execute('create table seq(id, cds, full_seq)')
for seq_id in at_d:
if len(at_d[seq_id])==2:
# Write in this order: ID, CDS, FULL_SEQ.
c.execute('INSERT INTO seq VALUES (?,?,?)',
((seq_id, at_d[seq_id][1], at_d[seq_id][0])))
conn.commit()
conn.close()
```
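To check that the database was written correctly, a quick query can be run against it (a minimal sketch; it assumes the previous listing has already created `AT.db`):
```
import sqlite3

conn = sqlite3.connect('AT.db')
c = conn.cursor()
# Fetch a few records and show the sequence lengths stored for each ID
for sid, cds, full_seq in c.execute('SELECT id, cds, full_seq FROM seq LIMIT 3'):
    print(sid, len(cds), len(full_seq))
conn.close()
```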
**Listing 20.2:** estimateintrons.py: Estimate introns
```
#!/usr/bin/env python
import os
import sqlite3
from Bio import SeqIO, SeqRecord, Seq
from Bio.Align.Applications import ClustalwCommandline
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline as bn
from Bio import AlignIO
AT_DB_FILE = 'AT.db'
BLAST_EXE = 'ncbi-blast-2.6.0+/bin/blastn'
BLAST_DB = 'ncbi-blast-2.6.0+/db/TAIR10'
CLUSTALW_EXE = os.path.join(os.getcwd(), 'clustalw2')
input_sequence = """>XM_013747562.1 PREDICTED: Brassica oleracea var. oleracea
ATCTTTCGCGAGAGGTTCATTATTGTCCGGAAGAGGTGCTCATGTTTTGGTAAAGCGATCACAAGGTGTT
CGATACAATACCTGAGAGAGTTTCCACAGCTTTCTTCTGATTCTTACTCGGTTTGAGTGAGCTGGATCTT
CCACGACGAAGATGATGATCTTGGATGTTTGCAATGAGATTATAAAGATCCAGAAGCTAAGACGGGTTGT
CTCTTACGCTGGATTCTACTGCTTCACTGCAGCCCTCACATTCTTCTACACAAACAACACAACAAGAGCA
GGATTCTCCAGGGGAGATCAGTTTTATGCGTCTTACCCTGCGGGTACCGAACTTCTTACCGACACAGCTA
AGCTGTACAAAGCGGCGCTTGGGAATTGCTATGAATCTGAGGATTGGGGTCCTGTCGAGTTCTGCATAAT
GGCTAAGCATTTTGAGCGCCAGGGAAAGTCTCCATACGTTTACCACTCTCAATACATGGCTCACCTTCTT
TCACAAGGCCAACTTGATGGAAGTGGCTAGAGTCGTTGATGACTTGCAAGACAGCTCCTTTTTCAATCTG
TGTACCTAATCTTGTTATTGGAACTTCCTTCTTTACTCTTTTTCCGAATTTGTACGGCGATGGTATTTGA
GGTTACCACCAAGAAATATAAGAACATGTTCTGGTGTAGACAATGAATGTAATAAACACATAAGATCAGA
CCTTGATATGA
"""
with open('input_sequence.fasta', 'w') as in_seq:
in_seq.write(input_sequence)
def allgaps(seq):
"""Return a list with tuples containing all gap positions
and length. seq is a string."""
gaps = []
indash = False
for i, c in enumerate(seq):
if indash is False and c == '-':
c_ini = i
indash = True
dashn = 0
elif indash is True and c == '-':
dashn += 1
elif indash is True and c != '-':
indash = False
gaps.append((c_ini, dashn+1))
return gaps
def iss(user_seq):
"""Infer Splicing Sites from a FASTA file full of EST
sequences"""
with open('forblast','w') as forblastfh:
forblastfh.write(str(user_seq.seq))
blastn_cline = bn(cmd=BLAST_EXE, query='forblast',
db=BLAST_DB, evalue='1e-10', outfmt=5,
num_descriptions='1',
num_alignments='1', out='outfile.xml')
blastn_cline()
b_record = NCBIXML.read(open('outfile.xml'))
title = b_record.alignments[0].title
sid = title[title.index(' ')+1 : title.index(' |')]
# Polarity information of returned sequence.
# 1 = normal, -1 = reverse.
frame = b_record.alignments[0].hsps[0].frame[1]
# Run the SQLite query
conn = sqlite3.connect(AT_DB_FILE)
c = conn.cursor()
res_cur = c.execute('SELECT CDS, FULL_SEQ from seq '
'WHERE ID=?', (sid,))
cds, full_seq = res_cur.fetchone()
if cds=='':
print('There is no matching CDS')
exit()
# Check sequence polarity.
sidcds = '{0}-CDS'.format(sid)
sidseq = '{0}-SEQ'.format(sid)
if frame==1:
seqCDS = SeqRecord.SeqRecord(Seq.Seq(cds),
id = sidcds,
name = '',
description = '')
fullseq = SeqRecord.SeqRecord(Seq.Seq(full_seq),
id = sidseq,
name='',
description='')
else:
seqCDS = SeqRecord.SeqRecord(
Seq.Seq(cds).reverse_complement(),
id = sidcds, name='', description='')
fullseq = SeqRecord.SeqRecord(
Seq.Seq(full_seq).reverse_complement(),
id = sidseq, name = '', description='')
# A tuple with the user sequence and both AT sequences
allseqs = (record, seqCDS, fullseq)
with open('foralig.txt','w') as trifh:
# Write the file with the three sequences
SeqIO.write(allseqs, trifh, 'fasta')
# Do the alignment:
outfilename = '{0}.aln'.format(user_seq.id)
cline = ClustalwCommandline(CLUSTALW_EXE,
infile = 'foralig.txt',
outfile = outfilename,
)
cline()
# Walk over all sequences and look for query sequence
for seq in AlignIO.read(outfilename, 'clustal'):
if user_seq.id in seq.id:
seqstr = str(seq.seq)
gaps = allgaps(seqstr.strip('-'))
break
print('Original sequence: {0}'.format(user_seq.id))
print('\nBest match in AT CDS: {0}'.format(sid))
acc = 0
for i, gap in enumerate(gaps):
print('Putative intron #{0}: Start at position {1}, '
'length {2}'.format(i+1, gap[0]-acc, gap[1]))
acc += gap[1]
print('\n{0}'.format(seqstr.strip('-')))
print('\nAlignment file: {0}\n'.format(outfilename))
description = 'Program to infer intron position based on ' \
'Arabidopsis Thaliana genome'
with open('input_sequence.fasta', 'r') as seqhandle:
records = SeqIO.parse(seqhandle, 'fasta')
for record in records:
iss(record)
```
# Literature Review of Virus Propagation Models
There are many models for the spread of an epidemic. The simplest is the SIR model, as implemented in the Dash application of this project. It models how the population splits into 3 groups: Susceptible, Infected and Recovered. More complex models are often based on this SIR model, with additional assumptions built in. For example, the MSIR model assumes that newborn babies cannot contract the virus thanks to the protection of the mother's antibodies; this assumption is added to the SIR model by introducing an extra group: immune individuals.
Another extension of the SIR model is the SEIR model, which introduces an incubation period. SEIR models the evolution of membership in 4 groups: 1 - Susceptible, 2 - Exposed (infected but not yet infectious), 3 - Infected, 4 - Recovered. This model rests on 4 differential equations that depend on 3 parameters (the equations are written out below the list):
- alpha: the inverse of the incubation period,
- beta: the probability of infection,
- gamma: the probability of recovery.
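For reference, the four equations in the normalized form used by the discretized code below ($S$, $E$, $I$ and $R$ are population fractions) are:

$$\frac{dS}{dt} = -\beta S I, \qquad \frac{dE}{dt} = \beta S I - \alpha E, \qquad \frac{dI}{dt} = \alpha E - \gamma I, \qquad \frac{dR}{dt} = \gamma I.$$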
The Threshold model can also be applied to virus propagation and is not based on the SIR model. In this model, during an epidemic, an individual has two distinct and mutually exclusive behavioural alternatives, for example the decision to attend or not to attend a gathering. An individual's decision depends on the percentage of their neighbours who have made the same choice, which imposes a threshold. The model works as follows: each individual has their own threshold; at each iteration, each individual is examined, and if the percentage of their infected neighbours is greater than their threshold, they become infected as well.
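A minimal sketch of that update rule (the neighbour lists and thresholds here are hypothetical; ndlib provides a ready-made threshold model):
```
def threshold_step(neighbours, infected, thresholds):
    """One iteration: a susceptible node becomes infected when the fraction
    of its infected neighbours exceeds its own threshold."""
    newly_infected = set(infected)
    for node, nbrs in neighbours.items():
        if node in infected or not nbrs:
            continue
        infected_fraction = sum(n in infected for n in nbrs) / len(nbrs)
        if infected_fraction > thresholds[node]:
            newly_infected.add(node)
    return newly_infected

# Tiny example: node 2 has two infected neighbours out of two, above its 0.5 threshold
neighbours = {1: [2], 2: [1, 3], 3: [2]}
print(threshold_step(neighbours, infected={1, 3}, thresholds={1: 0.5, 2: 0.5, 3: 0.5}))
```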
References:
- MSIR model: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_MSIR_model
- SEIR model:
https://towardsdatascience.com/social-distancing-to-slow-the-coronavirus-768292f04296?gi=53b98c3c5110, http://homepages.warwick.ac.uk/~masfz/ModelingInfectiousDiseases/Chapter2/Program_2.6/index.html,
https://ndlib.readthedocs.io/en/latest/reference/models/epidemics/SEIR.html
- Threshold model:
https://sociology.stanford.edu/sites/g/files/sbiybj9501/f/publications/threshold_models_ajs_1978.pdf, https://ndlib.readthedocs.io/en/latest/reference/models/epidemics/Threshold.html#id2
# Choosing a Model
The model chosen for this project is the SEIR model. Because it takes an incubation period into account, it should be well suited to the available Covid-19 data, since this virus also has an incubation period of 1 to 14 days (according to the WHO, https://www.who.int/fr/emergencies/diseases/novel-coronavirus-2019/advice-for-public/q-a-coronaviruses).
# Numerical Application to Coronavirus Data
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import datetime
import yaml
ENV_FILE = '../env.yaml'
with open(ENV_FILE) as f:
params = yaml.load(f, Loader=yaml.FullLoader)
# Initialize the paths to the data files
ROOT_DIR = os.path.dirname(os.path.abspath(ENV_FILE))
DATA_FILE = os.path.join(ROOT_DIR,
params['directories']['processed'],
params['files']['all_data'])
# Read the data file
epidemie_df = (pd.read_csv(DATA_FILE, parse_dates=["Last Update"])
.assign(day=lambda _df: _df['Last Update'].dt.date)
.drop_duplicates(subset=['Country/Region', 'Province/State', 'day'])
[lambda df: df.day <= datetime.date(2020, 3, 27)]
)
```
### Applying the Model to South Korea
```
korea_df = (epidemie_df[epidemie_df['Country/Region'] == 'South Korea']
.groupby(['Country/Region', 'day'])
.agg({'Confirmed': 'sum', 'Deaths': 'sum', 'Recovered': 'sum'})
.reset_index()
)
korea_df['infected'] = korea_df['Confirmed'].diff()
```
SEIR from scratch
```
alpha = 0.08
beta = 1.75
gamma = 0.5
size = korea_df.day.reset_index().index
def SEIR(t, init_vals):
S_0, E_0, I_0, R_0 = init_vals
S, E, I, R = [S_0], [E_0], [I_0], [R_0]
#alpha, beta, gamma = parameters
dt = t[1] - t[0]
for _ in t[1:]:
next_S = S[-1] - (beta*S[-1]*I[-1])*dt
next_E = E[-1] + (beta*S[-1]*I[-1] - alpha*E[-1])*dt
next_I = I[-1] + (alpha*E[-1] - gamma*I[-1])*dt
next_R = R[-1] + (gamma*I[-1])*dt
S.append(next_S)
E.append(next_E)
I.append(next_I)
R.append(next_R)
return np.stack([S, E, I, R]).T
def loss(parameters, N):
"""
    RMSE between the actual confirmed cases and the estimated number of infectious people for given alpha, beta and gamma.
"""
size = korea_df.day.reset_index().index
alpha, beta, gamma = parameters
def SEIR(t, init_vals):
S_0, E_0, I_0, R_0 = init_vals
S, E, I, R = [S_0], [E_0], [I_0], [R_0]
#alpha, beta, gamma = parameters
dt = t[1] - t[0]
for _ in t[1:]:
next_S = S[-1] - (beta*S[-1]*I[-1])*dt
next_E = E[-1] + (beta*S[-1]*I[-1] - alpha*E[-1])*dt
next_I = I[-1] + (alpha*E[-1] - gamma*I[-1])*dt
next_R = R[-1] + (gamma*I[-1])*dt
S.append(next_S)
E.append(next_E)
I.append(next_I)
R.append(next_R)
return np.stack([S, E, I, R]).T
solution = SEIR(size, [1 - 1/N, 1/N, 0, 0])[:,3]
# solution = solve_ivp(SEIR, [size-1], [51_470_000, 1, 0, 0], t_eval=np.arange(0, size, 1), vectorized=True)
return np.sqrt(np.mean((solution - korea_df['infected'])**2))
loss([alpha, beta, gamma], 51_470_000)
%%time
from scipy.optimize import minimize
from scipy.integrate import solve_ivp
msol = minimize(loss, [0.08, 1.75, 0.5], 51_470_000, method='Nelder-Mead') # does not work
#msol.x
%matplotlib inline
results = SEIR(size, [51_470_000, 1, 0, 0])
fig = plt.figure(figsize=(12, 5))
plt.plot(size, results[:,0], label='Susceptible');
plt.plot(size, results[:,1], label='Exposed');
plt.plot(size, results[:,2], label='Infected');
plt.plot(size, results[:,3], label='Recovered');
plt.plot(korea_df.day.index, korea_df.infected, label='S.Korea infected')
plt.plot
plt.legend()
plt.show()
```
SEIR with the ndlib library
```
import networkx as nx
import ndlib.models.ModelConfig as mc
import ndlib.models.epidemics as ep
alpha = 0.08
beta = 1.75
gamma = 0.5
nb_iterations = len(korea_df)  # number of simulation steps (one per observed day); iteration_bunch expects an int
N = 1000  # network size: not defined in the original notebook; a small value keeps the random graph tractable
fraction_infected = 0.01
def SEIR(parameters, nb_iterations, N, fraction_infected):
alpha, beta, gamma = parameters
# Network topology
g = nx.erdos_renyi_graph(N, 0.1)
# Model selection
model = ep.SEIRModel(g)
# Model Configuration
cfg = mc.Configuration()
cfg.add_model_parameter('beta', beta)
cfg.add_model_parameter('gamma', gamma)
cfg.add_model_parameter('alpha', alpha)
cfg.add_model_parameter("fraction_infected", fraction_infected)
model.set_initial_status(cfg)
# Simulation execution
iterations = model.iteration_bunch(nb_iterations)
# Count the number of people in each state at each iteration
states_count = [sub['node_count'] for sub in iterations]
# Number of suceptibles at each iteration
susceptibles = np.array([dico[0] for dico in states_count])
# Number of exposed at each iteration
exposed = np.array([dico[1] for dico in states_count])
# Number of infected at each iteration
infected = np.array([dico[2] for dico in states_count])
# Number of recovered at each iteration
recovered = np.array([dico[3] for dico in states_count])
return(pd.DataFrame({'infected': infected}))
def loss(parameters):
"""
    RMSE between the actual confirmed cases and the estimated number of infectious people for given alpha, beta and gamma.
"""
def SEIR(parameters, nb_iterations, N, fraction_infected):
alpha, beta, gamma = parameters
# Network topology
g = nx.erdos_renyi_graph(N, 0.1)
# Model selection
model = ep.SEIRModel(g)
# Model Configuration
cfg = mc.Configuration()
cfg.add_model_parameter('beta', beta)
cfg.add_model_parameter('gamma', gamma)
cfg.add_model_parameter('alpha', alpha)
cfg.add_model_parameter("fraction_infected", fraction_infected)
model.set_initial_status(cfg)
# Simulation execution
iterations = model.iteration_bunch(nb_iterations)
# Count the number of people in each state at each iteration
states_count = [sub['node_count'] for sub in iterations]
# Number of suceptibles at each iteration
susceptibles = np.array([dico[0] for dico in states_count])
# Number of exposed at each iteration
exposed = np.array([dico[1] for dico in states_count])
# Number of infected at each iteration
infected = np.array([dico[2] for dico in states_count])
# Number of recovered at each iteration
recovered = np.array([dico[3] for dico in states_count])
return(pd.DataFrame({'infected': infected}))
solution = SEIR(parameters, nb_iterations, N, fraction_infected)
return np.sqrt(np.mean((solution - korea_df['infected'])**2))
%%time
from scipy.optimize import minimize
msol = minimize(loss, [alpha, beta, gamma], method='Nelder-Mead')
#msol.x
fig = plt.figure(figsize=(12, 5))
plt.plot(range(nb_iterations), SEIR([alpha, beta, gamma], nb_iterations, N, fraction_infected), label='Infected');
#plt.plot(korea_df.day.index, korea_df.infected/N*100, label='S.Korea infected')
plt.plot
plt.legend()
plt.show()
```
```
import pandas as pd
import numpy as np
import pymc3 as pm
import arviz as az
import theano.tensor as tt
import matplotlib.pyplot as plt
import seaborn as sns
import pygal
from IPython.display import SVG, display
from sklearn.metrics import mean_absolute_percentage_error as mape
import warnings
warnings.filterwarnings('ignore')
from helper import *
mediaFile = "proactiv_usa_march23.csv"
dfMedia = pd.read_csv(mediaFile).drop(columns=["holiday"]).set_index("Day")
dfMedia.head()
dfMedia = dfMedia[get_media_vars(dfMedia) + ["Orders"]]
dfMediaCombined = pd.DataFrame()
fringe = set([])
cols = sorted(dfMedia.columns.values)
for col in get_media_vars(dfMedia):
short = shorten_f_name(col)
if short in fringe:
dfMediaCombined[f"{short}_media_cost".lower()] += dfMedia[col]
else:
dfMediaCombined[f"{short}_media_cost".lower()] = dfMedia[col]
fringe.add(col)
dfMediaCombined["orders"] = dfMedia.Orders
dfMediaCombined.isna().sum()
```
checking model assumptions
```
to_keep = ["amazon_media_cost", "bingsearch_media_cost", "facebook_media_cost", "youtube_media_cost"]
dfMediaCombined = dfMediaCombined[to_keep + ["orders"]]
high = 0
threshold = 0.3
n = dfMediaCombined.shape[1]
for i in range(0, n):
for j in range(i+1, n):
if i != j:
col1, col2 = dfMediaCombined.iloc[:, i], dfMediaCombined.iloc[:, j]
r = np.corrcoef(col1, col2)[0][1]
if abs(r) >= threshold:
high += 1
print(f"{high} pairs of features out of {int(n*(n+1) / 2)} have correlation exceeding threshold={threshold}")
too_low = 0
percent_variance = 0.2 # threshold=20%
n = dfMediaCombined.shape[1]
features = []
for i in range(0, n):
col = dfMediaCombined.iloc[:, i]
stddev = np.std(col)
if stddev < (percent_variance * np.mean(col)):
too_low += 1
features.append(dfMediaCombined.columns.values[i])
print(f"{too_low} features have insufficient variability")
features
```
## saturation and carryover
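For orientation, the two transforms implemented below are (as coded, not an extension of the model): a saturation curve $f(x) = 1 - e^{-a x}$ applied to the transformed spend, and a geometric carryover (adstock) with strength $s$ and window $L$ (21 days in the code),

$$\text{carryover}(x)_t = \sum_{i=0}^{L-1} s^{\,i}\, x_{t-i}.$$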
```
def saturate(x, a):
"""
arbitrary saturation curve, parameters of this function must define saturation curve
"""
return 1 - tt.exp(-a*x)
def carryover(x, strength, length=21):
"""
same function as specified in google whitepaper
usually use poission random variable for length
"""
w = tt.as_tensor_variable(
[tt.power(strength, i) for i in range(length)]
)
x_lags = tt.stack(
[tt.concatenate([
tt.zeros(i),
x[:x.shape[0]-i]
]) for i in range(length)]
)
return tt.dot(w, x_lags)
def show(chart):
display(SVG(chart.render(disable_xml_declaration=True)))
class BayesianMixModel:
def __init__(self, country, target, metric=mape):
"""
        country: (str) name of the market being modelled
        target: (str) column name of the response variable
        metric: scoring function used by score() (defaults to MAPE)
"""
self.country = country
self.target = target
self.metric = metric
def fit(self, X, y):
"""
        Trains the model.
X: channel media cost information
y: response variable
"""
self.X = X
self.y = y
with pm.Model() as mmm:
channel_contributions = []
data = pm.Data("data", self.X)
for i, channel in enumerate(self.X.columns.values):
coef = pm.Exponential(f'coef_{channel}', lam=0.0001)
sat = pm.Exponential(f'sat_{channel}', lam=1)
car = pm.Beta(f'car_{channel}', alpha=2, beta=2)
channel_data = data.get_value()[:, i]
channel_contribution = pm.Deterministic(
f'contribution_{channel}',
coef * saturate(
carryover(
channel_data,
car
),
sat
)
)
channel_contributions.append(channel_contribution)
base = pm.Exponential('base', lam=0.0001)
noise = pm.Exponential('noise', lam=0.0001)
sales = pm.Normal(
'sales',
mu=sum(channel_contributions) + base,
sigma=noise,
observed=y
)
trace = pm.sample(return_inferencedata=True, tune=3000)
self.mmm = mmm
self.trace = trace
def predict(self, X):
"""
X: DataFrame
"""
pm.set_data({"data" : X}, model=self.mmm)
ppc_test = pm.sample_posterior_predictive(self.trace, model=self.mmm, samples=1000)
p_test_pred = ppc_test["sales"].mean(axis=0)
return p_test_pred
def score(self, X, y):
"""
X: DataFrame
y: Series
"""
if self.metric:
return self.metric(self.predict(X), y)
else:
return mape(self.predict(X), y)
def lineplot(self):
"""
plots actual vs fitted time series on entire training set
"""
means = self.predict(self.X)
line_chart = pygal.Line(fill=False, height=500, width=1000, title="Model Fit Time Series", x_title="Day",
y_title=f"{self.target}", explicit_size=True, show_legend=True, legend_at_bottom=False)
line_chart.add('TRUE', self.y.values)
line_chart.add("PREDICTION", means)
show(line_chart)
def scatterplot(self):
"""
        scatter plot of predicted vs actual values on the entire training set
"""
scatterplot = pygal.XY(print_values=False, stroke=False, fill=False, height=500, width=1000, title="Model Predictions vs True Observations", x_title="actual",
y_title="predicted", explicit_size=True, show_legend=True, legend_at_bottom=True)
x = self.y.values
y = self.predict(self.X)
scatterplot.add("data", [(x[i], y[i]) for i in range(len(x))])
g = max(max(x), max(y))
scatterplot.add("true = pred", [(0,0), (g, g)], stroke=True)
show(scatterplot)
from sklearn.model_selection import train_test_split
X = dfMediaCombined.drop(columns=["orders"])
y = dfMediaCombined.orders
xtrain, xval, ytrain, yval = train_test_split(X,y, test_size=0.2, shuffle=False)
model = BayesianMixModel(country="USA", target="orders")
model.fit(xtrain, ytrain)
model.lineplot()
model.scatterplot()
pred = model.predict(xval)
score = model.score(xval, yval)
```
```
from google.colab import drive
drive.mount('/content/drive')
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from matplotlib import pyplot as plt
import copy
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
# testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# foreground_classes = {'plane', 'car', 'bird'}
# background_classes = {'cat', 'deer', 'dog', 'frog', 'horse','ship', 'truck'}
# fg1,fg2,fg3 = 0,1,2
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256,shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=256,shuffle=False)
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0)
self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.conv7 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.conv8 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.conv9 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.conv10 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.batch_norm1 = nn.BatchNorm2d(32, track_running_stats = False)
self.batch_norm2 = nn.BatchNorm2d(128, track_running_stats = False)
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.1)
self.fc1 = nn.Linear(128,64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.batch_norm1(x))
x = (F.relu(self.conv2(x)))
x = self.pool(x)
x = self.conv3(x)
x = F.relu(self.batch_norm2(x))
x = (F.relu(self.conv4(x)))
x = self.pool(x)
x = self.dropout1(x)
x = self.conv5(x)
x = F.relu(self.batch_norm2(x))
x = (F.relu(self.conv6(x)))
x = self.pool(x)
x = self.conv7(x)
x = F.relu(self.batch_norm2(x))
x = self.conv8(x)
x = F.relu(self.batch_norm2(x))
x = self.conv9(x)
x = F.relu(self.batch_norm2(x))
x = self.conv10(x)
x = F.relu(self.batch_norm2(x))
x = x.view(x.size(0), -1)
x = self.dropout2(x)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.dropout2(x)
x = self.fc3(x)
return x
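# Architecture summary for a 32x32 CIFAR-10 input:
#   conv1-conv2 (no padding) + pool -> 14x14, conv3-conv4 + pool -> 5x5,
#   conv5 -> 3x3, conv6 (padded) + pool -> 1x1, conv7-conv10 keep 1x1x128,
#   so the flattened vector fed to fc1 has exactly 128 features.
# batch_norm2 is a single module shared by every 128-channel conv block, and
# both batch norms use track_running_stats=False (batch statistics only).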
cnn_net = CNN()#.double()
cnn_net = cnn_net.to("cuda")
print(cnn_net)
for i,j in cnn_net.state_dict().items():
print(i)
for i,j in cnn_net.state_dict().items():
if i == 'batch_norm1.weight':
print(i,j)
cnn_net.load_state_dict(torch.load("/content/drive/My Drive/Research/train_begining_layers_vs_last_layers/"+"cnn_net_10layer"+".pt"))
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %d %%' % (total, 100 * correct / total))
print(total,correct)
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
print(total,correct)
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(labels.size(0)):  # count every image in the batch (batch size is 256 here, not 4)
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
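# --- Experiment: retrain only the early layers -------------------------------
# The first five conv layers are replaced below with freshly initialised ones,
# while the last five conv layers and the three fully-connected layers keep
# their pretrained weights and are frozen (requires_grad = False). Only
# conv1-conv5 (plus the batch-norm parameters) are therefore updated by the
# training loop further down; the evaluation right after the freeze shows how
# the network performs with random early layers before retraining starts.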
cnn_net.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0).to("cuda")
cnn_net.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0).to("cuda")
cnn_net.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0).to("cuda")
cnn_net.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0).to("cuda")
cnn_net.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0).to("cuda")
cnn_net.conv6.weight.requires_grad = False
cnn_net.conv6.bias.requires_grad = False
cnn_net.conv7.weight.requires_grad = False
cnn_net.conv7.bias.requires_grad = False
cnn_net.conv8.weight.requires_grad = False
cnn_net.conv8.bias.requires_grad = False
cnn_net.conv9.weight.requires_grad = False
cnn_net.conv9.bias.requires_grad = False
cnn_net.conv10.weight.requires_grad = False
cnn_net.conv10.bias.requires_grad = False
cnn_net.fc1.weight.requires_grad = False
cnn_net.fc1.bias.requires_grad = False
cnn_net.fc2.weight.requires_grad = False
cnn_net.fc2.bias.requires_grad = False
cnn_net.fc3.weight.requires_grad = False
cnn_net.fc3.bias.requires_grad = False
for param in cnn_net.parameters():
print(param.requires_grad)
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %d %%' % (total, 100 * correct / total))
print(total,correct)
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
print(total,correct)
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(labels.size(0)):  # count every image in the batch (batch size is 256 here, not 4)
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
import torch.optim as optim
criterion_cnn = nn.CrossEntropyLoss()
optimizer_cnn = optim.SGD(cnn_net.parameters(), lr=0.01, momentum=0.9)
acti = []
loss_curi = []
epochs = 300
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_cnn.zero_grad()
# forward + backward + optimize
outputs = cnn_net(inputs)
loss = criterion_cnn(outputs, labels)
loss.backward()
optimizer_cnn.step()
# print statistics
running_loss += loss.item()
mini_batch = 50
if i % mini_batch == mini_batch-1: # print every 50 mini-batches
print('[%d, %5d] loss: %.3f' %(epoch + 1, i + 1, running_loss / mini_batch))
ep_lossi.append(running_loss/mini_batch) # loss per minibatch
running_loss = 0.0
if(np.mean(ep_lossi) <= 0.01):
break;
loss_curi.append(np.mean(ep_lossi)) #loss per epoch
print('Finished Training')
torch.save(cnn_net.state_dict(),"/content/drive/My Drive/Research/train_begining_layers_vs_last_layers/weights"+"CIFAR10_last5layer_fixed_cnn10layer"+".pt")
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %d %%' % (total, 100 * correct / total))
print(total,correct)
correct = 0
total = 0
out = []
pred = []
cnn_net.eval()
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
print(total,correct)
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(labels.size(0)):  # count every image in the batch (batch size is 256 here, not 4)
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
```
# **Lab: Unstructured Data**
## Exercise 1: MNIST
In this exercise, we will build a Convolutional Neural Network with PyTorch for recognising handwritten digits. We will be working on the MNIST dataset:
https://pytorch.org/vision/0.8/datasets.html#mnist
The steps are:
1. Setup Repository
2. Load Dataset
3. Prepare Data
4. Define Architecture
5. Train Model
6. Push Changes
### 1. Setup Repository
**[1.1]** Go to a folder of your choice on your computer (where you store projects)
```
# Placeholder for student's code (1 command line)
# Task: Go to a folder of your choice on your computer (where you store projects)
# Solution
cd ~/Projects/
```
**[1.2]** Copy the cookiecutter data science template
```
# Placeholder for student's code (1 command line)
# Task: Copy the cookiecutter data science template
# Solution
cookiecutter -c v1 https://github.com/drivendata/cookiecutter-data-science
```
Follow the prompt (name the project and repo adv_dsi_lab_6)
**[1.3]** Go inside the created folder `adv_dsi_lab_6`
```
# Placeholder for student's code (1 command line)
# Task: Go inside the created folder adv_dsi_lab_6
# Solution
cd adv_dsi_lab_6
```
**[1.4]** Create a file called `Dockerfile` and add the following content:
`FROM jupyter/scipy-notebook:0ce64578df46`
`RUN pip install torch==1.9.0+cpu torchvision==0.10.0+cpu torchtext==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html`
`ENV PYTHONPATH "${PYTHONPATH}:/home/jovyan/work"`
`RUN echo "export PYTHONPATH=/home/jovyan/work" >> ~/.bashrc`
`WORKDIR /home/jovyan/work`
```
# Placeholder for student's code (1 command line)
# Task: Create a file called Dockerfile
# Solution
vi Dockerfile
```
We will create our own Docker image based on the official jupyter/scipy-notebook.
**[1.5]** Build the image from this Dockerfile
```
docker build -t pytorch-notebook:latest .
```
Syntax: docker build [OPTIONS] PATH
Options:
`-t: Name and optionally a tag in the 'name:tag' format`
Documentation: https://docs.docker.com/engine/reference/commandline/build/
**[1.6]** Run the built image
```
docker run -dit --rm --name adv_dsi_lab_6 -p 8888:8888 -e JUPYTER_ENABLE_LAB=yes -v ~/Projects/adv_dsi/adv_dsi_lab_6:/home/jovyan/work -v ~/Projects/adv_dsi/src:/home/jovyan/work/src pytorch-notebook:latest
```
**[1.7]** Display last 50 lines of logs
```
docker logs --tail 50 adv_dsi_lab_6
```
Copy the url displayed and paste it to a browser in order to launch Jupyter Lab
**[1.8]** Initialise the repo
```
# Placeholder for student's code (1 command line)
# Task: Initialise the repo
# Solution
git init
```
**[1.9]** Log in to Github with your account (https://github.com/) and create a public repo with the name `adv_dsi_lab_6`
**[1.10]** In your local repo `adv_dsi_lab_6`, link it with Github (replace the url with your username)
```
# Placeholder for student's code (1 command line)
# Task: Link repo with Github
# Solution
git remote add origin git@github.com:<username>/adv_dsi_lab_6.git
```
**[1.11]** Add your changes to the git staging area and commit them
```
# Placeholder for student's code (2 command lines)
# Task: Add your changes to the git staging area and commit them
# Solution
git add .
git commit -m "init"
```
**[1.12]** Push your master branch to origin
```
# Placeholder for student's code (1 command line)
# Task: Push your master branch to origin
# Solution
git push --set-upstream origin master
```
**[1.13]** Preventing push to `master` branch
```
# Placeholder for student's code (1 command line)
# Task: Preventing push to master branch
# Solution
git config branch.master.pushRemote no_push
```
**[1.14]** Create a new git branch called `pytorch_mnist`
```
# Placeholder for student's code (1 command line)
# Task: Create a new git branch called pytorch_mnist
# Solution
git checkout -b pytorch_mnist
```
**[1.15]** Navigate to the folder `notebooks` and create a new jupyter notebook called `1_pytorch_mnist.ipynb`
### 2. Load Dataset
**[2.1]** Import the torch and torchvision packages
```
# Placeholder for student's code (3 lines of Python code)
# Task: Import the torch and torchvision packages
#Solution
import torch
import torchvision
```
**[2.2]** Create a variable called `download` containing the value `True`
```
# Placeholder for student's code (1 line of Python code)
# Task: Create a variable called download containing the value True
#Solution
download = True
```
**[2.3]** Define a transformation pipeline that will convert the images into tensors and normalise them
```
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.1307,), (0.3081,))
])
```
**[2.4]** Instantiate a torchvision.datasets.MNIST() for the training set, download it into the `/data/raw/` folder and perform the transformation defined earlier. Save the results in a variable called `train_data`
```
# Placeholder for student's code (1 line of Python code)
# Task: Instantiate a torchvision.datasets.MNIST() for the training set, download it into the /data/raw/ folder and perform the transformation defined earlier. Save the results in a variable called train_data
# Solution
train_data = torchvision.datasets.MNIST('../data/raw/', train=True, download=download, transform=transform)
```
**[2.5]** Instantiate a torchvision.datasets.MNIST() for the testing set, download it into the `/data/raw/` folder and perform the transformation defined earlier. Save the results in a variable called `test_data`
```
# Placeholder for student's code (1 line of Python code)
# Task: Instantiate a torchvision.datasets.MNIST() for the testing set, download it into the /data/raw/ folder and perform the transformation defined earlier. Save the results in a variable called test_data
# Solution
test_data = torchvision.datasets.MNIST('../data/raw/', train=False, download=download, transform=transform)
```
### 3. Prepare Data
**[3.1]** Create 2 variables called `batch_size_train` and `batch_size_test` that will respectively take the values 64 and 10
```
# Placeholder for student's code (2 lines of Python code)
# Task: Create 2 variables called batch_size_train and batch_size_test that will respectively take the values 64 and 10
# Solution
batch_size_train = 64
batch_size_test = 10
```
**[3.2]** Import DataLoader from torch.utils.data
```
# Placeholder for student's code (1 line of Python code)
# Task: Import DataLoader from torch.utils.data
# Solution:
from torch.utils.data import DataLoader
```
**[3.3]** Instantiate a `torch.utils.data.DataLoader()` on the training data, with the relevant batch size and with shuffle enabled. Save the results in a variable called `train_loader`
```
# Placeholder for student's code (1 line of Python code)
# Task: Instantiate a torch.utils.data.DataLoader() on the training data, with the relevant batch size and with shuffle enabled. Save the results in a variable called train_loader
# Solution
train_loader = DataLoader(train_data, batch_size=batch_size_train, shuffle=True)
```
**[3.4]** Instantiate a `torch.utils.data.DataLoader()` on the testing data, with the relevant batch size and with shuffle enabled. Save the results in a variable called `test_loader`
```
# Placeholder for student's code (1 line of Python code)
# Task: Instantiate a torch.utils.data.DataLoader() on the testing data, with the relevant batch size and with shuffle enabled. Save the results in a variable called test_loader
# Solution
test_loader = DataLoader(test_data, batch_size=batch_size_test, shuffle=True)
```
**[3.5]** Create a generator on the test data loader and extract the first batch
```
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
```
**[3.6]** Print the dimensions of the first image
```
# Placeholder for student's code (1 line of Python code)
# Task: Print the dimensions of the first image
# Solution
example_data.shape
```
**[3.7]** Import matplotlib.pyplot as plt
```
# Placeholder for student's code (1 line of Python code)
# Task: Import matplotlib.pyplot as plt
# Solution
import matplotlib.pyplot as plt
```
**[3.8]** Print the first image with its corresponding target
```
plt.imshow(example_data[0][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[0]))
```
### 4. Define Architecture
**[4.1]** Import torch.nn as nn, torch.nn.functional as F and torch.optim as optim
```
# Placeholder for student's code (3 lines of Python code)
# Task: Import torch.nn as nn, torch.nn.functional as F and torch.optim as optim
# Solution
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
```
**[4.2]** Create a class called `PytorchCNN` that inherits from `nn.Module` with:
- attributes:
- `conv1`: convolutional layer with 128 filters of size 3
- `conv2`: convolutional layer with 64 filters of size 3
- `fc1`: fully-connected layer with 128 neurons
- `fc2`: fully-connected layer with 10 neurons
- `softmax`: Softmax activation function
- methods:
- `forward()` with `inputs` as input parameter, which sequentially applies the 2 convolutional layers (each followed by relu and a max pool of size 2) and then the 2 fully-connected layers, with relu and softmax respectively
```
# Placeholder for student's code (multiple lines of Python code)
# Task: Create a class called PytorchCNN that inherits from nn.Module, as described above
# Solution:
class PytorchCNN(nn.Module):
def __init__(self):
super(PytorchCNN, self).__init__()
self.conv1 = nn.Conv2d(1, 128, kernel_size=3)
self.conv2 = nn.Conv2d(128, 64, kernel_size=3)
self.fc1 = nn.Linear(1600, 128)
self.fc2 = nn.Linear(128, 10)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), 2)
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = torch.flatten(x, 1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
```
**[4.3]** Instantiate a PytorchCNN and save it into a variable called `model`
```
# Placeholder for student's code (1 line of Python code)
# Task: Instantiate a PytorchCNN and save it into a variable called model
# Solution:
model = PytorchCNN()
```
**[4.4]** Import the `get_device` function from src.models.pytorch
```
# Placeholder for student's code (1 line of Python code)
# Task: Import the get_device function from src.models.pytorch
# Solution:
from src.models.pytorch import get_device
```
**[4.5]** Get the available device and set the model to use it
```
# Placeholder for student's code (2 lines of Python code)
# Task: Get the available device and set the model to use it
# Solution:
device = get_device()
model.to(device)
```
### 5. Train the model
**[5.1]** Import train_classification and test_classification from src.models.pytorch
```
# Placeholder for student's code (1 line of Python code)
# Task: Import train_classification and test_classification from src.models.pytorch
# Solution:
from src.models.pytorch import train_classification, test_classification
```
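The two helpers imported above (together with `get_device` from section 4) live in `src/models/pytorch.py`, a module written in earlier labs that is not reproduced in this notebook. If that file is missing on your side, a minimal sketch along the following lines should behave equivalently; the exact signatures and return values are inferred from how the functions are called below, so treat this as an assumption and adapt it to your own `src` module.
```
import torch
from torch.utils.data import DataLoader

def get_device():
    """Return the GPU device if one is available, otherwise the CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def train_classification(train_data, model, criterion, optimizer, batch_size, device):
    """Run one training epoch and return (mean loss, accuracy)."""
    loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    model.train()
    total_loss, correct, n = 0.0, 0, 0
    for X, y in loader:
        X, y = X.to(device), y.to(device)
        optimizer.zero_grad()
        output = model(X)
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * y.size(0)
        correct += (output.argmax(1) == y).sum().item()
        n += y.size(0)
    return total_loss / n, correct / n

def test_classification(test_data, model, criterion, batch_size, device):
    """Run one evaluation pass and return (mean loss, accuracy)."""
    loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
    model.eval()
    total_loss, correct, n = 0.0, 0, 0
    with torch.no_grad():
        for X, y in loader:
            X, y = X.to(device), y.to(device)
            output = model(X)
            total_loss += criterion(output, y).item() * y.size(0)
            correct += (output.argmax(1) == y).sum().item()
            n += y.size(0)
    return total_loss / n, correct / n
```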
**[5.2]** Instantiate a `nn.CrossEntropyLoss()` and save it into a variable called `criterion`
```
# Placeholder for student's code (1 line of Python code)
# Task: Instantiate a nn.CrossEntropyLoss() and save it into a variable called criterion
# Solution:
criterion = nn.CrossEntropyLoss()
```
**[5.3]** Instantiate a torch.optim.Adam() optimizer with the model's parameters and 0.001 as learning rate and save it into a variable called optimizer
```
# Placeholder for student's code (1 line of Python code)
# Task: Instantiate a torch.optim.Adam() optimizer with the model's parameters and 0.001 as learning rate and save it into a variable called optimizer
# Solution:
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
```
**[5.4]** Create 2 variables called `N_EPOCHS` and `BATCH_SIZE` that will respectively take the values 50 and 32
```
# Placeholder for student's code (2 lines of Python code)
# Task: Create 2 variables called N_EPOCHS and BATCH_SIZE that will respectively take the values 50 and 32
# Solution:
N_EPOCHS = 5   # the task above says 50; a smaller value is used here so the run finishes quickly
BATCH_SIZE = 32
```
**[5.5]** Create a for loop that iterates through the specified number of epochs, trains the model on the training set, assesses performance on the validation set, and prints both sets of scores
```
# Placeholder for student's code (multiple lines of Python code)
# Task: Create a for loop that iterates through the specified number of epochs, trains the model on the training set, assesses performance on the validation set, and prints both sets of scores
# Solution:
for epoch in range(N_EPOCHS):
train_loss, train_acc = train_classification(train_data, model=model, criterion=criterion, optimizer=optimizer, batch_size=BATCH_SIZE, device=device)
valid_loss, valid_acc = test_classification(test_data, model=model, criterion=criterion, batch_size=BATCH_SIZE, device=device)
print(f'Epoch: {epoch}')
print(f'\t(train)\t|\tLoss: {train_loss:.4f}\t|\tAcc: {train_acc * 100:.1f}%')
print(f'\t(valid)\t|\tLoss: {valid_loss:.4f}\t|\tAcc: {valid_acc * 100:.1f}%')
```
**[5.6]** Save the model into the `models` folder
```
# Placeholder for student's code (1 line of Python code)
# Task: Save the model into the models folder
# Solution:
torch.save(model, "../models/pytorch_mnist_cnn.pt")
```
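Note that `torch.save(model, ...)` pickles the whole module object. A common, more portable alternative (optional here) is to save only the parameters and re-instantiate the class when loading:
```
# save only the learned parameters (alternative to pickling the full module)
torch.save(model.state_dict(), "../models/pytorch_mnist_cnn_state.pt")

# later: rebuild the architecture and load the weights back in
# model = PytorchCNN()
# model.load_state_dict(torch.load("../models/pytorch_mnist_cnn_state.pt"))
```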
### 6. Push changes
**[6.1]** Add your changes to the git staging area
```
# Placeholder for student's code (1 command line)
# Task: Add your changes to the git staging area
# Solution:
git add .
```
**[6.2]** Create the snapshot of your repository and add a description
```
# Placeholder for student's code (1 command line)
# Task: Create the snapshot of your repository and add a description
# Solution:
git commit -m "pytorch cnn mnist"
```
**[6.3]** Push your snapshot to Github
```
# Placeholder for student's code (1 command line)
# Task: Push your snapshot to Github
# Solution:
git push
```
**[6.4]** Check out to the master branch
```
# Placeholder for student's code (1 command line)
# Task: Check out to the master branch
# Solution:
git checkout master
```
**[6.5]** Pull the latest updates
```
# Placeholder for student's code (1 command line)
# Task: Pull the latest updates
# Solution:
git pull
```
**[6.6]** Check out to the `pytorch_mnist` branch
```
# Placeholder for student's code (1 command line)
# Task: Check out to the pytorch_mnist branch
# Solution:
git checkout pytorch_mnist
```
**[6.7]** Merge the `master` branch and push your changes
```
# Placeholder for student's code (2 command lines)
# Task: Merge the master branch and push your changes
# Solution:
git merge master
git push
```
**[6.8]** Go to Github and merge the branch after reviewing the code and fixing any conflict
**[6.9]** Stop the Docker container
```
# Placeholder for student's code (1 command line)
# Task: Stop the Docker container
# Solution:
docker stop adv_dsi_lab_6
```
```
import numpy as np
import os
from astropy.units import Unit
import matplotlib.pyplot as plt
import sys
sys.path.insert(0,'../kl_tools/')
import likelihood
import intensity
import basis
import priors
import utils
import transformation as transform
%matplotlib inline
nx, ny = 30, 30
nmax = 20
true_pars = {
'g1': 0.25,
'g2': -0.1,
'theta_int': np.pi / 3,
'sini': 0.8,
'v0': 10.,
'vcirc': 200,
'rscale': 5,
}
# additional args needed for prior / likelihood evaluation
halpha = 656.28 # nm
R = 5000.
z = 0.3
pars = {
'Nx': 30, # pixels
'Ny': 30, # pixels
'true_flux': 1e5, # counts
'true_hlr': 5, # pixels
'v_unit': Unit('km / s'),
'r_unit': Unit('kpc'),
'z': z,
'spec_resolution': R,
# 'line_std': 0.17,
'line_std': halpha * (1.+z) / R, # emission line SED std; nm
'line_value': 656.28, # emission line rest-frame centre; nm
'line_unit': Unit('nm'),
'sed_start': 650,
'sed_end': 660,
'sed_resolution': 0.025,
'sed_unit': Unit('nm'),
'cov_sigma': 4, # pixel counts; dummy value
'bandpass_throughput': '.2',
'bandpass_unit': 'nm',
'bandpass_zp': 30,
'priors': {
'g1': priors.GaussPrior(0., 0.1),#, clip_sigmas=2),
'g2': priors.GaussPrior(0., 0.1),#, clip_sigmas=2),
'theta_int': priors.UniformPrior(0., np.pi),
# 'theta_int': priors.UniformPrior(np.pi/3, np.pi),
'sini': priors.UniformPrior(0., 1.),
# 'sini': priors.GaussPrior()
'v0': priors.UniformPrior(0, 20),
'vcirc': priors.GaussPrior(200, 10, clip_sigmas=2),
# 'vcirc': priors.UniformPrior(190, 210),
'rscale': priors.UniformPrior(0, 10),
},
'intensity': {
# For this test, use truth info
'type': 'inclined_exp',
'flux': 1e5, # counts
'hlr': 5, # pixels
# 'type': 'basis',
# 'basis_type': 'shapelets',
# 'basis_kwargs': {
# 'Nmax': 10,
# }
},
# 'psf': gs.Gaussian(fwhm=3), # fwhm in pixels
'use_numba': False,
}
# li, le, dl = 655.5, 657, 0.1
li, le, dl = 655.8, 656.8, 0.1
# li, le, dl = 655.9, 656.8, .1
lambdas = [(l, l+dl) for l in np.arange(li, le, dl)]
Nx, Ny = 30, 30
Nspec = len(lambdas)
shape = (Nx, Ny, Nspec)
print('Setting up test datacube and true Halpha image')
datacube, sed, vmap, true_im = likelihood.setup_likelihood_test(
true_pars, pars, shape, lambdas
)
# imap = intensity.build_intensity_map('
plt.imshow(true_im, origin='lower')
plt.colorbar()
plt.gcf().set_size_inches(7,7)
imap = intensity.BasisIntensityMap(datacube, basis_kwargs={'Nmax':nmax})
basis_im = imap.render()
data_im = np.sum(datacube._data, axis=2)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(12, 6))
titles = ['data', 'model', 'residual']
images = [data_im, basis_im, data_im-basis_im]
for i in range(3):
ax = axes[i]
im = images[i]
ishow = ax.imshow(im, origin='lower')
plt.colorbar(ishow, ax=ax)
ax.set_title(titles[i])
bb = imap.get_basis()
print(vars(basis).keys())
# evaluate the first basis function (index 0) on a uniform grid
func, func_args = bb.get_basis_func(0)
X, Y = utils.build_map_grid(nx, ny)
args = [X, Y, *func_args]
bim = func(*args)
plt.imshow(bim, origin='lower')
plt.colorbar()
plt.title('Basis function 00')
```
## Transform the coordinates:
```
planes = ['disk', 'gal', 'source', 'obs']
s = 12
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(s,s), sharex=True, sharey=True)
for i, plane in enumerate(planes):
# x, y = transform._disk2gal(true_pars, prev_x, prev_y)
x, y = transform.transform_coords(X, Y, 'disk', plane, true_pars)
args = [x, y, *func_args]
bim = func(*args)
ax = axes[i//2, i%2]
mesh = ax.pcolormesh(x, y, bim)
plt.colorbar(mesh, ax=ax)
ax.set_title(f'{plane} transform of basis function 00')
N = 3
func, func_args = bb.get_basis_func(N)
planes = ['disk', 'gal', 'source', 'obs']
# uniform grid for all planes
X, Y = utils.build_map_grid(nx, ny)
s = 12
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(s,s-1), sharex=True, sharey=True)
for i, plane in enumerate(planes):
# x, y = transform._disk2gal(true_pars, prev_x, prev_y)
xp, yp = transform.transform_coords(X, Y, plane, 'disk', true_pars)
args = [xp, yp, *func_args]
bim = func(*args)
ax = axes[i//2, i%2]
mesh = ax.pcolormesh(X, Y, bim)
plt.colorbar(mesh, ax=ax)
Nx, Ny = func_args[1], func_args[2]
ax.set_title(f'{plane} transform of basis function ({Nx},{Ny})')
```
## Transform the basis funcs:
```
planes = ['disk', 'gal', 'source', 'obs']
X, Y = utils.build_map_grid(nx, ny)
s = 12
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(s,s), sharex=True, sharey=True)
for i, plane in enumerate(planes):
# x, y = transform._disk2gal(true_pars, prev_x, prev_y)
x, y = transform.transform_coords(X, Y, 'obs', plane, true_pars)
args = [x, y, *func_args]
bim = func(*args)
ax = axes[i//2, i%2]
mesh = ax.pcolormesh(x, y, bim)
plt.colorbar(mesh, ax=ax)
ax.set_title(f'{plane} transform of basis function 00')
# b_disk = func(*[X, Y, *func_args]).reshape(nx*ny)
# plt.plot(b_disk)
# plt.title('disk')
Xdisk, Ydisk = utils.build_map_grid(5, 5)
Bdisk = func(*[Xdisk, Ydisk, *func_args])
plt.pcolormesh(Xdisk, Ydisk, Bdisk)
plt.gcf().set_size_inches(4,4)
print(Xdisk)
plt.imshow(Bdisk, origin='lower')
```
# Singly linked lists
```
class Cellule:
def __init__(self,info,suivant):
self.suivant=suivant
self.info=info
class Liste:
def __init__(self):
self.premier=None
def __str__(self):
ch=''
elt=self.premier
while elt:
ch+=str(elt.info)+" "
elt=elt.suivant
return "["+ch+"]"
def estVide(self):
return self.premier is None
def Vider(self):
while self.premier is not None:
self.premier = self.premier.suivant
def nbElement(self):
size = 0
pointeur = self.premier
while pointeur is not None:
pointeur = pointeur.suivant
size += 1
return size
def ajouterEnTete(self,infoElement):
if self.estVide():
self.premier = Cellule(infoElement, None)
else:
self.premier = Cellule(infoElement, self.premier)
def supprimerEnTete(self):
self.premier = self.premier.suivant
def traiter(self):
elt = self.premier
self.premier = self.premier.suivant
return elt.info
def ajouterEnQueue(self,infoElement):
if self.estVide():
self.ajouterEnTete(infoElement)
else:
pointeur = self.premier
while pointeur.suivant is not None:
pointeur = pointeur.suivant
pointeur.suivant = Cellule(infoElement, None)
def rechercherElement(self, e):
"""Retourne -1 si la liste est vide"""
index = 0
pointeur = self.premier
while pointeur is not None and pointeur.info != e:
pointeur = pointeur.suivant
index += 1
if pointeur is not None:
return index
return -1
def insererElement(self, i, e):
if i == 0:
self.ajouterEnTete(e)
else:
index = 0
arret = i - 1
pointeur = self.premier
while index < arret and pointeur.suivant is not None:
index += 1
pointeur = pointeur.suivant
assert index == arret, "index out of range"
if index == arret:
pointeur.suivant = Cellule(e, pointeur.suivant)
L = Liste()
L.ajouterEnQueue(4)
print(L)
L.ajouterEnTete(5)
print(L)
L.ajouterEnQueue(6)
print(L)
L.Vider()
print(L)
for k in range(10):
L.ajouterEnTete(k)
print(L)
L.insererElement(0, 10)
print(L)
L.insererElement(11, 11)
print(L)
print(L.rechercherElement(1))
print(L.rechercherElement(14))
print(L.traiter())
print(L)
L.supprimerEnTete()
print(L)
print(L.nbElement())
```
```
from konlpy.tag import Twitter
import TextrankPmi
# Define stopwords -> used to remove words that the morphological analyzer tags incorrectly. The better the stopword list, the better the performance.
stopword = set([('천이', 'Noun'),('입니', 'Adjective'),('있는', 'Adjective'),('어서', 'Noun'),('만큼', 'Noun'),('면서', 'Noun')])
sent = '제발 포스터만 보고 촌스럽다고 생각하면서 그냥 넘기지 않길 바란다. 포스터와 달리 몹시 세련된 로맨틱 코미디 니까!!간만에 제대로 된 로맨틱 코미디를 보았다. 영화를 보는 내내 웃었고 조마조마했고 가슴이 아팠고 고개를 끄덕였다. 정말 한동안은 로맨틱 코미디 영화를 추천해달라고 하면 빅식을 꼭 보라고 망설이지 않고 추천하게 될 것 같다. 아직 영화를 보지 않으셨다면 이쯤에서 읽기를 멈추고 일단 영화를 보라!! 꼭 봐라! 두번 봐라! (이하 스포있음). 뭐가 그렇게 좋았냐면.. 여느 로맨틱 코미디와 달리 신선한 점이 많았다!! 하나, 이 로맨틱 코미디 영화 주인공의 직업이 코미디언이다. 어쩐지 잘못 읽은 것 같은 느낌이라 다시 이전 문장을 읽고 싶겠지만 그럴필요가 없다. 말 그대로니까. 이러한 설정은 엄청난 장점이 있다. 배우가 생활에서 치는 드립도 억지스럽게 느껴지지 않는다. 왜냐면 얘는 원래 이런 애니까! 게다가 그 드립들도 정말 신선햌ㅋㅋ그리고 극 중 코미디 공연 중에 나오는 코미디까지 더 해지니 그야말로 금상첨화!둘, 무슬림 이민자. 파키스탄 무슬림의 결혼 문화도 상당히 신선했다. 부모들끼리 합의를 하고 남자 집에 여자를 보내 남자가 여자를 고르게 한다니!! 대박... 쥐뿔도 없는 남주와 결혼하겠ㅁㅁ다고 여자들이 줄을 서는 것을 보며...쫌 부러웠닼ㅋㅋ 하지만 그 좋은 걸 걷어차버리면서 기성세대와 갈등을 겪는 주인공... 또한 이 무슬림 이민자라는 설정은 9.11 이후 미국에서 무슬림이 겪는 일들을 그대로 보여준다. 지금 제주에서는 예멘 난민들 받지 말아야 한다고 난리인데... 딱히 사고를 겪지도 않은 우리도 난리인판에 미국에서 그들이 겪었을 고초는 오죽했으랴... 이렇게 파키스탄 무슬림이라는 하나의 설정은 여러가지 갈등을 보여주는데, 여기에도 웃음 포인트를 넣어주기에 즐겁게 볼 수 있다.셋, 희귀병. 그냥 희귀병이면 뻔하지. 근데 여기선 처음부터 끝까지 뻔하지가 않다. 헤어졌던 남주가 그 둘의 갈등이 봉합되지 않은채로 여주를 혼수상태에 빠지게 하고, 영문도 모른채 혼수상태에 들어간 여주의 건강상태는 더욱 악화되고, 남주에 대해 감정이 좋지 않은 여주의 가족들과 불편한 자리를 함께하게 되고, 남주는 자신의 사랑을 깨닫고, 남주의 증언 덕분에 여주의 병명을 파악할 수 있어 완치가 되지만, 아무것도 모른체 깨어난 여주는 여전히 남주에 대해 감정이 좋지 않기에 남주를 받아주지 않고... 캬.. 어느 것 하나 흔한 전개가 없다. 아프고 그 사이에 뜨겁게 사랑하고 죽고 슬퍼하는 신파형 로맨틱보다는 백배 천배 낫다. 넷, 이게 가장 큰 신선함인데, 이러한 신선한 설정들이 실화임 ㄷㄷㄷ 영화에 대해 아무것도 모르고 영화관에 들어가서 와..와.. 이 영화 괜찮다. 신선하다. 진짜 재밌다. 하면서 보다가 마지막에 엔딩크레딧이 올라가면서 실화라는 걸 알았을 때 내가 받은 그 충격이란;;; 연출, 스토리, 연기, ost ★극 배경 ☆종합 ★★★★☆ost 마저도 로맨틱 코메디의 느낌을 물씬 주는 곡이니 꼭 한 번 클릭해서 들어보길 추천한다. 이게 바로 로맨틴 코메디지. 암!'
# Review of the movie "The Big Sick" (1,513 characters)
sent
# Pick out the lowest-ranked ~10% of sentences and delete them
tr = TextrankPmi.TextRank()
tagger = Twitter()
tr.loadSents(TextrankPmi.RawSentence(sent),
lambda sent: filter(lambda x: len(x[0])>=2 and x[1] in ('Noun', 'Verb', 'Adjective'), tagger.pos(sent)))
tr.build()
ranks = tr.rank()
delete_sent=[]
for k in sorted(ranks, key=ranks.get, reverse=True)[:100]:
print("\t".join([str(k), str(ranks[k]), str(tr.dictCount[k])]))
wow = str(tr.dictCount[k])
delete_sent.append(wow)
cy = list(tr.dictCount.values())
delsent = delete_sent[-int(len(tr.dictCount) * 0.1):-1]
delsent.append(delete_sent[-1])
for i in range(0,len(delsent)):
sent = sent.replace(delsent[i],'')
# Extract keywords -> the window, coef, word POS filter, and overall word ratio can all be adjusted.
tr = TextrankPmi.TextRank(window=4, coef=1)
tr.load(TextrankPmi.RawTagger(sent),lambda w: w not in stopword and len(w[0])>=2 and w[1] in ('Noun'))
tr.build()
kw = tr.extract(0.3)
for k in sorted(kw, key=kw.get, reverse=True):
print("%s\t%g" % (k, kw[k]))
# Extract just the words
wow = []
for k in kw.keys():
if len(k) == 2:
ee = k[0][0] +' '+k[1][0]
wow.append(ee)
else:
ee = k[0][0]
wow.append(ee)
# Keep a fraction (7% here) of the distinct word types of the selected POS as keywords; adjust the ratio as desired
wow = wow[0:int(len(tr.dictCount)*0.07)]
wow
# For each keyword, select the highest-TR sentence that contains it
finish = []
for i in range(0,len(wow)):
for h in sorted(range(-(len(cy)),0),reverse=True):
if wow[i] in cy[h]:
from ckonlpy.tag import Twitter
nlp = Twitter()
f = open('형태소 보완.txt')
dd = f.read()
a = dd.split('\n')
nlp.add_dictionary(a,'Noun')
try:
tx = nlp.pos(cy[h])
tx2 = nlp.pos(wow[i])[0]
tx3 = tx.index(tx2)
if tx3 == 0:
j = wow[i]
else:
hoho2 = []
for y in range(0,len(tx[:tx3])):
if 'Noun' in tx[:tx3][y] and len(tx[:tx3][y][0]) >= 3:
hoho2.append(y)
if len(hoho2)>=3:
j = tx[:tx3][hoho2[-3]][0]
elif len(hoho2) ==2:
j = tx[:tx3][hoho2[-2]][0]
elif len(hoho2) == 1:
j = tx[:tx3][hoho2[0]][0]
elif len(hoho2) == 0:
j = wow[i]
aa = cy[h][cy[h].index(j):]
finish.append(aa)
except:
pass
break
# Handle compound nouns written without spaces rather than with them
how = []
for i in range(0,len(wow)):
if ' ' in wow[i]:
how.append(wow[i])
if len(how) != 0:
for i in range(0,len(how)):
how[i] = how[i].replace(' ','')
for i in range(0,len(how)):
for h in sorted(range(-(len(cy)),0),reverse=True):
if how[i] in cy[h]:
from ckonlpy.tag import Twitter
nlp = Twitter()
f = open('형태소 보완.txt')
dd = f.read()
a = dd.split('\n')
nlp.add_dictionary(a,'Noun')
tx = nlp.pos(cy[h])
tx2 = nlp.pos(how[i])[0]
tx3 = tx.index(tx2)
if tx3 == 0:
j = how[i]
else:
hoho2 = []
for y in range(0,len(tx[:tx3])):
if 'Noun' in tx[:tx3][y] and len(tx[:tx3][y][0]) >= 3:
hoho2.append(y)
if len(hoho2)>=3:
j = tx[:tx3][hoho2[-3]][0]
elif len(hoho2) == 2:
j = tx[:tx3][hoho2[-2]][0]
elif len(hoho2) == 1:
j = tx[:tx3][hoho2[0]][0]
elif len(hoho2) == 0:
j = how[i]
aa = cy[h][cy[h].index(j):]
finish.append(aa)
break
# Arrange the selected sentences in their original (chronological) order
summary = list(set(finish))
hello = []
for i in range(0,len(summary)):
for h in range(0,len(cy)):
if summary[i][0:-3] in cy[h]:
hello.append(h)
break
while 1:
if len(hello) == len(summary):
break
elif len(hello) >= len(summary):
hello.pop()
elif len(hello) <= len(summary):
hello.append(50)
print(hello)
jj=[]
for i in range(0,len(summary)):
jj.append([summary[i],hello[i]])
ee = dict(jj)
items = ee.items()
summary2 = [item[0] for item in sorted(items, key=lambda x: x[1])]
## Code to remove duplicate sentences
for i in range(0,len(summary2)):
for h in range(0,len(summary2)):
if i != h:
if summary2[i] in summary2[h]:
summary2[i] = '제거'
good = True
while good:
try:
summary2.remove('제거')
except ValueError:
good = False
print(wow)
print(summary2)
```
|
github_jupyter
|
from konlpy.tag import Twitter
import TextrankPmi
# 불용어 정의 -> 형태소 분석기가 잘못 분석하는 단어를 제거하기 위함입니다. 불용어의 정의가 잘될수록 성능이 올라갑니다.
stopword = set([('천이', 'Noun'),('입니', 'Adjective'),('있는', 'Adjective'),('어서', 'Noun'),('만큼', 'Noun'),('면서', 'Noun')])
sent = '제발 포스터만 보고 촌스럽다고 생각하면서 그냥 넘기지 않길 바란다. 포스터와 달리 몹시 세련된 로맨틱 코미디 니까!!간만에 제대로 된 로맨틱 코미디를 보았다. 영화를 보는 내내 웃었고 조마조마했고 가슴이 아팠고 고개를 끄덕였다. 정말 한동안은 로맨틱 코미디 영화를 추천해달라고 하면 빅식을 꼭 보라고 망설이지 않고 추천하게 될 것 같다. 아직 영화를 보지 않으셨다면 이쯤에서 읽기를 멈추고 일단 영화를 보라!! 꼭 봐라! 두번 봐라! (이하 스포있음). 뭐가 그렇게 좋았냐면.. 여느 로맨틱 코미디와 달리 신선한 점이 많았다!! 하나, 이 로맨틱 코미디 영화 주인공의 직업이 코미디언이다. 어쩐지 잘못 읽은 것 같은 느낌이라 다시 이전 문장을 읽고 싶겠지만 그럴필요가 없다. 말 그대로니까. 이러한 설정은 엄청난 장점이 있다. 배우가 생활에서 치는 드립도 억지스럽게 느껴지지 않는다. 왜냐면 얘는 원래 이런 애니까! 게다가 그 드립들도 정말 신선햌ㅋㅋ그리고 극 중 코미디 공연 중에 나오는 코미디까지 더 해지니 그야말로 금상첨화!둘, 무슬림 이민자. 파키스탄 무슬림의 결혼 문화도 상당히 신선했다. 부모들끼리 합의를 하고 남자 집에 여자를 보내 남자가 여자를 고르게 한다니!! 대박... 쥐뿔도 없는 남주와 결혼하겠ㅁㅁ다고 여자들이 줄을 서는 것을 보며...쫌 부러웠닼ㅋㅋ 하지만 그 좋은 걸 걷어차버리면서 기성세대와 갈등을 겪는 주인공... 또한 이 무슬림 이민자라는 설정은 9.11 이후 미국에서 무슬림이 겪는 일들을 그대로 보여준다. 지금 제주에서는 예멘 난민들 받지 말아야 한다고 난리인데... 딱히 사고를 겪지도 않은 우리도 난리인판에 미국에서 그들이 겪었을 고초는 오죽했으랴... 이렇게 파키스탄 무슬림이라는 하나의 설정은 여러가지 갈등을 보여주는데, 여기에도 웃음 포인트를 넣어주기에 즐겁게 볼 수 있다.셋, 희귀병. 그냥 희귀병이면 뻔하지. 근데 여기선 처음부터 끝까지 뻔하지가 않다. 헤어졌던 남주가 그 둘의 갈등이 봉합되지 않은채로 여주를 혼수상태에 빠지게 하고, 영문도 모른채 혼수상태에 들어간 여주의 건강상태는 더욱 악화되고, 남주에 대해 감정이 좋지 않은 여주의 가족들과 불편한 자리를 함께하게 되고, 남주는 자신의 사랑을 깨닫고, 남주의 증언 덕분에 여주의 병명을 파악할 수 있어 완치가 되지만, 아무것도 모른체 깨어난 여주는 여전히 남주에 대해 감정이 좋지 않기에 남주를 받아주지 않고... 캬.. 어느 것 하나 흔한 전개가 없다. 아프고 그 사이에 뜨겁게 사랑하고 죽고 슬퍼하는 신파형 로맨틱보다는 백배 천배 낫다. 넷, 이게 가장 큰 신선함인데, 이러한 신선한 설정들이 실화임 ㄷㄷㄷ 영화에 대해 아무것도 모르고 영화관에 들어가서 와..와.. 이 영화 괜찮다. 신선하다. 진짜 재밌다. 하면서 보다가 마지막에 엔딩크레딧이 올라가면서 실화라는 걸 알았을 때 내가 받은 그 충격이란;;; 연출, 스토리, 연기, ost ★극 배경 ☆종합 ★★★★☆ost 마저도 로맨틱 코메디의 느낌을 물씬 주는 곡이니 꼭 한 번 클릭해서 들어보길 추천한다. 이게 바로 로맨틴 코메디지. 암!'
# 영화 빅식 리뷰(1513자)
sent
# 불필요한 문장 10% 뽑고 지우기
tr = TextrankPmi.TextRank()
tagger = Twitter()
tr.loadSents(TextrankPmi.RawSentence(sent),
lambda sent: filter(lambda x: len(x[0])>=2 and x[1] in ('Noun', 'Verb', 'Adjective'), tagger.pos(sent)))
tr.build()
ranks = tr.rank()
delete_sent=[]
for k in sorted(ranks, key=ranks.get, reverse=True)[:100]:
print("\t".join([str(k), str(ranks[k]), str(tr.dictCount[k])]))
wow = str(tr.dictCount[k])
delete_sent.append(wow)
cy = list(tr.dictCount.values())
delsent = delete_sent[-int(len(tr.dictCount) * 0.1):-1]
delsent.append(delete_sent[-1])
for i in range(0,len(delsent)):
sent = sent.replace(delsent[i],'')
# 핵심어 뽑기 -> window, coef, 단어의 품사, 전체 단어 비율을 조절할 수 있다.
tr = TextrankPmi.TextRank(window=4, coef=1)
tr.load(TextrankPmi.RawTagger(sent),lambda w: w not in stopword and len(w[0])>=2 and w[1] in ('Noun'))
tr.build()
kw = tr.extract(0.3)
for k in sorted(kw, key=kw.get, reverse=True):
print("%s\t%g" % (k, kw[k]))
# 단어만 뽑아내기
wow = []
for k in kw.keys():
if len(k) == 2:
ee = k[0][0] +' '+k[1][0]
wow.append(ee)
else:
ee = k[0][0]
wow.append(ee)
# 핵심단어의 개수는 특정 품사(Noun, Adjective)의 사용된 단어 종류의 개수 중 5% 추출 -> 비율은 사용자가 원하는데로 조절
wow = wow[0:int(len(tr.dictCount)*0.07)]
wow
# 해당 키워드 중 TR이 가장 높은 문장을 택하는 코드
finish = []
for i in range(0,len(wow)):
for h in sorted(range(-(len(cy)),0),reverse=True):
if wow[i] in cy[h]:
from ckonlpy.tag import Twitter
nlp = Twitter()
f = open('형태소 보완.txt')
dd = f.read()
a = dd.split('\n')
nlp.add_dictionary(a,'Noun')
try:
tx = nlp.pos(cy[h])
tx2 = nlp.pos(wow[i])[0]
tx3 = tx.index(tx2)
if tx3 == 0:
j = wow[i]
else:
hoho2 = []
for y in range(0,len(tx[:tx3])):
if 'Noun' in tx[:tx3][y] and len(tx[:tx3][y][0]) >= 3:
hoho2.append(y)
if len(hoho2)>=3:
j = tx[:tx3][hoho2[-3]][0]
elif len(hoho2) ==2:
j = tx[:tx3][hoho2[-2]][0]
elif len(hoho2) == 1:
j = tx[:tx3][hoho2[0]][0]
elif len(hoho2) == 0:
j = wow[i]
aa = cy[h][cy[h].index(j):]
finish.append(aa)
except:
pass
break
# 복합명사가 띄어쓰기가 아니라 붙여쓰기인 경우를 고려
how = []
for i in range(0,len(wow)):
if ' ' in wow[i]:
how.append(wow[i])
if len(how) != 0:
for i in range(0,len(how)):
how[i] = how[i].replace(' ','')
for i in range(0,len(how)):
for h in sorted(range(-(len(cy)),0),reverse=True):
if how[i] in cy[h]:
from ckonlpy.tag import Twitter
nlp = Twitter()
f = open('형태소 보완.txt')
dd = f.read()
a = dd.split('\n')
nlp.add_dictionary(a,'Noun')
tx = nlp.pos(cy[h])
tx2 = nlp.pos(how[i])[0]
tx3 = tx.index(tx2)
if tx3 == 0:
j = how[i]
else:
hoho2 = []
for y in range(0,len(tx[:tx3])):
if 'Noun' in tx[:tx3][y] and len(tx[:tx3][y][0]) >= 3:
hoho2.append(y)
if len(hoho2)>=3:
j = tx[:tx3][hoho2[-3]][0]
elif len(hoho2) == 2:
j = tx[:tx3][hoho2[-2]][0]
elif len(hoho2) == 1:
j = tx[:tx3][hoho2[0]][0]
elif len(hoho2) == 0:
j = how[i]
aa = cy[h][cy[h].index(j):]
finish.append(aa)
break
# 문장을 시간 순으로 배열
summary = list(set(finish))
hello = []
for i in range(0,len(summary)):
for h in range(0,len(cy)):
if summary[i][0:-3] in cy[h]:
hello.append(h)
break
while 1:
if len(hello) == len(summary):
break
elif len(hello) >= len(summary):
hello.pop()
elif len(hello) <= len(summary):
hello.append(50)
print(hello)
jj=[]
for i in range(0,len(summary)):
jj.append([summary[i],hello[i]])
ee = dict(jj)
items = ee.items()
summary2 = [item[0] for item in sorted(items, key=lambda x: x[1])]
## 중복되는 문장 제거하기 코드
for i in range(0,len(summary2)):
for h in range(0,len(summary2)):
if i != h:
if summary2[i] in summary2[h]:
summary2[i] = '제거'
good = True
while good:
try:
summary2.remove('제거')
except ValueError:
good = False
print(wow)
print(summary2)
| 0.188287 | 0.845688 |
# Measure execution time of small code snippets
**27.5. timeit** — Measure execution time of small code snippets
https://docs.python.org/3/library/timeit.html
This module provides a simple way to
**`time` small bits of Python code** .
It has
* a **Command-Line Interface**
* a **callable** one
## 1 Timing through Python Command-Line
```
!python -m timeit "[ str(i) for i in range(100)]"
```
## 2 Timing by Jupyter Magic
`%timeit`:line magic : time execution of a Python statement or expression
`%%timeit`:cell magic: time the rest of the call line and the body of the cell
#### `%timeit`
Timing IAPWS-IF97 in C (seuif97)
```
import seuif97
%timeit seuif97.pt(15,535,4)
```
Timing IAPWS-IF97 in Python
```
from iapws.iapws97 import IAPWS97
%timeit IAPWS97(P=16.10,T=535.10).h
```
#### `%%timeit`
```
%%timeit
d={}
for i in range(1000):
d[str(i)] = i
```
## 3 Timing by Python Interface
### import timeit
```python
timeit.timeit(stmt='pass', setup='pass', timer=<default timer>, number=1000000, globals=None)
```
Create a Timer instance with the given statement, setup code and timer function and run its `timeit()` method with number executions. The optional globals argument specifies a namespace in which to execute the code.
Changed in version 3.5: The optional globals parameter was added.
```python
timeit.default_timer()
```
The default timer, which is always `time.perf_counter()`.
Changed in version 3.3: `time.perf_counter()` is now the default timer.
>time.perf_counter()
> Return the value (in fractional seconds) of a performance counter, i.e. a clock with **the highest available resolution** to measure a short duration.
>It does include time elapsed during sleep and is system-wide.
> The reference point of the returned value is undefined, so that only the **difference** between the results of consecutive calls is valid.
> New in version 3.3.
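A minimal sketch of the pattern (the workload here is arbitrary): take one reading, do the work, take a second reading, and keep only the difference.
```
import time

start = time.perf_counter()
sum(range(1000000))                      # some work to measure
elapsed = time.perf_counter() - start    # only the difference is meaningful
print('elapsed (s):', elapsed)
```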
```
import timeit
timeit.timeit('"d={}" "for i in range(1000):" " d[str(i)] = i"', number=1000)
```
The statement can also be written as a multi-line (triple-quoted) string literal:
```python
"""
"""
```
```
timeit.timeit(
"""
d={}
for i in range(1000):
d[str(i)] = i
""",
number=1000)
s="""
d={}
for i in range(1000):
d[str(i)] = i
"""
timeit.timeit(stmt=s,number=1000)
```
### timeit.Timer
The same can be done using the **Timer** class and its methods.
The constructor takes **a statement** to be timed, an additional statement used for setup, and a timer function.
Both statements default to `pass`; the timer function is platform-dependent (see the module doc string).
stmt and setup may also contain multiple statements separated by ; or newlines, as long as they don’t contain multi-line string literals.
```
import timeit
t = timeit.Timer('"d={}" "for i in range(1000):" " d[str(i)] = i"')
t.timeit()
t.repeat(3) # repeat(repeat=3, number=1000000)
```
When **repeat()** is used, it calls `timeit()` several times (3 in this case) and all of the results are returned in a list.
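Since the higher values are usually caused by other processes interfering rather than by Python itself, a common convention is to report the minimum of the repeated runs; a small sketch:
```
import timeit
t = timeit.Timer('"-".join(str(n) for n in range(100))')
print(min(t.repeat(repeat=3, number=10000)))
```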
```
import timeit
s='"d={}";"for i in range(1000):";"d[str(i)] = i"'
#t=timeit.Timer(stmt=s)
#t.timeit()
timeit.Timer(stmt=s).timeit()
```
### Accessing functions with the `timeit` module
You can pass a **setup** parameter which contains an **import** statement:
```python
setup="from __main__ import test"
```
```
import timeit
def test():
L = [i for i in range(100)]
if __name__ == '__main__':
print(timeit.timeit("test()", setup="from __main__ import test"))
```
Another option is to pass **globals()** to the globals parameter
```python
globals=globals()
```
will cause the code to be executed within your current `global namespace`.
This can be more convenient than `individually` specifying `imports`:
```
import timeit
print(timeit.timeit('test()', globals=globals()))
```
### Example IAPWS-IF97
```
import timeit
import seuif97
t = timeit.Timer("seuif97.pt2h(16.10,535.10)",
setup="from __main__ import seuif97")
if97time = t.timeit(1000)
print('Time(s)=', if97time)
```
using `globals=globals()`
without setup="from __main__ import iapws.iapws97"
```
import timeit
import iapws.iapws97
t = timeit.Timer("iapws.iapws97.IAPWS97(P=16.10,T=535.10).h",
globals=globals())
if97time = t.timeit(1000)
print('Time(s)=', if97time)
```
## Further Reading:
**1** Doug Hellmann's `Python Module of the Week`
The Python Module of the Week series, or PyMOTW, is a tour of the Python standard library through short examples.
https://pymotw.com/3/timeit/index.html
**2** 16.3. time — Time access and conversions
https://docs.python.org/3/library/time.html
|
github_jupyter
|
!python -m timeit "[ str(i) for i in range(100)]"
import seuif97
%timeit seuif97.pt(15,535,4)
from iapws.iapws97 import IAPWS97
%timeit IAPWS97(P=16.10,T=535.10).h
%%timeit
d={}
for i in range(1000):
d[str(i)] = i
timeit.timeit(stmt='pass', setup='pass', timer=<default timer>, number=1000000, globals=None)
timeit.default_timer()
import timeit
timeit.timeit('"d={}" "for i in range(1000):" " d[str(i)] = i"', number=1000)
### timeit.Timer
The same can be done using the **Timer** class and its methods.
The constructor takes **a statement** to be timed, an additional statement used for setup, and a timer function.
Both statements default to `pass`; the timer function is platform-dependent (see the module doc string).
stmt and setup may also contain multiple statements separated by ; or newlines, as long as they don’t contain multi-line string literals.
When **repeat()** is used, it calls `timeit()` severeal times (3 in this `case`) and all of the responses are returned in a list.
### The `timeit` module access to `functions `
you can pass a **setup** parameter which contains an **import** statement:
Another option is to pass **globals()** to the globals parameter
will cause the code to be executed within your current `global namespace`.
This can be more convenient than `individually` specifying `imports`:
### Example IAPWS-IF97
using `globals=globals()`
without setup="from __main__ import iapws.iapws97"
| 0.377082 | 0.918626 |
# Autoregressive moving average
## Install packages
```
import sys
!{sys.executable} -m pip install -r requirements.txt
import pandas as pd
import numpy as np
import os
from statsmodels.tsa.arima_model import ARMA
import matplotlib.pyplot as plt
import seaborn as sns
import quiz_tests
sns.set()
#note that for the figure size to show, this cell should be run
#separately from the import of pyplot
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (14, 8)
```
## Simulate return series with autoregressive properties
```
from statsmodels.tsa.arima_process import ArmaProcess
np.random.seed(200)
ar_params = np.array([1, -0.5])
ma_params = np.array([1, -0.3])
ret = ArmaProcess(ar_params, ma_params).generate_sample(nsample=5*252)
ret = pd.Series(ret)
drift = 100
price = pd.Series(np.cumsum(ret)) + drift
ret.plot(figsize=(15,6), color=sns.xkcd_rgb["pale purple"], title="simulated return series")
plt.show()
price.plot(figsize=(15,6), color=sns.xkcd_rgb["baby blue"], title="simulated price series")
plt.show()
```
### log returns
```
lret = np.log(price) - np.log(price.shift(1))
lret = lret[1:]
```
## autocorrelation
Use autocorrelation to get a sense of what lag to use for the autoregressive model.
```
from statsmodels.graphics.tsaplots import plot_acf
_ = plot_acf(lret,lags=10, title='log return autocorrelation')
```
Since the sample series was simulated to have autoregressive properties, we also see autocorrelation between the current periods and the lags.
Note that with actual stock data, there won't be much autocorrelation of returns from one day to the next.
Stock prices are often described as a "random walk": each new period's return (log or simple) is more or less random and essentially unpredictable from past returns.
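As a quick illustrative sketch (not tied to any real market data): i.i.d. normal returns, which have no autoregressive structure by construction, show autocorrelations near zero at every lag, which is roughly how daily stock returns behave.
```
import numpy as np
import pandas as pd
from statsmodels.graphics.tsaplots import plot_acf

# i.i.d. returns: no dependence between periods, so the ACF is ~0 for all lags > 0
iid_ret = pd.Series(np.random.normal(0, 0.01, size=5*252))
_ = plot_acf(iid_ret, lags=10, title='i.i.d. return autocorrelation (near zero)')
```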
## plot partial autocorrelation
```
from statsmodels.graphics.tsaplots import plot_pacf
```
Notice how the partial autocorrelation of price shows that most of the correlation is found in the previous period. Partial autocorrelation is different from autocorrelation in that it shows the influence of each period that is not attributed to the other periods leading up to the current period. In other words, the two-day lag had a fairly strong correlation with the current value because it had a strong correlation with the one-day lag. However, the two-day lag's partial correlation with the current period that isn't attributable to the one-day lag is relatively small.
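To make this concrete, here is a small sketch (using OLS, one of several ways to estimate it) showing that the lag-2 partial autocorrelation is the lag-2 coefficient when the series is regressed on its first two lags; it should be close to the lag-2 bar drawn by `plot_pacf` below, up to the estimation method.
```
import numpy as np
import statsmodels.api as sm

# regress r_t on r_{t-1} and r_{t-2}; the lag-2 coefficient is the lag-2 partial autocorrelation
y = lret.values[2:]
X = np.column_stack([lret.values[1:-1], lret.values[:-2]])  # columns: lag 1, lag 2
ols_fit = sm.OLS(y, sm.add_constant(X)).fit()
print('PACF(2) estimate:', ols_fit.params[2])
```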
```
_ = plot_pacf(lret, lags=10, title='log return Partial Autocorrelation', color=sns.xkcd_rgb["crimson"])
```
## Discussion
Notice that there isn't much correlation between previous periods with the current period. In general, using past stock returns to predict future stock returns is rather difficult. Volatility tends to have more of a correlation with past volatility. We'll cover volatility in a later lesson within this module.
## Ljung-Box Test
The Ljung-Box test helps us check whether the lag we chose gives autocorrelations that are significantly different from zero. The null hypothesis is that the previous lags as a whole are not correlated with the current period. If the p-value is small enough (say 0.05), we can reject the null and assume that the past lags have some correlation with the current period.
```
returns:
lbvalue (float or array) – test statistic
pvalue (float or array) – p-value based on chi-square distribution
... (we'll ignore the other outputs, which are for another similar hypothesis test)
```
```
from statsmodels.stats.diagnostic import acorr_ljungbox
lb_test_stat, lb_p_value = acorr_ljungbox(lret,lags=20)
lb_p_value
```
## Discussion
Since this series was simulated to have autoregressive properties, the Ljung-Box test shows p-values less than 0.05 for the 20 lag periods that we tested.
## Fit an ARMA model
For the purpose of familiarizing ourselves with the ARMA model, we'll fit the model to our simulated return series.
We'll just use one lag for the autoregression and one lag for the moving average.
Check out the [statsmodel arma](https://www.statsmodels.org/dev/generated/statsmodels.tsa.arima_model.ARMA.html) documentation.
```
from statsmodels.tsa.arima_model import ARMA
AR_lag_p = 1
MA_lag_q = 1
order = (AR_lag_p, MA_lag_q)
arma_model = ARMA(lret.values, order=order)
arma_result = arma_model.fit()
arma_pred = pd.Series(arma_result.fittedvalues)
```
## View fitted predictions against actual values
```
plt.plot(lret, color=sns.xkcd_rgb["pale purple"])
plt.plot(arma_pred, color=sns.xkcd_rgb["dark sky blue"])
plt.title('Log returns and predictions using an ARMA(p=1,q=1) model');
print(f"Fitted AR parameter {arma_result.arparams[0]:.2f}, MA parameter {arma_result.maparams[0]:.2f}")
```
## Discussion
In general, autoregressive moving average models are not able to forecast stock returns because stock returns are non-stationary and also quite noisy.
There are other techniques that build upon the concepts of ARMA models, so the goal here was really to help you get familiar with these concepts, as they are the basis for other models that you'll see later in this module.
## Quiz: ARIMA
Fit an autoregressive integrated moving average model. Choose an order of integration of 1, an autoregression lag of 1, and a moving average lag of 1.
Check out the [stats model arima](http://www.statsmodels.org/0.6.1/generated/statsmodels.tsa.arima_model.ARMAResults.html) documentation to help you.
```
from statsmodels.tsa.arima_model import ARIMA
def fit_arima(lret):
#TODO: choose autoregression lag of 1
AR_lag_p =1
#TODO: choose moving average lag of 1
MA_lag_q =1
#TODO: choose order of integration 1
order_of_integration_d = 1
#TODO: Create a tuple of p,d,q
order = (AR_lag_p,order_of_integration_d,MA_lag_q)
#TODO: create an ARIMA model object, passing in the values of the lret pandas series,
# and the tuple containing the (p,d,q) order arguments
arima_model = ARIMA(lret.values, order=order)
arima_result = arima_model.fit()
#TODO: from the result of calling ARIMA.fit(),
# save and return the fitted values, autoregression parameters, and moving average parameters
fittedvalues = arima_result.fittedvalues
arparams = arima_result.arparams
maparams = arima_result.maparams
return fittedvalues,arparams,maparams
quiz_tests.test_fit_arima(fit_arima)
fittedvalues,arparams,maparams = fit_arima(lret)
arima_pred = pd.Series(fittedvalues)
plt.plot(lret, color=sns.xkcd_rgb["pale purple"])
plt.plot(arima_pred, color=sns.xkcd_rgb["jade green"])
plt.title('Log Returns and predictions using an ARIMA(p=1,d=1,q=1) model');
print(f"fitted AR parameter {arparams[0]:.2f}, MA parameter {maparams[0]:.2f}")
```
If you're stuck, you can also check out the solution [here](autoregression_solution.ipynb)
|
github_jupyter
|
import sys
!{sys.executable} -m pip install -r requirements.txt
import pandas as pd
import numpy as np
import os
from statsmodels.tsa.arima_model import ARMA
import matplotlib.pyplot as plt
import seaborn as sns
import quiz_tests
sns.set()
#note that for the figure size to show, this cell should be run
#separately from the import of pyplot
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (14, 8)
from statsmodels.tsa.arima_process import ArmaProcess
np.random.seed(200)
ar_params = np.array([1, -0.5])
ma_params = np.array([1, -0.3])
ret = ArmaProcess(ar_params, ma_params).generate_sample(nsample=5*252)
ret = pd.Series(ret)
drift = 100
price = pd.Series(np.cumsum(ret)) + drift
ret.plot(figsize=(15,6), color=sns.xkcd_rgb["pale purple"], title="simulated return series")
plt.show()
price.plot(figsize=(15,6), color=sns.xkcd_rgb["baby blue"], title="simulated price series")
plt.show()
lret = np.log(price) - np.log(price.shift(1))
lret = lret[1:]
from statsmodels.graphics.tsaplots import plot_acf
_ = plot_acf(lret,lags=10, title='log return autocorrelation')
from statsmodels.graphics.tsaplots import plot_pacf
_ = plot_pacf(lret, lags=10, title='log return Partial Autocorrelation', color=sns.xkcd_rgb["crimson"])
returns:
lbvalue (float or array) – test statistic
pvalue (float or array) – p-value based on chi-square distribution
... (we'll ignore the other outputs, which are for another similar hypothesis test)
from statsmodels.stats.diagnostic import acorr_ljungbox
lb_test_stat, lb_p_value = acorr_ljungbox(lret,lags=20)
lb_p_value
from statsmodels.tsa.arima_model import ARMA
AR_lag_p = 1
MA_lag_q = 1
order = (AR_lag_p, MA_lag_q)
arma_model = ARMA(lret.values, order=order)
arma_result = arma_model.fit()
arma_pred = pd.Series(arma_result.fittedvalues)
plt.plot(lret, color=sns.xkcd_rgb["pale purple"])
plt.plot(arma_pred, color=sns.xkcd_rgb["dark sky blue"])
plt.title('Log returns and predictions using an ARMA(p=1,q=1) model');
print(f"Fitted AR parameter {arma_result.arparams[0]:.2f}, MA parameter {arma_result.maparams[0]:.2f}")
from statsmodels.tsa.arima_model import ARIMA
def fit_arima(lret):
#TODO: choose autoregression lag of 1
AR_lag_p =1
#TODO: choose moving average lag of 1
MA_lag_q =1
#TODO: choose order of integration 1
order_of_integration_d = 1
#TODO: Create a tuple of p,d,q
order = (AR_lag_p,order_of_integration_d,MA_lag_q)
#TODO: create an ARIMA model object, passing in the values of the lret pandas series,
# and the tuple containing the (p,d,q) order arguments
arima_model = ARIMA(lret.values, order=order)
arima_result = arima_model.fit()
#TODO: from the result of calling ARIMA.fit(),
# save and return the fitted values, autoregression parameters, and moving average parameters
fittedvalues = arima_result.fittedvalues
arparams = arima_result.arparams
maparams = arima_result.maparams
return fittedvalues,arparams,maparams
quiz_tests.test_fit_arima(fit_arima)
fittedvalues,arparams,maparams = fit_arima(lret)
arima_pred = pd.Series(fittedvalues)
plt.plot(lret, color=sns.xkcd_rgb["pale purple"])
plt.plot(arima_pred, color=sns.xkcd_rgb["jade green"])
plt.title('Log Returns and predictions using an ARIMA(p=1,d=1,q=1) model');
print(f"fitted AR parameter {arparams[0]:.2f}, MA parameter {maparams[0]:.2f}")
| 0.318061 | 0.889 |
```
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
#UNIONS_800deg_sq
un_data_path = "/automnt/n17data/mkilbing/astro/data/CFIS/800deg2/cfis_800d2_SP_v1.0_zp_W3_Deep23_v0.1.fits"
#EBOSS
eboss_sgc_path = "/home/mkilbing/astro/data/CFIS/spectro_surveys/eBOSS_LRGpCMASS_clustering_data-SGC-vDR16.fits"
eboss_ngc_path = "/home/mkilbing/astro/data/CFIS/spectro_surveys/eBOSS_LRGpCMASS_clustering_data-NGC-vDR16.fits"
#BOSS
boss_sgc_path = "/home/mkilbing/astro/data/CFIS/spectro_surveys/galaxy_DR12v5_CMASSLOWZTOT_South.fits"
boss_ngc_path = "/home/mkilbing/astro/data/CFIS/spectro_surveys/galaxy_DR12v5_CMASSLOWZTOT_North.fits"
#open files
hdul_un_data = fits.open(un_data_path)
hdul_eboss_sgc_path = fits.open(eboss_sgc_path)
hdul_eboss_ngc_path = fits.open(eboss_ngc_path)
hdul_boss_sgc_path = fits.open(boss_sgc_path)
hdul_boss_ngc_path = fits.open(boss_ngc_path)
#print(hdul_boss_sgc_path[1].columns.info())
un_data = hdul_un_data[1].data
eboss_sgc_data = hdul_eboss_sgc_path[1].data
eboss_ngc_data = hdul_eboss_ngc_path[1].data
boss_ngc_data = hdul_boss_ngc_path[1].data
boss_sgc_data = hdul_boss_sgc_path[1].data
RA_un = un_data["ra"]
DEC_un = un_data["dec"]
RA_eboss_s = eboss_sgc_data["RA"]
DEC_eboss_s = eboss_sgc_data["DEC"]
RA_eboss_n = eboss_ngc_data["RA"]
DEC_eboss_n = eboss_ngc_data["DEC"]
RA_boss_s = boss_sgc_data["RA"]
DEC_boss_s = boss_sgc_data["DEC"]
RA_boss_n = boss_ngc_data["RA"]
DEC_boss_n = boss_ngc_data["DEC"]
print("nbr_gal_unions", len(RA_un), "nbr_gal_boss_s", len(RA_boss_s), "nbr_gal_eboss_s", len(RA_eboss_s))
#plot footprint and search for overlap between UNIONS_800deg_sq, BOSS and eBOSS
plt.scatter(RA_un, DEC_un, s = 20, alpha = 1, label = "UNIONS_fp")
plt.scatter(RA_eboss_s, DEC_eboss_s, s = 20, alpha = 1, label = "EBOSS_S_fp")
plt.scatter(RA_eboss_n, DEC_eboss_n, s = 20, alpha = 1, label = "EBOSS_N_fp")
#plt.scatter(RA_boss_s, DEC_boss_s, s = 20, alpha = 1, label = "BOSS_S_fp")
#plt.scatter(RA_boss_n, DEC_boss_n, s = 20, alpha = 1, label = "BOSS_N_fp")
plt.xlabel("RA [deg]")
plt.ylabel("DEC [deg]")
plt.legend(loc = "best")
plt.savefig("UNIONS_EBOSS_fp.png")
plt.show()
plt.scatter(RA_un, DEC_un, s = 20, alpha = 1, label = "UNIONS_fp")
plt.xlabel("RA [deg]")
plt.ylabel("DEC [deg]")
plt.legend()
plt.savefig("UNIONS_fp.png")
plt.show()
plt.scatter(RA_un, DEC_un, s = 20, alpha = 1, label = "UNIONS_fp")
plt.scatter(RA_boss_s, DEC_boss_s, s = 20, alpha = 1, label = "BOSS_S_fp")
plt.scatter(RA_boss_n, DEC_boss_n, s = 20, alpha = 1, label = "BOSS_N_fp")
plt.xlabel("RA [deg]")
plt.ylabel("DEC [deg]")
plt.legend()
plt.savefig("UNIONS_BOSS_fp.png")
plt.show()
```
|
github_jupyter
|
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
#UNIONS_800deg_sq
un_data_path = "/automnt/n17data/mkilbing/astro/data/CFIS/800deg2/cfis_800d2_SP_v1.0_zp_W3_Deep23_v0.1.fits"
#EBOSS
eboss_sgc_path = "/home/mkilbing/astro/data/CFIS/spectro_surveys/eBOSS_LRGpCMASS_clustering_data-NGC-vDR16.fits"
eboss_ngc_path = "/home/mkilbing/astro/data/CFIS/spectro_surveys/eBOSS_LRGpCMASS_clustering_data-SGC-vDR16.fits"
#BOSS
boss_sgc_path = "/home/mkilbing/astro/data/CFIS/spectro_surveys/galaxy_DR12v5_CMASSLOWZTOT_South.fits"
boss_ngc_path = "/home/mkilbing/astro/data/CFIS/spectro_surveys/galaxy_DR12v5_CMASSLOWZTOT_North.fits"
#open files
hdul_un_data = fits.open(un_data_path)
hdul_eboss_sgc_path = fits.open(eboss_sgc_path)
hdul_eboss_ngc_path = fits.open(eboss_ngc_path)
hdul_boss_sgc_path = fits.open(boss_sgc_path)
hdul_boss_ngc_path = fits.open(boss_ngc_path)
#print(hdul_boss_sgc_path[1].columns.info())
un_data = hdul_un_data[1].data
eboss_sgc_data = hdul_eboss_ngc_path[1].data
eboss_ngc_data = hdul_eboss_sgc_path[1].data
boss_ngc_data = hdul_boss_ngc_path[1].data
boss_sgc_data = hdul_boss_sgc_path[1].data
RA_un = un_data["ra"]
DEC_un = un_data["dec"]
RA_eboss_s = eboss_sgc_data["RA"]
DEC_eboss_s = eboss_sgc_data["DEC"]
RA_eboss_n = eboss_ngc_data["RA"]
DEC_eboss_n = eboss_ngc_data["DEC"]
RA_boss_s = boss_sgc_data["RA"]
DEC_boss_s = boss_sgc_data["DEC"]
RA_boss_n = boss_ngc_data["RA"]
DEC_boss_n = boss_ngc_data["DEC"]
print("nbr_gal_unions", len(RA_un), "nbr_gal_boss_s", len(RA_boss_s), "nbr_gal_eboss_s", len(RA_eboss_s))
#plot footprint and search for overlap between UNIONS_800deg_sq, BOSS and eBOSS
plt.scatter(RA_un, DEC_un, s = 20, alpha = 1, label = "UNIONS_fp")
plt.scatter(RA_eboss_s, DEC_eboss_s, s = 20, alpha = 1, label = "EBOSS_S_fp")
plt.scatter(RA_eboss_n, DEC_eboss_n, s = 20, alpha = 1, label = "EBOSS_N_fp")
#plt.scatter(RA_boss_s, DEC_boss_s, s = 20, alpha = 1, label = "BOSS_S_fp")
#plt.scatter(RA_boss_n, DEC_boss_n, s = 20, alpha = 1, label = "BOSS_N_fp")
plt.xlabel("RA [deg]")
plt.ylabel("DEC [deg]")
plt.legend(loc = "best")
plt.savefig("UNIONS_EBOSS_fp.png")
plt.show()
plt.scatter(RA_un, DEC_un, s = 20, alpha = 1, label = "UNIONS_fp")
plt.xlabel("RA [deg]")
plt.ylabel("DEC [deg]")
plt.legend()
plt.savefig("UNIONS_fp.png")
plt.show()
plt.scatter(RA_un, DEC_un, s = 20, alpha = 1, label = "UNIONS_fp")
plt.scatter(RA_boss_s, DEC_boss_s, s = 20, alpha = 1, label = "BOSS_S_fp")
plt.scatter(RA_boss_n, DEC_boss_n, s = 20, alpha = 1, label = "BOSS_N_fp")
plt.xlabel("RA [deg]")
plt.ylabel("DEC [deg]")
plt.legend()
plt.savefig("UNIONS_BOSS_fp.png")
plt.show()
| 0.296247 | 0.310485 |
# Module 5: Two-way ANOVA
## Renal function
Assume you are performing a study on renal function. You are measuring the urine volume (mL) of subjects one hour after they receive a treatment. The subjects are 9 males and 9 females; within each sex, 3 receive a sugar pill, 3 receive a salt pill, and 3 receive a caffeine pill. The collected data is stored to a .csv file.
Before starting the Python, discuss briefly: what are the null hypotheses for this study?
```
import scipy.stats as stats
import numpy as np
import pandas as pd
import statsmodels.api as sm # A new stats package - you'll find there are a lot
from statsmodels.formula.api import ols
df = pd.read_csv("../data/urine_volume_data.csv")
df.info()
```
## Calculating sum of squares
Assume the data passes the assumptions necessary to perform a two-way ANOVA. Fill out the table below:
| | Sum of squares (SS) | Degrees of freedom (DF) |
| --- | --- | --- |
| Total | | |
| Cells (groups) | | |
| Error (within-cells) | | |
| Factor A (treatment) | | |
| Factor B (sex) | | |
| A x B interaction | | |
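For reference, the total sum of squares splits into a between-cells part and a within-cells (error) part, and the cells part splits further into the two main effects and their interaction. With a = 3 treatments, b = 2 sexes, and n = 3 subjects per cell (N = 18), the degrees of freedom follow directly:

$$SS_{total} = SS_{cells} + SS_{error}, \qquad SS_{cells} = SS_A + SS_B + SS_{A \times B}$$

$$df_A = a-1 = 2, \quad df_B = b-1 = 1, \quad df_{A \times B} = (a-1)(b-1) = 2, \quad df_{error} = ab(n-1) = 12, \quad df_{total} = N-1 = 17$$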
```
# statsmodels.api will calculate some of the SS for us. Calculate the rest.
model = ols('volume ~ C(treatment) + C(sex) + C(treatment):C(sex)', data=df).fit()
ss_results = sm.stats.anova_lm(model, typ=2)
ss_factorA = ss_results['sum_sq']['C(treatment)']
ss_factorB = ss_results['sum_sq']['C(sex)']
ss_AxB = ss_results['sum_sq']['C(treatment):C(sex)']
ss_error = ss_results['sum_sq']['Residual']
ss_groups = ss_factorA+ss_factorB+ss_AxB
ss_total = ss_groups+ss_error
print('Sum of squares:')
print('Total: %.2f' % ss_total)
print('Groups: %.2f' % ss_groups)
print('Error: %.2f' % ss_error)
print('Factor A: %.2f' % ss_factorA)
print('Factor B: %.2f' % ss_factorB)
print('AxB interaction: %.2f' % ss_AxB)
```
Using your results from the part above, fill out the table below for α = 0.05: Which hypotheses can you reject?
| | Mean sum of squares (MSS) | F-statistic | F-critical |
| --- | --- | --- | --- |
| Factor A | | | |
| Factor B | | | |
| AxB interaction | | | |
| Error (within cells) | | N/A | N/A |
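As a reminder, each F-statistic is that factor's mean square divided by the within-cells (error) mean square, and the critical value is the upper-α quantile of the F distribution with the matching degrees of freedom (this is what `stats.f.ppf(1-alpha, df_factor, df_error)` computes in the cell below); the same form holds for factor B and the A×B interaction:

$$F_A = \frac{MSS_A}{MSS_{error}}, \qquad F_{crit,A} = F_{1-\alpha}(df_A, df_{error})$$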
```
# Use ss_results again - there's a lot in that data frame
mss_factorA = ss_results['sum_sq']['C(treatment)']/ss_results['df']['C(treatment)']
mss_factorB = ss_results['sum_sq']['C(sex)']/ss_results['df']['C(sex)']
mss_AxB = ss_results['sum_sq']['C(treatment):C(sex)']/ss_results['df']['C(treatment):C(sex)']
mss_error = ss_results['sum_sq']['Residual']/ss_results['df']['Residual']
print('Mean sum of squares:')
print('Factor A: %.2f' % mss_factorA)
print('Factor B: %.2f' % mss_factorB)
print('AxB interaction: %.2f' % mss_AxB)
print('Error: %.2f' % mss_error)
print('F-statistic:')
print('Factor A: %.2f' % ss_results['F']['C(treatment)'])
print('Factor B: %.2f' % ss_results['F']['C(sex)'])
print('AxB interaction: %.2f' % ss_results['F']['C(treatment):C(sex)'])
df_error = ss_results['df']['Residual']
alpha = 0.05
# Remember this function?
f_factorA = stats.f.ppf(1-alpha,ss_results['df']['C(treatment)'],df_error)
f_factorB = stats.f.ppf(1-alpha,ss_results['df']['C(sex)'],df_error)
f_AxB = stats.f.ppf(1-alpha,ss_results['df']['C(treatment):C(sex)'],df_error)
print('F-critical:')
print('Factor A: %.2f' % f_factorA)
print('Factor B: %.2f' % f_factorB)
print('AxB interaction: %.2f' % f_AxB)
```
|
github_jupyter
|
import scipy.stats as stats
import numpy as np
import pandas as pd
import statsmodels.api as sm # A new stats package - you'll fine there are a lot
from statsmodels.formula.api import ols
df = pd.read_csv("../data/urine_volume_data.csv")
df.info()
# statsmodels.api will calculate some of the SS for us. Calculate the rest.
model = ols('volume ~ C(treatment) + C(sex) + C(treatment):C(sex)', data=df).fit()
ss_results = sm.stats.anova_lm(model, typ=2)
ss_factorA = ss_results['sum_sq']['C(treatment)']
ss_factorB = ss_results['sum_sq']['C(sex)']
ss_AxB = ss_results['sum_sq']['C(treatment):C(sex)']
ss_error = ss_results['sum_sq']['Residual']
ss_groups = ss_factorA+ss_factorB+ss_AxB
ss_total = ss_groups+ss_error
print('Sum of squares:')
print('Total: %.2f' % ss_total)
print('Groups: %.2f' % ss_groups)
print('Error: %.2f' % ss_error)
print('Factor A: %.2f' % ss_factorA)
print('Factor B: %.2f' % ss_factorB)
print('AxB interaction: %.2f' % ss_AxB)
# Use ss_results again - there's a lot in that data frame
mss_factorA = ss_results['sum_sq']['C(treatment)']/ss_results['df']['C(treatment)']
mss_factorB = ss_results['sum_sq']['C(sex)']/ss_results['df']['C(sex)']
mss_AxB = ss_results['sum_sq']['C(treatment):C(sex)']/ss_results['df']['C(treatment):C(sex)']
mss_error = ss_results['sum_sq']['Residual']/ss_results['df']['Residual']
print('Mean sum of squares:')
print('Factor A: %.2f' % mss_factorA)
print('Factor B: %.2f' % mss_factorB)
print('AxB interaction: %.2f' % mss_AxB)
print('Error: %.2f' % mss_error)
print('F-statistic:')
print('Factor A: %.2f' % ss_results['F']['C(treatment)'])
print('Factor B: %.2f' % ss_results['F']['C(sex)'])
print('AxB interaction: %.2f' % ss_results['F']['C(treatment):C(sex)'])
df_error = ss_results['df']['Residual']
alpha = 0.05
# Remember this function?
f_factorA = stats.f.ppf(1-alpha,ss_results['df']['C(treatment)'],df_error)
f_factorB = stats.f.ppf(1-alpha,ss_results['df']['C(sex)'],df_error)
f_AxB = stats.f.ppf(1-alpha,ss_results['df']['C(treatment):C(sex)'],df_error)
print('F-critical:')
print('Factor A: %.2f' % f_factorA)
print('Factor B: %.2f' % f_factorB)
print('AxB interaction: %.2f' % f_AxB)
| 0.505127 | 0.953405 |
# Development using test dataset
__grinder run__
@ system76-server:~/notebook/grinderSIP/t/example
__with abundance profile__
~~~
grinder -reference_file genome3.fna -fr 515Fm-927Rm.fna \
-rd 215 normal 50 -tr 1000 -length_bias 0 -unidirectional 1 \
-af abnd_prof.txt
~~~
__with abundance model__
~~~
grinder -reference_file genome3.fna -fr 515Fm-927Rm.fna -am exponential \
-rd 215 normal 50 -tr 1000 -length_bias 0 -unidirectional 1
~~~
__with multiple samples__
~~~
grinder -reference_file genome3.fna -fr 515Fm-927Rm.fna -am exponential \
-rd 215 normal 50 -tr 1000 -length_bias 0 -unidirectional 1 -num_libraries 2 \
-shared_perc 100
~~~
## testing script with 3 genome
@ system76-server:~/notebook/grinderSIP/dev
### simulating isotope incorporation (max of 100%)
$ perl ../bin/makeIncorp_phylo.pl -r grinder-ranks.txt -t genome3.nwk > genome3_inc100.txt
### simulating isotope incorporation (max of 50%)
$ perl ../bin/makeIncorp_phylo.pl -r grinder-ranks.txt -t genome3.nwk -range 0-50 > genome3_inc50.txt
### grinderSIP.pl
CHANGES to grindeSIP.pl:
Fragments written to each fraction file, then grinder is run on each
* fragments made by genome fragmentation:
* genomes fragmented based on determined or defined relative abundance (RA)
* fragmentation size defined by size distribution
* fragment start defined by poisson? or uniform? distribution
* total number of genome copies (TNGC) defined
* TNGC x RA(each taxon)
* fragments distributed into fractions based on fragment GC
* naming issues?
* may need to rename by taxon
* foreach fraction:
* wrapper to run grinder
* need option file (options based on grinder)
$ perl ../bin/grinderSIP.pl -g genome3.fna -r grinder-reads.fa -inc genome3_inc100.txt | less
__checking that all reads written__
$ nseq readsByFrac/isoIncorp/*fasta | perl -ne 'BEGIN{$sum=0} /.+:(\d+)/; $sum+=$1; END{print $sum,"\n"}'
* yes, all written
# Larger dataset of 10 genomes
## Making the dataset
@ system76-server:~/notebook/grinderSIP/dev/bac_genome10
__Randomly selected 10 genomes__
* Selecting from ../../../deltaGC_notes/data/genome_data/prok-bac-genomes_Ngaps
~~~
Bacillus_cereus_F837_76.fasta
Cyanobium_gracile_PCC_6307.fasta
Escherichia_coli_APEC_O78.fasta
Geobacter_lovleyi_SZ.fasta
Idiomarina_loihiensis_GSL_199.fasta
Leuconostoc_citreum_KM20.fasta
Micrococcus_luteus_NCTC_2665.fasta
Nitrosomonas_europaea_ATCC_19718.fasta
Roseobacter_litoralis_Och_149.fasta
Xanthomonas_axonopodis_Xac29-1.fasta
~~~
Getting genomes from collection
~~~
$ printf "Bacillus_cereus_F837_76.fasta\nCyanobium_gracile_PCC_6307.fasta\nEscherichia_coli_APEC_O78.fasta\nGeobacter_lovleyi_SZ.fasta\nIdiomarina_loihiensis_GSL_199.fasta\nLeuconostoc_citreum_KM20.fasta\nMicrococcus_luteus_NCTC_2665.fasta\nNitrosomonas_europaea_ATCC_19718.fasta\nRoseobacter_litoralis_Och_149.fasta\nXanthomonas_axonopodis_Xac29-1.fasta" | xargs -n 1 -I % bash -c "grep -c '>' ~/notebook/deltaGC_notes/data/genome_data/prok-bac-genomes_Ngaps/%"
* all have 1 genome
$ printf "Bacillus_cereus_F837_76.fasta\nCyanobium_gracile_PCC_6307.fasta\nEscherichia_coli_APEC_O78.fasta\nGeobacter_lovleyi_SZ.fasta\nIdiomarina_loihiensis_GSL_199.fasta\nLeuconostoc_citreum_KM20.fasta\nMicrococcus_luteus_NCTC_2665.fasta\nNitrosomonas_europaea_ATCC_19718.fasta\nRoseobacter_litoralis_Och_149.fasta\nXanthomonas_axonopodis_Xac29-1.fasta" | xargs -n 1 -I % bash -c "ln -s ~/notebook/deltaGC_notes/data/genome_data/prok-bac-genomes_Ngaps/% genomes/"
* all genomes symlinked
~~~
Combining and editing names (using file names)
$ find genomes/ -name "*fasta" | xargs -n 1 -I % bash -c "perl -p -e 'BEGIN{\$in=\$ARGV[0]; \$in=~s/.+\/|\.[^.]+\$//g} s/^>.+/>\$in/' % " > genome10.fna
## Grinder simulation (10000 reads)
__exponential abundance model__
~~~
grinder -reference_file genome10.fna -fr ../515Fm-927Rm.fna -am exponential \
-rd 215 normal 50 -tr 10000 -length_bias 0 -unidirectional 1 -base_name genome10
~~~
## Making a tree of the genomes
16S rRNA tree
### Using RNAmmer to get all 16S rRNA genes from each genome
Getting already created rnammer output files
$ mkdir rnammer
$ find genomes/ -name "*fasta" | perl -pe 's/.+\///; s/\.fasta/_rrn.fna/' | xargs -n 1 -I % bash -c "ln -s /var/seq_data/ncbi_db/genome/rnammer/bacteria_rrn/% rnammer/"
Filtering out poor hits
$ find rnammer/ -name "*_rrn.fna" | perl -pe 's/(.+)(\.fna)/$1$2\n$1\_s2k$2/g' | parallel -q -N 2 bash -c 'perl ~/dev/NY_misc/bin/rnammer_filt.py -s 2000 - <{1} > {2}'
* score >= 2000
### Making consensus 16S sequence for each
$ find rnammer/ -name "*_s2k.fna" | perl -pe 's/(.+)(\.fna)/$1$2\n$1\_con$2/g' | parallel -q -N 2 bash -c "~/dev/NY_misc_perl/bin/consensus_seq.pl -i - -t 30 -n {1} < {1} > {2}"
* low threshold used to minimize ambiguous bases in the consensus
Editing names & combining ('?' -> 'N')
$ find rnammer/ -name "*con.fna" | xargs -n 1 -I % bash -c "perl -pe 's/.+\//>/;s/_rrn_s2k\.fna//; s/\?/N/g' %" > genome10_rrn-con.fna
### Aligning with mafft
$ mafft-linsi --thread 20 genome10_rrn-con.fna > genome10_rrn-con_aln.fna
### Making ML tree with RAxML
@ system76-server:~/notebook/grinderSIP/dev/bac_genome10/raxml
$ alignIO.py ../genome10_rrn-con_aln.fna genome10_rrn-con_aln.phy
$ raxmlHPC-PTHREADS-AVX -f a -x 0318 -p 0911 -# 100 -m GTRGAMMA -s genome10_rrn-con_aln.phy -n genome10_rrn-con_ML -T 20
$ mv RAxML_bipartitions.genome10_rrn-con_ML genome10_rrn-con_ML.nwk
## Making incorp file (min-max incorp: 0-100)
@ system76-server:~/notebook/grinderSIP/dev/bac_genome10
__Incorp: min0, max100; weight=0.5__
$ perl ../../bin/makeIncorp_phylo.pl -r genome10-ranks.txt -t raxml/genome10_rrn-con_ML.nwk > genome10_inc0-100_w0.5.txt
__Incorp: min0, max100; weight=1__
$ perl ../../bin/makeIncorp_phylo.pl -r genome10-ranks.txt -t raxml/genome10_rrn-con_ML.nwk -w 1 > genome10_inc0-100_w1.txt
__Incorp: min0, max100; weight=0__
$ perl ../../bin/makeIncorp_phylo.pl -r genome10-ranks.txt -t raxml/genome10_rrn-con_ML.nwk -w 0 > genome10_inc0-100_w0.txt
## running grinderSIP.pl
$ perl ../../bin/grinderSIP.pl -g genome10.fna -r genome10-reads.fa -inc genome10_inc0-100_w1.txt -out readsByFrac_inc0-100_w1 | less
__checking that all reads written__
$ nseq readsByFrac_inc0-100_w1/isoIncorp/*fasta | perl -ne 'BEGIN{$sum=0} /.+:(\d+)/; $sum+=$1; END{print $sum,"\n"}'
* yes, all written
```
# Checking on character trait evolution through ordering by tree
import os
import glob
pwd = '/home/nick/notebook/grinderSIP/dev/bac_genome10/'
## getting tree labels
cmd = 'nw_labels -I ' + pwd + 'raxml/genome10_rrn-con_ML.nwk'
ret = os.popen(cmd, 'r')
labels = [i.strip() for i in ret]
## ordering & printing tables
def order_table(csv_file, order_list):
fh = open(csv_file, 'r')
tbl = [i.rstrip().split('\t') for i in fh]
tbl = dict(tbl)
    for i in order_list:
        print('\t'.join([csv_file, i, tbl[i]]))
# writing dict(tbl) in order of labels
files = glob.glob(pwd + '*_w*.txt')
for f in files:
order_table(f, labels)
```
|
github_jupyter
|
# Checking on character trait evolution through ordering by tree
import os
import glob
pwd = '/home/nick/notebook/grinderSIP/dev/bac_genome10/'
## getting tree labels
cmd = 'nw_labels -I ' + pwd + 'raxml/genome10_rrn-con_ML.nwk'
ret = os.popen(cmd, 'r')
labels = [i.strip() for i in ret]
## ordering & printing tables
def order_table(csv_file, order_list):
fh = open(csv_file, 'r')
tbl = [i.rstrip().split('\t') for i in fh]
tbl = dict(tbl)
    for i in order_list:
        print('\t'.join([csv_file, i, tbl[i]]))
# writing dict(tbl) in order of labels
files = glob.glob(pwd + '*_w*.txt')
for f in files:
order_table(f, labels)
| 0.344664 | 0.213275 |
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import qgrid
import seaborn as sns
files = {"all": "../../explanations-for-ner-train-finnish-20190114-total.txt",
"only_target_entities": "../../explanations-for-ner-train-finnish-20190115-total-only_target_entities.txt",
"finnish_model_100_size": "explanations-for-ner-train-finnish_model_100_size.txt",
"turkish_model_100_size": "explanations-for-ner-train-turkish_model_100_size.txt"}
lines = []
records = []
with open(files["turkish_model_100_size"], "r") as f:
lines = f.readlines()
for line in lines:
tokens = line.strip().split("\t")
record = [int(tokens[0]), tokens[1], tuple([int(x) for x in tokens[2].split(" ")])]
record.append({k: float(v) for k, v in [tuple(x.split(" ")) for x in tokens[3:]]})
records.append(record)
records[0]
list(record[3].values())
def log_sum_exp(input_x):
max_value = np.max(input_x)
return np.log(np.sum([np.exp(x-max_value) for x in input_x])) + max_value
log_sum_exp([1, 2])
np.logaddexp(*[1, 2])
group_by_entity_type = {}
for record in records:
entity_type = record[1]
if entity_type not in group_by_entity_type:
group_by_entity_type[entity_type] = {}
if entity_type in group_by_entity_type:
# sum_weights = log_sum_exp(list(record[3].values()))
# min_value = np.min(list(record[3].values()))
# max_value = np.max(list(record[3].values()))
for morpho_tag, weight in record[3].items():
# value = np.exp(weight - sum_weights)
# value = (weight-min_value)/float(max_value-min_value)
value = weight
if morpho_tag in group_by_entity_type[entity_type]:
group_by_entity_type[entity_type][morpho_tag].append(value)
else:
group_by_entity_type[entity_type][morpho_tag] = [value]
group_by_entity_type.keys()
group_by_entity_type['ORG'].keys()
stats_by_entity_type = {key: dict() for key in group_by_entity_type.keys()}
for entity_type in stats_by_entity_type.keys():
for morpho_tag in group_by_entity_type[entity_type]:
l = group_by_entity_type[entity_type][morpho_tag]
stats_by_entity_type[entity_type][morpho_tag] = (np.mean(l), len(l))
for entity_type in stats_by_entity_type.keys():
sorted_l = sorted(stats_by_entity_type[entity_type].items(), key=lambda x: np.abs(x[1][0]), reverse=True)
print(entity_type, sorted_l[:10])
all_morpho_tags = set()
for record in records:
all_morpho_tags.update(set(record[3].keys()))
all_morpho_tags
morpho_tag_to_id = {m: idx for idx, m in enumerate(all_morpho_tags)}
morpho_tag_to_id
record
records_for_panda = []
for record in records:
record_pre_panda = [record[0], record[1], record[2][0], record[2][1]]
morpho_tags = [None] * len(morpho_tag_to_id)
for morpho_tag, idx in morpho_tag_to_id.items():
if morpho_tag in record[3]:
morpho_tags[idx] = record[3][morpho_tag]
record_pre_panda += morpho_tags
records_for_panda.append(record_pre_panda)
# print(record_pre_panda)
id_to_morpho_tag = {idx: morpho_tag for morpho_tag, idx in morpho_tag_to_id.items()}
column_names = ['sentence_idx', 'entity_type', 'entity_start', 'entity_end']
column_names += [id_to_morpho_tag[x] for x in range(len(morpho_tag_to_id))]
explanations = pd.DataFrame(records_for_panda, columns=column_names)
explanations
df_by_entity_type = explanations.groupby('entity_type')
df_by_entity_type['Loc'].mean()
explanations.drop(['sentence_idx', 'entity_start', 'entity_end'], axis=1).groupby('entity_type').mean()
means_over_entity_type = explanations.drop(['sentence_idx', 'entity_start', 'entity_end'], axis=1).groupby('entity_type').mean()
%matplotlib inline
means_over_entity_type.hist(['Loc'])
means_over_entity_type.corr()
means_over_entity_type['Ins^DB'].mean()
means_over_entity_type[means_over_entity_type.columns[0]].mean()
explanations_grid = qgrid.show_grid(means_over_entity_type.corr().iloc[:, 0:2], show_toolbar=True)
explanations_grid
df_by_entity_type = explanations.drop(['sentence_idx', 'entity_start', 'entity_end'], axis=1).groupby('entity_type')
explanations[explanations['entity_type'] == "LOC"]
loc_group_explanations = explanations[explanations['entity_type'] == "LOC"].drop(["sentence_idx", "entity_type", "entity_start", "entity_end"], axis=1)
loc_group_explanations['Loc'].clip(lower=-1.0, upper=1, inplace=False)
len(morpho_tag_to_id)
loc_group_explanations.size
for idx, morpho_tag in enumerate(list(morpho_tag_to_id.keys())):
if idx % 9 == 0:
fig = plt.figure(int(idx/9))
rem = idx % 9
plt.subplot(3, 3, rem+1)
print(morpho_tag)
# sns.violinplot(data=list(loc_group_explanations[morpho_tag].clip(lower=-0.5, upper=0.5)))
data = loc_group_explanations[morpho_tag].dropna().clip(lower=-0.5, upper=0.5)
print(data)
if data.size > 0:
sns.distplot(data)
plt.show()
loc_group_explanations
mean_loc_group_explanations = loc_group_explanations.mean()
mean_loc_group_explanations.sort_values(ascending=False)
loc_group_explanations['Loc'].sort_values()[:10]
loc_group_explanations['Loc'].sort_values(ascending=False)[:10]
loc_group_explanations.hist(['Loc'], range=[-1, 1])
loc_group_explanations.hist(['Loc^DB'], range=[-1, 1])
loc_group_explanations.hist(['Loc'])
loc_group_explanations.hist(['Loc^DB'])
loc_group_explanations.hist(['Loc'], range=[-5000, -10], bins=100)
loc_group_explanations.hist(['Loc'], range=[1, 1000], bins=100)
loc_group_explanations['Loc'][loc_group_explanations['Loc'] < 0].count()
loc_group_explanations['Loc'][loc_group_explanations['Loc'] >= 0].count()
for morpho_tag in ['Loc', 'Loc^DB']:
below_zero = loc_group_explanations[morpho_tag][loc_group_explanations[morpho_tag] < 0].count()
above_zero = loc_group_explanations[morpho_tag][loc_group_explanations[morpho_tag] >= 0].count()
print(morpho_tag, below_zero, above_zero)
!pwd
!ls ../../explanations-for-ner-train-finnish-201901*
```
|
github_jupyter
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import qgrid
import seaborn as sns
files = {"all": "../../explanations-for-ner-train-finnish-20190114-total.txt",
"only_target_entities": "../../explanations-for-ner-train-finnish-20190115-total-only_target_entities.txt",
"finnish_model_100_size": "explanations-for-ner-train-finnish_model_100_size.txt",
"turkish_model_100_size": "explanations-for-ner-train-turkish_model_100_size.txt"}
lines = []
records = []
with open(files["turkish_model_100_size"], "r") as f:
lines = f.readlines()
for line in lines:
tokens = line.strip().split("\t")
record = [int(tokens[0]), tokens[1], tuple([int(x) for x in tokens[2].split(" ")])]
record.append({k: float(v) for k, v in [tuple(x.split(" ")) for x in tokens[3:]]})
records.append(record)
records[0]
list(record[3].values())
def log_sum_exp(input_x):
max_value = np.max(input_x)
return np.log(np.sum([np.exp(x-max_value) for x in input_x])) + max_value
log_sum_exp([1, 2])
np.logaddexp(*[1, 2])
group_by_entity_type = {}
for record in records:
entity_type = record[1]
if entity_type not in group_by_entity_type:
group_by_entity_type[entity_type] = {}
if entity_type in group_by_entity_type:
# sum_weights = log_sum_exp(list(record[3].values()))
# min_value = np.min(list(record[3].values()))
# max_value = np.max(list(record[3].values()))
for morpho_tag, weight in record[3].items():
# value = np.exp(weight - sum_weights)
# value = (weight-min_value)/float(max_value-min_value)
value = weight
if morpho_tag in group_by_entity_type[entity_type]:
group_by_entity_type[entity_type][morpho_tag].append(value)
else:
group_by_entity_type[entity_type][morpho_tag] = [value]
group_by_entity_type.keys()
group_by_entity_type['ORG'].keys()
stats_by_entity_type = {key: dict() for key in group_by_entity_type.keys()}
for entity_type in stats_by_entity_type.keys():
for morpho_tag in group_by_entity_type[entity_type]:
l = group_by_entity_type[entity_type][morpho_tag]
stats_by_entity_type[entity_type][morpho_tag] = (np.mean(l), len(l))
for entity_type in stats_by_entity_type.keys():
sorted_l = sorted(stats_by_entity_type[entity_type].items(), key=lambda x: np.abs(x[1][0]), reverse=True)
print(entity_type, sorted_l[:10])
all_morpho_tags = set()
for record in records:
all_morpho_tags.update(set(record[3].keys()))
all_morpho_tags
morpho_tag_to_id = {m: idx for idx, m in enumerate(all_morpho_tags)}
morpho_tag_to_id
record
records_for_panda = []
for record in records:
record_pre_panda = [record[0], record[1], record[2][0], record[2][1]]
morpho_tags = [None] * len(morpho_tag_to_id)
for morpho_tag, idx in morpho_tag_to_id.items():
if morpho_tag in record[3]:
morpho_tags[idx] = record[3][morpho_tag]
record_pre_panda += morpho_tags
records_for_panda.append(record_pre_panda)
# print(record_pre_panda)
id_to_morpho_tag = {idx: morpho_tag for morpho_tag, idx in morpho_tag_to_id.items()}
column_names = ['sentence_idx', 'entity_type', 'entity_start', 'entity_end']
column_names += [id_to_morpho_tag[x] for x in range(len(morpho_tag_to_id))]
explanations = pd.DataFrame(records_for_panda, columns=column_names)
explanations
df_by_entity_type = explanations.groupby('entity_type')
df_by_entity_type['Loc'].mean()
explanations.drop(['sentence_idx', 'entity_start', 'entity_end'], axis=1).groupby('entity_type').mean()
means_over_entity_type = explanations.drop(['sentence_idx', 'entity_start', 'entity_end'], axis=1).groupby('entity_type').mean()
%matplotlib inline
means_over_entity_type.hist(['Loc'])
means_over_entity_type.corr()
means_over_entity_type['Ins^DB'].mean()
means_over_entity_type[means_over_entity_type.columns[0]].mean()
explanations_grid = qgrid.show_grid(means_over_entity_type.corr().iloc[:, 0:2], show_toolbar=True)
explanations_grid
df_by_entity_type = explanations.drop(['sentence_idx', 'entity_start', 'entity_end'], axis=1).groupby('entity_type')
explanations[explanations['entity_type'] == "LOC"]
loc_group_explanations = explanations[explanations['entity_type'] == "LOC"].drop(["sentence_idx", "entity_type", "entity_start", "entity_end"], axis=1)
loc_group_explanations['Loc'].clip(lower=-1.0, upper=1, inplace=False)
len(morpho_tag_to_id)
loc_group_explanations.size
for idx, morpho_tag in enumerate(list(morpho_tag_to_id.keys())):
if idx % 9 == 0:
fig = plt.figure(int(idx/9))
rem = idx % 9
plt.subplot(3, 3, rem+1)
print(morpho_tag)
# sns.violinplot(data=list(loc_group_explanations[morpho_tag].clip(lower=-0.5, upper=0.5)))
data = loc_group_explanations[morpho_tag].dropna().clip(lower=-0.5, upper=0.5)
print(data)
if data.size > 0:
sns.distplot(data)
plt.show()
loc_group_explanations
mean_loc_group_explanations = loc_group_explanations.mean()
mean_loc_group_explanations.sort_values(ascending=False)
loc_group_explanations['Loc'].sort_values()[:10]
loc_group_explanations['Loc'].sort_values(ascending=False)[:10]
loc_group_explanations.hist(['Loc'], range=[-1, 1])
loc_group_explanations.hist(['Loc^DB'], range=[-1, 1])
loc_group_explanations.hist(['Loc'])
loc_group_explanations.hist(['Loc^DB'])
loc_group_explanations.hist(['Loc'], range=[-5000, -10], bins=100)
loc_group_explanations.hist(['Loc'], range=[1, 1000], bins=100)
loc_group_explanations['Loc'][loc_group_explanations['Loc'] < 0].count()
loc_group_explanations['Loc'][loc_group_explanations['Loc'] >= 0].count()
for morpho_tag in ['Loc', 'Loc^DB']:
below_zero = loc_group_explanations[morpho_tag][loc_group_explanations[morpho_tag] < 0].count()
above_zero = loc_group_explanations[morpho_tag][loc_group_explanations[morpho_tag] >= 0].count()
print(morpho_tag, below_zero, above_zero)
!pwd
!ls ../../explanations-for-ner-train-finnish-201901*
| 0.15059 | 0.26935 |
#IBM HR Analytics employee attrition and performance
###Predicting attrition among your valued employees
Uncover the factors that lead to employee attrition and, as a consequence, to employees leaving the company they work for. This is a fictional dataset created by IBM data scientists with 1,470 records.
Information:
* Age
* Attrition
* Business travel
* Daily rate
* Department
* Distance from home to work
* Education
  * 1 High school
  * 2 College graduate
  * 3 Postgraduate
  * 4 Master's
  * 5 Doctorate
* Education field
* Employee count
* Employee number
* Environment satisfaction
  * 1 Low
  * 2 Medium
  * 3 High
  * 4 Very high
* Gender
* Hourly rate
* Job involvement
  * 1 Low
  * 2 Medium
  * 3 High
  * 4 Very high
* Job level
* Job role
* Job satisfaction
  * 1 Low
  * 2 Medium
  * 3 High
  * 4 Very high
* Marital status
* Monthly income
* Monthly rate
* Number of companies worked for
* Over 18
* Overtime
* Percent salary hike
* Performance rating
  * 1 Low
  * 2 Good
  * 3 Excellent
  * 4 Outstanding
* Relationship satisfaction at work
  * 1 Low
  * 2 Medium
  * 3 High
  * 4 Very high
* Standard hours
* Stock option level
* Total working years
* Training times last year
* Work-life balance
  * 1 Bad
  * 2 Good
  * 3 Better
  * 4 Best
* Years at the current company
* Years since last promotion
* Years with the current manager
https://www.kaggle.com/pavansubhasht/ibm-hr-analytics-attrition-dataset
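The ordinal fields above (education and the satisfaction/rating scales) are stored as integer codes. A small illustrative sketch of lookup dictionaries, assuming the column names of the original Kaggle file, that can be used later to label plots or reports:
```
# Illustrative lookup tables for the ordinal codes described above
# (keys follow the code lists in the data dictionary; column names are from the original Kaggle file)
education_labels = {1: 'High school', 2: 'College graduate', 3: 'Postgraduate', 4: "Master's", 5: 'Doctorate'}
satisfaction_labels = {1: 'Low', 2: 'Medium', 3: 'High', 4: 'Very high'}
performance_labels = {1: 'Low', 2: 'Good', 3: 'Excellent', 4: 'Outstanding'}
worklife_labels = {1: 'Bad', 2: 'Good', 3: 'Better', 4: 'Best'}

# Example usage (hypothetical dataframe `df` loaded from the Kaggle CSV):
# df['EducationLabel'] = df['Education'].map(education_labels)
```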
# 1. Importing the libraries used for data exploration
```
#Carregando as bibliotecas para exploração dos dados
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns',None)
pd.set_option('display.max_rows', None)
```
# 2. Loading the dataset from Google Drive
```
#Comando para carregar base de dados do Google Drive
from google.colab import drive
drive.mount('/content/drive')
#Carregando a base de dados em uma variável
df_empregados = pd.read_csv('/content/drive/MyDrive/Portfólio Machine Learning/Departamento Recursos Humanos/Human_Resources.csv')
```
# 3. Data exploration
```
#Visualizando o shape da base de dados
df_empregados.shape
#Visualizando os primeiros 5 registros do dataset
df_empregados.head(5)
#Renomeando as colunas
df_empregados = df_empregados.rename(columns={'Age':'Idade', 'Attrition':'Atrito', 'BusinessTravel':'Viagem_Negocio', 'DailyRate':'Valor_Diario', 'Department':'Departamento', 'DistanceFromHome':'Distancia_Casa',
'Education':'Educacao', 'EducationField':'Area_Formacao', 'EmployeeCount':'Cont_Empregado', 'EmployeeNumber':'Matricula_Empreg', 'EnvironmentSatisfaction':'Satisfeito_Ambiente',
'Gender':'Genero', 'HourlyRate':'Horas_Trabalhadas', 'JobInvolvement':'Envolvimento_Trabalho', 'JobLevel':'Nivel_Emprego', 'JobRole':'Cargo', 'JobSatisfaction':'Satisfeito_Trabalho',
'MaritalStatus':'Estado_Civil', 'MonthlyIncome':'Renda_Mensal', 'MonthlyRate':'Taxa_Mensal', 'NumCompaniesWorked':'Num_Empresa_Trabalhou', 'Over18':'Mais_18', 'OverTime':'Hora_Extra',
'PercentSalaryHike':'Aumento_Percentual_Salar', 'PerformanceRating':'Avaliacao_Desempenho', 'RelationshipSatisfaction':'Satisfacao_Relacionamento', 'StandardHours':'Carga_Horaria',
'StockOptionLevel':'Nivel_Acoes_Empresa', 'TotalWorkingYears':'Tempo_De_Registro', 'TrainingTimesLastYear':'Tempo_Treinamento_Ano_Passado', 'WorkLifeBalance':'Equilibrio_Trab_Vida_Pess',
'YearsAtCompany':'Tempo_Na_Empresa', 'YearsInCurrentRole':'Anos_Funcao_Atual', 'YearsSinceLastPromotion':'Anos_Desde_Ultim_Promo', 'YearsWithCurrManager':'Anos_Com_Mesmo_Gerente'})
#Descrição dos dados numerérico
df_empregados.describe()
#Visualizando dados único da variável "Area de Formação"
df_empregados['Area_Formacao'].unique()
#Visualizando informação da base de dados
df_empregados.info()
#Visualizando dados único da variável "Departamento"
df_empregados['Departamento'].unique()
#Visualizando dados único da variável "Viagem_Negocio"
df_empregados['Viagem_Negocio'].unique()
#Visualizando dados da variável "Cargo"
df_empregados['Cargo'].unique()
#Visualizando dados único da variável "Estado_Civil"
df_empregados['Estado_Civil'].unique()
#Visualizando dados único da variável "Area_Formacao"
df_empregados['Area_Formacao'].unique()
#Gráfico para visualização de dados faltantes
sns.heatmap(df_empregados.isnull(), cbar=False);
#Gráfico para exploração dos dados
df_empregados.hist(bins=30, figsize=(20,20), color = 'b');
#Apagando as colunas que não serão relevante para o modelo
df_empregados.drop(['Cont_Empregado', 'Matricula_Empreg', 'Mais_18', 'Carga_Horaria'], axis=1, inplace=True)
#Visualizando o shape da base
df_empregados.shape
#Visualizando os primeiros 5 registros da base
df_empregados.head()
#Visualizando informações sobre a base de dados
df_empregados.info()
#Transformando a variável 'Atrito' e 'Hora Extra' em númerico
df_empregados['Atrito'] = df_empregados['Atrito'].apply(lambda x: 1 if x == 'Yes' else 0)
df_empregados['Hora_Extra'] = df_empregados['Hora_Extra'].apply(lambda x: 1 if x == 'Yes' else 0)
df_empregados.Atrito.value_counts()
#Visualização da quantidade de funcionário sairam ou não da empresa
left_company = df_empregados[df_empregados['Atrito'] == 1]
stayed_company = df_empregados[df_empregados['Atrito'] == 0]
print('Total = ', len(df_empregados))
print('Número de funcionários que saíram da empresa = ', len(left_company))
print('Porcentagem de funcionários que saíram da empresa = {}%'.format(round((len(left_company) / len(df_empregados)) * 100)))
print('Número de funcionários que ficaram na empresa = {}'.format(len(stayed_company)))
print('Porcentagem de funcionários que ficaram na empresa = {}%'.format(round((len(stayed_company) / len(df_empregados)) * 100)))
#Gráfico para mostrar a correção dos dados entre si
correlations = df_empregados.corr()
f, ax = plt.subplots(figsize=(20,20))
sns.heatmap(correlations, annot=True);
#Visualização das correções com a variável 'Atrito'
correlations = df_empregados.corr()['Atrito'].sort_values()
correlations
#Gráfico para mostrar os colaboradores que sairam ou não da empresa por idade
plt.figure(figsize=(15,10))
sns.countplot(x='Idade', hue= 'Atrito', data=df_empregados).set_title('Categoria 0 indica que "Não Saiu da Empresa" e 1 indica que "Saiu da Empresa"');
#Gráficos para mostrar funcionários que saíram ou não da empresa por 'Horas Trabalhadas', 'Cargo', 'Aumento percentual de salário', 'Satisfação Relacionamento', 'Departamento' e 'Nível de emprego'
plt.figure(figsize=[20,20])
plt.subplot(611)
sns.countplot(x = 'Horas_Trabalhadas', hue='Atrito', data=df_empregados);
plt.subplot(612)
sns.countplot(x = 'Cargo', hue='Atrito', data=df_empregados);
plt.subplot(613)
sns.countplot(x = 'Aumento_Percentual_Salar', hue='Atrito', data=df_empregados);
plt.subplot(614)
sns.countplot(x = 'Satisfacao_Relacionamento', hue='Atrito', data=df_empregados);
plt.subplot(615)
sns.countplot(x = 'Departamento', hue='Atrito', data=df_empregados);
plt.subplot(616)
sns.countplot(x = 'Nivel_Emprego', hue='Atrito', data=df_empregados);
#Gráfico para mostrar renda mensal por cargo
plt.figure(figsize=(10,10))
sns.boxplot(x='Renda_Mensal', y='Cargo', data=df_empregados);
#Gráfico para apresentar a distribuição entre a variável 'Distancia de Casa' entre a variável 'Atrito'
plt.figure(figsize=(15,10))
sns.kdeplot(left_company['Distancia_Casa'], label='Colaboradores que saíram', shade=True, color='r', legend=False);
sns.kdeplot(stayed_company['Distancia_Casa'], label='Colaboradores que ficaram', shade=True, color='b', legend=False);
#Gráfico para apresentar a distribuição entre a variável 'Tempo na empresa' entre a variável 'Atrito'
plt.figure(figsize=(12,7))
sns.kdeplot(left_company['Tempo_Na_Empresa'], label = 'Funcionários que saíram', shade = True, color = 'r')
sns.kdeplot(stayed_company['Tempo_Na_Empresa'], label = 'Funcionários que ficaram', shade = True, color = 'b');
#Importação da biblioteca para transformação das variáveis categorica para numérico
from sklearn.preprocessing import OneHotEncoder
#Separando os dados em categórico e número e em seguida removendo a coluna 'Atrito'
df_cat = df_empregados.select_dtypes(include='object')
df_num = df_empregados.select_dtypes(exclude='object')
df_num.drop(['Atrito'], axis=1, inplace=True)
#Visualizando os 5 primeiros dados categórico
df_cat.head()
#Visualizando o shape dos dados categórico e numérico
df_cat.shape, df_num.shape
#Aplicando a transformação nos dados categórico
onehotencoder = OneHotEncoder()
df_cat = onehotencoder.fit_transform(df_cat).toarray()
df_cat = pd.DataFrame(df_cat)
df_cat.head()
#Visualizando o shape dos dados transformados
df_cat.shape
#Juntando os dados categórico transformado com os dados numérico
df_empregados_encoder = pd.concat([df_cat, df_num], axis=1)
#Visualizando o shape dos dados unificado
df_empregados_encoder.shape
#Visualizando os dados concatenados
df_empregados_encoder.head()
#Separando os dados que serão usado para previsão da variável que será prevista
X = df_empregados_encoder
y = df_empregados['Atrito']
#Visualizando o shape dos dados separados
X.shape, y.shape
#Importação da bibliote para normalização dos dados
from sklearn.preprocessing import MinMaxScaler
#Aplicando a normalização nos dados
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
X.shape
#Visualizando a primeira linha dos dados normalizados
print(X[0])
#Importando a biblioteca para separação dos dados de treino e validação
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state=1)
#Visualizando a quantidade de registros por 'Atrito' - 1 indica que funcionário tem a tendência de sair da empresa
y_train.value_counts()
#Melhor visualização dos dados da classe 0 e 1
#Notamos que os dados estão desbalanceados
sns.countplot(y_train, hue=df_empregados['Atrito']);
#Importação das bibliotecas dos modelos e bibliotecas para validação dos modelos
from xgboost import XGBClassifier
import xgboost
from sklearn.ensemble import RandomForestClassifier
from lightgbm import LGBMClassifier
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report, accuracy_score, confusion_matrix
#Aplicando o modelo XGBClassifier com os dados desbalanceados
xgb = XGBClassifier()
xgb.fit(X_train, y_train)
pred_xgb = xgb.predict(X_test)
cm_xgb = confusion_matrix(y_test, pred_xgb)
print(classification_report(y_test, pred_xgb))
print('XGB Matrix: {}\n'.format(cm_xgb))
sns.heatmap(cm_xgb, annot=True, cbar=False);
#Aplicando o modelo RandomForestClassifier com os dados desbalanceados
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
pred_rf = rf.predict(X_test)
cm_rf = confusion_matrix(y_test, pred_rf)
print(classification_report(y_test, pred_rf))
print('RF Matrix: {}\n'.format(cm_rf))
sns.heatmap(cm_rf, annot=True, cbar=False);
#Aplicando o modelo LGBMClassifier com os dados desbalanceados
lgbm = LGBMClassifier()
lgbm.fit(X_train, y_train)
pred_lgbm = lgbm.predict(X_test)
cm_rf = confusion_matrix(y_test, pred_lgbm)
print(classification_report(y_test, pred_lgbm))
print('RF Matrix: {}\n'.format(cm_rf))
sns.heatmap(cm_rf, annot=True, cbar=False);
```
According to the F1-score, the model that performed best on the imbalanced data was the XGBClassifier, correctly identifying about 40% of class 1, which is our target class.
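To make that comparison explicit, a small sketch (assuming the `xgb`, `rf` and `lgbm` models fitted above are still in memory) that prints the class-1 F1-score of each model on the same test split:
```
from sklearn.metrics import f1_score

# Class-1 F1-score of each fitted model on the same (imbalanced) test split
for name, clf in [('XGBClassifier', xgb), ('RandomForestClassifier', rf), ('LGBMClassifier', lgbm)]:
    print('{:<24s} F1 (class 1) = {:.3f}'.format(name, f1_score(y_test, clf.predict(X_test), pos_label=1)))
```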
```
#Importando as bibliotecas para balanceamento dos dados
from imblearn.over_sampling import SMOTE, ADASYN
from imblearn.combine import SMOTETomek, SMOTEENN
#Aplicando o método under sampling SMOTE na base desbalanceada
X_resampled, y_resampled = SMOTE().fit_resample(X, y)
X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled, test_size = 0.25, random_state=1)
xgb = XGBClassifier()
xgb.fit(X_train, y_train)
pred_xgb = xgb.predict(X_test)
cm_xgb = confusion_matrix(y_test, pred_xgb)
print(classification_report(y_test, pred_xgb))
print('XGB Matrix: {}\n'.format(cm_xgb))
sns.heatmap(cm_xgb, annot=True, cbar=False);
#Aplicando o método under sampling ADASYN na base desbalanceada
X_resampled, y_resampled = ADASYN().fit_resample(X, y)
X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled, test_size = 0.25, random_state=1)
xgb = XGBClassifier()
xgb.fit(X_train, y_train)
pred_xgb = xgb.predict(X_test)
cm_xgb = confusion_matrix(y_test, pred_xgb)
print(classification_report(y_test, pred_xgb))
print('XGB Matrix: {}\n'.format(cm_xgb))
sns.heatmap(cm_xgb, annot=True, cbar=False);
#Aplicando a mistura técnicas de over e under sampling com SMOTETomek
X_resampled, y_resampled = SMOTETomek(sampling_strategy='minority', random_state=1).fit_resample(X, y)  # 'ratio' was renamed to 'sampling_strategy' in recent imblearn versions
X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled, test_size = 0.25, random_state=1)
xgb = XGBClassifier()
xgb.fit(X_train, y_train)
pred_xgb = xgb.predict(X_test)
cm_xgb = confusion_matrix(y_test, pred_xgb)
print(classification_report(y_test, pred_xgb))
print('XGB Matrix: {}\n'.format(cm_xgb))
sns.heatmap(cm_xgb, annot=True, cbar=False);
#Aplicando a mistura técnicas de over e under sampling com SMOTEENN
X_resampled, y_resampled = SMOTEENN(random_state=1, sampling_strategy='minority').fit_resample(X, y)  # 'ratio' was renamed to 'sampling_strategy' in recent imblearn versions
X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled, test_size = 0.25, random_state=1)
model = XGBClassifier()
model.fit(X_train, y_train)
pred_xgb = model.predict(X_test)
cm_xgb = confusion_matrix(y_test, pred_xgb)
print(classification_report(y_test, pred_xgb))
print('XGB Matrix: {}\n'.format(cm_xgb))
sns.heatmap(cm_xgb, annot=True, cbar=False);
```
The combination of over- and under-sampling with SMOTEENN gave the best F1-score, correctly classifying about 93% of the class-1 samples.
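A caveat worth keeping in mind: here the resampling is applied to the full dataset before the train/test split, so synthetic samples can end up in the test set and inflate the metrics. A minimal sketch of the leakage-free pattern, resampling only the training portion (variable names are illustrative):
```
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imblearn.combine import SMOTEENN
from xgboost import XGBClassifier

# Split first, then resample only the training data so the test set keeps the original distribution
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1, stratify=y)
X_train_res, y_train_res = SMOTEENN(random_state=1).fit_resample(X_train, y_train)

model_no_leak = XGBClassifier()
model_no_leak.fit(X_train_res, y_train_res)
print(classification_report(y_test, model_no_leak.predict(X_test)))
```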
```
#Importando bibliotecas para salvar o modelo treinado
import pickle
import joblib
from xgboost import Booster
#Salvando procedimento de transformação de variáveis categórica e normalização dos dados que serão usado na aplicação em produção
with open('model.pkl', 'wb') as f:
pickle.dump([scaler, onehotencoder, model], f)
#Salvando o modelo de previsão que obteve o melhor resultado para ser usado na aplicação em produção
#xgb_ok.save_model('model.dat')
#Carregando procedimentos de normalização e transformação salvos
with open('model.pkl', 'rb') as f:
scaler, onehotencoder, model = pickle.load(f)
#The block below expects a 'precessing_data.pkl' file that is never written in this notebook;
#it is kept commented out because scaler and onehotencoder were already restored from model.pkl above
#with open('precessing_data.pkl', 'rb') as f:
#    scaler, onehotencoder = pickle.load(f)
#Carregando dados para teste do modelo em produção
df_test = pd.read_csv('/content/drive/MyDrive/Portfólio Machine Learning/Departamento Recursos Humanos/Test model.csv', sep=';')
df_test = df_test.rename(columns={'Age':'Idade', 'Attrition':'Atrito', 'BusinessTravel':'Viagem_Negocio', 'DailyRate':'Valor_Diario', 'Department':'Departamento', 'DistanceFromHome':'Distancia_Casa',
'Education':'Educacao', 'EducationField':'Area_Formacao', 'EmployeeCount':'Cont_Empregado', 'EmployeeNumber':'Matricula_Empreg', 'EnvironmentSatisfaction':'Satisfeito_Ambiente',
'Gender':'Genero', 'HourlyRate':'Horas_Trabalhadas', 'JobInvolvement':'Envolvimento_Trabalho', 'JobLevel':'Nivel_Emprego', 'JobRole':'Cargo', 'JobSatisfaction':'Satisfeito_Trabalho',
'MaritalStatus':'Estado_Civil', 'MonthlyIncome':'Renda_Mensal', 'MonthlyRate':'Taxa_Mensal', 'NumCompaniesWorked':'Num_Empresa_Trabalhou', 'Over18':'Mais_18', 'OverTime':'Hora_Extra',
'PercentSalaryHike':'Aumento_Percentual_Salar', 'PerformanceRating':'Avaliacao_Desempenho', 'RelationshipSatisfaction':'Satisfacao_Relacionamento', 'StandardHours':'Carga_Horaria',
'StockOptionLevel':'Nivel_Acoes_Empresa', 'TotalWorkingYears':'Tempo_De_Registro', 'TrainingTimesLastYear':'Tempo_Treinamento_Ano_Passado', 'WorkLifeBalance':'Equilibrio_Trab_Vida_Pess',
'YearsAtCompany':'Tempo_Na_Empresa', 'YearsInCurrentRole':'Anos_Funcao_Atual', 'YearsSinceLastPromotion':'Anos_Desde_Ultim_Promo', 'YearsWithCurrManager':'Anos_Com_Mesmo_Gerente'})
df_test['Hora_Extra'] = df_test['Hora_Extra'].apply(lambda x: 1 if x == 'Yes' else 0)
df_test['Atrito'] = df_test['Atrito'].apply(lambda x: 1 if x == 'Yes' else 0)
data_new = df_test.drop(['Atrito', 'Mais_18', 'Carga_Horaria', 'Cont_Empregado', 'Matricula_Empreg'], axis=1)
#Separando os dados em categórico e número e em seguida removendo a coluna 'Atrito'
X_cat_new = data_new[['Viagem_Negocio', 'Departamento', 'Area_Formacao', 'Genero', 'Cargo', 'Estado_Civil']]
X_num_new = data_new[['Idade', 'Valor_Diario', 'Distancia_Casa', 'Educacao', 'Satisfeito_Ambiente', 'Horas_Trabalhadas', 'Envolvimento_Trabalho',
'Nivel_Emprego', 'Satisfeito_Trabalho', 'Renda_Mensal', 'Taxa_Mensal', 'Num_Empresa_Trabalhou', 'Hora_Extra', 'Aumento_Percentual_Salar',
'Avaliacao_Desempenho', 'Satisfacao_Relacionamento', 'Nivel_Acoes_Empresa', 'Tempo_De_Registro', 'Tempo_Treinamento_Ano_Passado',
'Equilibrio_Trab_Vida_Pess', 'Tempo_Na_Empresa', 'Anos_Funcao_Atual', 'Anos_Desde_Ultim_Promo', 'Anos_Com_Mesmo_Gerente']]
#Aplicando transformação de dados categórico
X_cat_encod = onehotencoder.transform(X_cat_new).toarray()
X_cat_encod = pd.DataFrame(X_cat_encod)
#Concatenando dados transformado com dados numérico
X_new_all = pd.concat([X_cat_encod, X_num_new], axis= 1)
#Normalizando os dados
data_new = scaler.transform(X_new_all)
#Executando o modelo
pred = model.predict(data_new)
#Aplicando dados ao modelo
pred = pd.DataFrame(pred, columns=['Predict'])
df_test['Predict'] = pred
#Resultado das previsões
df_test[['Atrito', 'Predict']]
#Metrica do modelo em produção
print(classification_report(df_test['Atrito'],pred))
```
To improve the results we could try:
* Other classification models;
* Hyperparameter tuning to improve the model (see the sketch below);
* Combining (ensembling) models;
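As an illustration of the hyperparameter-tuning suggestion above, a minimal sketch using scikit-learn's `RandomizedSearchCV` around `XGBClassifier`; the parameter grid is only an example, not a tuned recommendation:
```
from sklearn.model_selection import RandomizedSearchCV
from xgboost import XGBClassifier

# Example search space (illustrative values only)
param_distributions = {
    'n_estimators': [100, 200, 400],
    'max_depth': [3, 5, 7],
    'learning_rate': [0.01, 0.05, 0.1],
    'subsample': [0.7, 0.9, 1.0],
}

search = RandomizedSearchCV(XGBClassifier(), param_distributions,
                            n_iter=10, scoring='f1', cv=3, random_state=1)
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)
```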
```
import numpy as np
import torch
from torch.backends import cudnn
cudnn.enabled = True
import voc12.data
import scipy.misc
import importlib
from torch.utils.data import DataLoader
import torchvision
from tool import imutils, pyutils
import argparse
from PIL import Image
import torch.nn.functional as F
import os.path
class Normalize():
def __init__(self, mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225)):
self.mean = mean
self.std = std
def __call__(self, img):
imgarr = np.asarray(img)
proc_img = np.empty_like(imgarr, np.float32)
proc_img[..., 0] = (imgarr[..., 0] / 255. - self.mean[0]) / self.std[0]
proc_img[..., 1] = (imgarr[..., 1] / 255. - self.mean[1]) / self.std[1]
proc_img[..., 2] = (imgarr[..., 2] / 255. - self.mean[2]) / self.std[2]
return proc_img
from pspnet import PSPNet
model = PSPNet(backbone = 'resnet', layers=50, classes=20, zoom_factor=1, pretrained=False, syncbn=False).cuda()
checkpoint = torch.load('exp/drivable/res101_psp_coarse/model/train_epoch_14.pth')
pretrained_dict = {k.replace('module.',''): v for k, v in checkpoint['state_dict'].items()}
dict1 = model.state_dict()
print (dict1.keys(), pretrained_dict.keys())
for item in dict1:
if item not in pretrained_dict.keys():
print(item, 'is missing from the pretrained state dict')
model.load_state_dict(pretrained_dict, strict=False)
model.eval()
model.cuda()
print(model)
normalize = Normalize()
infer_dataset = voc12.data.VOC12ClsDatasetMSF('voc12/train_aug.txt', voc12_root='../VOC2012',
scales=(1, 0.5, 1.5, 2.0),
inter_transform=torchvision.transforms.Compose(
[np.asarray,
normalize,
imutils.HWC_to_CHW]))
infer_data_loader = DataLoader(infer_dataset, shuffle=False, num_workers=8, pin_memory=True)
for iter, (img_name, img_list, label) in enumerate(infer_data_loader):
print(iter,img_name, img_list, label)
print(list(enumerate(img_list)))
img_name = img_name[0]; label = label[0]
img_path = voc12.data.get_img_path(img_name, '../VOC2012')
orig_img = np.asarray(Image.open(img_path))
orig_img_size = orig_img.shape[:2]
with torch.no_grad():
cam = model.forward_cam(img_list[0].cuda())
cam = F.upsample(cam, orig_img_size, mode='bilinear', align_corners=False)[0]
cam = cam.cpu().numpy() * label.clone().view(20, 1, 1).numpy()
break
iter, (img_name, img_list, label) = enumerate(infer_data_loader).__next__()
img_name,label
img_name = img_name[0]; label = label[0]
img_path = voc12.data.get_img_path(img_name, '../VOC2012')
orig_img = np.asarray(Image.open(img_path))
orig_img_size = orig_img.shape[:2]
%matplotlib inline
import matplotlib.pyplot as plt
plt.imshow(orig_img)
with torch.no_grad():
cam = model.forward_cam(img_list[0].cuda())
cam1 = F.upsample(cam, orig_img_size, mode='bilinear', align_corners=False)[0]
cam2 = cam1.cpu().numpy() * label.clone().view(20, 1, 1).numpy()
cam
plt.imshow(cam1.cpu().numpy()[14])
cam1
cam2
plt.imshow(cam2[14])
plt.imshow(cam2[0])
cam2[14]
sum_cam = np.sum([cam2,cam2], axis=0)
sum_cam.shape
norm_cam = sum_cam / (np.max(sum_cam, (1, 2), keepdims=True) + 1e-5)
plt.imshow(norm_cam[0])
plt.imshow(norm_cam[14])
norm_cam[14].max()
bg_score = [np.ones_like(norm_cam[0])*0.2]
bg_score
pred = np.argmax(np.concatenate((bg_score, norm_cam)), 0)
plt.imshow(pred)
cam_dict = {}
for i in range(20):
if label[i] > 1e-5:
cam_dict[i] = norm_cam[i]
cam_dict
v = np.array(list(cam_dict.values()))
alpha = 32  # exponent for the background score; undefined in the original cell, 32 is the value used below
bg_score = np.power(1 - np.max(v, axis=0, keepdims=True), alpha)
bgcam_score = np.concatenate((bg_score, v), axis=0)
crf_score = imutils.crf_inference(orig_img, bgcam_score, labels=bgcam_score.shape[0])
n_crf_al = dict()
n_crf_al[0] = crf_score[0]
for i, key in enumerate(cam_dict.keys()):
    n_crf_al[key+1] = crf_score[i+1]
n_crf_al  # this cell runs at notebook top level, so the stray `return` is replaced by displaying the dict
v = np.array(list(cam_dict.values()))
v.shape
bg_score = np.power(1 - np.max(v, axis=0, keepdims=True), 32)
bgcam_score = np.concatenate((bg_score, v), axis=0)
crf_score = imutils.crf_inference(orig_img, bgcam_score, labels=bgcam_score.shape[0])
plt.imshow(np.argmax(crf_score,0))
plt.imshow(np.argmax(crf_score,0))
```
```
import numpy as np
import logging
import sys, time
import os
import h5py
sys.path.append('/Users/lekan/catkin_ws/src/dp_planning/scripts')
logger = logging.getLogger(__name__)
with open('../scripts/moplan_data.csv', 'r+') as f:
data = f.readlines()
moplan_data = [x.split() for x in data]
proper_data = []
for i in range(len(moplan_data)):
if not moplan_data[i]:
continue
temp = moplan_data[i]
to_append = []
temp = [x.split(sep=',')[0] for x in temp]
#print(temp)
for x in temp:
if '[' in x:
x = x.split(sep="[")[1]
elif ']' in x:
x = x.split(sep=']')[0]
to_append.append(float(x))
proper_data.append(to_append)
proper_data = np.array(proper_data)
print(proper_data.shape)
joint_workspace = dict(pts1=dict(q=[], qdot=[]), pts2=dict(q=[], qdot=[]), pts3=dict(q=[], qdot=[]))
joint_workspace['pts1']['q'] = proper_data[slice(0, 21, 3), slice(0, 7)]
joint_workspace['pts1']['qdot'] = proper_data[slice(0, 21, 3), slice(7, 14)]
joint_workspace['pts2']['q'] = proper_data[slice(1, 21, 3), slice(0, 7)]
joint_workspace['pts2']['qdot'] = proper_data[slice(1, 21, 3), slice(7, 14)]
joint_workspace['pts3']['q'] = proper_data[slice(2, 21, 3), slice(0, 7)]
joint_workspace['pts3']['qdot'] = proper_data[slice(2, 21, 3), slice(7, 14)]
qs = np.vstack([joint_workspace['pts1']['q'], joint_workspace['pts2']['q']])
qdots = np.vstack([joint_workspace['pts1']['qdot'], joint_workspace['pts2']['qdot']])
# print(joint_workspace['pts1']['q'])
# print()
# print(joint_workspace['pts2']['q'])
# print()
# print(joint_workspace['pts3']['q'])
np.set_printoptions(precision=4)
# qs = np.hstack([joint_workspace['pts1']['q'], joint_workspace['pts2']['q']])
print(qs.shape)
```
### sanity check
```
### save the data we have collected
filename = '../scripts/{}.h5'.format('joints_data')
os.remove(filename) if os.path.isfile(filename) else None
time.sleep(4)
with h5py.File(filename, 'w') as f:
pos_grp = f.create_group('workspace_coords')
pos_grp.create_dataset("joint_positions", data=qs, dtype=np.float32, compression="gzip", compression_opts=9)
pos_grp.create_dataset("joint_velocities", data=qdots, dtype=np.float32, compression="gzip", compression_opts=9)
targ_grp = f.create_group('workspace_targets')
targ_grp.create_dataset("joint_positions", data=joint_workspace['pts3']['q'], dtype=np.float32, compression="gzip", compression_opts=9)
targ_grp.create_dataset("joint_velocities", data=joint_workspace['pts3']['qdot'], dtype=np.float32, compression="gzip", compression_opts=9)
a = np.random.randn(4, 7)
for i in range(7):
a[:, i] = i
print(a)
for i in range(7):
# for (x, y, xd, yd) in (a[:,i]):
x, y, xd, yd = a[:,i]
print(x, y, xd, yd)
```
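As a quick sanity check (a sketch added here, not part of the original notebook), the file written above can be re-opened to confirm the datasets round-trip with the expected shapes:
```
import h5py
import numpy as np

# Re-open the file written above and verify the stored arrays match what was written
with h5py.File(filename, 'r') as f:
    q_saved = f['workspace_coords/joint_positions'][:]
    qdot_saved = f['workspace_coords/joint_velocities'][:]
print(q_saved.shape, qdot_saved.shape)
assert np.allclose(q_saved, qs.astype(np.float32))
assert np.allclose(qdot_saved, qdots.astype(np.float32))
```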
# TextEmbedding and Logistic
In this demo, we walk through a sample project that shows how to build a project with the **`TextEmbedding`** and **`Logistic`** templates. In the amazon_reviews project, we try to predict customers' sentiment from the content of their reviews.
```
import esppy
esp = esppy.ESP('<server>:<port>')
```
### Step 1 - Data preprocessing
```
import pandas
train_data = pandas.read_csv('reviews_train_5000.csv', header=None,
names=["id", "title", "content", "rank", "sentiment"])
score_data = pandas.read_csv('reviews_test_1000.csv', header=None,
names=["id", "title", "content", "rank", "sentiment"])
seed = 1234
n_samples = 5000
train_ratio = 0.5
train_data_sample_pos = train_data.loc[train_data['sentiment'] == 1.0].sample(int(n_samples * train_ratio), random_state=seed)
#since the target variable (sentiment) is highly unbalanced, we resample here
train_data_sample_neg = train_data.loc[train_data['sentiment'] == 0.0].sample(int(n_samples * (1 - train_ratio)), replace=True, random_state=seed)
train_data_sample = pandas.concat([train_data_sample_pos, train_data_sample_neg])
from sklearn.utils import shuffle
train_data_sample_shuffled = shuffle(train_data_sample, random_state=seed)
# train data
train_data_sample_shuffled.head()
# score data
score_data.head()
```
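Since the training sample is rebalanced by resampling, a quick check (a small sketch, not part of the original demo) confirms the roughly 50/50 class split before streaming:
```
# Verify the resampled training set is balanced and the score set keeps its original distribution
print(train_data_sample_shuffled['sentiment'].value_counts(normalize=True))
print(score_data['sentiment'].value_counts(normalize=True))
```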
### Step 2 - Model Construction
```
# Create a project
proj = esp.create_project('amazon_reviews')
esppy.options.display.image_scale = 1
proj
```
### Step 3: Training Data Stream
```
# Define a source window
src = esp.SourceWindow(schema=('id*:int64', 'content:string', 'sentiment:string'),
index_type='empty', insert_only=True, autogen_key=True)
# Initialize a TextEmbedding Template
t1 = esp.Template.TextEmbedding('t1',startList='server_route_to/pos_neg_words.txt',
stopList='server_route_to/stop-words.txt',
wordVec='server_route_to/vectors.txt')
# Add corresponding edge between windows
src.add_target(t1, role='data', auto_schema=True)
src.add_target(t1.windows['w_join'], role='data')
t1.windows['w_tok'].set_key('tid')
proj.windows['w_data_t'] = src
proj.add_template(t1)
proj
t1.to_graph(detail=True, schema=True)
```
### Step 4: Validation Data Stream
```
# Define a source window
src2 = esp.SourceWindow(schema=('id*:int64', 'content:string', 'sentiment:string'),
index_type='empty', insert_only=True, autogen_key=True)
# Make a copy of t1
t2 = t1.copy('t2', deep=True, internal_only=True)
# Add corresponding edge between windows
src2.add_target(t2, role='data', auto_schema=True)
src2.add_target(t2.windows['w_join'], role='data')
proj.windows['w_data_v'] = src2
proj.add_template(t2)
proj
t2.windows['w_tok'].set_key('tid')
```
### Step 5: Streaming Logistic Regression
```
# Initialize a Logistic Template
t3 = esp.Template.Logistic('t3',train_inputs=dict(inputs=['vector[1-200]', 'sentiment'], target='sentiment'),
score_inputs=dict(inputs='vector[1-200]'))
# add connectivities between corresponding windows
t1.add_target(t3, role='data')
t2.add_target(t3.windows['w_score_logis'], role='data', auto_schema=True)
proj.add_template(t3)
esppy.options.display.image_scale = 0.65
proj
```
### Step 6: Online Model Measure
```
comp_logis = esp.ComputeWindow("w_comp_logis",
schema=['id*:int64', 'sentiment:string',
'predicted_y:double', 'p_1:double', 'p_0:double'])
#predicted_y is actually the predicted P(sentiment = 1)
comp_logis.add_field_expression("tostring(tointeger(sentiment))")
comp_logis.add_field_expression("predicted_y")
comp_logis.add_field_expression("predicted_y")
comp_logis.add_field_expression("1-predicted_y")
fitstat_logis = esp.calculate.FitStat(schema=('id*:int64','mceOut:double'),
classLabels='0,1',
windowLength=200)
fitstat_logis.set_inputs(inputs=('p_0:double', 'p_1:double'),
response=('sentiment:string'))
fitstat_logis.set_outputs(mceOut='mceOut:double')
proj.windows['w_comp_logis'] = comp_logis
proj.windows['w_fitstat_logis'] = fitstat_logis
t3.add_target(comp_logis, role='data')
comp_logis.add_target(fitstat_logis, role='data')
proj
```
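The `mceOut` value produced by the FitStat window is the mean consequential error, i.e. the misclassification rate over the most recent `windowLength` events. A plain-NumPy sketch of the same quantity, shown offline for clarity (this is not the esppy API):
```
import numpy as np

def mean_consequential_error(p_0, p_1, true_labels):
    """Fraction of events whose most probable class differs from the true class."""
    predicted = np.where(np.asarray(p_1) >= np.asarray(p_0), 1, 0)
    return np.mean(predicted != np.asarray(true_labels).astype(int))

# Example usage over a sliding window of the latest 200 events (window arrays are hypothetical):
# mce = mean_consequential_error(p0_window, p1_window, sentiment_window)
```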
### Step 7: Data Streaming and Processing
```
# Load project to esp server
esp.load_project(proj)
#subscribe necessary windows
src.subscribe()
src2.subscribe()
fitstat_logis.subscribe()
# stream the training data into the engine
src.publish_events(train_data_sample_shuffled, pause=15)
src2.publish_events(score_data, pause=100)
src.head()
fitstat_logis.head()
## create a streaming line that visualizes mceOut (mean consequential error) in real time, as new events arrive
fitstat_logis.streaming_line('id', ['mceOut'], steps=100.0, interval=10, max_data=50, y_range=[0,0.8])
```
### Step 8: Clean Up
```
fitstat_logis.unsubscribe()
esp.delete_project(name='amazon_reviews')
proj
# proj.save_xml('amazon.xml')
```
```
from keras.preprocessing.image import ImageDataGenerator
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import LabelEncoder
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
training_path='/Users/mindachen/PycharmProjects/7390_Final_Project/dataset_merged/training'
validation_path='/Users/mindachen/PycharmProjects/7390_Final_Project/dataset_merged/validation'
img_h = 224
img_w = 224
def load_data(path):
directories = [d for d in os.listdir(path)
if os.path.isdir(os.path.join(path, d))]
imgs=[]
labels=[]
for d in directories:
label_directory = os.path.join(path, d)  # use the `path` argument (not the global training_path) so validation data loads correctly
file_names = [os.path.join(label_directory, f)
for f in os.listdir(label_directory)
if f.endswith(".jpg")]
for f in file_names:
imgs.append(f)
labels.append(d)
# Encode labels with value between 0 and n_classes-1.
LE = LabelEncoder()
# Fit label encoder and return encoded labels
labels = LE.fit_transform(labels)
labels = np.eye(7)[np.array([labels]).reshape(-1)]
return imgs,labels
train_imgs, train_labels = load_data(training_path)
test_imgs, test_labels = load_data(validation_path)
# Python optimisation variables
learning_rate = 0.0001
epochs = 2
batch_size = 32
from tensorflow.python.framework import ops
def read_data(imgs,labels):
# Converts the given value to a Tensor.
image_path_tf = ops.convert_to_tensor(imgs, dtype=tf.string, name="image_paths_tf")
labels_tf = ops.convert_to_tensor(labels, dtype=tf.int32, name="labels_tf")
# Produces a slice of each Tensor in tensor_list.
image_name_tf, label_tf = tf.train.slice_input_producer([image_path_tf, labels_tf], shuffle=True)
image_buffer_tf = tf.read_file(image_name_tf, name="image_buffer")
image = tf.image.decode_jpeg(image_buffer_tf, channels=3)
global img_h, img_w
image = tf.image.resize_image_with_crop_or_pad(image, img_h, img_w)
return tf.cast(image,tf.float32),tf.cast(label_tf,tf.float32)
train_imgs_tf, train_labels_tf=read_data(train_imgs, train_labels)
test_imgs_tf, test_labels_tf=read_data(test_imgs, test_labels)
num_preprocess_threads = 3
min_queue_examples = 3 * batch_size
train_data = tf.train.shuffle_batch(
[train_imgs_tf, train_labels_tf],
batch_size = batch_size,
num_threads = num_preprocess_threads,
capacity = min_queue_examples + 3 * batch_size,
min_after_dequeue = min_queue_examples,
name = "train_images")
test_data = tf.train.shuffle_batch(
[test_imgs_tf, test_labels_tf],
batch_size = 500,
num_threads = num_preprocess_threads,
capacity = min_queue_examples + 1 * batch_size,
min_after_dequeue = min_queue_examples,
name = "test_images")
print('Batch shape: ', train_data[0].shape)
print('Label shape: ', train_data[1].shape)
# declare the training data placeholders
# Inserts a placeholder for a tensor that will be always fed.
x = tf.placeholder(tf.float32, [None,224,224,3],name='raw_input')
# dynamically reshape the input
tf.summary.image('input_img', x)
y = tf.placeholder(tf.float32, [None, 7], name='output')
from tensorflow.python.framework.dtypes import float32
def conv(input, in_shape, out_shape, filter_shape, name='conv'):
with tf.name_scope(name):
# setup the filter input shape for conv layer
conv_filt_shape = [filter_shape[0], filter_shape[1], in_shape, out_shape]
# initialise weights and bias for the filter
weights = tf.Variable(tf.truncated_normal(conv_filt_shape, stddev=0.3), name=name+'_w')
bias = tf.Variable(tf.constant(0.1,shape=[out_shape]), name=name+'_b')
# setup the convolutional layer operation
_ = tf.nn.conv2d(input, weights, [1, 1, 1, 1], padding='SAME')
act=tf.nn.relu(_+bias)
tf.summary.histogram('weights',weights)
tf.summary.histogram('bias',bias)
        tf.summary.histogram('activations', act)
# apply a ReLU non-linear activation
return act
def fc(input, in_shape, out_shape, name='conv'):
with tf.name_scope(name):
weights = tf.Variable(tf.zeros([in_shape, out_shape]), name=name+'_w')
bias = tf.Variable(tf.zeros([out_shape]), name=name+'_b')
return tf.matmul(input, weights)+bias
conv1 = conv(x, 3, 32, [5, 5],name='conv1')
pool1= tf.nn.max_pool(conv1, [1,2,2,1], [1,2,2,1], padding='SAME', name='pool1')
tf.summary.image('conv1_output',tf.expand_dims(pool1[:,:,:,0],axis=-1),max_outputs=3)
conv2 = conv(pool1, 32, 64, [5, 5],name='conv2')
pool2= tf.nn.max_pool(conv2, [1,2,2,1], [1,2,2,1], padding='SAME', name='pool2')
flattened = tf.reshape(pool2, [-1, 56 * 56 * 64])
fc1 = fc(flattened, 56*56*64, 1000, name='fc1')
fc2 = fc(fc1, 1000, 7, name='fc2')
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fc2, labels=y),name='loss')
tf.summary.scalar('loss', loss)
# add an optimizer
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate,name='optmz').minimize(loss)
# define an accuracy assessment operation
correct_prediction = tf.equal(tf.argmax(fc2, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),name='acc')
tf.summary.scalar('accuracy', accuracy)
```
Test block 1: check the output of the `pool1` layer.
```
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
out=sess.run(pool1,feed_dict={x:train_data[0].eval(), y:train_data[1].eval()})
fig=plt.figure(figsize=(8, 4))
for i in range(3):
fig.add_subplot(8, 4, i+1)
plt.imshow(out[0,:,:,i])
plt.show()
coord.request_stop()
coord.join(threads)
```
Test block 2: check the output of the `pool2` layer.
```
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
out=sess.run(pool2,feed_dict={x:train_data[0].eval(), y:train_data[1].eval()})
fig=plt.figure(figsize=(8, 4))
for i in range(3):
fig.add_subplot(8, 4, i+1)
plt.imshow(out[0,:,:,i])
plt.show()
coord.request_stop()
coord.join(threads)
# setup the initialisation operator
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
merged_summary=tf.summary.merge_all()
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
writer=tf.summary.FileWriter('./Graph/genre_classfication_crossentropy_try5', graph=sess.graph)
print('Training started...')
for epoch in range(epochs):
print('Epoch: ',(epoch+1))
avg_cost=0
total_batch=int(7622/batch_size)
for i in range(total_batch):
sess.run(optimizer, feed_dict={x:train_data[0].eval(), y:train_data[1].eval()})
if i%20==0:
l,s, acc= sess.run([loss,merged_summary,accuracy], feed_dict={x:train_data[0].eval(), y:train_data[1].eval()})
writer.add_summary(s,epoch*total_batch+i)
print('Step: ',i, ' loss: ', l, 'train accuracy: ', acc)
# test_acc = sess.run(accuracy, feed_dict={x: test_data[0].eval(), y: test_data[1]})
print("\nEpoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost))
# , "test accuracy:,{:.3f}".format(test_acc))
print("\nTraining complete!")
# print(sess.run([accuracy], feed_dict={x: mnist.test.images, y: mnist.test.labels}))
coord.request_stop()
coord.join(threads)
incp3=tf.keras.applications.InceptionV3(include_top=False,
weights='imagenet',
input_shape=(img_w,img_h,3),
classes=7)
layer_dict = dict([(layer.name, layer) for layer in incp3.layers[1:]])
targets=tf.placeholder(dtype=tf.float32,shape=[None,7],name='targets')
top_dense=tf.layers.dense(tf.layers.max_pooling2d(incp3.outputs[0],(3,3),(3,3),padding='valid'),7)
predictions=tf.layers.flatten(top_dense)
loss_incp3=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=predictions,labels=targets))
optimizer_incp3=tf.train.AdadeltaOptimizer().minimize(loss_incp3)
train_acc_incp3=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(predictions,1), tf.argmax(targets,1)),dtype=tf.float32),name='incp3_acc')
test_acc_incp3=tf.reduce_mean(tf.cast(tf.equal(
tf.argmax(predictions,1),
tf.argmax(targets,1)),
dtype=tf.float32), name='test_acc')
tf.summary.scalar('loss',loss_incp3)
tf.summary.scalar('acc',train_acc_incp3)
tf.summary.image('raw_input',incp3.input)
for i in range(9):
layer_name='mixed'+str(i+1)
tf.summary.image(name=layer_name+'_output',tensor=tf.expand_dims(layer_dict[layer_name].output[:,:,:,0],axis=-1))
merged_summary=tf.summary.merge_all()
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
writer=tf.summary.FileWriter('./Genre_classification/try_5_add_filter_visualization', graph=sess.graph)
print('Training started...')
for epoch in range(epochs):
print('Epoch: ',(epoch+1))
avg_cost=0
total_batch=int(7622/batch_size)
for i in range(total_batch):
sess.run(optimizer_incp3, feed_dict={incp3.input:train_data[0].eval(), targets:train_data[1].eval()})
if i%10==0:
l,s, acc= sess.run([loss_incp3,merged_summary,train_acc_incp3], feed_dict={incp3.input:train_data[0].eval(), targets:train_data[1].eval()})
writer.add_summary(s,epoch*total_batch+i)
print('Step: ',i, ' loss: ', l, ' train accuracy: ', acc)
        # tf.summary.scalar needs a tag name as well as the tensor
        writer.add_summary(sess.run(tf.summary.scalar('test_acc', test_acc_incp3),
                                    feed_dict={incp3.input:test_data[0].eval(), targets:test_data[1].eval()}))
print("\nEpoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost))
print("\nTraining complete!")
# print(sess.run([accuracy], feed_dict={x: mnist.test.images, y: mnist.test.labels}))
coord.request_stop()
coord.join(threads)
tf.expand_dims(layer_dict['mixed1'].output[:,:,:,0],axis=-1)
train_data[1]
```
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Q3 API
from layers import dense
epochs = 35
batch_size = 200
learning_rate = 0.3
mnist = np.load('mnist.npz')
# split the training data (60,000 images) into 55,000 for training and 5,000 for validation
train, x_test = mnist['x_train'], mnist['x_test']
train = train.reshape((60000, 784))/255
x_train = train[:55000].copy()
x_val = train[55000:].copy()
x_test = x_test.reshape((10000, 784))/255
# labels
train_labels = mnist['y_train']
y_train = np.eye(10)[train_labels[:55000]]
y_val = np.eye(10)[train_labels[55000:]]
y_test = np.eye(10)[mnist['y_test']]
# input
x_p = tf.placeholder(tf.float32, [None, 784])
# output
y_p = tf.placeholder(tf.float32, [None, 10])
hidden1 = dense(x=x_p, in_length=784, neurons=300, activation=tf.nn.relu, layer_name='Layer_1', dev=0.01)
hidden2 = dense(x=hidden1, in_length=300, neurons=100, activation=tf.nn.relu, layer_name='Layer_2', dev=0.01)
output = dense(x=hidden2, in_length=100, neurons=10, activation=tf.nn.softmax, layer_name='Layer_Output')
y_clipped = tf.clip_by_value(output, 1e-10, 0.9999999)
cross_entropy = -tf.reduce_mean(tf.reduce_sum(y_p * tf.log(y_clipped)+ (1 - y_p) * tf.log(1 - y_clipped), axis=1))
optimiser = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
labels = tf.argmax(y_p, 1)
predictions = tf.argmax(output, 1)
acc, acc_op = tf.metrics.accuracy(labels, predictions)
conmat = tf.confusion_matrix(labels, predictions)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
avg_loss = []
validate_accuracy = []
total_batches = x_train.shape[0] // batch_size
# Training
for e in range(epochs):
avg_loss.append(0.0)
for b in range(total_batches):
start = b*batch_size
end = (b+1)*batch_size
batch = sess.run([optimiser, cross_entropy],
feed_dict={x_p: x_train[start:end], y_p: y_train[start:end]})
avg_loss[e] += batch[1] / total_batches
# Validation
accuracy = sess.run(acc_op,
feed_dict={x_p: x_val, y_p: y_val})
validate_accuracy.append(accuracy)
print("Epoch: {:2d}".format(e + 1), "train_loss =", "{:.4f}".format(avg_loss[e]), "validate_accuracy =", "{:.4f}".format(validate_accuracy[e]))
# Testing
test_accuracy, confusion_mat = sess.run([acc_op, conmat],
feed_dict={x_p:x_test, y_p:y_test})
print('Testing Accuracy:', test_accuracy)
print('Confusion Matrix:', confusion_mat)
tf.io.write_graph(sess.graph_def, 'graphs/', 'mnist-v1.pbtxt')
np.savetxt('mnistv1-conmat.txt', confusion_mat, fmt='%4d', delimiter=' & ', newline='\\\\\ \hline\n')
plt.xlabel('Epoch')
plt.ylabel('Cross Entropy Loss')
plt.plot(avg_loss)
plt.show()
plt.xlabel('Epoch')
plt.ylabel('Validation Accuracy')
plt.plot(validate_accuracy)
plt.show()
True_positives = np.diag(confusion_mat)
False_positives = np.sum(confusion_mat, axis=1) - True_positives
False_negatives = np.sum(confusion_mat, axis=0) - True_positives
Precision = True_positives / (True_positives + False_positives)
print("Precision:", Precision)
Recall = True_positives / (True_positives + False_negatives)
print("\nRecall:", Recall)
F_scores = (2*Precision*Recall) / (Recall+Precision)
print("\nF_scores:", F_scores)
plt.plot(Precision, label='Precision')
plt.plot(Recall, label='Recall')
plt.plot(F_scores, label='F Scores')
plt.ylabel('Score')
plt.xlabel('Class')
plt.legend()
plt.show()
```
# Building a Generalized Nuclear Model
```
import os
import errno
import numpy as np
import deepcell
```
## Load Each Dataset
```
# Download the data (saves to ~/.keras/datasets)
hela_filename = 'HeLa_S3.npz'
(X_train, y_train), (X_test, y_test) = deepcell.datasets.hela_s3.load_data(hela_filename)
print('X.shape: {}\ny.shape: {}'.format(X_train.shape, y_train.shape))
# Download the data (saves to ~/.keras/datasets)
hek_filename = 'HEK293.npz'
(X_train, y_train), (X_test, y_test) = deepcell.datasets.hek293.load_data(hek_filename)
print('X.shape: {}\ny.shape: {}'.format(X_train.shape, y_train.shape))
# Download the data (saves to ~/.keras/datasets)
nih_filename = '3T3_NIH.npz'
(X_train, y_train), (X_test, y_test) = deepcell.datasets.nih_3t3.load_data(nih_filename)
print('X.shape: {}\ny.shape: {}'.format(X_train.shape, y_train.shape))
```
### Flatten All Datasets into 2D and Combine
```
# Load the data with get_data function
from deepcell.utils.data_utils import get_data
def get_path(fname):
path = os.path.join('~', '.keras', 'datasets', fname)
return os.path.expanduser(path)
hela_train, hela_test = get_data(get_path(hela_filename))
hek_train, hek_test = get_data(get_path(hek_filename))
nih_train, nih_test = get_data(get_path(nih_filename))
def count_cells(train, test, name):
y = np.vstack([train['y'], test['y']])
if len(y.shape) == 5:
y_reshape = np.resize(y, (y.shape[0] * y.shape[1], *y.shape[2:]))
else:
y_reshape = y
total_cells = 0
for i in range(y_reshape.shape[0]):
unique = np.unique(y_reshape[i])
total_cells += (len(unique) - 1)
print('{} Total {} Cells'.format(total_cells, name))
count_cells(nih_train, nih_test, '3T3_NIH')
count_cells(hek_train, hek_test, 'HEK293')
count_cells(hela_train, hela_test, 'HeLa_S3')
# flatten the NIH dataset as it is 3D
def flatten(d):
return np.resize(d, tuple([d.shape[0] * d.shape[1]] + list(d.shape[2:])))
nih_train['X'] = flatten(nih_train['X'])
nih_train['y'] = flatten(nih_train['y'])
nih_test['X'] = flatten(nih_test['X'])
nih_test['y'] = flatten(nih_test['y'])
# Now reshape the data so that they all have the same x/y dimensions
from deepcell.utils.data_utils import reshape_matrix
RESHAPE_SIZE = 128
hela_train['X'], hela_train['y'] = reshape_matrix(hela_train['X'], hela_train['y'], RESHAPE_SIZE)
hela_test['X'], hela_test['y'] = reshape_matrix(hela_test['X'], hela_test['y'], RESHAPE_SIZE)
hek_train['X'], hek_train['y'] = reshape_matrix(hek_train['X'], hek_train['y'], RESHAPE_SIZE)
hek_test['X'], hek_test['y'] = reshape_matrix(hek_test['X'], hek_test['y'], RESHAPE_SIZE)
nih_train['X'], nih_train['y'] = reshape_matrix(nih_train['X'], nih_train['y'], RESHAPE_SIZE)
nih_test['X'], nih_test['y'] = reshape_matrix(nih_test['X'], nih_test['y'], RESHAPE_SIZE)
# Stack up our data as train and test
X_train = np.vstack([hela_train['X'], hek_train['X'], nih_train['X']])
y_train = np.vstack([hela_train['y'], hek_train['y'], nih_train['y']])
X_test = np.vstack([hela_test['X'], hek_test['X'], nih_test['X']])
y_test = np.vstack([hela_test['y'], hek_test['y'], nih_test['y']])
# Load the data into a tensors as X and y
X = np.vstack([X_train, X_test])
y = np.vstack([y_train, y_test])
# Set up filepath constants
# change DATA_DIR if you are not using `deepcell.datasets`
DATA_DIR = os.path.expanduser(os.path.join('~', '.keras', 'datasets'))
# filename to write combined data
filename = 'general_nuclear_data.npz'
# DATA_FILE should be a npz file, preferably from `make_training_data`
DATA_FILE = os.path.join(DATA_DIR, filename)
# the path to the data file is currently required for `train_model_()` functions
np.savez(DATA_FILE, X=X, y=y)
# confirm the data file is available
assert os.path.isfile(DATA_FILE)
# If the data file is in a subdirectory, mirror it in MODEL_DIR and LOG_DIR
PREFIX = os.path.relpath(os.path.dirname(DATA_FILE), DATA_DIR)
ROOT_DIR = '/data' # TODO: Change this! Usually a mounted volume
MODEL_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'models', PREFIX))
LOG_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'logs', PREFIX))
# create directories if they do not exist
for d in (MODEL_DIR, LOG_DIR):
try:
os.makedirs(d)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
```
## Train a General Nuclear Model
### Set up Training Parameters
```
from tensorflow.keras.optimizers import SGD
from deepcell.utils.train_utils import rate_scheduler
fgbg_model_name = 'fgbg_nuclear_model'
conv_model_name = 'watershed_nuclear_model'
n_epoch = 3 # Number of training epochs
test_size = .20 # % of data saved as test
norm_method = 'std' # data normalization
receptive_field = 41 # should be adjusted for the scale of the data
optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
lr_sched = rate_scheduler(lr=0.01, decay=0.99)
# FC training settings
n_skips = 0 # number of skip-connections (only for FC training)
batch_size = 1 # FC training uses 1 image per batch
# Transformation settings
transform = 'watershed'
distance_bins = 4
erosion_width = 0 # erode edges
```
### First, create a foreground/background separation model
#### Instantiate the fgbg model
```
from deepcell import model_zoo
fgbg_model = model_zoo.bn_feature_net_skip_2D(
n_features=2, # segmentation mask (is_cell, is_not_cell)
receptive_field=receptive_field,
n_skips=n_skips,
n_conv_filters=32,
n_dense_filters=128,
input_shape=tuple(X_train.shape[1:]))
```
#### Train the fgbg model
```
from deepcell.training import train_model_conv
fgbg_model = train_model_conv(
model=fgbg_model,
dataset=DATA_FILE, # full path to npz file
model_name=fgbg_model_name,
test_size=test_size,
optimizer=optimizer,
n_epoch=n_epoch,
batch_size=batch_size,
transform='fgbg',
model_dir=MODEL_DIR,
log_dir=LOG_DIR,
lr_sched=lr_sched,
rotation_range=180,
flip=True,
shear=False,
zoom_range=(0.8, 1.2))
```
### Next, create a model for the watershed energy transform
#### Instantiate the distance transform model
```
from deepcell import model_zoo
watershed_model = model_zoo.bn_feature_net_skip_2D(
fgbg_model=fgbg_model,
receptive_field=receptive_field,
n_skips=n_skips,
n_features=distance_bins,
n_conv_filters=32,
n_dense_filters=128,
input_shape=tuple(X_train.shape[1:]))
```
#### Train the distance transform model
```
from deepcell.training import train_model_conv
watershed_model = train_model_conv(
model=watershed_model,
dataset=DATA_FILE, # full path to npz file
model_name=conv_model_name,
test_size=test_size,
optimizer=optimizer,
n_epoch=n_epoch,
batch_size=batch_size,
transform=transform,
model_dir=MODEL_DIR,
log_dir=LOG_DIR,
lr_sched=lr_sched,
rotation_range=180,
flip=True,
shear=False,
zoom_range=(0.8, 1.2))
```
## Run the Model
```
from timeit import default_timer
start = default_timer()
test_images = watershed_model.predict(X_test)
print('watershed transform shape:', test_images.shape)
watershed_time = default_timer() - start
print('segmented in', watershed_time, 'seconds')
start = default_timer()
test_images_fgbg = fgbg_model.predict(X_test)
print('segmentation mask shape:', test_images_fgbg.shape)
fgbg_time = default_timer() - start
print('segmented in', fgbg_time, 'seconds')
test_images = watershed_model.predict(X_test)
test_images_fgbg = fgbg_model.predict(X_test)
print('watershed transform shape:', test_images.shape)
print('segmentation mask shape:', test_images_fgbg.shape)
argmax_images = []
for i in range(test_images.shape[0]):
max_image = np.argmax(test_images[i], axis=-1)
argmax_images.append(max_image)
argmax_images = np.array(argmax_images)
argmax_images = np.expand_dims(argmax_images, axis=-1)
print('watershed argmax shape:', argmax_images.shape)
# threshold the foreground/background
# and remove background from the watershed transform
threshold = 0.5
fg_thresh = test_images_fgbg[..., 1] > threshold
# this overrides the fgbg-based threshold above, summing the non-background watershed bins instead
fg_thresh = test_images[..., 1] + test_images[..., 2] + test_images[..., 3] > threshold
fg_thresh = np.expand_dims(fg_thresh, axis=-1)
argmax_images_post_fgbg = argmax_images * fg_thresh
# Apply watershed method with the distance transform as seed
from scipy import ndimage
from scipy.ndimage.morphology import distance_transform_edt
from skimage.morphology import watershed, opening, closing
from skimage.feature import peak_local_max
from skimage.morphology import erosion, ball
watershed_images = []
for i in range(argmax_images_post_fgbg.shape[0]):
image = fg_thresh[i, ..., 0]
distance = argmax_images_post_fgbg[i, ..., 0]
local_maxi = peak_local_max(
test_images[i, ..., -1],
min_distance=10,
threshold_abs=0.05,
indices=False,
labels=image,
exclude_border=False)
markers = ndimage.label(local_maxi)[0]
segments = watershed(-distance, markers, mask=image)
watershed_images.append(segments)
watershed_images = np.array(watershed_images)
watershed_images = np.expand_dims(watershed_images, axis=-1)
import matplotlib.pyplot as plt
import matplotlib.animation as animation
index = 1000
fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(15, 15), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(X_test[index, :, :, 0])
ax[0].set_title('Source Image')
ax[1].imshow(test_images_fgbg[index, :, :, 1])
ax[1].set_title('Segmentation Prediction')
ax[2].imshow(fg_thresh[index, :, :, 0], cmap='jet')
ax[2].set_title('Thresholded Segmentation')
ax[3].imshow(test_images[index, :, :, 3], cmap='jet')
ax[3].set_title('Watershed Transform')
ax[4].imshow(argmax_images_post_fgbg[index, :, :, 0], cmap='jet')
ax[4].set_title('Watershed Transform w/o Background')
ax[5].imshow(watershed_images[index, :, :, 0], cmap='jet')
ax[5].set_title('Watershed Segmentation')
fig.tight_layout()
plt.show()
```
## Export the Model for TensorFlow-Serving
```
from deepcell import model_zoo
from tensorflow.keras import backend as K
K.set_floatx('float16')
# re-instantiate with a new input_shape
fgbg_model_f16 = model_zoo.bn_feature_net_skip_2D(
receptive_field=receptive_field,
n_skips=n_skips,
n_features=2,
n_conv_filters=32,
n_dense_filters=128,
input_shape=(128, 128, 1))
fgbg_model_f16.load_weights(os.path.join(MODEL_DIR, fgbg_model_name + '.h5'))
watershed_model_f16 = model_zoo.bn_feature_net_skip_2D(
fgbg_model=fgbg_model_f16,
receptive_field=receptive_field,
n_skips=n_skips,
n_features=distance_bins,
n_conv_filters=32,
n_dense_filters=128,
input_shape=(128, 128, 1))
watershed_model_f16.load_weights(os.path.join(MODEL_DIR, conv_model_name + '.h5'))
from timeit import default_timer
start = default_timer()
test_images_16 = watershed_model_f16.predict(X_test)
print('watershed transform shape:', test_images_16.shape)
_watershed_time = default_timer() - start
print('float16 time is', _watershed_time, 'seconds')
print('float32 time was', watershed_time, 'seconds')
start = default_timer()
test_images_fgbg_16 = fgbg_model_f16.predict(X_test)
print('segmentation mask shape:', test_images_fgbg_16.shape)
_fgbg_time = default_timer() - start
print('float16 time is', _fgbg_time, 'seconds')
print('float32 time was', fgbg_time, 'seconds')
from deepcell.utils.export_utils import export_model
weights_path = os.path.join(MODEL_DIR, conv_model_name + '.h5')
EXPORT_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'exports', PREFIX))
export_path = os.path.join(EXPORT_DIR, conv_model_name)
export_model(watershed_model_f16, export_path,
model_version=1, weights_path=weights_path)
assert os.path.isdir(export_path)
```
# PySpark Cookbook
### Tomasz Drabas, Denny Lee
#### Version: 0.1
#### Date: 3/10/2018
# Loading the data
```
forest_path = '../data/forest_coverage_type.csv'
forest = spark.read.csv(
forest_path
, header=True
, inferSchema=True
)
forest.printSchema()
```
# Introducing Transformers
A list of the most popular **Transformers** (a minimal usage sketch follows the list):
* Binarizer
* Bucketizer
* ChiSqSelector
* CountVectorizer
* DCT
* ElementwiseProduct
* HashingTF
* IDF
* IndexToString
* MaxAbsScaler
* MinMaxScaler
* NGram
* Normalizer
* OneHotEncoder
* PCA
* PolynomialExpansion
* QuantileDiscretizer
* RegexTokenizer
* RFormula
* SQLTransformer
* StandardScaler
* StopWordsRemover
* StringIndexer
* Tokenizer
* VectorAssembler
* VectorIndexer
* VectorSlicer
* Word2Vec
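All of these share the same `transform()` contract: given a DataFrame, they return a new DataFrame with the output column appended, without mutating the input. The block below is only a minimal sketch of that pattern, reusing the `forest` DataFrame loaded above; the choice of `Binarizer`, the `Elevation_dbl`/`Elevation_binary` column names, and the 2500 threshold are illustrative assumptions, not part of the original recipe.

```
import pyspark.sql.functions as f
import pyspark.ml.feature as feat

# Binarizer expects a double-typed input column, so cast Elevation first
forest_dbl = forest.withColumn(
    'Elevation_dbl'
    , f.col('Elevation').cast('double')
)

# hypothetical 2500 cut-off, chosen purely for illustration
binarizer = feat.Binarizer(
    threshold=2500.0
    , inputCol='Elevation_dbl'
    , outputCol='Elevation_binary'
)

# transform() returns a new DataFrame with the output column appended
binarizer.transform(forest_dbl).select('Elevation_dbl', 'Elevation_binary').show(5)
```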
```
import pyspark.sql.functions as f
import pyspark.ml.feature as feat
import numpy as np
```
# Bucketize
```
buckets_no = 10
dist_min_max = (
forest.agg(
f.min('Horizontal_Distance_To_Hydrology')
.alias('min')
, f.max('Horizontal_Distance_To_Hydrology')
.alias('max')
)
.rdd
.map(lambda row: (row.min, row.max))
.collect()[0]
)
rng = dist_min_max[1] - dist_min_max[0]
splits = list(np.arange(
dist_min_max[0]
, dist_min_max[1]
, rng / (buckets_no + 1)))
bucketizer = feat.Bucketizer(
splits=splits
, inputCol= 'Horizontal_Distance_To_Hydrology'
, outputCol='Horizontal_Distance_To_Hydrology_Bkt'
)
(
bucketizer
.transform(forest)
.select(
'Horizontal_Distance_To_Hydrology'
,'Horizontal_Distance_To_Hydrology_Bkt'
).show(5)
)
```
# Principal Components Analysis
```
vectorAssembler = (
feat.VectorAssembler(
inputCols=forest.columns,
outputCol='feat'
)
)
pca = (
feat.PCA(
k=5
, inputCol=vectorAssembler.getOutputCol()
, outputCol='pca_feat'
)
)
(
pca
.fit(vectorAssembler.transform(forest))
.transform(vectorAssembler.transform(forest))
.select('feat','pca_feat')
.take(1)
)
```
# Introducing Estimators
A list of the most popular **Estimators** (a minimal `fit()`/`transform()` sketch follows the list):
1. Classification
* LinearSVC
* LogisticRegression
* DecisionTreeClassifier
* GBTClassifier
* RandomForestClassifier
* NaiveBayes
* MultilayerPerceptronClassifier
* OneVsRest
2. Regression
* AFTSurvivalRegression
* DecisionTreeRegressor
* GBTRegressor
* GeneralizedLinearRegression
* IsotonicRegression
* LinearRegression
* RandomForestRegressor
3. Clustering
* BisectingKMeans
* Kmeans
* GaussianMixture
* LDA
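Unlike a Transformer, an Estimator first has to learn from data: `fit()` returns a fitted Model, and it is that Model (itself a Transformer) whose `transform()` appends the predictions. Below is a minimal sketch of that two-step pattern, again reusing the `forest` DataFrame; the two-column feature subset, `k=3`, and the seed are arbitrary illustrative choices.

```
import pyspark.ml.feature as feat
import pyspark.ml.clustering as clust

# assemble two of the numeric columns into a feature vector
assembler = feat.VectorAssembler(
    inputCols=['Elevation', 'Horizontal_Distance_To_Hydrology']
    , outputCol='features'
)
assembled = assembler.transform(forest)

# Estimator: fit() learns the cluster centres and returns a KMeansModel
kmeans = clust.KMeans(k=3, seed=666, featuresCol='features')
kmeans_model = kmeans.fit(assembled)

# the fitted Model is a Transformer: transform() appends a 'prediction' column
kmeans_model.transform(assembled).select('features', 'prediction').show(5)
```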
```
forest.select('CoverType').groupBy('CoverType').count().show()
```
# Linear SVM
```
import pyspark.ml.classification as cl
vectorAssembler = feat.VectorAssembler(
inputCols=forest.columns[0:-1]
, outputCol='features')
fir_dataset = (
vectorAssembler
.transform(forest)
.withColumn(
'label'
, (f.col('CoverType') == 1).cast('integer'))
.select('label', 'features')
)
svc_obj = cl.LinearSVC(maxIter=10, regParam=0.01)
svc_model = svc_obj.fit(fir_dataset)
svc_model.coefficients
```
# Linear Regression
```
import pyspark.ml.regression as rg
vectorAssembler = feat.VectorAssembler(
inputCols=forest.columns[1:]
, outputCol='features')
elevation_dataset = (
vectorAssembler
.transform(forest)
.withColumn(
'label'
, f.col('Elevation').cast('float'))
.select('label', 'features')
)
lr_obj = rg.LinearRegression(
maxIter=10
, regParam=0.01
, elasticNetParam=1.00)
lr_model = lr_obj.fit(elevation_dataset)
lr_model.coefficients
summary = lr_model.summary
print(
summary.r2
, summary.rootMeanSquaredError
, summary.meanAbsoluteError
)
```
# Introducing Pipelines
```
from pyspark.ml import Pipeline
vectorAssembler = feat.VectorAssembler(
inputCols=forest.columns[1:]
, outputCol='features')
lr_obj = rg.GeneralizedLinearRegression(
labelCol='Elevation'
, maxIter=10
, regParam=0.01
, link='identity'
, linkPredictionCol="p"
)
pip = Pipeline(stages=[vectorAssembler, lr_obj])
(
pip
.fit(forest)
.transform(forest)
.select('Elevation', 'prediction')
.show(5)
)
import matplotlib.pyplot as plt
transformed_df = forest.select('Elevation')
transformed_df.toPandas().hist()
plt.savefig('Elevation_histogram.png')
plt.close('all')
```
# Selecting the most predictable features
## Chi-Square selector
```
vectorAssembler = feat.VectorAssembler(
inputCols=forest.columns[0:-1]
, outputCol='features'
)
selector = feat.ChiSqSelector(
labelCol='CoverType'
, numTopFeatures=10
, outputCol='selected')
pipeline_sel = Pipeline(stages=[vectorAssembler, selector])
(
pipeline_sel
.fit(forest)
.transform(forest)
.select(selector.getOutputCol())
.show(5)
)
```
## Correlation matrix
```
import pyspark.ml.stat as st
features_and_label = feat.VectorAssembler(
inputCols=forest.columns
, outputCol='features'
)
corr = st.Correlation.corr(
features_and_label.transform(forest),
'features',
'pearson'
)
print(str(corr.collect()[0][0]))
num_of_features = 10
cols = dict([
(i, e)
for i, e
in enumerate(forest.columns)
])
corr_matrix = corr.collect()[0][0]
label_corr_with_idx = [
(i[0], e)
for i, e
in np.ndenumerate(corr_matrix.toArray()[:,0])
][1:]
label_corr_with_idx_sorted = sorted(
label_corr_with_idx
, key=lambda el: -abs(el[1])
)
features_selected = np.array([
cols[el[0]]
for el
in label_corr_with_idx_sorted
])[0:num_of_features]
features_selected
```
# Predicting forest coverage type
## Logistic regression
```
forest_train, forest_test = (
forest
.randomSplit([0.7, 0.3], seed=666)
)
vectorAssembler = feat.VectorAssembler(
inputCols=forest.columns[0:-1]
, outputCol='features'
)
selector = feat.ChiSqSelector(
labelCol='CoverType'
, numTopFeatures=10
, outputCol='selected'
)
logReg_obj = cl.LogisticRegression(
labelCol='CoverType'
, featuresCol=selector.getOutputCol()
, regParam=0.01
, elasticNetParam=1.0
, family='multinomial'
)
pipeline = Pipeline(
stages=[
vectorAssembler
, selector
, logReg_obj
])
pModel = pipeline.fit(forest_train)
import pyspark.ml.evaluation as ev
results_logReg = (
pModel
.transform(forest_test)
.select('CoverType', 'probability', 'prediction')
)
evaluator = ev.MulticlassClassificationEvaluator(
predictionCol='prediction'
, labelCol='CoverType')
(
evaluator.evaluate(results_logReg)
, evaluator.evaluate(
results_logReg
, {evaluator.metricName: 'weightedPrecision'}
)
, evaluator.evaluate(
results_logReg
, {evaluator.metricName: 'accuracy'}
)
)
```
## Random Forest classifier
```
rf_obj = cl.RandomForestClassifier(
labelCol='CoverType'
, featuresCol=selector.getOutputCol()
, minInstancesPerNode=10
, numTrees=10
)
pipeline = Pipeline(
stages=[vectorAssembler, selector, rf_obj]
)
pModel = pipeline.fit(forest_train)
results_rf = (
pModel
.transform(forest_test)
.select('CoverType', 'probability', 'prediction')
)
evaluator = ev.MulticlassClassificationEvaluator(
predictionCol='prediction'
, labelCol='CoverType')
(
evaluator.evaluate(results_rf)
, evaluator.evaluate(
results_rf
, {evaluator.metricName: 'weightedPrecision'}
)
, evaluator.evaluate(
results_rf
, {evaluator.metricName: 'accuracy'}
)
)
```
# Estimating forest elevation
## Random Forest regression
```
vectorAssembler = feat.VectorAssembler(
inputCols=forest.columns[1:]
, outputCol='features')
rf_obj = rg.RandomForestRegressor(
labelCol='Elevation'
, maxDepth=10
, minInstancesPerNode=10
, minInfoGain=0.1
, numTrees=10
)
pip = Pipeline(stages=[vectorAssembler, rf_obj])
results = (
pip
.fit(forest)
.transform(forest)
.select('Elevation', 'prediction')
)
evaluator = ev.RegressionEvaluator(labelCol='Elevation')
evaluator.evaluate(results, {evaluator.metricName: 'r2'})
```
## Gradient Boosted Trees regression
```
gbt_obj = rg.GBTRegressor(
labelCol='Elevation'
, minInstancesPerNode=10
, minInfoGain=0.1
)
pip = Pipeline(stages=[vectorAssembler, gbt_obj])
results = (
pip
.fit(forest)
.transform(forest)
.select('Elevation', 'prediction')
)
evaluator = ev.RegressionEvaluator(labelCol='Elevation')
evaluator.evaluate(results, {evaluator.metricName: 'r2'})
```
# Clustering forest cover type
```
import pyspark.ml.clustering as clust
vectorAssembler = feat.VectorAssembler(
inputCols=forest.columns[:-1]
, outputCol='features')
kmeans_obj = clust.KMeans(k=7, seed=666)
pip = Pipeline(stages=[vectorAssembler, kmeans_obj])
results = (
pip
.fit(forest)
.transform(forest)
.select('features', 'CoverType', 'prediction')
)
results.show(5)
clustering_ev = ev.ClusteringEvaluator()
clustering_ev.evaluate(results)
```
# Tuning hyper parameters
## Grid search
```
import pyspark.ml.tuning as tune
vectorAssembler = feat.VectorAssembler(
inputCols=forest.columns[0:-1]
, outputCol='features')
selector = feat.ChiSqSelector(
labelCol='CoverType'
, numTopFeatures=5
, outputCol='selected')
logReg_obj = cl.LogisticRegression(
labelCol='CoverType'
, featuresCol=selector.getOutputCol()
, family='multinomial'
)
logReg_grid = (
tune.ParamGridBuilder()
.addGrid(logReg_obj.regParam
, [0.01, 0.1]
)
.addGrid(logReg_obj.elasticNetParam
, [1.0, 0.5]
)
.build()
)
logReg_ev = ev.MulticlassClassificationEvaluator(
predictionCol='prediction'
, labelCol='CoverType')
cross_v = tune.CrossValidator(
estimator=logReg_obj
, estimatorParamMaps=logReg_grid
, evaluator=logReg_ev
)
pipeline = Pipeline(stages=[vectorAssembler, selector])
data_trans = pipeline.fit(forest_train)
logReg_modelTest = cross_v.fit(
data_trans.transform(forest_train)
)
data_trans_test = data_trans.transform(forest_test)
results = logReg_modelTest.transform(data_trans_test)
print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'weightedPrecision'}))
print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'weightedRecall'}))
print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'accuracy'}))
```
## Train-validation splitting
```
train_v = tune.TrainValidationSplit(
estimator=logReg_obj
, estimatorParamMaps=logReg_grid
, evaluator=logReg_ev
, parallelism=4
)
logReg_modelTrainV = (
    train_v
    .fit(data_trans.transform(forest_train))
)
results = logReg_modelTrainV.transform(data_trans_test)
print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'weightedPrecision'}))
print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'weightedRecall'}))
print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'accuracy'}))
```
# Feature engineering - NLP
```
some_text = spark.createDataFrame([
['''
Apache Spark achieves high performance for both batch
and streaming data, using a state-of-the-art DAG scheduler,
a query optimizer, and a physical execution engine.
''']
, ['''
Apache Spark is a fast and general-purpose cluster computing
system. It provides high-level APIs in Java, Scala, Python
and R, and an optimized engine that supports general execution
graphs. It also supports a rich set of higher-level tools including
Spark SQL for SQL and structured data processing, MLlib for machine
learning, GraphX for graph processing, and Spark Streaming.
''']
, ['''
Machine learning is a field of computer science that often uses
statistical techniques to give computers the ability to "learn"
(i.e., progressively improve performance on a specific task)
with data, without being explicitly programmed.
''']
], ['text'])
```
## Tokenizer
```
splitter = feat.RegexTokenizer(
inputCol='text'
, outputCol='text_split'
    , pattern=r'\s+|[,."]'
)
splitter.transform(some_text).select('text_split').take(1)
```
## Stop-words removal
```
sw_remover = feat.StopWordsRemover(
inputCol=splitter.getOutputCol()
, outputCol='no_stopWords'
)
sw_remover.transform(splitter.transform(some_text)).select('no_stopWords').take(1)
```
## Hashing trick
```
hasher = feat.HashingTF(
inputCol=sw_remover.getOutputCol()
, outputCol='hashed'
, numFeatures=20
)
hasher.transform(sw_remover.transform(splitter.transform(some_text))).select('hashed').take(1)
```
## Term Frequency-Inverse Document Frequency
```
idf = feat.IDF(
inputCol=hasher.getOutputCol()
, outputCol='features'
)
idfModel = idf.fit(hasher.transform(sw_remover.transform(splitter.transform(some_text))))
idfModel.transform(hasher.transform(sw_remover.transform(splitter.transform(some_text)))).select('features').take(1)
pipeline = Pipeline(stages=[splitter, sw_remover, hasher, idf])
pipelineModel = pipeline.fit(some_text)
pipelineModel.transform(some_text).select('text','features').take(1)
```
## Word-2-Vec model
```
w2v = feat.Word2Vec(
vectorSize=5
, minCount=2
, inputCol=sw_remover.getOutputCol()
, outputCol='vector'
)
model=w2v.fit(sw_remover.transform(splitter.transform(some_text)))
model.transform(sw_remover.transform(splitter.transform(some_text))).select('vector').take(1)
```
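To see what the model learned for each token that survived the `minCount=2` filter, the fitted `Word2VecModel` exposes its vocabulary and a similarity query; a small sketch using the `model` object above (it assumes the token `'spark'` occurs at least twice in the corpus, which it does in these snippets):
```
model.getVectors().show(truncate=False)
# nearest neighbours of a token by cosine similarity
model.findSynonyms('spark', 3).show()
```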
# Discretizing continuous variables
```
signal_df = spark.read.csv(
'../data/fourier_signal.csv'
, header=True
, inferSchema=True
)
steps = feat.QuantileDiscretizer(
numBuckets=10,
inputCol='signal',
outputCol='discretized')
transformed = (
steps
.fit(signal_df)
.transform(signal_df)
)
import matplotlib.pyplot as plt
transformed_df = transformed.toPandas()
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(transformed_df['signal'], 'k')
ax2.plot(transformed_df['discretized'], 'b-')
ax1.set_ylabel('original', color='k')
ax2.set_ylabel('discretized', color='b')
ax1.set_ylim((-55, 35))
ax2.set_ylim((-2, 12))
fig.tight_layout()
plt.savefig('discretized.png')
plt.close('all')
```
# Standardizing continuous variables
```
signal_df.describe().show()
from pyspark.ml import Pipeline
vec = feat.VectorAssembler(
inputCols=['signal']
, outputCol='signal_vec'
)
norm = feat.StandardScaler(
inputCol=vec.getOutputCol()
, outputCol='signal_norm'
, withMean=True
, withStd=True
)
norm_pipeline = Pipeline(stages=[vec, norm])
signal_norm = (
norm_pipeline
.fit(signal_df)
.transform(signal_df)
)
signal_norm.take(1)
normalized_df = signal_norm.toPandas()
normalized_df['normalized'] = normalized_df.apply(lambda row: row[2][0], axis=1)
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(normalized_df['signal'], 'k')
ax2.plot(normalized_df['normalized'], 'b-')
ax1.set_ylabel('original', color='k')
ax2.set_ylabel('normalized', color='b')
ax1.set_ylim((-105, 30))
ax2.set_ylim((-6, 12))
fig.tight_layout()
plt.savefig('normalized.png')
plt.close('all')
```
# Topic mining
```
articles = spark.createDataFrame([
('''
The Andromeda Galaxy, named after the mythological
Princess Andromeda, also known as Messier 31, M31,
or NGC 224, is a spiral galaxy approximately 780
kiloparsecs (2.5 million light-years) from Earth,
and the nearest major galaxy to the Milky Way.
Its name stems from the area of the sky in which it
appears, the constellation of Andromeda. The 2006
observations by the Spitzer Space Telescope revealed
that the Andromeda Galaxy contains approximately one
trillion stars, more than twice the number of the
Milky Way’s estimated 200-400 billion stars. The
Andromeda Galaxy, spanning approximately 220,000 light
years, is the largest galaxy in our Local Group,
which is also home to the Triangulum Galaxy and
other minor galaxies. The Andromeda Galaxy's mass is
estimated to be around 1.76 times that of the Milky
Way Galaxy (~0.8-1.5×1012 solar masses vs the Milky
Way's 8.5×1011 solar masses).
''','Galaxy', 'Andromeda')
, ('''
The Milky Way is the galaxy that contains our Solar
System. The descriptive "milky" is derived from the
appearance from Earth of the galaxy – a band of light
seen in the night sky formed from stars that cannot be
individually distinguished by the naked eye. The term
Milky Way is a translation of the Latin via lactea, from
the Greek. From Earth, the Milky Way appears as a band
because its disk-shaped structure is viewed from within.
Galileo Galilei first resolved the band of light into
individual stars with his telescope in 1610. Observations
by Edwin Hubble showed that the Milky
Way is just one of many galaxies.
''','Galaxy','Milky Way')
, ('''
Australia, officially the Commonwealth of Australia,
is a sovereign country comprising the mainland of the
Australian continent, the island of Tasmania and numerous
smaller islands. It is the largest country in Oceania and
the world's sixth-largest country by total area. The
neighbouring countries are Papua New Guinea, Indonesia and
East Timor to the north; the Solomon Islands and Vanuatu to
the north-east; and New Zealand to the south-east. Australia's
capital is Canberra, and its largest city is Sydney.
''','Geography', 'Australia')
, ('''
The United States of America (USA), commonly known as the United
States (U.S.) or America, is a federal republic composed of 50
states, a federal district, five major self-governing territories,
and various possessions. At 3.8 million square miles (9.8 million
km2) and with over 325 million people, the United States is the
world's third- or fourth-largest country by total area and the
third-most populous country. The capital is Washington, D.C., and
the largest city by population is New York City. Forty-eight states
and the capital's federal district are contiguous and in North America
between Canada and Mexico. The State of Alaska is in the northwest
corner of North America, bordered by Canada to the east and across
the Bering Strait from Russia to the west. The State of Hawaii is
an archipelago in the mid-Pacific Ocean. The U.S. territories are
scattered about the Pacific Ocean and the Caribbean Sea, stretching
across nine official time zones. The extremely diverse geography,
climate, and wildlife of the United States make it one of the world's
17 megadiverse countries.
''','Geography', 'USA')
, ('''
China, officially the People's Republic of China (PRC), is a unitary
sovereign state in East Asia and, with a population of around 1.404
billion, the world's most populous country. Covering 9,600,000
square kilometers (3,700,000 sq mi), China has the most borders of
any country in the world. Governed by the Communist Party of China,
it exercises jurisdiction over 22 provinces, five autonomous regions,
four direct-controlled municipalities (Beijing, Tianjin, Shanghai, and
Chongqing), and the special administrative regions of Hong Kong and Macau.
''','Geography', 'China')
, ('''
Poland, officially the Republic of Poland, is a country located in
Central Europe. It is divided into 16 administrative subdivisions,
covering an area of 312,679 square kilometres (120,726 sq mi), and has
a largely temperate seasonal climate. With a population of approximately
38.5 million people, Poland is the sixth most populous member state of
the European Union. Poland's capital and largest metropolis is
Warsaw.
''','Geography', 'Poland')
, ('''
The domestic dog (Canis lupus familiaris when considered a subspecies
of the gray wolf or Canis familiaris when considered a distinct species)
is a member of the genus Canis (canines), which forms part of the
wolf-like canids, and is the most widely abundant terrestrial carnivore.
The dog and the extant gray wolf are sister taxa as modern wolves are
not closely related to the wolves that were first domesticated, which
implies that the direct ancestor of the dog is extinct. The dog was
the first species to be domesticated and has been selectively bred over
millennia for various behaviors, sensory capabilities, and physical attributes.
''','Animal', 'Dog')
, ('''
The origin of the domestic dog is not clear. It is known that the dog was
the first domesticated species. The domestic dog is a member of the genus
Canis (canines), which forms part of the wolf-like canids, and is the most
widely abundant terrestrial carnivore. The closest living relative of the
dog is the gray wolf and there is no evidence of any other canine
contributing to its genetic lineage. The dog and the extant gray wolf
form two sister clades, with modern wolves not closely related to the
wolves that were first domesticated. The archaeological record shows
the first undisputed dog remains buried beside humans 14,700 years ago,
with disputed remains occurring 36,000 years ago. These dates imply
that the earliest dogs arose in the time of human hunter-gatherers
and not agriculturists.
''','Animal', 'Dog')
, ('''
Washington, officially the State of Washington, is a state in the Pacific
Northwest region of the United States. Named after George Washington,
the first president of the United States, the state was made out of the
western part of the Washington Territory, which was ceded by Britain in
1846 in accordance with the Oregon Treaty in the settlement of the
Oregon boundary dispute. It was admitted to the Union as the 42nd state
in 1889. Olympia is the state capital. Washington is sometimes referred
to as Washington State, to distinguish it from Washington, D.C., the
capital of the United States, which is often shortened to Washington.
''','Geography', 'Washington State')
], ['articles', 'Topic', 'Object'])
import pyspark.ml.clustering as clust
splitter = feat.RegexTokenizer(
inputCol='articles'
, outputCol='articles_split'
    , pattern=r'\s+|[,."]'
)
sw_remover = feat.StopWordsRemover(
inputCol=splitter.getOutputCol()
, outputCol='no_stopWords'
)
count_vec = feat.CountVectorizer(
inputCol=sw_remover.getOutputCol()
, outputCol='vector'
)
lda_clusters = clust.LDA(
k=3
, optimizer='online'
, featuresCol=count_vec.getOutputCol()
)
topic_pipeline = Pipeline(
stages=[
splitter
, sw_remover
, count_vec
, lda_clusters
]
)
for topic in (
topic_pipeline
.fit(articles)
.transform(articles)
.select('Topic','Object','topicDistribution')
.take(10)
):
print(
topic.Topic
, topic.Object
, np.argmax(topic.topicDistribution)
, topic.topicDistribution
)
```
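The loop above only prints the per-document topic mixtures. To see which words define each topic, the fitted `CountVectorizerModel` vocabulary can be combined with `LDAModel.describeTopics()`; a hedged sketch that refits the pipeline (the loop above discards the fitted model) and assumes the stage order defined in `topic_pipeline`:
```
topic_model = topic_pipeline.fit(articles)
vocabulary = topic_model.stages[2].vocabulary   # fitted CountVectorizerModel
lda_model = topic_model.stages[3]               # fitted LDAModel
for row in lda_model.describeTopics(maxTermsPerTopic=5).collect():
    print(row.topic, [vocabulary[i] for i in row.termIndices])
```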
SSN strings can be converted to the following formats via the `output_format` parameter:
* `compact`: only number strings without any separators or whitespace, like "7569217076985"
* `standard`: SSN strings with the proper separators in the proper places, like "756.9217.0769.85"
Invalid parsing is handled with the `errors` parameter:
* `coerce` (default): invalid parsing will be set to NaN
* `ignore`: invalid parsing will return the input
* `raise`: invalid parsing will raise an exception
The following sections demonstrate the functionality of `clean_ch_ssn()` and `validate_ch_ssn()`.
### An example dataset containing SSN strings
```
import pandas as pd
import numpy as np
df = pd.DataFrame(
{
"ssn": [
"7569217076985",
"756.9217.0769.84",
"51824753556",
"51 824 753 556",
"hello",
np.nan,
"NULL"
],
"address": [
"123 Pine Ave.",
"main st",
"1234 west main heights 57033",
"apt 1 789 s maple rd manhattan",
"robie house, 789 north main street",
"(staples center) 1111 S Figueroa St, Los Angeles",
"hello",
]
}
)
df
```
## 1. Default `clean_ch_ssn`
By default, `clean_ch_ssn` cleans SSN strings and outputs them in the standard format with the proper separators.
```
from dataprep.clean import clean_ch_ssn
clean_ch_ssn(df, column = "ssn")
```
## 2. Output formats
This section demonstrates the output parameter.
### `standard` (default)
```
clean_ch_ssn(df, column = "ssn", output_format="standard")
```
### `compact`
```
clean_ch_ssn(df, column = "ssn", output_format="compact")
```
## 3. `inplace` parameter
This deletes the given column from the returned DataFrame.
A new column containing cleaned SSN strings is added with a title in the format `"{original title}_clean"`.
```
clean_ch_ssn(df, column="ssn", inplace=True)
```
## 4. `errors` parameter
### `coerce` (default)
```
clean_ch_ssn(df, "ssn", errors="coerce")
```
### `ignore`
```
clean_ch_ssn(df, "ssn", errors="ignore")
```
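### `raise`
This option is not demonstrated with its own cell above; a minimal sketch, assuming only what the parameter list states, namely that invalid parsing raises an exception (the exact exception type is not assumed here):
```
try:
    clean_ch_ssn(df, "ssn", errors="raise")
except Exception as exc:
    print("invalid SSN encountered:", exc)
```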
## 5. `validate_ch_ssn()`
`validate_ch_ssn()` returns `True` when the input is a valid SSN. Otherwise it returns `False`.
The input of `validate_ch_ssn()` can be a string, a pandas Series, a Dask Series, a pandas DataFrame, or a Dask DataFrame.
When the input is a string, a pandas Series, or a Dask Series, the user doesn't need to specify a column name to be validated.
When the input is a pandas DataFrame or a Dask DataFrame, the user can optionally specify a column name to be validated. If a column name is specified, `validate_ch_ssn()` only returns the validation result for that column; otherwise it returns the validation result for the whole DataFrame.
```
from dataprep.clean import validate_ch_ssn
print(validate_ch_ssn("7569217076985"))
print(validate_ch_ssn("756.9217.0769.84"))
print(validate_ch_ssn("51824753556"))
print(validate_ch_ssn("51 824 753 556"))
print(validate_ch_ssn("hello"))
print(validate_ch_ssn(np.nan))
print(validate_ch_ssn("NULL"))
```
### Series
```
validate_ch_ssn(df["ssn"])
```
### DataFrame + Specify Column
```
validate_ch_ssn(df, column="ssn")
```
### Only DataFrame
```
validate_ch_ssn(df)
```
# Results
```
import numpy as np
import pandas as pd
import seaborn as sns
path = 'logs/results/14-10-2021-09:14:21.json'
main_metric = 'pr_auc'
dataframe = pd.read_json(path)
dataframe.head()
dataframe["approach"] = ["Excluding" if not kd else "Including" for kd in dataframe["keep_dropped"]]
to_remove = ["details", "best_parameters", "feature_importance", "keep_dropped"]
to_remove = [t for t in to_remove if t in dataframe]
results = dataframe.drop(to_remove, axis=1)
from postprocess import Cols, Datasets, Models, metric_map, update_result_labels
results = update_result_labels(results)
```
### Comparing the previous and the proposed approach in terms of performance obtained
```
models = (Models.rf, Models.mv)
models = ','.join([f'"{m}"' for m in models])
metrics = ["acc", "roc_auc", "precision", "recall", "f1", "pr_auc", "rmse"]
metrics = ','.join([f'"{metric_map[m]}"' for m in metrics])
df = results.query(f'{Cols.metric} in ({metrics})') \
.query(f'(`{Cols.round_to_predict}` == 7 and {Cols.dataset} != "{Datasets.wd}") or (`{Cols.round_to_predict}` == 6 and {Cols.dataset} == "{Datasets.wd}")') \
.query(f'{Cols.model} in ({models})')
sns.set(font_scale=3.3)
g = sns.relplot(data=df, row=Cols.dataset, col=Cols.metric, y=Cols.score, x =Cols.features_up_to_round,
style=Cols.model, hue=Cols.approach, kind="line", linewidth=3.5, aspect=11/9)
g.set_titles(template='')
colnames = df[Cols.metric].unique()
for ax, col in zip(g.axes[0], colnames):
ax.set_title(col)
g.set_axis_labels(y_var='')
rownames = df[Cols.dataset].unique()
for ax, row in zip(g.axes[:,0], rownames):
ax.set_ylabel(row, rotation=90, size='large')
handles, labels = g.axes[0][0].get_legend_handles_labels()
labels = labels[1:3] + labels[4:6]
handles = handles[1:3] + handles[4:6]
[ha.set_linewidth(3) for ha in handles]
g._legend.remove()
g.fig.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.45, 1.08), ncol=2, frameon=False, fontsize=42)
fig_name = 'dataset-by-metric-predicting-round-7-rf-mv-distinct'
for ext in ('.pdf', '.svg'):
g.savefig('plots/' + fig_name + ext)
print(f'wrote {fig_name + ext}')
```
## Comparing models
```
df = results.copy()
metrics = ["acc", "roc_auc",]
metrics = ','.join([f'"{metric_map[m]}"' for m in metrics])
df = df.query(f'{Cols.metric} in ({metrics})')
sns.set(font_scale=2.9)
g = sns.catplot(data=df, row=Cols.metric, col=Cols.approach,
x=Cols.round_to_predict, y=Cols.score, hue=Cols.model,
aspect=12/8, legend=True, ci=0, kind="point")
g.set_titles(template='{row_name}, {col_name}')
handles, labels = g.axes[0][0].get_legend_handles_labels()
[ha.set_linewidth(3) for ha in handles]
g._legend.remove()
g.set_axis_labels(y_var='')
g.fig.legend(handles, labels, loc='center right', bbox_to_anchor=(1.035, .5),
handletextpad=.001, ncol=1, frameon=False, fontsize=40)
fig_name = 'model-performance-comparison-acc-rocauc-wide'
for ext in ('.pdf', '.svg'):
g.savefig('plots/' + fig_name + ext)
print(f'wrote {fig_name + ext}')
```
## Feature importance
```
def filter_dataframe(dataframe, last_round, all_prev_round=False):
columns = ["dataset", "round_to_predict", "features_up_to_round",
"approach", "model", "feature_importance"]
# Feature importance was only dependent on the main metric
mask = (dataframe.metric == main_metric)
# We only care about the most important metrics for our main model
mask &= ((dataframe.model == "LogisticRegression") |
(dataframe.model == "RandomForestClassifier"))
# Let us only evaluate the most important metrics when taking advantage
# of a full dataset (i.e., predicting last round using all data from previous rounds)
mask &= (dataframe.round_to_predict == last_round)
if all_prev_round:
mask &= (dataframe.features_up_to_round == (last_round - 1))
return dataframe.loc[mask][columns]
```
## Show that there is randomness in most important features
```
from main import get_datasets
from instances.BrowsingData import BrowsingData
from instances.SubmissionData import SubmissionData
data_instances = get_datasets(['cs1-b', 'cs1-s', 'cs0_web', 'wd'])
from collections import Counter
def show_percentage_of_previous_feature_rounds_used(dataframe, last_round, top):
logs = []
results = filter_dataframe(dataframe, last_round, True)
for dataset in results.dataset.unique():
mapper = data_instances[dataset].get_features_of_each_round()
for model in results.model.unique():
for approach in results.approach.unique():
df = results[(results.dataset == dataset) &
(results.model == model) &
(results.approach == approach)]
if not df.empty:
d = df["feature_importance"].iloc[0]
if type(d) == dict:
d = pd.DataFrame(d)
d["name"] = d["name"].apply(tuple)
importances_and_names = d.sort_values(by="importance", ascending=False)
features = importances_and_names.head(top)['name']
features = [f[1] if type(f) == tuple else f for f in features]
features_rounds = [mapper[f] for f in features]
counter = Counter(features_rounds)
counter = {r: c/top for r, c in counter.items()}
for r in range(1, last_round):
logs.append({
"last_round": last_round,
"round": r,
"percentage": counter.get(r, 0) * 100,
"approach": approach,
"model": model,
"dataset": dataset
})
df = pd.DataFrame(logs)
print(df)
if not df.empty:
sns.catplot(x="round", y="percentage", hue="approach", kind="point", data=df)
sns.catplot(x="round", y="percentage", hue="approach", kind="bar", data=df)
return df
top = 10
datasets = ["cs1-s", "wd"]
for dataset in datasets:
df = dataframe[(dataframe.dataset == dataset) & (dataframe.model == "RandomForestClassifier")]
show_percentage_of_previous_feature_rounds_used(df, last_round=6, top=top)
def feature_to_type(feature):
""" Obtain the type of a feature given its name. """
if len(feature) == 2: # submission data feature
return feature[0]
# browsing data feature
if "count" in feature.lower():
return "count"
elif "length" in feature.lower():
return "length"
else:
return "time"
def count_types(fi, top, percentage=True):
""" Count the number of top features of each type.
Parameters
----------
fi: DataFrame
Feature importance dataframe. Two columns: importance and name
top: int, default=10
Number of top features to take into account
Returns
-------
count: DataFrame
Count the types of the top features.
"""
features = fi.sort_values(by="importance", ascending=False).head(top)
features['type'] = list(map(feature_to_type, features['name']))
count = features.groupby('type').type.count()
if percentage:
count /= len(features)
count *= 100
return count
def add_heights(ax, x_offset=0.015, y_offset=0.1, size=15):
for p in ax.patches:
ax.text(p.get_x() + x_offset,
p.get_height() + y_offset,
'{0:.2f}'.format(p.get_height()),
color='black', rotation='horizontal', size=size)
def save_fig(g, fig_name):
for ext in ('.pdf', '.svg'):
save_path = f'plots/{fig_name}{ext}'
g.savefig(save_path)
print(f'wrote {save_path}')
def compare_feature_importance(dataframe, last_round=6, main_metric="pr_auc", top=10):
""" """
print(dataframe.columns)
columns = ["dataset", "round_to_predict", "features_up_to_round",
"model", "approach", "feature_importance"]
types = {"browsing_types": ["count", "length", "time"],
"submissions_types": ["solved", "submission_time"]}
logs = []
for last_round in range(2, 8):
results = filter_dataframe(dataframe, last_round, False)
print('round', last_round)
results.feature_importance = results.feature_importance.apply(pd.DataFrame)
for _, row in results.iterrows():
count = count_types(row['feature_importance'], top)
if "browsing" in row['dataset']:
feature_types = types["browsing_types"]
else:
feature_types = types["submissions_types"]
for c in feature_types:
count[c] = 0 if c not in count else count[c]
for t, c in count.to_dict().items():
log = {col: row[col] for col in columns[:-1]}
log["Type"], log["Percentage"] = t, c
logs.append(log)
data = update_result_labels(pd.DataFrame(logs))
type_map = {
'solved': 'Correctness',
'late': 'Late attempt count',
'submission_time': 'Attempt count',
'length': 'Session length',
'count': 'Session count',
'time': 'Total time'
}
data["Type"] = data["Type"].apply(lambda x: type_map[x] if x in type_map else x)
sns.set(font_scale=2.6)
if not data.empty:
df = data.query(f'{Cols.dataset} != "{Datasets.cs1_browsing}"')
g = sns.catplot(x=Cols.approach, y="Percentage", hue="Type", kind="bar", aspect=11/9, data=df)
handles, labels = g.axes[0][0].get_legend_handles_labels()
g._legend.remove()
g.set_axis_labels(y_var='', x_var='')
g.fig.legend(handles, labels, loc='center right', bbox_to_anchor=(.945, .5),
handletextpad=.5, handlelength=1, ncol=1, frameon=False, fontsize=28)
save_fig(g, 'submission-feature-type-percentage-comparison')
sns.set(font_scale=2.8)
g = sns.catplot(x=Cols.approach, y="Percentage", col=Cols.dataset, hue="Type", kind="bar", data=df)
handles, labels = g.axes[0][0].get_legend_handles_labels()
g._legend.remove()
g.set_axis_labels(y_var='', x_var='')
g.fig.legend(handles, labels, loc='center right', bbox_to_anchor=(.977, .5),
handletextpad=.5, handlelength=1, ncol=1, frameon=False, fontsize=32)
g.set_titles(template='{col_name}')
save_fig(g, 'submission-feature-type-percentage-comparison-distinct-datasets')
df = data.query(f'{Cols.dataset} == "{Datasets.cs1_browsing}"')
sns.set(font_scale=2.6)
g = sns.catplot(x=Cols.approach, y="Percentage", hue="Type", kind="bar", aspect=11/9, data=df)
handles, labels = g.axes[0][0].get_legend_handles_labels()
g._legend.remove()
g.set_axis_labels(y_var='', x_var='')
g.fig.legend(handles, labels, loc='center right', bbox_to_anchor=(.945, .5),
handletextpad=.5, handlelength=1, ncol=1, frameon=False, fontsize=28)
save_fig(g, 'browsing-feature-type-percentage-comparison')
return data
df = compare_feature_importance(dataframe, top=10)
```
## StateFarm Kaggle Notebook
```
%matplotlib inline
import os, sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
from utils import *
from PIL import Image
from keras.preprocessing import image
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from sklearn.metrics import confusion_matrix
import pandas as pd
import matplotlib.pyplot as plt
current_dir = os.getcwd()
LESSON_HOME_DIR = current_dir
DATA_HOME_DIR = current_dir+'/data'
categories = sorted([os.path.basename(x) for x in glob(DATA_HOME_DIR+'/train/*')])
categories
def plot_history(h):
plt.plot(h['acc'])
plt.plot(h['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(h['loss'])
plt.plot(h['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
def underfit(train_err, test_error):
return train_err < test_error * 0.667
def overfit(train_acc, test_acc):
return train_acc > test_acc
def validation_histogram():
probs = bn_model.predict(conv_val_feat, batch_size=batch_size)
expected_labels = val_batches.classes
our_labels = np.argmax(probs, axis=1)
data = np.vstack([expected_labels, our_labels]).T
plt.style.use('seaborn-deep')
plt.hist(data, range(11), alpha=0.7, label=['expected', 'ours'])
plt.legend(loc='upper right')
plt.show()
def validation_np_histogram():
probs = bn_model.predict(conv_val_feat, batch_size=batch_size)
expected_labels = val_batches.classes
our_labels = np.argmax(probs, axis=1)
print np.histogram(our_labels,range(11))[0]
def validation_confusion():
probs = bn_model.predict(conv_val_feat, batch_size=batch_size)
expected_labels = val_batches.classes
our_labels = np.argmax(probs, axis=1)
cm = confusion_matrix(expected_labels, our_labels)
plot_confusion_matrix(cm, val_batches.class_indices)
%cd $DATA_HOME_DIR
#Set path to sample/ path if desired
path = DATA_HOME_DIR + '/'
#path = DATA_HOME_DIR + '/sample/'
test_path = DATA_HOME_DIR + '/test/' #We use all the test data
results_path=DATA_HOME_DIR + '/results/'
train_path=path + '/train/'
valid_path=path + '/valid/'
histories = {}
batch_size=64
(val_classes, trn_classes, val_labels, trn_labels,
val_filenames, filenames, test_filenames) = get_classes(path)
val_batches = get_batches(path+'valid', batch_size=batch_size, shuffle=False)
```
## Skip this if created conv data to train
```
# use the batch normalization VGG
#vgg = Vgg16BN()
# No. He's not using BN yet, so let's just stop that
vgg = Vgg16()
model=vgg.model
last_conv_idx = [i for i,l in enumerate(model.layers) if type(l) is Convolution2D][-1]
conv_layers = model.layers[:last_conv_idx+1]
print "input shape to dense layer", conv_layers[-1].output_shape[1:]
conv_model = Sequential(conv_layers)
gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05,
shear_range=0.1, channel_shift_range=20, width_shift_range=0.1)
# Hmmm, don't we want to train with more augmented images?
#batches = get_batches(path+'train', gen_t, batch_size=batch_size)
batches = get_batches(path+'train', batch_size=batch_size)
val_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False)
#test_batches = get_batches(path+'test', batch_size=batch_size*2, shuffle=False)
conv_feat = conv_model.predict_generator(batches, batches.nb_sample)
conv_val_feat = conv_model.predict_generator(val_batches, val_batches.nb_sample)
# With 8GB I get memory error on this. 79k images is too many
# Trying 16GB...bah! I *still* get a memory error. WTF? I see 10.6GB out of 16GB used.
#conv_test_feat = conv_model.predict_generator(test_batches, test_batches.nb_sample)
save_array(path+'results/conv_feat.dat', conv_feat)
save_array(path+'results/conv_val_feat.dat', conv_val_feat)
#save_array(path+'results/conv_test_feat.dat', conv_test_feat)
```
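The comments above note that a single `predict_generator` call over the ~79k test images runs out of memory even with 16GB of RAM. One workaround, sketched here under the assumption that the fast.ai-era `utils` helpers (`get_batches`, bcolz-backed `save_array`/`load_array`) and the Keras 1 API used throughout this notebook are available, is to stream the test conv features batch by batch into an on-disk bcolz array:
```
import bcolz

# predict the test-set conv features in batches so they never all sit in RAM at once
test_batches = get_batches(path+'test', batch_size=batch_size,
                           shuffle=False, class_mode=None)   # class_mode=None -> images only
n_batches = int(np.ceil(test_batches.nb_sample / float(batch_size)))
conv_test_feat = None
for i in range(n_batches):
    imgs = next(test_batches)
    feats = conv_model.predict_on_batch(imgs)
    if conv_test_feat is None:
        conv_test_feat = bcolz.carray(feats, rootdir=path+'results/conv_test_feat.dat', mode='w')
    else:
        conv_test_feat.append(feats)
conv_test_feat.flush()   # readable later with load_array() or bcolz.open()
```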
## Load Conv Net saved features
```
conv_feat = load_array(path+'results/conv_feat.dat')
conv_val_feat = load_array(path+'results/conv_val_feat.dat')
```
### Batchnorm dense layers on pretrained conv layers
```
def get_bn_layers(p,input_shape):
return [
MaxPooling2D(input_shape=input_shape),
Flatten(),
Dropout(p/2),
#Dense(128, activation='relu'),
Dense(256, activation='relu'),
BatchNormalization(),
Dropout(p/2),
Dense(128, activation='relu'),
BatchNormalization(),
Dropout(p),
Dense(10, activation='softmax')
]
p=0.5 # wow isn't this high?
bn_model = Sequential(get_bn_layers(p,conv_val_feat.shape[1:]))
bn_model.compile(Adam(lr=0.00001), loss='categorical_crossentropy', metrics=['accuracy'])
# starting with super-small lr first
bn_model.optimizer.lr=0.000001
bn_model.fit(conv_feat, trn_labels, batch_size=batch_size, nb_epoch=1,
validation_data=(conv_val_feat, val_labels))
# okay, at least with 16GB this is much quicker: 6s vs. 200+s due to swapping. I'm at 10.6GiB memory.
validation_histogram()
validation_histogram()
validation_histogram()
```
Okay, I get a *completely* different result than what he gets. I'm using the samples, though.
Let's try with the full dataset?
```
bn_model.optimizer.lr=0.0001
bn_model.fit(conv_feat, trn_labels, batch_size=batch_size, nb_epoch=3,
validation_data=(conv_val_feat, val_labels))
validation_histogram()
validation_histogram()
validation_histogram()
validation_confusion()
hist = bn_model.fit(conv_feat, trn_labels, batch_size=batch_size, nb_epoch=5,
validation_data=(conv_val_feat, val_labels))
```
# Vgg16BN code below.
This is older code than above. For historical reference at the moment...
```
#Set constants. You can experiment with no_of_epochs to improve the model
batch_size=64
no_of_epochs=30
# Augment the data
gen = image.ImageDataGenerator(rotation_range=15, #=0,
height_shift_range=0.05,#=0.1,
width_shift_range=0.1,
shear_range=0.05,#=0
channel_shift_range=20,#=0
#zoom_range=0.1
#, horizontal_flip=True
)
# Finetune the model
# just add gen as 2nd parameter to batches & not val_batches
batches = vgg.get_batches(train_path, gen, batch_size=batch_size)
val_batches = vgg.get_batches(valid_path, batch_size=batch_size*2)
vgg.finetune(batches)
INIT_LR0=0.00001
INIT_LR=0.001
EPOCHS_DROP=5.0
DROP=0.5
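# step decay schedule: multiply the learning rate by DROP (0.5) every EPOCHS_DROP (5) epochs,
# starting from INIT_LR0 for the short warm-up run and INIT_LR for the main run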
def step_decay0(epoch, initial_lrate = INIT_LR0, epochs_drop = EPOCHS_DROP, drop = DROP):
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
def step_decay(epoch, initial_lrate = INIT_LR, epochs_drop = EPOCHS_DROP, drop = DROP):
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
#latest_weights_filename="weights-02-04-1.00.hdf5"
#vgg.model.load_weights(results_path+latest_weights_filename)
#run_index=0 # restarting fresh
run_index+=1
filepath=results_path+"run-%02d-weights-{epoch:02d}-{val_acc:.2f}.hdf5"%(run_index)
history_filepath=results_path+"run-%02d-history.csv"%(run_index)
checkpoint = ModelCheckpoint(filepath,
#monitor='val_acc', mode='max',
monitor='val_loss', mode='min',
verbose=1,
save_weights_only=True, save_best_only=True)
lr_scheduler0 = LearningRateScheduler(step_decay0)
lr_scheduler = LearningRateScheduler(step_decay)
callbacks = [checkpoint,lr_scheduler]
# okay, so he says we need to first start with super-low learning rate just to get things started
history0 = vgg.fit(batches, val_batches, 3, [checkpoint,lr_scheduler0])
# then, let's try again with more reasonable learning rate
history = vgg.fit(batches, val_batches, no_of_epochs, callbacks)
history_df = pd.DataFrame(history.history)
history_df.to_csv(history_filepath)
histories[run_index] = history_df
histories.keys()
history_df["underfit"] = map(underfit, history_df["loss"], history_df["val_loss"])
history_df["overfit"] = map(overfit, history_df["acc"], history_df["val_acc"])
plot_history(histories[11])
plot_history(histories[10])
history_df["underfit"] = map(underfit, history_df["loss"], history_df["val_loss"])
history_df["overfit"] = map(overfit, history_df["acc"], history_df["val_acc"])
plot_history(history_df)
history_df
```
## Reproduce (or not) simple CNN results
```
batch_size=64
batches = get_batches(path+'train', batch_size=batch_size)
val_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False)
(val_classes, trn_classes, val_labels, trn_labels,
val_filenames, filenames, test_filenames) = get_classes(path)
def conv1():
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Convolution2D(32,3,3, activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D((3,3)),
Convolution2D(64,3,3, activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D((3,3)),
Flatten(),
Dense(200, activation='relu'),
BatchNormalization(),
Dense(10, activation='softmax')
])
model.compile(Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
model.optimizer.lr = 0.001
model.fit_generator(batches, batches.nb_sample, nb_epoch=4, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
return model
model = conv1()
# that worked!
```
# Character-Level LSTM in PyTorch
In this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**
This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
First let's load in our required resources for data loading and model creation.
```
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
```
## Load in Data
Then, we'll load the Anna Karenina text file and convert it into integers for our network to use.
```
# open text file and read in data as `text`
with open('data/anna.txt', 'r') as f:
text = f.read()
```
Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
```
text[:100]
```
### Tokenization
In the cells below, I'm creating a couple of **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes them easier to use as input to the network.
```
# encode the text and map each character to an integer and vice versa
# we create two dictionaries:
# 1. int2char, which maps integers to characters
# 2. char2int, which maps characters to unique integers
chars = tuple(set(text))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
# encode the text
encoded = np.array([char2int[ch] for ch in text])
```
And we can see those same characters from above, encoded as integers.
```
encoded[:100]
```
## Pre-processing the data
As you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded**, meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only its corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
```
def one_hot_encode(arr, n_labels):
    # Initialize the encoded array
one_hot = np.zeros((arr.size, n_labels), dtype=np.float32)
# Fill the appropriate elements with ones
one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.
# Finally reshape it to get back to the original array
one_hot = one_hot.reshape((*arr.shape, n_labels))
return one_hot
# check that the function works as expected
test_seq = np.array([[3, 5, 1]])
one_hot = one_hot_encode(test_seq, 8)
print(one_hot)
```
## Making training mini-batches
To train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
<img src="assets/sequence_batching@1x.png" width=500px>
<br>
In this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long.
### Creating Batches
**1. The first thing we need to do is discard some of the text so we only have completely full mini-batches.**
Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the `seq_length`, or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`: $N \times M \times K$.
**2. After that, we need to split `arr` into $N$ batches.**
You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimension sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size; it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M \times K)$.
**3. Now that we have this array, we can iterate through it to get our mini-batches.**
The idea is that each batch is an $N \times M$ window on the $N \times (M \times K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is to use `range` to take steps of size `seq_length` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.
> **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, check out the solutions. The most important thing is that you don't copy and paste the code into here; **type out the solution code yourself.**
```
def get_batches(arr, batch_size, seq_length):
'''Create a generator that returns batches of size
batch_size x seq_length from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
seq_length: Number of encoded chars in a sequence
'''
batch_size_total = batch_size * seq_length
# total number of batches we can make
n_batches = len(arr)//batch_size_total
# Keep only enough characters to make full batches
arr = arr[:n_batches * batch_size_total]
# Reshape into batch_size rows
arr = arr.reshape((batch_size, -1))
# iterate through the array, one sequence at a time
for n in range(0, arr.shape[1], seq_length):
# The features
x = arr[:, n:n+seq_length]
# The targets, shifted by one
y = np.zeros_like(x)
try:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length]
except IndexError:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]
yield x, y
```
### Test Your Implementation
Now I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
```
batches = get_batches(encoded, 8, 50)
x, y = next(batches)
# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
```
If you implemented `get_batches` correctly, the above output should look something like
```
x
[[25 8 60 11 45 27 28 73 1 2]
[17 7 20 73 45 8 60 45 73 60]
[27 20 80 73 7 28 73 60 73 65]
[17 73 45 8 27 73 66 8 46 27]
[73 17 60 12 73 8 27 28 73 45]
[66 64 17 17 46 7 20 73 60 20]
[73 76 20 20 60 73 8 60 80 73]
[47 35 43 7 20 17 24 50 37 73]]
y
[[ 8 60 11 45 27 28 73 1 2 2]
[ 7 20 73 45 8 60 45 73 60 45]
[20 80 73 7 28 73 60 73 65 7]
[73 45 8 27 73 66 8 46 27 65]
[17 60 12 73 8 27 28 73 45 27]
[64 17 17 46 7 20 73 60 20 80]
[76 20 20 60 73 8 60 80 73 17]
[35 43 7 20 17 24 50 37 73 36]]
```
although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.
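If you'd rather check this programmatically than by eye, the following quick assertion (a small addition, assuming the `x`, `y` pair produced by the cell above) should pass for any correct implementation, since every target row is just the corresponding input row shifted left by one step:

```python
# y matches x shifted by one character; the last target column wraps to the next chunk,
# so we only compare the overlapping part
assert np.array_equal(x[:, 1:], y[:, :-1])
```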
---
## Defining the network with PyTorch
Below is where you'll define the network.
<img src="assets/charRNN.png" width=500px>
Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters.
### Model Structure
In `__init__` the suggested structure is as follows:
* Create and store the necessary dictionaries (this has been done for you)
* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)
* Define a dropout layer with `drop_prob`
* Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)
* Finally, initialize the weights (again, this has been given)
Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`.
---
### LSTM Inputs/Outputs
You can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) as follows
```python
self.lstm = nn.LSTM(input_size, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
```
where `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. We can add dropout by passing a dropout probability; this automatically applies dropout between the stacked LSTM layers. Finally, in the `forward` function, we reshape the LSTM output with `.view` so that the fully-connected layer receives one row per character position (the batch and sequence dimensions are flattened together).
We also need to create an initial hidden state of all zeros. This is done like so
```python
self.init_hidden()
```
```
# check if GPU is available
train_on_gpu = torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU!')
else:
print('No GPU available, training on CPU; consider making n_epochs very small.')
class CharRNN(nn.Module):
def __init__(self, tokens, n_hidden=256, n_layers=2,
drop_prob=0.5, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
# creating character dictionaries
self.chars = tokens
self.int2char = dict(enumerate(self.chars))
self.char2int = {ch: ii for ii, ch in self.int2char.items()}
## TODO: define the LSTM
self.lstm = nn.LSTM(len(self.chars), n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
## TODO: define a dropout layer
self.dropout = nn.Dropout(drop_prob)
## TODO: define the final, fully-connected output layer
self.fc = nn.Linear(n_hidden, len(self.chars))
def forward(self, x, hidden):
''' Forward pass through the network.
These inputs are x, and the hidden/cell state `hidden`. '''
## TODO: Get the outputs and the new hidden state from the lstm
r_output, hidden = self.lstm(x, hidden)
## TODO: pass through a dropout layer
out = self.dropout(r_output)
# Stack up LSTM outputs using view
# you may need to use contiguous to reshape the output
out = out.contiguous().view(-1, self.n_hidden)
## TODO: put x through the fully-connected layer
out = self.fc(out)
# return the final output and the hidden state
return out, hidden
def init_hidden(self, batch_size):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if (train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
```
## Time to train
The train function gives us the ability to set the number of epochs, the learning rate, and other parameters.
Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!
A couple of details about training:
>* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.
* We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.
```
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
''' Training a network
Arguments
---------
net: CharRNN network
data: text data to train the network
epochs: Number of epochs to train
batch_size: Number of mini-sequences per mini-batch, aka batch size
seq_length: Number of character steps per mini-batch
lr: learning rate
clip: gradient clipping
val_frac: Fraction of data to hold out for validation
print_every: Number of steps for printing training and validation loss
'''
net.train()
opt = torch.optim.Adam(net.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
# create training and validation data
val_idx = int(len(data)*(1-val_frac))
data, val_data = data[:val_idx], data[val_idx:]
if(train_on_gpu):
net.cuda()
counter = 0
n_chars = len(net.chars)
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size)
for x, y in get_batches(data, batch_size, seq_length):
counter += 1
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
# get the output from the model
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output, targets.view(batch_size*seq_length).long())
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
opt.step()
# loss stats
if counter % print_every == 0:
# Get validation loss
val_h = net.init_hidden(batch_size)
val_losses = []
net.eval()
for x, y in get_batches(val_data, batch_size, seq_length):
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
x, y = torch.from_numpy(x), torch.from_numpy(y)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
val_h = tuple([each.data for each in val_h])
inputs, targets = x, y
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
output, val_h = net(inputs, val_h)
val_loss = criterion(output, targets.view(batch_size*seq_length).long())
val_losses.append(val_loss.item())
                net.train() # reset to train mode after iterating through validation data
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter),
"Loss: {:.4f}...".format(loss.item()),
"Val Loss: {:.4f}".format(np.mean(val_losses)))
```
## Instantiating the model
Now we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batch sizes, and start training!
```
# define and print the net
n_hidden=512
n_layers=2
net = CharRNN(chars, n_hidden, n_layers)
print(net)
batch_size = 128
seq_length = 100
n_epochs = 20 # start smaller if you are just testing initial behavior
# train the model
train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)
```
## Getting the best model
To set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.
## Hyperparameters
Here are the hyperparameters for the network.
In defining the model:
* `n_hidden` - The number of units in the hidden layers.
* `n_layers` - Number of hidden LSTM layers to use.
We assume that dropout probability and learning rate will be kept at the default, in this example.
And in training:
* `batch_size` - Number of sequences running through the network in one pass.
* `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
* `lr` - Learning rate for training
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).
> ## Tips and Tricks
>### Monitoring Validation Loss vs. Training Loss
>If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
> - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
> - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)
> ### Approximate number of parameters
> The two most important parameters that control the model are `n_hidden` and `n_layers`. I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are:
> - The number of parameters in your model. This is printed when you start training.
> - The size of your dataset. 1MB file is approximately 1 million characters.
>These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
> - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger.
> - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
> ### Best models strategy
>The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
>It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
>By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
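As a quick aside (my addition, not part of the quoted advice): the parameter count referred to above can be computed directly for the `net` defined earlier with a PyTorch one-liner, for example:

```python
# count the trainable parameters of a CharRNN instance
n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'{n_params:,} trainable parameters')
```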
## Checkpoint
After training, we'll save the model so we can load it again later if we need to. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters, and the text characters.
```
# change the name, for saving multiple files
model_name = 'rnn_20_epoch.net'
checkpoint = {'n_hidden': net.n_hidden,
'n_layers': net.n_layers,
'state_dict': net.state_dict(),
'tokens': net.chars}
with open(model_name, 'wb') as f:
torch.save(checkpoint, f)
```
---
## Making Predictions
Now that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!
### A note on the `predict` function
The output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.
> To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character.
### Top K sampling
Our predictions come from a categorical probability distribution over all the possible characters. We can make the sampled text more reasonable to handle (with fewer spurious choices) by only considering the $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.html#torch.topk).
```
def predict(net, char, h=None, top_k=None):
''' Given a character, predict the next character.
Returns the predicted character and the hidden state.
'''
# tensor inputs
x = np.array([[net.char2int[char]]])
x = one_hot_encode(x, len(net.chars))
inputs = torch.from_numpy(x)
if(train_on_gpu):
inputs = inputs.cuda()
# detach hidden state from history
h = tuple([each.data for each in h])
# get the output of the model
out, h = net(inputs, h)
# get the character probabilities
p = F.softmax(out, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# get top characters
if top_k is None:
top_ch = np.arange(len(net.chars))
else:
p, top_ch = p.topk(top_k)
top_ch = top_ch.numpy().squeeze()
# select the likely next character with some element of randomness
p = p.numpy().squeeze()
char = np.random.choice(top_ch, p=p/p.sum())
# return the encoded value of the predicted char and the hidden state
return net.int2char[char], h
```
### Priming and generating text
Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
```
def sample(net, size, prime='The', top_k=None):
if(train_on_gpu):
net.cuda()
else:
net.cpu()
net.eval() # eval mode
# First off, run through the prime characters
chars = [ch for ch in prime]
h = net.init_hidden(1)
for ch in prime:
char, h = predict(net, ch, h, top_k=top_k)
chars.append(char)
# Now pass in the previous character and get a new one
for ii in range(size):
char, h = predict(net, chars[-1], h, top_k=top_k)
chars.append(char)
return ''.join(chars)
print(sample(net, 1000, prime='Anna', top_k=5))
```
## Loading a checkpoint
```
# Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net`
with open('rnn_20_epoch.net', 'rb') as f:
checkpoint = torch.load(f)
loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])
# Sample using a loaded model
print(sample(loaded, 2000, top_k=5, prime="And Levin said"))
```
# Settings:
```
import pandas as pd
from matplotlib import pyplot as plt
%matplotlib inline
from datetime import datetime, timedelta
from sklearn.model_selection import train_test_split
import os
```
## Read df:
```
df = pd.read_csv(os.path.join('Data', 'info.csv'))
```
## Check df:
```
# fix genre typos; assigning to `row` inside iterrows() only modifies a copy,
# so use a vectorized replace on the DataFrame column instead
df['genre'] = df['genre'].replace({"hiphiop": "hiphop", "electronic ": "electronic"})
df.head()
```
## Count durations:
```
dur_list = []
for _, row in df.iterrows():
begin = datetime.strptime(row['time_from'], "%H:%M:%S")
end = datetime.strptime(row['time_to'], "%H:%M:%S")
duration = (end - begin).total_seconds()
dur_list.append(duration)
dur_df = pd.DataFrame({"duration": dur_list})
complete_df = pd.concat([df, dur_df], axis=1)
```
## Calculate source durations:
```
dur_src_dict = { "Show": 0,
"Contest": 0,
"Concert": 0}
for _, row in complete_df.iterrows():
duration = row['duration']
prev_dur = dur_src_dict[row['type_of_source']]
duration += prev_dur
dur_src_dict.update({row['type_of_source']: duration})
print(dur_src_dict)
```
## Calculate genres durations:
```
genre_dict = {}
for _, row in complete_df.iterrows():
duration = row['duration']
if row['genre'] in genre_dict:
genre_dict.update({row['genre']: duration + genre_dict[row['genre']]})
else:
genre_dict.update({row['genre']: duration})
print(genre_dict)
# check typos (old)
for _, row in complete_df.iterrows():
if row['genre'] == "electronic " or row['genre'] == "hiphiop":
print(row)
```
# Make plots:
```
plt.barh(range(len(genre_dict)), genre_dict.values(), tick_label=list(genre_dict.keys()), align='center')
plt.savefig(os.path.join('Plots', 'genres.png'))
plt.show()
plt.pie(list(dur_src_dict.values()), labels=list(dur_src_dict.keys()))
plt.savefig(os.path.join('Plots', 'Sources.png'))
plt.show()
```
## Split 70/30
```
train, test = train_test_split(complete_df, test_size=0.3)
def source_dict(data_frame):
dur_src_dict = { "Show": 0,
"Contest": 0,
"Concert": 0}
for _, row in data_frame.iterrows():
duration = row['duration']
prev_dur = dur_src_dict[row['type_of_source']]
duration += prev_dur
dur_src_dict.update({row['type_of_source']: duration})
return dur_src_dict
def genres_dict(data_frame):
genre_dict = {}
for _, row in data_frame.iterrows():
duration = row['duration']
if row['genre'] in genre_dict:
genre_dict.update({row['genre']: duration + genre_dict[row['genre']]})
else:
genre_dict.update({row['genre']: duration})
return genre_dict
```
## Plots:
```
def make_plots(name, data_frame):
genre_dict = genres_dict(data_frame)
dur_src_dict = source_dict(data_frame)
plt.barh(range(len(genre_dict)), genre_dict.values(), tick_label=list(genre_dict.keys()), align='center')
plt.savefig(name + 'genres.png')
plt.show()
plt.pie(list(dur_src_dict.values()), labels=list(dur_src_dict.keys()))
plt.savefig(name + 'Sources.png')
plt.show()
```
## Train plots:
```
make_plots(os.path.join('Plots', 'train_'), train)
```
## Test plots:
```
make_plots(os.path.join('Plots', 'test_'), test)
```
# Split datasets:
```
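# note: this CSV is read with the Windows-1251 (cp1251) Cyrillic encoding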
df = pd.read_csv(os.path.join('Data', 'infoPPP.csv'), encoding='cp1251')
df.head()
train, test = train_test_split(df, test_size=0.2)
# split the training portion (not the full df) again, so train, valid, and test do not overlap
train, valid = train_test_split(train, test_size=0.2)
genres = {'pop', 'rock', 'indi', 'hiphop', 'metal', 'electronic', 'folk', 'blues', 'classical', 'jazz', 'country', 'disco', 'reggae'}
def source_dict(data_frame):
dur_src_dict = { "Show": 0,
"Contest": 0,
"Concert": 0}
for _, row in data_frame.iterrows():
duration = 0
for cur_genre in genres:
duration += row[cur_genre]
prev_dur = dur_src_dict[row['type_of_source']]
duration += prev_dur
dur_src_dict.update({row['type_of_source']: duration})
return dur_src_dict
def genres_dict(data_frame):
genre_dict = {}
for _, row in data_frame.iterrows():
for cur_genre in genres:
dur = row[cur_genre]
if cur_genre in genre_dict:
genre_dict.update({cur_genre: dur + genre_dict[cur_genre]})
else:
genre_dict.update({cur_genre: dur})
return genre_dict
def make_plots(name, data_frame):
genre_dict = genres_dict(data_frame)
dur_src_dict = source_dict(data_frame)
plt.barh(range(len(genre_dict)), genre_dict.values(), tick_label=list(genre_dict.keys()), align='center')
plt.savefig(name + 'genres.png')
plt.show()
plt.pie(list(dur_src_dict.values()), labels=list(dur_src_dict.keys()))
plt.savefig(name + 'Sources.png')
plt.show()
make_plots(os.path.join('Plots', 'tr_'), train)
make_plots(os.path.join('Plots', 'val_'), valid)
make_plots(os.path.join('Plots', 'ts_'), test)
train.to_csv(os.path.join('Data', 'train.csv'), encoding='cp1251')
valid.to_csv(os.path.join('Data', 'valid.csv'), encoding='cp1251')
test.to_csv(os.path.join('Data', 'test.csv'), encoding='cp1251')
```
# Experiments for ER Graph
## Imports
```
%load_ext autoreload
%autoreload 2
import os
import sys
from collections import OrderedDict
import logging
import math
from matplotlib import pyplot as plt
import networkx as nx
import numpy as np
import torch
from torchdiffeq import odeint, odeint_adjoint
sys.path.append('../')
# Baseline imports
from gd_controller import AdjointGD
from dynamics_driver import ForwardKuramotoDynamics, BackwardKuramotoDynamics
# Nodec imports
from neural_net import EluTimeControl, TrainingAlgorithm
# Various Utilities
from utilities import evaluate, calculate_critical_coupling_constant, comparison_plot, state_plot
from nnc.helpers.torch_utils.oscillators import order_parameter_cos
logging.getLogger().setLevel(logging.CRITICAL) # set to info to look at loss values etc.
```
## Load graph parameters
Basic setup for calculations, graph, number of nodes, etc.
```
dtype = torch.float32
device = 'cpu'
graph_type = 'erdos_renyi'
adjacency_matrix = torch.load('../../data/'+graph_type+'_adjacency.pt')
parameters = torch.load('../../data/parameters.pt')
# driver vector is a column vector with 1 value for driver nodes
# and 0 for non drivers.
result_folder = '../../results/' + graph_type + os.path.sep
os.makedirs(result_folder, exist_ok=True)
```
## Load dynamics parameters
Load the natural frequencies and initial states, which are common to all graphs, and calculate the critical coupling constant (the threshold coupling strength above which the network can fully phase-lock), which differs per graph. We use a coupling constant equal to $20\%$ of the critical coupling constant, matching the `0.2*critical_coupling_constant` line in the code below.
```
total_time = parameters['total_time']
total_time = 5
natural_frequencies = parameters['natural_frequencies']
critical_coupling_constant = calculate_critical_coupling_constant(adjacency_matrix, natural_frequencies)
coupling_constant = 0.2*critical_coupling_constant
theta_0 = parameters['theta_0']
```
## NODEC
We now train NODEC with a shallow neural network. We initialize the parameters in a deterministic manner, and use stochastic gradient descent to train it. The learning rate, number of epochs and neural architecture may change per graph. We use different fractions of driver nodes.
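For reference, the synchronization level reported below as $r(T)$ is assumed to be the standard Kuramoto order parameter computed by `order_parameter_cos`,

$$ r(t)\,e^{\mathrm{i}\psi(t)} = \frac{1}{N}\sum_{j=1}^{N} e^{\mathrm{i}\theta_j(t)}, $$

where $\theta_j$ are the oscillator phases: $r \approx 1$ indicates full phase synchronization, while $r \approx 0$ corresponds to an incoherent state.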
```
fractions = np.linspace(0.9,1,10)
order_parameter_mean = []
order_parameter_std = []
samples = 1000
for p in fractions:
sample_arr = []
for i in range(samples):
print(p,i)
driver_nodes = int(p*adjacency_matrix.shape[0])
driver_vector = torch.zeros([adjacency_matrix.shape[0],1])
idx = torch.randperm(len(driver_vector))[:driver_nodes]
driver_vector[idx] = 1
forward_dynamics = ForwardKuramotoDynamics(adjacency_matrix,
driver_vector,
coupling_constant,
natural_frequencies
)
backward_dynamics = BackwardKuramotoDynamics(adjacency_matrix,
driver_vector,
coupling_constant,
natural_frequencies
)
neural_net = EluTimeControl([2])
for parameter in neural_net.parameters():
parameter.data = torch.ones_like(parameter.data)/1000 # deterministic init!
train_algo = TrainingAlgorithm(neural_net, forward_dynamics)
best_model = train_algo.train(theta_0, total_time, epochs=3, lr=0.15)
control_trajectory, state_trajectory =\
evaluate(forward_dynamics, theta_0, best_model, total_time, 100)
nn_control = torch.cat(control_trajectory).squeeze().cpu().detach().numpy()
nn_states = torch.cat(state_trajectory).cpu().detach().numpy()
nn_e = (nn_control**2).cumsum(-1)
nn_r = order_parameter_cos(torch.tensor(nn_states)).cpu().numpy()
sample_arr.append(nn_r[-1])
order_parameter_mean.append(np.mean(sample_arr))
order_parameter_std.append(np.std(sample_arr,ddof=1))
order_parameter_mean = np.array(order_parameter_mean)
order_parameter_std = np.array(order_parameter_std)
plt.figure()
plt.errorbar(fractions,order_parameter_mean,yerr=order_parameter_std/np.sqrt(samples),fmt="o")
plt.xlabel(r"fraction of controlled nodes")
plt.ylabel(r"$r(T)$")
plt.tight_layout()
plt.show()
np.savetxt("ER_drivers_K02_zoom.csv",np.c_[order_parameter_mean,order_parameter_std],header="order parameter mean\t order parameter std")
```
# SageMaker PySpark K-Means Clustering MNIST Example
1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Loading the Data](#Loading-the-Data)
4. [Training with K-Means and Hosting a Model](#Training-with-K-Means-and-Hosting-a-Model)
5. [Inference](#Inference)
6. [Re-using existing endpoints or models to create a SageMakerModel](#Re-using-existing-endpoints-or-models-to-create-SageMakerModel)
7. [Clean-up](#Clean-up)
8. [More on SageMaker Spark](#More-on-SageMaker-Spark)
## Introduction
This notebook will show how to cluster handwritten digits through the SageMaker PySpark library.
We will manipulate data through Spark using a SparkSession, and then use the SageMaker Spark library to interact with SageMaker for training and inference.
We will first train on SageMaker using K-Means clustering on the MNIST dataset. Then, we will see how to re-use models from existing endpoints and from a model stored on S3 in order to only run inference.
You can visit SageMaker Spark's GitHub repository at https://github.com/aws/sagemaker-spark to learn more about SageMaker Spark.
This notebook was created and tested on an ml.m4.xlarge notebook instance.
## Setup
First, we import the necessary modules and create the `SparkSession` with the SageMaker-Spark dependencies attached.
```
import os
import boto3
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import sagemaker
from sagemaker import get_execution_role
import sagemaker_pyspark
role = get_execution_role()
# Configure Spark to use the SageMaker Spark dependency jars
jars = sagemaker_pyspark.classpath_jars()
classpath = ":".join(sagemaker_pyspark.classpath_jars())
# See the SageMaker Spark Github to learn how to connect to EMR from a notebook instance
spark = (
SparkSession.builder.config("spark.driver.extraClassPath", classpath)
.master("local[*]")
.getOrCreate()
)
spark
```
## Loading the Data
Now we load the MNIST dataset into a Spark DataFrame. The dataset is available in LibSVM format at
`s3://sagemaker-sample-data-[region]/spark/mnist/`
where `[region]` is replaced with a supported AWS region, such as us-east-1.
In order to train and make inferences our input DataFrame must have a column of Doubles (named "label" by default) and a column of Vectors of Doubles (named "features" by default).
Spark's LibSVM DataFrameReader loads a DataFrame already suitable for training and inference.
Here, we load the data into a DataFrame in the SparkSession running on the local Notebook Instance, but you can connect your Notebook Instance to a remote Spark cluster for heavier workloads. Starting from EMR 5.11.0, SageMaker Spark is pre-installed on EMR Spark clusters. For more on connecting your SageMaker Notebook Instance to a remote EMR cluster, please see [this blog post](https://aws.amazon.com/blogs/machine-learning/build-amazon-sagemaker-notebooks-backed-by-spark-in-amazon-emr/).
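As a quick illustration of that schema (not needed for the rest of the notebook), a DataFrame of the expected shape could also be built by hand; the two rows below are made-up values, not MNIST data:

```
from pyspark.ml.linalg import Vectors

# Hypothetical two-row DataFrame with the schema SageMaker Spark expects:
# a Double "label" column and a Vector-of-Doubles "features" column.
exampleData = spark.createDataFrame(
    [
        (5.0, Vectors.dense([0.0] * 784)),               # all-zero "image" labeled 5
        (3.0, Vectors.sparse(784, {0: 1.0, 100: 0.5})),  # mostly-empty "image" labeled 3
    ],
    ["label", "features"],
)
exampleData.printSchema()
```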
```
import boto3
cn_regions = ["cn-north-1", "cn-northwest-1"]
region = boto3.Session().region_name
endpoint_domain = "com.cn" if region in cn_regions else "com"
spark._jsc.hadoopConfiguration().set(
"fs.s3a.endpoint", "s3.{}.amazonaws.{}".format(region, endpoint_domain)
)
trainingData = (
spark.read.format("libsvm")
.option("numFeatures", "784")
.load("s3a://sagemaker-sample-data-{}/spark/mnist/train/".format(region))
)
testData = (
spark.read.format("libsvm")
.option("numFeatures", "784")
.load("s3a://sagemaker-sample-data-{}/spark/mnist/test/".format(region))
)
trainingData.show()
```
MNIST images are 28x28, resulting in 784 pixels. The dataset consists of images of digits going from 0 to 9, representing 10 classes.
In each row:
* The `label` column identifies the image's label. For example, if the image of the handwritten number is the digit 5, the label value is 5.
* The `features` column stores a vector (`org.apache.spark.ml.linalg.Vector`) of `Double` values. The length of the vector is 784, as each image consists of 784 pixels. Those pixels are the features we will use.
As we are interested in clustering the images of digits, the number of pixels represents the feature vector, while the number of classes represents the number of clusters we want to find.
## Training with K-Means and Hosting a Model
Now we create a KMeansSageMakerEstimator, which uses the KMeans Amazon SageMaker Algorithm to train on our input data, and uses the KMeans Amazon SageMaker model image to host our model.
Calling fit() on this estimator will train our model on Amazon SageMaker, and then create an Amazon SageMaker Endpoint to host our model.
We can then use the SageMakerModel returned by this call to fit() to transform Dataframes using our hosted model.
The following cell runs a training job and creates an endpoint to host the resulting model, so this cell can take up to twenty minutes to complete.
```
from sagemaker_pyspark import IAMRole
from sagemaker_pyspark.algorithms import KMeansSageMakerEstimator
from sagemaker_pyspark import RandomNamePolicyFactory
# Create K-Means Estimator
kmeans_estimator = KMeansSageMakerEstimator(
sagemakerRole=IAMRole(role),
trainingInstanceType="ml.m4.xlarge", # Instance type to train K-means on SageMaker
trainingInstanceCount=1,
endpointInstanceType="ml.t2.large", # Instance type to serve model (endpoint) for inference
endpointInitialInstanceCount=1,
namePolicyFactory=RandomNamePolicyFactory("sparksm-1a-"),
) # All the resources created are prefixed with sparksm-1
# Set parameters for K-Means
kmeans_estimator.setFeatureDim(784)
kmeans_estimator.setK(10)
# Train
initialModel = kmeans_estimator.fit(trainingData)
```
To put this `KMeansSageMakerEstimator` back into context, let's look at the below architecture that shows what actually runs on the notebook instance and on SageMaker.

We'll need the name of the SageMaker endpoint hosting the K-Means model later on. This information can be accessed directly within the `SageMakerModel`.
```
initialModelEndpointName = initialModel.endpointName
print(initialModelEndpointName)
```
## Inference
Now we transform our DataFrame.
To do this, we serialize each row's "features" Vector of Doubles into a Protobuf format for inference against the Amazon SageMaker Endpoint. We deserialize the Protobuf responses back into our DataFrame. This serialization and deserialization is handled automatically by the `transform()` method:
```
# Run inference on the test data and show some results
transformedData = initialModel.transform(testData)
transformedData.show()
```
How well did the algorithm perform? Let us display the digits from each of the clusters and manually inspect the results:
```
from pyspark.sql.types import DoubleType
import matplotlib.pyplot as plt
import numpy as np
import string
# Helper function to display a digit
def showDigit(img, caption="", xlabel="", subplot=None):
if subplot == None:
_, (subplot) = plt.subplots(1, 1)
imgr = img.reshape((28, 28))
subplot.axes.get_xaxis().set_ticks([])
subplot.axes.get_yaxis().set_ticks([])
plt.title(caption)
plt.xlabel(xlabel)
subplot.imshow(imgr, cmap="gray")
def displayClusters(data):
images = np.array(data.select("features").cache().take(250))
clusters = data.select("closest_cluster").cache().take(250)
for cluster in range(10):
print("\n\n\nCluster {}:".format(string.ascii_uppercase[cluster]))
digits = [img for l, img in zip(clusters, images) if int(l.closest_cluster) == cluster]
height = ((len(digits) - 1) // 5) + 1
width = 5
plt.rcParams["figure.figsize"] = (width, height)
_, subplots = plt.subplots(height, width)
subplots = np.ndarray.flatten(subplots)
for subplot, image in zip(subplots, digits):
showDigit(image, subplot=subplot)
for subplot in subplots[len(digits) :]:
subplot.axis("off")
plt.show()
displayClusters(transformedData)
```
Now that we've seen how to use Spark to load data and SageMaker to train and run inference on it, we will look at how to re-use existing models and endpoints so that we only run inference, without launching a new training job.
## Re-using existing endpoints or models to create `SageMakerModel`
SageMaker Spark supports connecting a `SageMakerModel` to an existing SageMaker endpoint, or to an Endpoint created by reference to model data in S3, or to a previously completed Training Job.
This allows you to use SageMaker Spark just for model hosting and inference on Spark-scale DataFrames without running a new Training Job.
### Endpoint re-use
Here we will connect to the initial endpoint we created by using its unique name. The endpoint name can be retrieved either from the console or from the `endpointName` parameter of the model you created. In our case, we saved it in a variable early on by accessing that parameter.
```
ENDPOINT_NAME = initialModelEndpointName
print(ENDPOINT_NAME)
```
Once you have the name of the endpoint, we need to make sure that no new endpoint is created, since we are attaching to an existing one. This is done by setting the `endpointCreationPolicy` field to `EndpointCreationPolicy.DO_NOT_CREATE`. As the endpoint serves a K-Means model, we also need to use the `KMeansProtobufResponseRowDeserializer` so that the output of the SageMaker endpoint is deserialized correctly and passed back to Spark in a DataFrame with the right columns.
```
from sagemaker_pyspark import SageMakerModel
from sagemaker_pyspark import EndpointCreationPolicy
from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer
from sagemaker_pyspark.transformation.deserializers import KMeansProtobufResponseRowDeserializer
attachedModel = SageMakerModel(
existingEndpointName=ENDPOINT_NAME,
endpointCreationPolicy=EndpointCreationPolicy.DO_NOT_CREATE,
endpointInstanceType=None, # Required
endpointInitialInstanceCount=None, # Required
requestRowSerializer=ProtobufRequestRowSerializer(
featuresColumnName="features"
), # Optional: already default value
responseRowDeserializer=KMeansProtobufResponseRowDeserializer( # Optional: already default values
distance_to_cluster_column_name="distance_to_cluster",
closest_cluster_column_name="closest_cluster",
),
)
```
As the data we pass through the model uses the default column names for both the model input (`features`) and the model output (`distance_to_cluster_column_name` and `closest_cluster_column_name`), we do not need to specify the column names in the serializer and deserializer. If your column names differ, you can set them as shown above in the `requestRowSerializer` and `responseRowDeserializer`.
It is also possible to use the `SageMakerModel.fromEndpoint` method to achieve the same result as above.
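A minimal sketch of that alternative is shown below; the argument names are assumptions based on the constructor call above, so check the SageMaker Spark documentation for the exact signature:

```
# Sketch only: attach to the existing endpoint via the fromEndpoint helper.
# Argument names here are assumed, not verified against the installed version.
attachedModel2 = SageMakerModel.fromEndpoint(
    endpointName=ENDPOINT_NAME,
    requestRowSerializer=ProtobufRequestRowSerializer(),
    responseRowDeserializer=KMeansProtobufResponseRowDeserializer(),
)
```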
```
transformedData2 = attachedModel.transform(testData)
transformedData2.show()
```
### Create model and endpoint from model data
You can create a SageMakerModel and an Endpoint by referring directly to your model data in S3. To do this, you need the path to where the model is saved (in our case on S3), as well as the role and the inference image to use. In our case, we use the model data from the initial model, consisting of a simple K-Means model. We can retrieve the necessary information from the model variable, or through the console.
```
from sagemaker_pyspark import S3DataPath
MODEL_S3_PATH = S3DataPath(initialModel.modelPath.bucket, initialModel.modelPath.objectPath)
MODEL_ROLE_ARN = initialModel.modelExecutionRoleARN
MODEL_IMAGE_PATH = initialModel.modelImage
print(MODEL_S3_PATH.bucket + MODEL_S3_PATH.objectPath)
print(MODEL_ROLE_ARN)
print(MODEL_IMAGE_PATH)
```
Similar to how we created a model from a running endpoint, we specify the model data information using `modelPath`, `modelExecutionRoleARN`, and `modelImage`. This approach is more akin to creating a `SageMakerEstimator`, where, among other things, you specify the endpoint information.
```
from sagemaker_pyspark import RandomNamePolicy
retrievedModel = SageMakerModel(
modelPath=MODEL_S3_PATH,
modelExecutionRoleARN=MODEL_ROLE_ARN,
modelImage=MODEL_IMAGE_PATH,
endpointInstanceType="ml.t2.medium",
endpointInitialInstanceCount=1,
requestRowSerializer=ProtobufRequestRowSerializer(),
responseRowDeserializer=KMeansProtobufResponseRowDeserializer(),
namePolicy=RandomNamePolicy("sparksm-1b-"),
endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_TRANSFORM,
)
```
It is also possible to use the `SageMakerModel.fromModelS3Path` method that takes the same parameters and produces the same model.
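A sketch of that variant is shown below, reusing the parameters from the constructor above (per the note that the method takes the same parameters; argument names are assumed, not verified):

```
# Sketch only: build the same model directly from the S3 model artifacts.
retrievedModel2 = SageMakerModel.fromModelS3Path(
    modelPath=MODEL_S3_PATH,
    modelExecutionRoleARN=MODEL_ROLE_ARN,
    modelImage=MODEL_IMAGE_PATH,
    endpointInstanceType="ml.t2.medium",
    endpointInitialInstanceCount=1,
    requestRowSerializer=ProtobufRequestRowSerializer(),
    responseRowDeserializer=KMeansProtobufResponseRowDeserializer(),
    endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_TRANSFORM,
)
```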
```
transformedData3 = retrievedModel.transform(testData)
transformedData3.show()
```
### Create model and endpoint from job training data
You can create a SageMakerModel and an Endpoint by referring to a previously completed training job. The only difference from the S3 model-data approach is that instead of providing the model data, you provide the `trainingJobName`.
```
TRAINING_JOB_NAME = "<YOUR_TRAINING_JOB_NAME>"
MODEL_ROLE_ARN = initialModel.modelExecutionRoleARN
MODEL_IMAGE_PATH = initialModel.modelImage
modelFromJob = SageMakerModel.fromTrainingJob(
trainingJobName=TRAINING_JOB_NAME,
modelExecutionRoleARN=MODEL_ROLE_ARN,
modelImage=MODEL_IMAGE_PATH,
endpointInstanceType="ml.t2.medium",
endpointInitialInstanceCount=1,
requestRowSerializer=ProtobufRequestRowSerializer(),
responseRowDeserializer=KMeansProtobufResponseRowDeserializer(),
namePolicy=RandomNamePolicy("sparksm-1c-"),
endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_TRANSFORM,
)
transformedData4 = modelFromJob.transform(testData)
transformedData4.show()
```
## Clean-up
Since we don't need to make any more inferences, we now delete the resources we created (endpoints, models, configurations, etc.):
```
# Delete the resources
from sagemaker_pyspark import SageMakerResourceCleanup
def cleanUp(model):
resource_cleanup = SageMakerResourceCleanup(model.sagemakerClient)
resource_cleanup.deleteResources(model.getCreatedResources())
# Don't forget to include any models or pipeline models that you created in the notebook
models = [initialModel, retrievedModel, modelFromJob]
# Delete regular SageMakerModels
for m in models:
cleanUp(m)
```
## More on SageMaker Spark
The SageMaker Spark Github repository has more about SageMaker Spark, including how to use SageMaker Spark using the Scala SDK: https://github.com/aws/sagemaker-spark
## Drugmonizome ETL: DrugCentral
##### Author : Eryk Kropiwnicki | eryk.kropiwnicki@icahn.mssm.edu
#### Data Source: http://drugcentral.org/download
```
# appyter init
from appyter import magic
magic.init(lambda _=globals: _())
import os
import sys
import zipfile
import datetime
import pandas as pd
import numpy as np
import drugmonizome.utility_functions as uf
import harmonizome.lookup as lookup
%load_ext autoreload
%autoreload 2
```
### Notebook Information
```
print('This notebook was run on:', datetime.date.today(), '\nPython version:', sys.version)
```
### Initializing Notebook
```
%%appyter hide_code
{% do SectionField(
name='data',
title='Upload Data',
img='load_icon.png'
) %}
%%appyter code_eval
{% do DescriptionField(
name='description',
text='The example below was sourced from <a href="http://drugcentral.org/download" target="_blank">http://drugcentral.org/</a>. If clicking on the example does not work, it should be downloaded directly from the source website.',
section='data'
) %}
{% set data_file = FileField(
constraint='.*\.tsv.gz$',
name='drug-target-interactions',
label='Drug-target interaction data (tsv.gz)',
default='drug.target.interaction.tsv.gz',
examples={
'drug.target.interaction.tsv.gz': 'http://unmtid-shinyapps.net/download/drug.target.interaction.tsv.gz'
},
section='data'
) %}
{% set metadata_file = FileField(
constraint='.*\.tsv$',
name='small_molecule_metadata',
label='Small molecule metadata (tsv)',
default='structures.smiles.tsv',
examples={
'structures.smiles.tsv': 'http://unmtid-shinyapps.net/download/structures.smiles.tsv'
},
section='data'
) %}
{% set entity_type = ChoiceField(
name='entity_type',
label='Choose identifier type for exported small molecules',
choices=[
'Name',
'InChI Key',
],
default='Name',
section='data'
) %}
```
### Load Gene Mapping Dictionaries
```
symbol_lookup, geneid_lookup = lookup.get_lookups()
```
### Create Output Path
```
%%appyter code_exec
output_name = 'drugcentral'
path = 'output/drugmonizome_drugcentral'
if not os.path.exists(path):
os.makedirs(path)
```
### Load Drug-Target Interaction Data
```
%%appyter code_exec
df_data = pd.read_csv({{data_file}},
sep = '\t',
usecols=['GENE','DRUG_NAME','ORGANISM'])
df_data.head()
df_data.shape
```
### Splitting GENE Column
```
# Retain only human gene symbols
df_data = df_data[df_data['ORGANISM'] == 'Homo sapiens']
# Some small molecules interact with multiple targets and need to be split into multiple rows
df_data[df_data['GENE'] == 'CACNA1C|CACNA1D'].head(2)
df_data['GENE'] = df_data['GENE'].map(lambda x: x.split('|'))
df_data = df_data.explode('GENE')
df_data.head()
```
### Loading Small Molecule Metadata
```
%%appyter code_exec
df_meta = pd.read_csv({{metadata_file}},
sep = '\t',
usecols=['InChIKey', 'INN'])
df_meta.head()
df_meta.shape
```
### Match Metadata to Small Molecule Names
```
# Merging drug metadata
df_meta.rename(columns={'INN':'DRUG_NAME'}, inplace=True)
df_data = df_data.merge(df_meta)
df_data.head()
```
### Index dataframe by user selected small molecule identifier
```
%%appyter code_exec
{% if entity_type.raw_value == 'InChI Key' %}
# Index small molecules by InChI Key
df_output = df_data[['InChIKey','GENE']]
df_output.set_index('InChIKey', inplace = True)
{% else %}
# Index small molecules by name
df_output = df_data[['DRUG_NAME','GENE']]
df_output.set_index('DRUG_NAME', inplace = True)
{% endif %}
```
### Matching Gene Symbols to Approved Entrez Gene Symbols
```
df_output = uf.map_symbols(df_output, symbol_lookup)
df_output.head()
```
## Analyze Data
### Export Edge List
```
uf.save_data(df_output, path, output_name + '_edge_list',
ext='tsv', compression='gzip')
```
### Create Binary Matrix
```
binary_matrix = uf.binary_matrix(df_output)
binary_matrix.head()
binary_matrix.shape
uf.save_data(binary_matrix, path, output_name + '_binary_matrix',
compression='npz', dtype=np.uint8)
```
### Create Drug and Attribute Set Library
```
uf.save_setlib(binary_matrix, 'drug', path, output_name + '_drug_setlibrary')
uf.save_setlib(binary_matrix, 'attribute', path, output_name + '_attribute_setlibrary')
```
### Create Attribute Similarity Matrix
```
attribute_similarity_matrix = uf.similarity_matrix(binary_matrix.T, 'jaccard', sparse=True)
attribute_similarity_matrix.head()
uf.save_data(attribute_similarity_matrix, path,
output_name + '_attribute_similarity_matrix',
compression='npz', symmetric=True, dtype=np.float32)
```
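For context, `uf.similarity_matrix(..., 'jaccard', ...)` is assumed here to compute the standard Jaccard index between the binary rows it is given. For two genes $A$ and $B$, viewed as the sets of drugs annotated to them in the binary matrix,

$$ J(A, B) = \frac{|A \cap B|}{|A \cup B|}, $$

i.e. the number of shared drugs divided by the number of drugs annotated to either gene. The drug similarity matrix below applies the same measure row-wise, comparing drugs by their shared gene targets.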
### Create Drug Similarity Matrix
```
drug_similarity_matrix = uf.similarity_matrix(binary_matrix, 'jaccard', sparse=True)
drug_similarity_matrix.head()
uf.save_data(drug_similarity_matrix, path,
output_name + '_drug_similarity_matrix',
compression='npz', symmetric=True, dtype=np.float32)
```
### Create download folder with all outputs
```
uf.archive(path)
```
### Link to the output folder: [Download](./output_archive.zip)
#1. Install Dependencies
First install the libraries needed to execute recipes, this only needs to be done once, then click play.
```
!pip install git+https://github.com/google/starthinker
```
#2. Get Cloud Project ID
To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
```
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
```
#3. Get Client Credentials
To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
```
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
```
#4. Enter DV360 Bulk Targeting Editor Parameters
Allows bulk targeting of DV360 through Sheets and BigQuery.
1. A Sheet called <b>DV Targeter</b> will be created.
1. Select <b>Load</b> as the command, click <b>Save</b>, then <b>Run</b>.
1. In the 'Partners' sheet tab, fill in the <i>Filter</i> column.
1. Select <b>Load</b> as the command, click <b>Save</b>, then <b>Run</b>.
1. In the 'Advertisers' sheet tab, fill in the <i>Filter</i> column.
1. Select <b>Load</b> as the command, click <b>Save</b>, then <b>Run</b>.
1. In the 'Line Items' sheet tab, fill in the <i>Filter</i> column.
1. Select <b>Load</b> as the command, click <b>Save</b>, then <b>Run</b>.
1. Make updates, filling in changes on all tabs with colored fields (RED FIELDS ARE NOT IMPLEMENTED, IGNORE).
1. Select <b>Preview</b> as the command, click <b>Save</b>, then <b>Run</b>.
1. Check the <b>Preview</b> tabs.
1. Select <b>Update</b> as the command, click <b>Save</b>, then <b>Run</b>.
1. Check the <b>Success</b> and <b>Error</b> tabs.
1. Load and Update can be run multiple times.
Modify the values below for your use case; this can be done multiple times. Then click play.
```
FIELDS = {
'auth_dv': 'user', # Credentials used for dv.
'auth_sheet': 'user', # Credentials used for sheet.
'auth_bigquery': 'service', # Credentials used for bigquery.
'recipe_name': '', # Name of Google Sheet to create.
'recipe_slug': '', # Name of Google BigQuery dataset to create.
'command': 'Load', # Action to take.
}
print("Parameters Set To: %s" % FIELDS)
```
#5. Execute DV360 Bulk Targeting Editor
This does NOT need to be modified unless you are changing the recipe; just click play.
```
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'dataset': {
'__comment__': 'Ensure dataset exists.',
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','prefix': 'DV_Targeter_','kind': 'string','order': 2,'default': '','description': 'Name of Google BigQuery dataset to create.'}}
}
},
{
'drive': {
'__comment__': 'Copy the default template to sheet with the recipe name',
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1ARkIvh0D-gltZeiwniUonMNrm0Mi1s2meZ9FUjutXOE/',
'destination': {'field': {'name': 'recipe_name','prefix': 'DV Targeter ','kind': 'string','order': 3,'default': '','description': 'Name of Google Sheet to create.'}}
}
}
},
{
'dv_targeter': {
'__comment': 'Depending on users choice, execute a different part of the solution.',
'auth_dv': {'field': {'name': 'auth_dv','kind': 'authentication','order': 1,'default': 'user','description': 'Credentials used for dv.'}},
'auth_sheets': {'field': {'name': 'auth_sheet','kind': 'authentication','order': 2,'default': 'user','description': 'Credentials used for sheet.'}},
'auth_bigquery': {'field': {'name': 'auth_bigquery','kind': 'authentication','order': 3,'default': 'service','description': 'Credentials used for bigquery.'}},
'sheet': {'field': {'name': 'recipe_name','prefix': 'DV Targeter ','kind': 'string','order': 4,'default': '','description': 'Name of Google Sheet to create.'}},
'dataset': {'field': {'name': 'recipe_slug','prefix': 'DV_Targeter_','kind': 'string','order': 5,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'command': {'field': {'name': 'command','kind': 'choice','choices': ['Clear','Load','Preview','Update'],'order': 6,'default': 'Load','description': 'Action to take.'}}
}
}
]
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
```