repo_name | path | license | content
---|---|---|---|
tclaudioe/Scientific-Computing
|
SC5/04 Numerical Example of Spectral Differentiation.ipynb
|
bsd-3-clause
|
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import scipy.sparse.linalg as sp
from scipy import interpolate
import scipy as spf
from sympy import *
import sympy as sym
from scipy.linalg import toeplitz
from ipywidgets import interact
from ipywidgets import IntSlider
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
# The variable M is used for changing the default size of the figures
M=5
import ipywidgets as widgets
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
sym.init_printing()
"""
Explanation: INF-510, v0.31, Claudio Torres, ctorres@inf.utfsm.cl. DI-UTFSM
Textbook: Lloyd N. Trefethen, Spectral Methods in MATLAB, SIAM, Philadelphia, 2000
More on Spectral Matrices
End of explanation
"""
def cheb(N):
if N==0:
D=0
x=1
return D,x
x = np.cos(np.pi*np.arange(N+1)/N)
c=np.hstack((2,np.ones(N-1),2))*((-1.)**np.arange(N+1))
X=np.tile(x,(N+1,1)).T
dX=X-X.T
D = np.outer(c,1./c)/(dX+np.eye(N+1))
D = D - np.diag(np.sum(D.T,axis=0))
return D,x
"""
Explanation: Chebyshev differentiation matrix
End of explanation
"""
def show_spectral_derivative_example(N):
x=np.linspace(2*np.pi/N,2*np.pi,N)
u = lambda x: np.sin(x)
up = lambda x: np.cos(x)
#u = lambda x: np.sin(x)*np.cos(x)
#up = lambda x: np.cos(x)*np.cos(x)-np.sin(x)*np.sin(x)
v=u(x)
K=np.fft.fftfreq(N)*N
iK=1j*K
vhat=np.fft.fft(v)
W=iK*vhat
W[int(N/2)]=0
vp=np.real(np.fft.ifft(W))
plt.figure(figsize=(10,10))
plt.plot(x,v,'ks-',markersize=12,markeredgewidth=3,label='$\sin(x)$',linewidth=3)
plt.plot(x,up(x),'b.-',markersize=24,markeredgewidth=3,label='Exact derivative: $\cos(x)$',linewidth=3)
plt.plot(x,np.real(vp),'rx-',markersize=10,markeredgewidth=3,label='spectral derivative',linewidth=3)
plt.grid(True)
plt.legend(loc='best')
plt.xlabel('$x$')
plt.show()
print('v :',v)
print('vhat :',vhat)
print('K :',K)
print('W :',W)
print('vprime: ',vp)
widgets.interact(show_spectral_derivative_example,N=(2,40,2))
def spectralDerivativeByFFT(v,nu=1):
if not np.all(np.isreal(v)):
raise ValueError('The input vector must be real')
N=v.shape[0]
K=np.fft.fftfreq(N)*N
iK=(1j*K)**nu
v_hat=np.fft.fft(v)
w_hat=iK*v_hat
if np.mod(nu,2)!=0:
w_hat[int(N/2)]=0
return np.real(np.fft.ifft(w_hat))
def my_D2_spec_2pi(N):
h=(2*np.pi/N)
c=np.zeros(N)
j=np.arange(1,N)
c[0]=-np.pi**2/(3.*h**2)-1./6.
c[1:]=-0.5*((-1)**j)/(np.sin(j*h/2.)**2)
D2=toeplitz(c)
return D2
"""
Explanation: Understanding how np.fft computes the FFT
End of explanation
"""
def fractional_derivative(N=10,nu=1):
x=np.linspace(2*np.pi/N,2*np.pi,N)
u = lambda x: np.sin(x)
up = lambda x: np.cos(x)
v = u(x)
vp=spectralDerivativeByFFT(v,nu)
plt.figure(figsize=(10,10))
plt.plot(x,v,'ks-',markersize=12,markeredgewidth=3,label='$\sin(x)$',linewidth=3)
plt.plot(x,up(x),'b.-',markersize=24,markeredgewidth=3,label='Exact derivative: $\cos(x)$',linewidth=3)
plt.plot(x,np.real(vp),'rx-',markersize=10,markeredgewidth=3,label=r'$\frac{d^{\nu}u}{dx^{\nu}}$',linewidth=3)
plt.grid(True)
plt.legend(loc='best')
plt.xlabel('$x$')
plt.show()
d_nu=0.1
widgets.interact(fractional_derivative,N=(4,100),nu=(d_nu,1,d_nu))
"""
Explanation: Fractional derivative application
End of explanation
"""
L=8.0
def show_example_1(N=6):
h=2*np.pi/N
x=np.linspace(h,2*np.pi,N)
x=L*(x-np.pi)/np.pi
D2=(np.pi/L)**2*my_D2_spec_2pi(N)
w, v = np.linalg.eig(-D2+np.diag(x**2))
# eigenvalues = np.sort(np.linalg.eigvals(-D2+np.diag(x**2)))
ii = np.argsort(w)
w=w[ii]
v=v[:,ii]
plt.figure(figsize=(2*M,2*M))
for i in np.arange(1,5):
plt.subplot(2,2,i)
plt.title(r'$u_{:d}(x),\, \lambda_{:d}={:f}$'.format(i,i,w[i-1]))
plt.plot(x,v[:,i],'kx',markersize=16,markeredgewidth=3)
plt.grid(True)
plt.show()
widgets.interact(show_example_1,N=(6,100,1))
"""
Explanation: Example 1: Computing Eigenvalues
We are solving: $-u''(x)+x^2\,u(x)=\lambda\, u(x)$ on $\mathbb{R}$
End of explanation
"""
def example_2(N=16):
D,x = cheb(N)
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
f = np.exp(4*x[1:-1])
u = np.linalg.solve(D2,f)
u = np.concatenate(([0],u,[0]),axis=0)
plt.figure(figsize=(M,M))
plt.plot(x,u,'k.')
xx = np.linspace(-1,1,1000)
P = np.polyfit(x, u, N)
uu = np.polyval(P, xx)
plt.plot(xx,uu,'b-')
plt.grid(True)
exact = (np.exp(4*xx)-np.sinh(4.)*xx-np.cosh(4.))/16.
plt.title('max error= '+str(np.linalg.norm(exact-uu,np.inf)))
plt.ylim([-2.5,0.5])
plt.show()
interact(example_2,N=(2,35))
"""
Explanation: Example 2: Solving ODE
Solving the following BVP $u_{xx}=\exp(4\,x)$ with $u(-1)=u(1)=0$
End of explanation
"""
def example_3(N=16,IT=20):
D,x = cheb(N)
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
u = np.zeros(N-1)
for i in np.arange(IT):
u_new = np.linalg.solve(D2,np.exp(u))
change = np.linalg.norm(u_new-u,np.inf)
u = u_new
u = np.concatenate(([0],u,[0]),axis=0)
plt.figure(figsize=(M,M))
plt.plot(x,u,'k.')
xx = np.linspace(-1,1,1000)
P = np.polyfit(x, u, N)
uu = np.polyval(P, xx)
plt.plot(xx,uu,'b-')
plt.grid(True)
plt.title('IT= '+str(IT)+' u(0)= '+str(u[int(N/2)]))
plt.ylim([-0.5,0.])
plt.show()
interact(example_3,N=(2,30),IT=(0,100))
"""
Explanation: Example 3: Solving ODE
Solving the following BVP $u_{xx}=\exp(u)$ with $u(-1)=u(1)=0$
End of explanation
"""
N_widget = IntSlider(min=2, max=50, step=1, value=10)
j_widget = IntSlider(min=1, max=49, step=1, value=5)
def update_j_range(*args):
j_widget.max = N_widget.value-1
j_widget.observe(update_j_range, 'value')
def example_4(N=36,j=5):
D,x = cheb(N)
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
lam, V = np.linalg.eig(D2)
ii=np.argsort(-np.real(lam))
lam=lam[ii]
V=V[:,ii]
u = np.concatenate(([0],V[:,j-1],[0]),axis=0)
plt.figure(figsize=(2*M,M))
plt.plot(x,u,'k.')
xx = np.linspace(-1,1,1000)
P = np.polyfit(x, u, N)
uu = np.polyval(P, xx)
plt.plot(xx,uu,'b-')
plt.grid(True)
plt.title('eig '+str(j)+' = '+str(lam[j-1]*4./(np.pi**2))+' pi**2/4'+' ppw '+str(4*N/(np.pi*j)))
plt.show()
interact(example_4,N=N_widget,j=j_widget)
"""
Explanation: Example 4: Eigenvalue BVP
Solve $u_{xx}=\lambda\,u$ with $u(-1)=u(1)=0$
End of explanation
"""
elev_widget = IntSlider(min=0, max=180, step=10, value=40)
azim_widget = IntSlider(min=0, max=360, step=10, value=230)
def example_5(N=10,elev=40,azim=230):
D,x = cheb(N)
y=x
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
xx,yy=np.meshgrid(x[1:-1],y[1:-1])
xx = xx.flatten()
yy = yy.flatten()
f = 10*np.sin(8*xx*(yy-1))
I = np.eye(N-1)
# The Laplacian
L = np.kron(I,D2)+np.kron(D2,I)
u = np.linalg.solve(L,f)
fig = plt.figure(figsize=(2*M,2*M))
# The spy of the Laplacian
plt.subplot(221)
plt.spy(L)
# Plotting the approximation and its interpolation
# The numerical approximation
uu = np.zeros((N+1,N+1))
uu[1:-1,1:-1]=np.reshape(u,(N-1,N-1))
xx,yy=np.meshgrid(x,y)
value = uu[int(N/4),int(N/4)]
plt.subplot(222,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xx, yy, uu)
ax.view_init(elev,azim)
# The INTERPOLATED approximation
N_fine=4*N
finer_mesh=np.linspace(-1,1,N_fine)
xxx,yyy=np.meshgrid(finer_mesh,finer_mesh)
uuu = spf.interpolate.interp2d(xx, yy, uu, kind='linear')
uuu_n=np.reshape(uuu(finer_mesh,finer_mesh),(N_fine,N_fine))
plt.subplot(224,projection='3d')
ax = fig.gca()
surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
#ax.plot_wireframe(xxx, yyy, uuu_n)
fig.colorbar(surf)
ax.view_init(elev,azim)
plt.subplot(223)
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
extent = [x[0], x[-1], y[0], y[-1]]
plt.imshow(uu, extent=extent)
plt.ylabel('$y$')
plt.xlabel('$x$')
plt.colorbar()
plt.show()
interact(example_5,N=(3,20),elev=elev_widget,azim=azim_widget)
"""
Explanation: Example 5: (2D) Poisson equation $u_{xx}+u_{yy}=f$ with u=0 on $\partial\Gamma$
End of explanation
"""
elev_widget = IntSlider(min=0, max=180, step=10, value=40)
azim_widget = IntSlider(min=0, max=360, step=10, value=230)
def example_6(N=10,elev=40,azim=230,k=9,n_contours=8):
D,x = cheb(N)
y=x
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
xx,yy=np.meshgrid(x[1:-1],y[1:-1])
xx = xx.flatten()
yy = yy.flatten()
f = np.exp(-10.*((yy-1.)**2+(xx-.5)**2))
I = np.eye(N-1)
# The Laplacian
L = np.kron(I,D2)+np.kron(D2,I)+k**2*np.eye((N-1)**2)
u = np.linalg.solve(L,f)
fig = plt.figure(figsize=(2*M,2*M))
# Plotting the approximation and its interpolation
# The numerical approximation
uu = np.zeros((N+1,N+1))
uu[1:-1,1:-1]=np.reshape(u,(N-1,N-1))
xx,yy=np.meshgrid(x,y)
value = uu[int(N/4),int(N/4)]
plt.subplot(221,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xx, yy, uu)
ax.view_init(elev,azim)
plt.subplot(222)
plt.contour(xx, yy, uu, n_contours,
colors='k', # negative contours will be dashed by default
)
# The INTERPOLATED approximation
N_fine=4*N
finer_mesh=np.linspace(-1,1,N_fine)
xxx,yyy=np.meshgrid(finer_mesh,finer_mesh)
uuu = spf.interpolate.interp2d(xx, yy, uu, kind='linear')
uuu_n=np.reshape(uuu(finer_mesh,finer_mesh),(N_fine,N_fine))
plt.subplot(223,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xxx, yyy, uuu_n)
ax.view_init(elev,azim)
plt.subplot(224)
plt.contour(xxx, yyy, uuu_n, n_contours,
colors='k', # negative contours will be dashed by default
)
plt.show()
interact(example_6,N=(3,30),elev=elev_widget,azim=azim_widget,k=(1,20),n_contours=(5,12))
"""
Explanation: Example 6: (2D) Helmholtz equation $u_{xx}+u_{yy}+k^2\,u=f$ with u=0 on $\partial\Gamma$
End of explanation
"""
elev_widget = IntSlider(min=0, max=180, step=10, value=40)
azim_widget = IntSlider(min=0, max=360, step=10, value=230)
N_widget = IntSlider(min=2, max=30, step=1, value=10)
j_widget = IntSlider(min=1, max=20, step=1, value=1)
def update_j_range(*args):
j_widget.max = (N_widget.value-1)**2
j_widget.observe(update_j_range, 'value')
def example_7(N=10,elev=40,azim=230,n_contours=8,j=1):
D,x = cheb(N)
y=x
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
xx,yy=np.meshgrid(x[1:-1],y[1:-1])
xx = xx.flatten()
yy = yy.flatten()
I = np.eye(N-1)
# The Laplacian
L = (np.kron(I,-D2)+np.kron(-D2,I))
lam, V = np.linalg.eig(L)
ii=np.argsort(np.real(lam))
lam=lam[ii]
V=V[:,ii]
fig = plt.figure(figsize=(2*M,M))
# Plotting the approximation and its interpolation
# The numerical approximation
vv = np.zeros((N+1,N+1))
vv[1:-1,1:-1]=np.reshape(np.real(V[:,j-1]),(N-1,N-1))
xx,yy=np.meshgrid(x,y)
plt.subplot(221,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xx, yy, vv)
plt.title('eig '+str(j)+'/ (pi/2)**2= '+str(lam[j-1]/((np.pi/2)**2)))
ax.view_init(elev,azim)
plt.subplot(222)
plt.contour(xx, yy, vv, n_contours,
colors='k', # negative contours will be dashed by default
)
# The INTERPOLATED approximation
N_fine=4*N
finer_mesh=np.linspace(-1,1,N_fine)
xxx,yyy=np.meshgrid(finer_mesh,finer_mesh)
vvv = spf.interpolate.interp2d(xx, yy, vv, kind='linear')
vvv_n=np.reshape(vvv(finer_mesh,finer_mesh),(N_fine,N_fine))
plt.subplot(223,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xxx, yyy, vvv_n)
ax.view_init(elev,azim)
plt.subplot(224)
plt.contour(xxx, yyy, vvv_n, n_contours,
colors='k', # negative contours will be dashed by default
)
plt.show()
interact(example_7,N=N_widget,elev=elev_widget,azim=azim_widget,n_contours=(5,12),j=j_widget)
"""
Explanation: Example 7: (2D) $-(u_{xx}+u_{yy})=\lambda\,u$ with u=0 on $\partial\Gamma$
End of explanation
"""
|
xesscorp/pygmyhdl
|
docs/_build/singlehtml/notebooks/2_hierarchy/.ipynb_checkpoints/hierarchy_and_abstraction_and_ursidae_oh_my-checkpoint.ipynb
|
mit
|
from pygmyhdl import *
@chunk
def dff(clk_i, d_i, q_o):
'''
Inputs:
clk_i: Rising edge on this input stores data on d_i into q_o.
d_i: Input that brings new data into the flip-flop:
Outputs:
q_o: Output of the data stored in the flip-flop.
'''
@seq_logic(clk_i.posedge)
def logic():
q_o.next = d_i
"""
Explanation: Note: If you're reading this as a static HTML page, you can also get it as an
executable Jupyter notebook here.
Hierarchy and Abstraction and Ursidae, Oh My!
Those are big, scary words often associated with FPGA designs, but with simple meanings:
Hierarchy: "See that big, complex thing? It's actually made of small, simple things.
And those small, simple things are made of even smaller, simpler things."
Abstraction: "See that thing? It does something. I don't care how it does it.
I just need to know what it does."
Ursidae: This is the scientific family name for bears. They have nothing to do with this chapter.
I just needed it to make the whole Wizard of Oz reference work.
Take a car as an example. It consists of a body and a drive train. The drive train
is made up of an engine, transmission, drive shaft, axles and wheels.
And the engine is made up of a block, pistons, fuel injectors, etc.
And on and on.
That's hierarchy.
Then think about somebody who's putting an engine in a car.
He needs to know how big the engine is, how to feed gas to it, and where the
spinny thing is that attaches to the drive shaft and makes the wheels go round.
All the stuff about piston rings and camshafts and coolant channels
are details he can ignore.
That's abstraction.
And what about Ursidae?
Bears have nothing to do with this.
I told you that already.
Just let it go.
The role of hierarchy and abstraction in digital design is best understood
by going through a concrete example.
And what's the best example we have at this point?
The LED Blinker Revisited
Let's rebuild the LED blinker from our
previous example
as a hierarchy of smaller pieces of digital logic.
I'll do this in two phases:
Phase 1: I'll break the design into smaller pieces. Then I'll break those pieces into
even smaller pieces and continue the process until I've reached the lowest level
I think is reasonable.
Phase 2: I'll take each piece at the lowest level and describe it using MyHDL.
Then, I'll interconnect those MyHDL code fragments and use them to build the
next higher level.
And I'll proceed like that until I've got the entire LED blinker built in MyHDL.
This process - break it down starting from the top, then rebuild it starting from the bottom - is
a common technique used in software design.
Since building hardware using an HDL is similar to writing software, it's no surprise
the pattern works here.
Break Down
Let's start by showing the LED blinker at its highest level of abstraction:
it takes a clock as input and generates a single output that turns an LED on and off:
<img alt="Highest level of abstraction for an LED blinker." src="lvl_led_blinker.png" width=500/>
At the next level of abstraction, we already know the blinker is built from an $N$-bit
counter with the LED output driven from the counter's MSB.
<img alt="Second highest level of abstraction for an LED blinker." src="lvl_counter.png" width=500/>
Going down another level, a counter can be constructed from an $N$-bit
register
and an $N$-bit binary adder.
The register holds a value $V$ and, on every pulse of the clock input, that value
is replaced by $V + 1$.
<img alt="Third highest level of abstraction for an LED blinker." src="lvl_reg_adder.png" width=550/>
The register consists of $N$ individual
D flip-flops
each of which stores a single bit of the register value that is updated when there is a rising
edge of the clock input:
<img alt="Individual bits of a register." src="lvl_dff.png" width=400/>
The adder is also composed of $N$ subcomponents called
full-adder bits.
The $k$-th full-adder bit takes the $k$-th bit from each of the numbers to be added along with
a carry bit from the preceding stage of the adder.
It combines these to compute the $k$-th sum bit and a carry bit for the next stage of the adder.
In the figure below, I've replaced the a input with the value 0...001 because the
adder is being used to increment the value on the b input.
I've also placed a 0 on the carry input into the first bit of the adder.
<img alt="Interconnection of full-adder bits." src="lvl_adder.png" width=350/>
Finally, each full-adder bit is built from a
collection of primitive logic gates [1] like this:
<img alt="Full-adder bit gates." src="lvl_adder_bit.png" width=200/>
And that's the complete breakdown of the LED blinker.
Now it's time to build it back up using MyHDL.
Build Up
I broke the LED blinker down to its most basic components,
now I'll start at the lowest level of the hierarchy to build it back up.
<img alt="LED Blinker hierarchy" src="blinker_hierarchy.png" width=200/>
Here's the MyHDL code for a single D flip-flop. It's a simple piece of
sequential logic that copies its input to its output whenever there is a rising
edge on the clock input:
End of explanation
"""
@chunk
def register(clk_i, d_i, q_o):
for k in range(len(d_i)):
dff(clk_i, d_i.o[k], q_o.i[k])
"""
Explanation: Now I can build a register by instantiating multiple dff modules in a loop:
End of explanation
"""
initialize() # Initialize for simulation.
# Create clock signal and 8-bit register input and output buses.
clk = Wire(name='clk')
data_in = Bus(8, name='data_in')
data_out = Bus(8, name='data_out')
# Instantiate a register and attach the clock and I/O buses.
register(clk_i=clk, d_i=data_in, q_o=data_out)
# Apply random 8-bit integers to the register input along with
# rising and falling edges of the clock signal.
from random import randint # Random integer function.
def test_bench():
# Apply ten random inputs to the register.
for i in range(10):
data_in.next = randint(0,256) # Set register input to a random 8-bit value.
clk.next = 0 # Lower clock signal to 0.
yield delay(1) # Wait for one unit of simulation time.
clk.next = 1 # Then raise the clock signal to 1 ...
yield delay(1) # ... and wait one more time unit. The register output should change.
# Pass the test bench function to the simulator function to run the simulation.
simulate(test_bench())
# View the results of the simulation.
show_waveforms()
"""
Explanation: The loop count is set by the number of bits in the incoming data bus, d_i.
During each loop iteration, a D flip-flop is created.
The d_i.o[k] notation [2] says that the $k$-th output bit of
the d_i bus is used to drive the data input of the $k$-th flip-flop.
In a similar manner, the q_o.i[k] notation says that the output of the $k$-th
flip-flop will drive the $k$-th input bit of the q_o bus.
In this way, the register input bus is applied to the individual flip-flops, and the
flip-flop outputs collectively drive the register output bus.
To test the register, I'll connect it to a clock signal and some
input and output buses [3].
Then, I'll use a simple
test bench function
to apply random values to the
register input and see if those values propagate to the register output:
End of explanation
"""
@chunk
def full_adder_bit(a_i, b_i, c_i, s_o, c_o):
'''
Inputs:
a_i, b_i: Inputs from i-th bit of a and b values.
c_i: Input from carry output of (i-1)-th adder stage.
Outputs:
s_o: Output of i-th sum bit.
c_o: Carry output to the (i+1)-th adder stage.
'''
@comb_logic
def logic():
# Exclusive-OR (^) the inputs to create the sum bit.
s_o.next = a_i ^ b_i ^ c_i
# Generate a carry output if two or more of the inputs are 1.
# This uses the logic AND (&) and OR (|) operators.
c_o.next = (a_i & b_i) | (a_i & c_i) | (b_i & c_i)
"""
Explanation: From the logic waveforms, you can see the value on the data_in bus is transferred to the
data_out bus whenever a rising edge occurs on the clk signal.
The register appears to be working, so I'll move on to begin implementing the adder
branch of the blinker hierarchy.
Here's the MyHDL for the combinatorial logic of a single full-adder bit:
End of explanation
"""
initialize() # Initialize for a new simulation.
# Declare input and output signals for the full-adder bit.
a_i, b_i, c_i = Wire(name='a_i'), Wire(name='b_i'), Wire(name='c_i')
sum_o, c_o = Wire(name='sum_o'), Wire(name='c_o')
# Instantiate a full-adder bit with the I/O connections.
full_adder_bit(a_i, b_i, c_i, sum_o, c_o)
# Simulate the full-adder bit operation for every possible combination
# of the a_i, b_i and c_i inputs.
exhaustive_sim(a_i, b_i, c_i)
# Show the response of the full-adder bit to the inputs.
show_text_table()
"""
Explanation: To test the full-adder bit, I'll apply every possible combination of the three inputs and
see how it reacts:
End of explanation
"""
@chunk
def adder(a_i, b_i, s_o):
'''
Inputs:
a_i, b_i: Numbers to be added.
Outputs:
s_o: Sum of a_i and b_i inputs.
'''
# Create a bus for the carry bits that pass from one stage to the next.
# There is one more carry bit than the number of adder stages in order
# to drive the carry input of the first stage.
c = Bus(len(a_i)+1)
# Set the carry input to the first stage of the adder to 0.
c.i[0] = 0
# Use the length of the a_i input bus to set the loop counter.
for k in range(len(a_i)):
# The k-th bit of the a_i and b_i buses are added with the
# k-th carry bit to create the k-th sum bit and the
# carry output bit. The carry output is the
# carry input to the (k+1)-th stage.
full_adder_bit(a_i=a_i.o[k], b_i=b_i.o[k], c_i=c.o[k], s_o=s_o.i[k], c_o=c.i[k+1])
"""
Explanation: You can compare the table of values above to this
full-adder truth-table
to verify it's working correctly.
Just like the register, I can build a complete adder by iteratively
instantiating the full-adder bit:
End of explanation
"""
initialize() # Once again, initialize for a new simulation.
# Declare 8-bit buses for the two numbers to be added and the sum.
a = Bus(8, name='a')
b = Bus(8, name='b')
s = Bus(8, name='sum')
# Instantiate an adder and connect the I/O buses.
adder(a, b, s)
# Simulate the adder's output for 20 randomly-selected inputs.
random_sim(a, b, num_tests=20)
# Show a table of the adder output for each set of inputs.
show_text_table()
"""
Explanation: I won't even try to do an exhaustive test of the adder because with eight-bit
input buses, that would take $2^8 \times 2^8 = $ 65,536 simulation cycles
and nobody would even look at a fraction of the output anyway.
Instead, I'll just check the adder's output for a few randomly-selected inputs:
End of explanation
"""
@chunk
def counter(clk_i, cnt_o):
'''
Inputs:
clk_i: Counter increments on the rising edge of the clock.
Outputs:
cnt_o: Counter value.
'''
# The length of the counter output determines the number of counter bits.
length = len(cnt_o)
one = Bus(length, init_val=1) # A constant bus that carries the value 1.
next_cnt = Bus(length) # A bus that carries the next counter value.
# Add one to the current counter value to create the next value.
adder(one, cnt_o, next_cnt)
# Load the next counter value into the register on a rising clock edge.
register(clk_i, next_cnt, cnt_o)
"""
Explanation: You can manually check the table results to see the adder is working correctly.
(Remember that the adder's output is only eight bits, so it will roll over to 0
upon reaching 256. Therefore, take the $\bmod{256}$ of the sum of the inputs and compare
that to the adder's output. For example, $(72 + 213) \bmod{256} = (285) \bmod{256} = 29$.)
Now I can move up another level in the hierarchy and build a counter from the adder and register:
End of explanation
"""
@chunk
def blinker(clk_i, led_o, length):
'''
Inputs:
clk_i: This is a clock signal input.
length: This is the number of bits in the counter that generates the led_o output.
Outputs:
led_o: This is an output signal that drives an LED on and off.
'''
cnt = Bus(length, name='cnt') # Declare the counter bus with the given length.
counter(clk_i, cnt) # Instantiate a counter of the same length.
# Attach the MSB of the counter bus to the LED output.
@comb_logic
def output_logic():
led_o.next = cnt[length-1]
"""
Explanation: Having finally reached the top of the hierarchy, I'll use the counter to complete the implementation of the LED blinker:
End of explanation
"""
initialize() # Initialize for simulation.
clk = Wire(name='clk') # Declare the clock input.
led = Wire(name='led') # Declare the LED output.
blinker(clk, led, 3) # Instantiate a three-bit blinker and attach I/O signals.
clk_sim(clk, num_cycles=16) # Apply 16 clock pulses.
show_waveforms() # Look at the waveforms.
"""
Explanation: Let's check the operation by simulating a small LED blinker as we did in the previous example:
End of explanation
"""
toVerilog(blinker, clk_i=clk, led_o=led, length=22)
"""
Explanation: A blinker with a three-bit counter should output an LED signal that runs
at an eighth of the incoming clock frequency.
The waveforms show that's what is happening.
Since the simulation seems to work, it's time to convert a larger version of the blinker into Verilog
after which it can be compiled and run on an iCEstick.
(Because this LED blinker uses a hierarchical description that results in a lot of
smaller, individual components at the lower levels, it will take longer to generate
the Verilog than the previous example. Be patient!)
End of explanation
"""
with open('blinker.pcf', 'w') as pcf:
pcf.write(
'''
set_io led_o 99
set_io clk_i 21
'''
)
"""
Explanation: Whew, done!
You may notice a few warnings about undriven signals, but don't worry about those.
The next step is to write a file with the blinker pin assignments for the iCEstick board.
The 12 MHz clock enters through pin 21 of the FPGA, and LED D1 is driven by pin 99.
End of explanation
"""
!yosys -q -p "synth_ice40 -blif blinker.blif" blinker.v
!arachne-pnr -q -d 1k -p blinker.pcf blinker.blif -o blinker.asc
!icepack blinker.asc blinker.bin
"""
Explanation: The following commands will compile the Verilog and pin assignments into a bitstream file:
End of explanation
"""
!iceprog blinker.bin
"""
Explanation: Finally, the bitstream is ready to download into the iCEstick:
End of explanation
"""
|
fpavogt/pyqz
|
docs/source/pyqz_demo_param.ipynb
|
gpl-3.0
|
%matplotlib inline
import pyqz
import pyqz.pyqz_plots as pyqzp
import numpy as np
"""
Explanation: The parameters of pyqz
pyqz is designed to be easy and quick to use, without withholding any information from the user. As such, all parameters of importance for deriving the estimates of LogQ and Tot[O]+12 can be modified via dedicated keywords. Here, we present some basic examples to clarify which parameter does what. In addition to these examples, the documentation also contains a detailed list of the functions of pyqz, along with a brief description of each keyword.
First things first, let's import pyqz and its plotting module to display the figures.
End of explanation
"""
my_srs = 800
pyqz.get_global_qz(np.array([[ 1.00e+00, 5.00e-02, 2.38e+00, 1.19e-01, 5.07e+00, 2.53e-01,
5.67e-01, 2.84e-02, 5.11e-01, 2.55e-02, 2.88e+00, 1.44e-01]]),
['Hb','stdHb','[OIII]','std[OIII]','[OII]+','std[OII]+',
'[NII]','std[NII]','[SII]+','std[SII]+','Ha','stdHa'],
['[NII]/[OII]+;[OIII]/[SII]+'],
ids = ['NGC_5678'],
srs = my_srs,
KDE_pickle_loc = './examples/',
KDE_method = 'multiv',
KDE_qz_sampling=201j,
struct='pp',
sampling=1)
# And use pyqz_plots.plot_global_qz() to display the result
import glob
fn = glob.glob('./examples/*NGC_5678*.pkl')
pyqzp.plot_global_qz(fn[0], show_plots = True, save_loc = './examples', do_all_diags = False)
"""
Explanation: Parameter 1: srs
srs defines the size of the random sample of line fluxes generated by pyqz. This is an essential keyword for the propagation of the observational errors associated with each line flux measurement. In other words, srs is the number of discrete estimates of the probability density function (in the {LogQ vs. Tot[O]+12} plane) associated with one diagnostic grid.
Hence, the joint probability density function (combining $n$ diagnostic grids) will be reconstructed via a Kernel Density Estimation routine from $n\cdot$srs points. srs=400 is the default value, suitable for error levels of $\sim$5%. We suggest srs=800 for errors at the 10%-15% level. Basically, larger errors result in wider probability density peaks, and thus require more srs points to be properly discretized - at the cost of additional computation time, of course! Try changing the value of my_srs in the example below, and watch the number of black dots vary accordingly in the KDE diagram.
End of explanation
"""
my_method = 'gauss'
pyqz.get_global_qz(np.array([[ 1.00e+00, 5.00e-02, 2.38e+00, 1.19e-01, 2.07e+00, 2.53e-01,
5.67e-01, 2.84e-02, 5.11e-01, 2.55e-02, 2.88e+00, 1.44e-01]]),
['Hb','stdHb','[OIII]','std[OIII]','[OII]+','std[OII]+',
'[NII]','std[NII]','[SII]+','std[SII]+','Ha','stdHa'],
['[NII]/[SII]+;[OIII]/[SII]+','[NII]/[OII]+;[OIII]/[OII]+'],
ids = ['NGC_09'],
srs = 400,
KDE_pickle_loc = './examples/',
KDE_method = my_method,
KDE_qz_sampling=201j,
struct='pp',
sampling=1)
# And use pyqz_plots.plot_global_qz() to display the result
import glob
fn = glob.glob('./examples/*NGC_09*%s*.pkl' % my_method)
pyqzp.plot_global_qz(fn[0], show_plots = True, save_loc = './examples', do_all_diags = False)
"""
Explanation: Parameter 2: KDE_method
This keyword specifies the Kernel Density Estimation routine used to reconstruct the individual and joint probability density functions in the {LogQ vs. Tot[O]+12} plane. It can be either gauss to use gaussian_kde from the scipy.stats module, or multiv to use KDEMultivariate from the statsmodels package.
The former option is 10-100x faster, but usually gives less accurate results when different diagnostic grids disagree. The underlying reason is that with gaussian_kde, the kernel bandwidth cannot be explicitly set individually for the LogQ and Tot[O]+12 directions, so that the function tends to over-smooth the distribution. KDEMultivariate should be preferred as the bandwidth of the kernel is set individually for both the LogQ and Tot[O]+12 directions using Scott's rule, scaled by the standard deviation of the distribution along these directions.
In the example below, we insert some error in the [OII] line flux - thereby creating a mismatch between the different line ratio space estimates. Switch my_method from 'gauss' to 'multiv', and watch how the joint PDF (shown as shades of gray) traces the distribution of black dots in a significantly worse/better manner.
End of explanation
"""
my_qz_sampling = 101j
pyqz.get_global_qz(np.array([[ 1.00e+00, 5.00e-02, 2.38e+00, 1.19e-01, 5.07e+00, 2.53e-01,
5.67e-01, 2.84e-02, 5.11e-01, 2.55e-02, 2.88e+00, 1.44e-01]]),
['Hb','stdHb','[OIII]','std[OIII]','[OII]+','std[OII]+',
'[NII]','std[NII]','[SII]+','std[SII]+','Ha','stdHa'],
['[NII]/[OII]+;[OIII]/[SII]+'],
ids = ['NGC_00'],
srs = 400,
KDE_pickle_loc = './examples/',
KDE_method = 'multiv',
KDE_qz_sampling=my_qz_sampling,
struct='pp',
sampling=1)
# And use pyqz_plots.plot_global_qz() to display the result
import glob
fn = glob.glob('./examples/*NGC_00*.pkl')
pyqzp.plot_global_qz(fn[0], show_plots = True, save_loc = './examples/', do_all_diags = False)
"""
Explanation: Parameter 3: KDE_qz_sampling
This sets the sampling of the {LogQ vs. Tot[O]+12} plane when reconstructing the individual and global PDFs. It is set to 101j by default (i.e. a grid with 101$\cdot$101 = 10201 sampling nodes); datasets with small errors ($<$5%) could benefit from using twice this resolution for better results (i.e. KDE_qz_sampling=201j), at the cost of a longer processing time. In the following example, the influence of KDE_qz_sampling can be seen in the size of the resolution elements of the joint PDF map, as well as the smoothness of the (orange) contour at 0.61%.
End of explanation
"""
|
tata-antares/tagging_LHCb
|
Stefania_files/track-tagging.ipynb
|
apache-2.0
|
import pandas
import numpy
from folding_group import FoldingGroupClassifier
from rep.data import LabeledDataStorage
from rep.report import ClassificationReport
from rep.report.metrics import RocAuc
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, roc_auc_score
from utils import get_N_B_events, get_events_number, get_events_statistics
"""
Explanation: Import
End of explanation
"""
import root_numpy
data_nan = pandas.DataFrame(root_numpy.root2array('datasets/data/csv/JPsiK/Tracks.root'))
data_nan.head()
event_id_column = 'event_id'
event_id = data_nan.run.apply(str) + '_' + data_nan.event.apply(str)
data_nan['group_column'] = numpy.unique(event_id, return_inverse=True)[1]
data_nan[event_id_column] = event_id
get_events_statistics(data_nan)
get_N_B_events()
"""
Explanation: Reading initial data
End of explanation
"""
data = data_nan.dropna()
len(data_nan), len(data), get_events_statistics(data)
"""
Explanation: Remove rows with NAN from data
End of explanation
"""
from utils import add_diff_pt
# add diff pt
add_diff_pt(data)
# add cos(diff_phi)
data['cos_diff_phi'] = numpy.cos(data.diff_phi.values)
"""
Explanation: Add diff_pt and cos(diff_phi)
End of explanation
"""
from itertools import combinations
PIDs = {'k': data.PIDNNk.values,
'e': data.PIDNNe.values,
'mu': data.PIDNNm.values,
}
for (pid_name1, pid_values1), (pid_name2, pid_values2) in combinations(PIDs.items(), 2):
data.loc[:, 'max_PID_{}_{}'.format(pid_name1, pid_name2)] = numpy.maximum(pid_values1, pid_values2)
data.loc[:, 'sum_PID_{}_{}'.format(pid_name1, pid_name2)] = pid_values1 + pid_values2
"""
Explanation: Add max, sum among PIDs
End of explanation
"""
data.loc[:, 'label'] = (data.signB.values * data.signTrack.values > 0) * 1
', '.join(data.columns)
"""
Explanation: define label = signB * signTrack
if > 0 (same sign) - label 1
if < 0 (different sign) - label 0
End of explanation
"""
initial_cut = '(ghostProb < 0.4)'
data = data.query(initial_cut)
get_events_statistics(data)
"""
Explanation: Apply ghost prob cut
End of explanation
"""
threshold_kaon = 0.
threshold_muon = 0.
threshold_electron = 0.
threshold_pion = 0.
threshold_proton = 0.
cut_pid = " ( (PIDNNk > {trk}) | (PIDNNm > {trm}) | (PIDNNe > {tre}) | (PIDNNpi > {trpi}) | (PIDNNp > {trp})) "
cut_pid = cut_pid.format(trk=threshold_kaon, trm=threshold_muon, tre=threshold_electron, trpi=threshold_pion,
trp=threshold_proton)
data = data.query(cut_pid)
get_events_statistics(data)
"""
Explanation: Leave not muons, kaons, electrons, protons, pions
End of explanation
"""
from utils import compute_sum_of_charges
means = [compute_sum_of_charges(data[mask], name, bins=bins,
event_id_column=event_id_column) for mask, name, bins in \
zip([data.signB > -100,
(data.IPs > 3) & ((abs(data.diff_eta) > 0.6) | (abs(data.diff_phi) > 0.825)),
(abs(data.diff_eta) < 0.6) & (abs(data.diff_phi) < 0.825) & (data.IPs < 3)],
['full', 'OS', 'SS'], [21, 21, 21])]
"""
Explanation: Tracks sum of charges asymmetry checks
End of explanation
"""
N_B_passed = float(get_events_number(data))
tagging_efficiency = N_B_passed / get_N_B_events()
tagging_efficiency_delta = sqrt(N_B_passed) / get_N_B_events()
tagging_efficiency, tagging_efficiency_delta
hist(data.diff_pt.values, bins=100)
pass
"""
Explanation: Calculating tagging efficiency ($\epsilon_{tag}$)
$$N (\text{passed selection}) = \sum_{\text{passed selection}} sw_i$$
$$N (\text{all events}) = \sum_{\text{all events}} sw_i,$$
where $sw_i$ - sPLot weight (sWeight for signal)
$$\epsilon_{tag} = \frac{N (\text{passed selection})} {N (\text{all events})}$$
$$\Delta\epsilon_{tag} = \frac{\sqrt{\epsilon_{tag}(1-\epsilon_{tag}) \sum_{\text{all events}}sw_i^2}} {N (\text{all events})}$$
Not all events are available (some selections were applied beforehand), that is why we use
$$\Delta\epsilon_{tag} = \frac{\sqrt{N (\text{passed selection})}} {N (\text{all events})},$$
which is similar to the previous definition
End of explanation
"""
_, take_indices = numpy.unique(data[event_id_column], return_index=True)
figure(figsize=[15, 5])
subplot(1, 2, 1)
hist(data.Bmass.values[take_indices], bins=100)
title('B mass hist')
xlabel('mass')
subplot(1, 2, 2)
hist(data.N_sig_sw.values[take_indices], bins=100, normed=True)
title('sWeights hist')
xlabel('signal sWeights')
plt.savefig('img/Bmass_less_PID.png' , format='png')
"""
Explanation: Choose most probable B-events
End of explanation
"""
sweight_threshold = 1.
data_sw_passed = data[data.N_sig_sw > sweight_threshold]
data_sw_not_passed = data[data.N_sig_sw <= sweight_threshold]
get_events_statistics(data_sw_passed)
_, take_indices = numpy.unique(data_sw_passed[event_id_column], return_index=True)
figure(figsize=[15, 5])
subplot(1, 2, 1)
hist(data_sw_passed.Bmass.values[take_indices], bins=100)
title('B mass hist for sWeight > 1 selection')
xlabel('mass')
subplot(1, 2, 2)
hist(data_sw_passed.N_sig_sw.values[take_indices], bins=100, normed=True)
title('sWeights hist for sWeight > 1 selection')
xlabel('signal sWeights')
plt.savefig('img/Bmass_selected_less_PID.png' , format='png')
hist(data_sw_passed.diff_pt.values, bins=100)
pass
"""
Explanation: Define B-like events for training
Events with low sWeight will still be used, but only to test quality.
End of explanation
"""
features = list(set(data.columns) - {'index', 'run', 'event', 'i', 'signB', 'signTrack', 'N_sig_sw', 'Bmass', 'mult',
'PIDNNp', 'PIDNNpi', 'label', 'thetaMin', 'Dist_phi', event_id_column,
'mu_cut', 'e_cut', 'K_cut', 'ID', 'diff_phi', 'group_column'})
features
"""
Explanation: Main idea:
find tracks, which can help reconstruct the sign of B if you know track sign.
label = signB * signTrack
* the highest output means that this is same sign B as track
* the lowest output means that this is opposite sign B than track
Define features
End of explanation
"""
figure(figsize=[15, 16])
bins = 60
step = 3
for i, (feature1, feature2) in enumerate(combinations(['PIDNNk', 'PIDNNm', 'PIDNNe', 'PIDNNp', 'PIDNNpi'], 2)):
subplot(4, 3, i + 1)
Z, (x, y) = numpy.histogramdd(data_sw_passed[[feature1, feature2]].values, bins=bins, range=([0, 1], [0, 1]))
pcolor(numpy.log(Z).T, vmin=0)
xlabel(feature1)
ylabel(feature2)
xticks(numpy.arange(bins, step), x[::step]), yticks(numpy.arange(bins, step), y[::step])
plt.savefig('img/PID_selected_less_PID.png' , format='png')
"""
Explanation: PID pairs scatters
End of explanation
"""
hist(data_sw_passed.diff_pt.values, bins=60, normed=True)
pass
"""
Explanation: pt
End of explanation
"""
figure(figsize=(20, 6))
subplot(1, 2, 1)
_, n_tracks = numpy.unique(data_sw_passed[event_id_column], return_counts=True)
hist(n_tracks, bins=100)
title('Number of tracks for events with sWeight > 1')
subplot(1, 2, 2)
_, n_tracks_all = numpy.unique(data[event_id_column], return_counts=True)
hist(n_tracks_all, bins=106)
title('Number of tracks')
plt.savefig('img/tracks_number_less_PID.png' , format='png')
"""
Explanation: count of tracks
End of explanation
"""
figure(figsize=[15, 4])
for i, column in enumerate(['PIDNNm', 'PIDNNe', 'PIDNNk']):
subplot(1, 3, i + 1)
hist(data_sw_passed[column].values, bins=60, range=(0, 1), label=column)
legend()
"""
Explanation: PIDs histograms
End of explanation
"""
from decisiontrain import DecisionTrainClassifier
from rep.estimators import SklearnClassifier
from hep_ml.losses import LogLossFunction
data_sw_passed_lds = LabeledDataStorage(data_sw_passed, data_sw_passed.label.values, data_sw_passed.N_sig_sw.values)
"""
Explanation: Train to distinguish same sign vs opposite sign
End of explanation
"""
tt_base = DecisionTrainClassifier(learning_rate=0.1, n_estimators=3000, depth=6,
max_features=15, n_threads=14, loss=LogLossFunction(regularization=100))
tt_folding = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=11,
train_features=features, group_feature='group_column')
%time tt_folding.fit_lds(data_sw_passed_lds)
pass
import cPickle
with open('models/dt_full_group.pkl', 'w') as f:
cPickle.dump(tt_folding, f)
# import cPickle
# with open('models/dt_full_group.pkl', 'r') as f:
# tt_folding = cPickle.load(f)
comparison_report = tt_folding.test_on_lds(data_sw_passed_lds)
comparison_report.compute_metric(RocAuc())
comparison_report.roc()
lc = comparison_report.learning_curve(RocAuc(), steps=1)
lc
comparison_report.feature_importance()
"""
Explanation: DT
End of explanation
"""
from utils import get_result_with_bootstrap_for_given_part
result = get_result_with_bootstrap_for_given_part(tagging_efficiency, tagging_efficiency_delta, tt_folding,
[data_sw_passed, data_sw_not_passed], 'tt-log', get_N_B_events(),
logistic=True, n_calibrations=30)
result
import utils
reload(utils)
from utils import get_result_with_bootstrap_for_given_part
result = get_result_with_bootstrap_for_given_part(tagging_efficiency, tagging_efficiency_delta, tt_folding,
[data_sw_passed, data_sw_not_passed], 'tt-log', get_N_B_events(),
logistic=True, n_calibrations=1)
result
result.to_csv('img/tracks.csv', index=False, header=True)
"""
Explanation: Calibration
End of explanation
"""
from utils import prepare_B_data_for_given_part
Bdata_prepared = prepare_B_data_for_given_part(tt_folding, [data_sw_passed, data_sw_not_passed],
get_N_B_events(), logistic=True)
Bdata_prepared.to_csv('models/Bdata_tracks.csv', header=True, index=False)
"""
Explanation: Implementing best tracking
End of explanation
"""
from utils import estimate_algorithm
import cPickle
with open('models/dt_MC.pkl', 'r') as f:
tt_folding_MC = cPickle.load(f)
with open('models/calibrator_tracks_MC.pkl', 'r') as f:
calibrator_tracks_MC = cPickle.load(f)
with open('models/calibrator_B_MC.pkl', 'r') as f:
calibrator_B_MC = cPickle.load(f)
p_MC = tt_folding_MC.predict_proba(data)[:, 1]
roc_auc_score(data.label, p_MC, sample_weight=data.N_sig_sw.values.astype(float64))
estimate_algorithm(tt_folding_MC, calibrator_tracks_MC, calibrator_B_MC, data, get_N_B_events())
estimate_algorithm(tt_folding_MC, calibrator_tracks_MC, calibrator_B_MC, data, get_N_B_events(), calib_part_itself=True,
calib_itself=True)
"""
Explanation: MC trained algorithm testing
End of explanation
"""
|
christophmark/bayesloop
|
docs/source/tutorials/firststeps.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt # plotting
import seaborn as sns # nicer plots
sns.set_style('whitegrid') # plot styling
import bayesloop as bl
S = bl.Study()
"""
Explanation: First steps with bayesloop
bayesloop models feature a two-level hierarchical structure: the low-level observation model filters out measurement noise and provides the parameters that one is interested in (volatility of stock prices, diffusion coefficient of particles, directional persistence of migrating cancer cells, rate of randomly occurring events, ...). The observation model is, in most cases, given by a simple and well-known stochastic process: price changes are Gaussian-distributed, turning angles of moving cells follow a von-Mises distribution and the number of rare events within a given interval of time is Poisson-distributed. The aim of the observation model is to describe the measured data on a short time scale, while the parameters may change on longer time scales. The high-level transition model describes how the parameters of the observation model change over time, i.e. whether there are abrupt parameter jumps or gradual variations. The transition model may itself depend on so-called hyper-parameters, for example the likelihood of parameter jumps, the magnitude of gradual parameter variations or the slope of a deterministic linear trend. The following tutorials show how to use the bayesloop module to infer both time-varying parameter values of the observation model as well as the hyper-parameter values of the transition model and compare different hypotheses about the parameter dynamics by approximating the model evidence, i.e. the probability of the measured data, given the observation model and transition model.
The first section of the tutorial introduces the main class of the module, Study, which enables fits of time-varying parameter models with fixed hyper-parameter values and the optimization of such hyper-parameters based on the model evidence. We provide a detailed description of how to import data, set the observation model and transition model, and perform the model fit. Finally, a plotting function to display the results is discussed briefly. This tutorial therefore provides the basis for later tutorials that discuss the extended classes HyperStudy, ChangepointStudy and OnlineStudy.
Study class
To start a new data study/analysis, create a new instance of the Study class:
End of explanation
"""
import numpy as np
data = np.array([5, 4, 1, 0, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6, 3, 3, 5, 4, 5, 3, 1, 4,
4, 1, 5, 5, 3, 4, 2, 5, 2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0,
0, 1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0,
0, 2, 1, 0, 0, 0, 1, 1, 0, 2, 3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 3, 3, 0,
0, 0, 1, 4, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0])
S.load(data, timestamps=np.arange(1852, 1962))
"""
Explanation: This object is central to an analysis conducted with bayesloop. It stores the data and further provides the methods to perform probabilistic inference on the models defined within the class, as described below.
Data import
In this first study, we use a simple, yet instructive example of heterogeneous time series, the annual number of coal mining accidents in the UK from 1851 to 1962. The data is imported as a NumPy array, together with corresponding timestamps. Note that setting timestamps is optional (if none are provided, timestamps are set to an integer sequence: 0, 1, 2,...).
End of explanation
"""
S.loadExampleData()
"""
Explanation: Note that this particular data set is also hard-coded into the Study class, for convenient testing:
End of explanation
"""
L = bl.observationModels.Poisson('accident_rate', bl.oint(0, 6, 1000))
S.set(L)
"""
Explanation: In case you have multiple observations for each time step, you may also provide the data in the form np.array([[x1,y1,z1], [x2,y2,z2], ..., [xn,yn,zn]]). Missing data points should be included as np.nan.
Observation model
The first step to create a probabilistic model to explain the data is to define the observation model, or likelihood. The observation model states the probability (density) of a data point at time $t$, given the parameter values at time $t$ and possibly past data points. It therefore resembles the low-level model, in contrast to the transition model which describes how the parameters of the observation model change over time.
As coal mining disasters fortunately are rare events, we may model the number of accidents per year by a Poisson distribution. In bayesloop, this is done as follows:
End of explanation
"""
L = bl.observationModels.Poisson('accident_rate')
S.set(L)
"""
Explanation: We first define the observation model and provide two arguments: A name for the only parameter of the model, the 'accident_rate'. We further have to provide discrete values for this parameter, as bayesloop computes all probability distributions on grids. As the Poisson distribution expects its parameter to be greater than zero, we choose an open interval between 0 and 6 with 1000 equally spaced values in between, by using the function bl.oint(). For closed intervals, one can also use bl.cint(), which acts exactly like the function linspace from NumPy. To avoid singularities in the probability values of the observation model, it is however recommended to use bl.oint() in most cases. Finally, we assign the defined observation model to our study instance with the method set().
As the parameter boundaries depend on the data at hand, bayesloop will estimate appropriate parameter values, if one does not provide them:
End of explanation
"""
T = bl.transitionModels.GaussianRandomWalk('sigma', 0.2, target='accident_rate')
S.set(T)
"""
Explanation: Note that you can also use the following short form to define observation models: L = bl.om.Poisson(). All currently implemented observation models can be looked up in the API Docs or directly in observationModels.py. bayesloop further supports all probability distributions that are included in the scipy.stats as well as the sympy.stats module. See this tutorial for instructions on how to build custom observation models from arbitrary distributions.
In this example, the observation model only features a single parameter. If we wanted to model the annual number of accidents with a Gaussian distribution instead, we have to supply two parameter names (mean and std) and corresponding values:
L = bl.om.Gaussian('mean', bl.cint(0, 6, 200), 'std', bl.oint(0, 2, 200))
S.set(L)
Again, if we are not sure about parameter boundaries, we may assign None to one or all parameters, and bayesloop will estimate them:
L = bl.om.Gaussian('mean', None, 'std', bl.oint(0, 2, 200))
S.set(L)
The order has to remain Name, Value, Name, Value, ..., which is why we cannot simply omit the values and have to write None instead.
Transition model
As the dynamics of many real-world systems are the result of a multitude of underlying processes that act on different spatial and time scales, common statistical models with static parameters often miss important aspects of the systems' dynamics (see e.g. this article). bayesloop therefore calls for a second model, the transition model, which describes the temporal changes of the model parameters.
In this example, we assume that the accident rate itself may change gradually over time and choose a Gaussian random walk with the standard deviation $\sigma=0.2$ as transition model. As for the observation model, we supply a unique name for hyper-parameter $\sigma$ (named sigma) that describes the standard deviation of the parameter fluctuations and therefore the magnitude of changes. Again, we have to assign values for sigma, but only choose a single fixed value of 0.2, instead of a whole set of values. This single value can be optimized by maximizing the model evidence, see here. To analyze and compare a set of different values, one may use an instance of a HyperStudy that is described in detail here. In this first example, we simply take the value of 0.2 as given. As the observation model may contain several parameters, we further have to specify the parameter accident_rate as the target of this transition model.
End of explanation
"""
S.fit()
"""
Explanation: Note that you can also use the following short form to define transition models: T = bl.tm.GaussianRandomWalk(). All currently implemented transition models can be looked up in the API Docs or directly in transitionModels.py.
Model fit
At this point, the hierarchical time series model for the coal mining data set is properly defined and we may continue to perform the model fit. bayesloop employs a forward-backward algorithm that is based on Hidden Markov models. It basically breaks down the high-dimensional inference problem of all time steps into many low-dimensional ones for each individual time step. The inference algorithm is implemented by the fit method:
End of explanation
"""
plt.figure(figsize=(8, 4))
# plot of raw data
plt.bar(S.rawTimestamps, S.rawData, align='center', facecolor='r', alpha=.5)
# parameter plot
S.plot('accident_rate')
plt.xlim([1851, 1961])
plt.xlabel('year');
"""
Explanation: By default, fit computes the so-called smoothing distribution of the model parameters for each time step. This distribution states the probability (density) of the parameter value at a time step $t$, given all past and future data points. All distributions have the same shape as the parameter grid, and are stored in S.posteriorSequence for further analysis. Additionally, the mean values of each distribution are stored in S.posteriorMeanValues, as point estimates. Finally, the (natural) logarithmic value of the model evidence, the probability of the data given the chosen model, is stored in S.logEvidence (more details on evidence values follow).
To simulate an on-line analysis, where at each step in time $t$, only past data points are available, one may provide the keyword-argument forwardOnly=True. In this case, only the forward-part of the algorithm is run. The resulting parameter distributions are called filtering distributions.
Plotting
To display the temporal evolution or the distribution of the model parameters at a certain time step, the Study class provides the method plot. If no time step is specified, the method displays the mean values together with the marginal distributions for one parameter of the model. The parameter to be plotted can be chosen by providing its name.
Here, we plot the original data (in red) together with the inferred disaster rate (mean value in black). The marginal parameter distribution is displayed as a blue overlay, by default with a gamma correction of $\gamma=0.5$ to enhance relative differences in the width of the distribution (this behavior can be changed by the keyword argument gamma):
End of explanation
"""
plt.figure(figsize=(8, 4))
S.plot('accident_rate', t=1880, facecolor='r', alpha=0.5, label='1880')
S.plot('accident_rate', t=1900, facecolor='b', alpha=0.5, label='1900')
plt.legend()
plt.xlim([0, 5]);
"""
Explanation: From this first analysis, we may conclude that before 1880, an average of $\approx 3$ accidents per year were recorded. This changes significantly between 1880 and 1900, when the accident-rate drops to $\approx 1$ per year. We can also directly inspect the distribution of the accident rate at specific points in time, using the plot method with specified keyword argument t:
End of explanation
"""
S.eval('accident_rate < 1', t=1900);
"""
Explanation: Without the plot=True argument, this method only returns the parameter values (r1, r2, as specified when setting the observation model) as well as the corresponding probability values p1 and p2. Note that the returned probability values are always normalized to 1, so that we may easily evaluate the probability of certain conditions with the eval method, for example the probability that the accident rate was < 1 in the year 1900:
End of explanation
"""
|
kit-cel/wt
|
mloc/ch1_Preliminaries/steepest_gradient_descent.ipynb
|
gpl-2.0
|
import importlib
autograd_available = True
# if automatic differentiation is available, use it
try:
import autograd
except ImportError:
autograd_available = False
pass
if autograd_available:
import autograd.numpy as np
from autograd import grad
else:
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interactive
import ipywidgets as widgets
%matplotlib inline
if autograd_available:
print('Using autograd to compute gradients')
else:
print('Using hand-calculated gradient')
"""
Explanation: Steepest Gradient Descent Visualization
This code is provided as supplementary material of the lecture Machine Learning and Optimization in Communications (MLOC).<br>
This code illustrates:
* Steepest gradient descent in two dimensions
* Interactive demonstration of step size influence
End of explanation
"""
# Valley
def myfun(x):
return (x[0]**2)/16 + 9*(x[1]**2)
if autograd_available:
gradient = grad(myfun)
else:
def gradient(x):
grad = [x[0]/8, 18*x[1]]
return grad;
"""
Explanation: Specify the function to minimize as a simple Python function.<br>
We start with a very simple function that is given by
\begin{equation}
f(\boldsymbol{x}) = \frac{1}{16}x_1^2 + 9x_2^2
\end{equation}
The derivative is automatically computed using the autograd library, which returns a function that evaluates the gradient of myfun. The gradient can also be easily computed by hand and is given as
\begin{equation}
\nabla f(\boldsymbol{x}) = \begin{pmatrix} \frac{1}{8}x_1 \\ 18x_2 \end{pmatrix}
\end{equation}
End of explanation
"""
x = np.arange(-5.0, 5.0, 0.02)
y = np.arange(-2.0, 2.0, 0.02)
X, Y = np.meshgrid(x, y)
fZ = myfun([X,Y])
plt.figure(1,figsize=(10,6))
plt.rcParams.update({'font.size': 14})
plt.contourf(X,Y,fZ,levels=20)
plt.colorbar()
plt.xlabel("x")
plt.ylabel("y")
plt.show()
"""
Explanation: Plot the function as a 2d surface plot. Different colors indicate different values of the function.
End of explanation
"""
epsilon = 0.1
start = np.array([-4.0,-1.0])
points = []
while len(points) < 200:
points.append( (start,myfun(start)) )
start = start - np.array([epsilon*gradient(start)[0], epsilon*gradient(start)[1]])
"""
Explanation: Carry out the simple gradient descent strategy with a fixed step size epsilon. Carry out 200 iterations (without using a stopping criterion). The values of epsilon and the starting point are specified in the first lines of the code.
End of explanation
"""
trajectory_x = [points[i][0][0] for i in range(len(points))]
trajectory_y = [points[i][0][1] for i in range(len(points))]
plt.figure(1,figsize=(16,6))
plt.subplot(121)
plt.rcParams.update({'font.size': 14})
plt.contourf(X,Y,fZ,levels=20)
plt.xlim(-5,0)
plt.ylim(-2,2)
plt.xlabel("x")
plt.ylabel("y")
plt.plot(trajectory_x, trajectory_y,marker='.',color='w',linewidth=2)
plt.subplot(122)
plt.plot(range(0,len(points)),list(zip(*points))[1])
plt.grid(True)
plt.xlabel("Step i")
plt.ylabel("f(x^{(i)})")
plt.show()
"""
Explanation: Plot the trajectory (left subplot) and the value of the function over the iterations (right subplot). Note that the minimum of this function is attained at (0,0), where its value is 0.
End of explanation
"""
def plot_function(epsilon, start_x, start_y):
start = [start_x,start_y]
points = []
while len(points) < 200:
points.append( (start,myfun(start)) )
start = start - np.array([epsilon*gradient(start)[0], epsilon*gradient(start)[1]])
trajectory_x = [points[i][0][0] for i in range(len(points))]
trajectory_y = [points[i][0][1] for i in range(len(points))]
plt.figure(3,figsize=(15,5))
plt.subplot(121)
plt.rcParams.update({'font.size': 14})
plt.contourf(X,Y,fZ,levels=20)
plt.xlim(-5,0)
plt.ylim(-2,2)
plt.xlabel("x")
plt.ylabel("y")
plt.plot(trajectory_x, trajectory_y,marker='.',color='w',linewidth=2)
plt.subplot(122)
plt.plot(range(0,len(points)),list(zip(*points))[1])
plt.grid(True)
plt.xlabel("Step i")
plt.ylabel("f(x^{(i)})")
plt.show()
epsilon_values = np.arange(0.0,0.12,0.0002)
interactive_update = interactive(plot_function, \
epsilon = widgets.SelectionSlider(options=[("%g"%i,i) for i in epsilon_values], value=0.1, continuous_update=False,description='epsilon',layout=widgets.Layout(width='50%')),
start_x = widgets.FloatSlider(min=-5.0,max=0.0,step=0.001,value=-4.0, continuous_update=False, description='x'), \
start_y = widgets.FloatSlider(min=-1.0, max=1.0, step=0.001, value=-1.0, continuous_update=False, description='y'))
output = interactive_update.children[-1]
output.layout.height = '370px'
interactive_update
"""
Explanation: This is an interactive demonstration of gradient descent, where you can specify the starting point as well as the step size yourself. You can see that, depending on the step size, the minimization can become unstable.
End of explanation
"""
# Rosenbrock function
def rosenbrock_fun(x):
return (1-x[0])**2+100*((x[1]-(x[0])**2)**2)
if autograd_available:
rosenbrock_gradient = grad(rosenbrock_fun)
else:
def rosenbrock_gradient(x):
grad = [-2*(1-x[0])-400*(x[1]-x[0]**2)*x[0], 200*(x[1]-x[0]**2)]
return grad
xr = np.arange(-1.6, 1.6, 0.01)
yr = np.arange(-1.0, 3.0, 0.01)
Xr, Yr = np.meshgrid(xr, yr)
fZr = rosenbrock_fun([Xr,Yr])
def plot_function_rosenbrock(epsilon, start_x, start_y):
start = [start_x,start_y]
points = []
while len(points) < 1000:
points.append( (start,rosenbrock_fun(start)) )
rgradient = rosenbrock_gradient(start)
start = start - np.array([epsilon*rgradient[0], epsilon*rgradient[1]])
trajectory_x = [points[i][0][0] for i in range(len(points))]
trajectory_y = [points[i][0][1] for i in range(len(points))]
plt.figure(4,figsize=(15,5))
plt.subplot(121)
plt.rcParams.update({'font.size': 14})
plt.contourf(Xr,Yr,fZr,levels=20)
plt.xlabel("x")
plt.ylabel("y")
plt.plot(trajectory_x, trajectory_y,marker='.',color='w',linewidth=2)
plt.subplot(122)
plt.plot(range(0,len(points)),list(zip(*points))[1])
plt.grid(True)
plt.xlabel("Step i")
plt.ylabel("f(x^{(i)})")
plt.show()
epsilon_values = np.arange(0.0,0.007,0.00002)
interactive_update = interactive(plot_function_rosenbrock, \
epsilon = widgets.SelectionSlider(options=[("%g"%i,i) for i in epsilon_values], value=0.001, continuous_update=False,description='epsilon',layout=widgets.Layout(width='50%')), \
start_x = widgets.FloatSlider(min=-1.0,max=2.0,step=0.0001,value=0.6, continuous_update=False, description='x'), \
start_y = widgets.FloatSlider(min=-1.0, max=2.0, step=0.0001, value=0.1, continuous_update=False, description='y'))
output = interactive_update.children[-1]
output.layout.height = '350px'
interactive_update
"""
Explanation: Next, we consider the so-called Rosenbrock function, which is given by
\begin{equation}
f(\boldsymbol{x}) = (1-x_1)^2 + 100(x_2-x_1^2)^2
\end{equation}
Its gradient is given by
\begin{equation}
\nabla f(\boldsymbol{x}) = \begin{pmatrix} -2(1-x_1)-400(x_2-x_1^2)x_1 \\ 200(x_2-x_1^2)\end{pmatrix}
\end{equation}
The Rosenbrock function has a global minimum at (1,1) but is difficult to optimize due to its curved valley. For details, see https://en.wikipedia.org/wiki/Rosenbrock_function
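As a small sanity check (a sketch, assuming rosenbrock_fun and rosenbrock_gradient are defined as in the cell above), the function value should be 0 and the gradient should vanish at the minimum (1,1):
# Sketch: confirm that (1,1) is a stationary point with value 0
x_min = np.array([1.0, 1.0])
print(rosenbrock_fun(x_min))       # expected: 0.0
print(rosenbrock_gradient(x_min))  # expected: both components (numerically) 0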
End of explanation
"""
|
olgabot/cshl-singlecell-2017
|
notebooks/2.4_matrix_decomposition_pca_ica_nmf.ipynb
|
mit
|
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# use seaborn plotting style defaults
import seaborn as sns; sns.set()
"""
Explanation: <small><i>The PCA section of this notebook was put together by Jake Vanderplas. Source and license info is on GitHub.</i></small>
Dimensionality Reduction: Principal Component Analysis in-depth
Here we'll explore Principal Component Analysis, which is an extremely useful linear dimensionality reduction technique.
We'll start with our standard set of initial imports:
End of explanation
"""
np.random.seed(1)
X = np.dot(np.random.random(size=(2, 2)), np.random.normal(size=(2, 200))).T
plt.plot(X[:, 0], X[:, 1], 'o')
plt.axis('equal');
"""
Explanation: Introducing Principal Component Analysis
Principal Component Analysis is a very powerful unsupervised method for dimensionality reduction in data. It's easiest to visualize by looking at a two-dimensional dataset:
End of explanation
"""
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X)
print(pca.explained_variance_)
print(pca.components_)
"""
Explanation: We can see that there is a definite trend in the data. What PCA seeks to do is to find the Principal Axes in the data, and explain how important those axes are in describing the data distribution:
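For intuition, here is a minimal sketch (not part of the original notebook) that reproduces these quantities from the eigendecomposition of the covariance matrix of the centered data; X is the 200x2 array generated above, and the eigenvectors may differ from pca.components_ by a sign.
# Sketch: PCA "by hand" via the covariance matrix
Xc = X - X.mean(axis=0)
evals, evecs = np.linalg.eigh(np.cov(Xc.T))
print(evals[::-1])       # compare with pca.explained_variance_
print(evecs[:, ::-1].T)  # compare with pca.components_ (up to sign)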
End of explanation
"""
plt.plot(X[:, 0], X[:, 1], 'o', alpha=0.5)
for length, vector in zip(pca.explained_variance_, pca.components_):
v = vector * 3 * np.sqrt(length)
plt.plot([0, v[0]], [0, v[1]], '-k', lw=3)
plt.axis('equal');
"""
Explanation: To see what these numbers mean, let's view them as vectors plotted on top of the data:
End of explanation
"""
clf = PCA(0.95) # keep 95% of variance
X_trans = clf.fit_transform(X)
print(X.shape)
print(X_trans.shape)
"""
Explanation: Notice that one vector is longer than the other. In a sense, this tells us that that direction in the data is somehow more "important" than the other direction.
The explained variance quantifies this measure of "importance" for each direction.
Another way to think of it is that the second principal component could be completely ignored without much loss of information! Let's see what our data look like if we only keep 95% of the variance:
End of explanation
"""
X_new = clf.inverse_transform(X_trans)
plt.plot(X[:, 0], X[:, 1], 'o', alpha=0.2)
plt.plot(X_new[:, 0], X_new[:, 1], 'ob', alpha=0.8)
plt.axis('equal');
"""
Explanation: By specifying that we want to throw away 5% of the variance, the data is now compressed by a factor of 50%! Let's see what the data look like after this compression:
End of explanation
"""
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data
y = digits.target
pca = PCA(2) # project from 64 to 2 dimensions
Xproj = pca.fit_transform(X)
print(X.shape)
print(Xproj.shape)
plt.scatter(Xproj[:, 0], Xproj[:, 1], c=y, edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('tab10', 10))
plt.colorbar();
"""
Explanation: The light points are the original data, while the dark points are the projected version. We see that after truncating 5% of the variance of this dataset and then reprojecting it, the "most important" features of the data are maintained, and we've compressed the data by 50%!
This is the sense in which "dimensionality reduction" works: if you can approximate a data set in a lower dimension, you can often have an easier time visualizing it or fitting complicated models to the data.
Application of PCA to Digits
The dimensionality reduction might seem a bit abstract in two dimensions, but the projection and dimensionality reduction can be extremely useful when visualizing high-dimensional data. Let's take a quick look at the application of PCA to the digits data we looked at before:
End of explanation
"""
from decompositionplots import plot_image_components
sns.set_style('white')
plot_image_components(digits.data[0])
"""
Explanation: We could also do the same plot, using Altair and Pandas:
digits_smushed = pd.DataFrame(Xproj)
digits_smushed['target'] = digits.target
digits_smushed.head()
This gives us an idea of the relationship between the digits. Essentially, we have found the optimal stretch and rotation in 64-dimensional space that allows us to see the layout of the digits, without reference to the labels.
What do the Components Mean?
PCA is a very useful dimensionality reduction algorithm, because it has a very intuitive interpretation via eigenvectors.
The input data is represented as a vector: in the case of the digits, our data is
$$
x = [x_1, x_2, x_3 \cdots]
$$
but what this really means is
$$
image(x) = x_1 \cdot{\rm (pixel~1)} + x_2 \cdot{\rm (pixel~2)} + x_3 \cdot{\rm (pixel~3)} \cdots
$$
If we reduce the dimensionality in the pixel space to (say) 6, we recover only a partial image:
End of explanation
"""
from decompositionplots import plot_pca_interactive
plot_pca_interactive(digits.data)
"""
Explanation: But the pixel-wise representation is not the only choice. We can also use other basis functions, and write something like
$$
image(x) = {\rm mean} + x_1 \cdot{\rm (basis~1)} + x_2 \cdot{\rm (basis~2)} + x_3 \cdot{\rm (basis~3)} \cdots
$$
What PCA does is to choose optimal basis functions so that only a few are needed to get a reasonable approximation.
The low-dimensional representation of our data is the coefficients of this series, and the approximate reconstruction is the result of the sum:
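The series above can be written out directly; the following sketch (an addition, assuming digits and PCA are available as above) projects the first digit onto k components and rebuilds the approximation as the mean plus the sum of coefficients times basis images.
# Sketch: reconstruct one digit from its first k principal components
k = 6
pca_k = PCA(n_components=k).fit(digits.data)
coeffs = pca_k.transform(digits.data[:1])          # the low-dimensional representation
approx = pca_k.mean_ + coeffs @ pca_k.components_  # mean + sum_i x_i * (basis i)
print(np.abs(approx - digits.data[:1]).max())      # reconstruction error for this digit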
End of explanation
"""
sns.set()
pca = PCA().fit(X)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
"""
Explanation: Here we see that with only six PCA components, we recover a reasonable approximation of the input!
Thus we see that PCA can be viewed from two angles. It can be viewed as dimensionality reduction, or it can be viewed as a form of lossy data compression where the loss favors noise. In this way, PCA can be used as a filtering process as well.
Choosing the Number of Components
But how much information have we thrown away? We can figure this out by looking at the explained variance as a function of the components:
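To make this concrete, here is a small sketch (assuming pca has been fit on the full digits data as in the cell above) that reads off the number of components needed to retain a chosen fraction of the variance:
# Sketch: smallest number of components keeping at least 90% of the variance
cum_var = np.cumsum(pca.explained_variance_ratio_)
n_keep = np.argmax(cum_var >= 0.90) + 1
print(n_keep, cum_var[n_keep - 1])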
End of explanation
"""
import fig_code
fig_code.cocktail_party()
"""
Explanation: Here we see that our two-dimensional projection loses a lot of information (as measured by the explained variance) and that we'd need about 20 components to retain 90% of the variance. Looking at this plot for a high-dimensional dataset can help you understand the level of redundancy present in multiple observations.
Other Dimensionality Reduction Routines
Note that scikit-learn contains many other unsupervised dimensionality reduction routines; some that you might wish to try, and which are useful to know about:
sklearn.decomposition.PCA:
Principal Component Analysis
sklearn.decomposition.RandomizedPCA:
extremely fast approximate PCA implementation based on a randomized algorithm
sklearn.decomposition.SparsePCA:
PCA variant including L1 penalty for sparsity
sklearn.decomposition.FastICA:
Independent Component Analysis
sklearn.decomposition.NMF:
non-negative matrix factorization
sklearn.manifold.LocallyLinearEmbedding:
nonlinear manifold learning technique based on local neighborhood geometry
sklearn.manifold.IsoMap:
nonlinear manifold learning technique based on a sparse graph algorithm
Each of these has its own strengths & weaknesses, and areas of application. You can read about them on the scikit-learn website.
Independent component analysis
Here we'll learn about independent component analysis (ICA), a matrix decomposition method that's an alternative to PCA.
Independent Component Analysis (ICA)
ICA was originally created for the "cocktail party problem" for audio processing. It's an incredible feat that our brains are able to filter out all these different sources of audio, automatically!
(I really like how smug that guy looks - it's really over the top)
Source
Cocktail party problem
Given multiple sources of sound (people talking, the band playing, glasses clinking), how do you distinguish independent sources of sound? Imagine at a cocktail party you have multiple microphones stationed throughout, and you get to hear all of these different sounds.
Source
What if you applied PCA to the cocktail party problem?
Example adapted from the excellent scikit-learn documentation.
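A minimal, self-contained sketch of the idea (synthetic signals, not the notebook's fig_code demo): mix two independent sources with a known matrix and unmix them with FastICA; PCA applied to the same mixtures would instead return orthogonal directions of maximal variance.
# Sketch: unmix two synthetic sources with FastICA
from sklearn.decomposition import FastICA
t = np.linspace(0, 8, 2000)
S_true = np.c_[np.sin(2 * t), np.sign(np.cos(3 * t))]  # two independent sources
A = np.array([[1.0, 0.5], [0.5, 2.0]])                 # mixing matrix ("microphones")
X_mixed = S_true @ A.T                                  # observed mixtures
S_est = FastICA(n_components=2, random_state=0).fit_transform(X_mixed)
print(S_est.shape)  # recovered sources, up to permutation and scaling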
End of explanation
"""
from decompositionplots import explore_smushers
explore_smushers()
"""
Explanation: Discussion
What do you get when you apply PCA to the cocktail party problem?
How would you describe the difference between maximizing variance via orthogonal features (PCA) and finding independent signals (ICA)?
Non-negative matrix factorization
NMF is like ICA in that it tries to learn the parts of the data that make up the whole, by looking at how well the matrix can be reconstructed from those parts. This was originally published by Lee and Seung, "Learning the parts of objects by non-negative matrix factorization", and applied to image data below.
VQ here is vector quantization, yet another dimensionality reduction method; it is similar in spirit to K-means clustering, though not identical.
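As a rough illustration of the API (a sketch on the digits data loaded earlier, not on the face images or the RNA data discussed below), NMF factors a non-negative matrix into non-negative parts and per-sample weights:
# Sketch: factor the digits images into 6 non-negative "parts"
from sklearn.decomposition import NMF
nmf = NMF(n_components=6, random_state=0, max_iter=500)
W = nmf.fit_transform(digits.data)  # per-sample weights
H = nmf.components_                 # non-negative basis images ("parts")
print(W.shape, H.shape)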
Back to biology!
Enough images and signal processing ... where is the RNA!??!? Let's apply these algorithms to some biological datasets.
We'll use the 300-cell dataset (6 clusters, 50 cells each) data from the Macosko2015 paper.
Rather than plotting each cell in each component, we'll look at the mean (or median) contribution of each component to the cell types.
End of explanation
"""
|
tpin3694/tpin3694.github.io
|
regex/match_any_of_series_of_words.ipynb
|
mit
|
# Load regex package
import re
"""
Explanation: Title: Match Any Of A Series Of Words
Slug: match_any_of_series_of_words
Summary: Match Any Of A Series Of Words
Date: 2016-05-01 12:00
Category: Regex
Tags: Basics
Authors: Chris Albon
Based on: Regular Expressions Cookbook
Preliminaries
End of explanation
"""
# Create a variable containing a text string
text = 'The quick brown fox jumped over the lazy brown bear.'
"""
Explanation: Create some text
End of explanation
"""
# Find any of fox, snake, or bear
re.findall(r'\b(fox|snake|bear)\b', text)
"""
Explanation: Apply regex
End of explanation
"""
|
jArumugam/python-notes
|
libraries/DS05 Web Scraping.ipynb
|
mit
|
from bs4 import BeautifulSoup
import requests
import pandas as pd
from pandas import Series,DataFrame
"""
Explanation: Web Scraping in Python
Source
In this appendix lecture we'll go over how to scrape information from the web using Python.
We'll go to a website, decide what information we want, see where and how it is stored, then scrape it and set it as a pandas DataFrame!
Some things you should consider before web scraping a website:
1.) You should check a site's terms and conditions before you scrape them.
2.) Space out your requests so you don't overload the site's server, doing this could get you blocked.
3.) Scrapers break after time - web pages change their layout all the time, you'll more than likely have to rewrite your code.
4.) Web pages are usually inconsistent, more than likely you'll have to clean up the data after scraping it.
5.) Every web page and situation is different, you'll have to spend time configuring your scraper.
To learn more about HTML I suggest these two resources:
W3School
Codecademy
There are three modules we'll need in addition to Python:
1.) BeautifulSoup, which you can download by typing: pip install beautifulsoup4 or conda install beautifulsoup4 (for the Anaconda distribution of Python) in your command prompt.
2.) lxml, which you can download by typing: pip install lxml or conda install lxml (for the Anaconda distribution of Python) in your command prompt.
3.) requests, which you can download by typing: pip install requests or conda install requests (for the Anaconda distribution of Python) in your command prompt.
We'll start with our imports:
End of explanation
"""
url = 'http://www.ucop.edu/operating-budget/budgets-and-reports/legislative-reports/2013-14-legislative-session.html'
"""
Explanation: For our quick web scraping tutorial, we'll look at some legislative reports from the University of California Web Page. Feel free to experiment with other webpages, but remember to be cautious and respectful in what you scrape and how often you do it. Always check the legality of a web scraping job.
Let's go ahead and set the url.
End of explanation
"""
# Request content from web page
result = requests.get(url)
c = result.content
# Set as Beautiful Soup Object
soup = BeautifulSoup(c, 'lxml')
"""
Explanation: Now let's go ahead and set up requests to grab content form the url, and set it as a Beautiful Soup object.
End of explanation
"""
# Go to the section of interest
summary = soup.find("div",{'class':'list-land','id':'content'})
# Find the tables in the HTML
tables = summary.find_all('table')
"""
Explanation: Now we'll use Beautiful Soup to search for the table we want to grab!
End of explanation
"""
# Set up empty data list
data = []
# Set rows as first indexed object in tables with a row
rows = tables[0].findAll('tr')
# now grab every HTML cell in every row
for tr in rows:
cols = tr.findAll('td')
# Check to see if text is in the row
for td in cols:
text = td.find(text=True)
        print(text, end=' ')
data.append(text)
"""
Explanation: Now we need to use Beautiful Soup to find the table entries. A 'td' tag defines a standard cell in an HTML table. The 'tr' tag defines a row in an HTML table.
We'll parse through our tables object and try to find each cell using the findALL('td') method.
There are tons of options to use with findALL in beautiful soup. You can read about them here.
End of explanation
"""
data
"""
Explanation: Let's see what the data list looks like
End of explanation
"""
# Set up empty lists
reports = []
date = []
# Set index counter
index = 0
# Go find the pdf cells
for item in data:
if 'pdf' in item:
# Add the date and reports
date.append(data[index-1])
# Get rid of \xa0
reports.append(item.replace(u'\xa0', u' '))
index += 1
"""
Explanation: Now we'll use a for loop to go through the list and grab only the cells with a pdf file in them, we'll also need to keep track of the index to set up the date of the report.
End of explanation
"""
# Set up Dates and Reports as Series
date = Series(date)
reports = Series(reports)
# Concatenate into a DataFrame
legislative_df = pd.concat([date,reports],axis=1)
# Set up the columns
legislative_df.columns = ['Date','Reports']
# Show the finished DataFrame
legislative_df
"""
Explanation: You'll notice a line that takes care of '\xa0'. This is due to a unicode issue that occurs if you don't handle it. Web pages can be messy and inconsistent, and it is very likely you'll have to do some research to take care of problems like these.
Here's the link I used to solve this particular issue: StackOverflow Page
Now all that is left is to organize our data into a pandas DataFrame!
End of explanation
"""
# http://docs.python-guide.org/en/latest/scenarios/scrape/
from lxml import html
import requests
page = requests.get('http://econpy.pythonanywhere.com/ex/001.html')
tree = html.fromstring(page.content)
# inspect element
# <div title="buyer-name">Carson Busses</div>
# <span class="item-price">$29.95</span>
#This will create a list of buyers:
buyers = tree.xpath('//div[@title="buyer-name"]/text()')
#This will create a list of prices
prices = tree.xpath('//span[@class="item-price"]/text()')
print('Buyers: ', buyers)
print('Prices: ', prices)
# https://www.flightradar24.com/56.16,-52.58/7
# http://stackoverflow.com/questions/39489168/how-to-scrape-real-time-streaming-data-with-python
# If you look at the network tab in the developer console in Chrome (for example), you'll see the requests to https://data-live.flightradar24.com/zones/fcgi/feed.js?bounds=59.09,52.64,-58.77,-47.71&faa=1&mlat=1&flarm=1&adsb=1&gnd=1&air=1&vehicles=1&estimated=1&maxage=7200&gliders=1&stats=1
import requests
from bs4 import BeautifulSoup
import time
def get_count():
url = "https://data-live.flightradar24.com/zones/fcgi/feed.js?bounds=57.78,54.11,-56.40,-48.75&faa=1&mlat=1&flarm=1&adsb=1&gnd=1&air=1&vehicles=1&estimated=1&maxage=7200&gliders=1&stats=1"
# Request with fake header, otherwise you will get an 403 HTTP error
r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
# Parse the JSON
data = r.json()
counter = 0
# Iterate over the elements to get the number of total flights
for element in data["stats"]["total"]:
counter += data["stats"]["total"][element]
return counter
while True:
print(get_count())
time.sleep(8)
"""
Explanation: There are other less intense options for web scraping:
Check out these two companies:
https://import.io/
https://www.kimonolabs.com/
Aside
End of explanation
"""
|
thiagoqd/queirozdias-deep-learning
|
sentiment-rnn/Sentiment RNN.ipynb
|
mit
|
import numpy as np
import tensorflow as tf
with open('../sentiment_network/reviews.txt', 'r') as f:
reviews = f.read()
with open('../sentiment_network/labels.txt', 'r') as f:
labels = f.read()
reviews[:2000]
"""
Explanation: Sentiment Analysis with an RNN
In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedforward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.
The architecture for this network is shown below.
<img src="assets/network_diagram.png" width=400px>
Here, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on its own.
From the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.
We don't care about the sigmoid outputs except for the very last one; the rest can be ignored. We'll calculate the cost from the output of the last step and the training label.
End of explanation
"""
from string import punctuation
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
all_text[:2000]
words[:100]
"""
Explanation: Data preprocessing
The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.
You can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \n. To deal with those, I'm going to split the text into each review using \n as the delimiter. Then I can combine all the reviews back together into one big string.
First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.
End of explanation
"""
# Create your dictionary that maps vocab words to integers here
vocab_to_int =
# Convert the reviews to integers, same shape as reviews list, but with integers
reviews_ints =
"""
Explanation: Encoding the words
The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.
Exercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0.
Also, convert the reviews to integers and store the reviews in a new list called reviews_ints.
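One possible solution sketch (there are many valid approaches), using a Counter so the most frequent words get the smallest integers; words and reviews are the lists built during preprocessing.
# Sketch: build the word -> integer mapping (starting at 1) and encode every review
from collections import Counter
word_counts = Counter(words)
vocab = sorted(word_counts, key=word_counts.get, reverse=True)
vocab_to_int = {word: i for i, word in enumerate(vocab, 1)}
reviews_ints = [[vocab_to_int[word] for word in review.split()] for review in reviews]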
End of explanation
"""
# Convert labels to 1s and 0s for 'positive' and 'negative'
labels =
"""
Explanation: Encoding the labels
Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1.
Exercise: Convert labels from positive and negative to 1 and 0, respectively.
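One possible sketch: split the raw label text on newlines and map 'positive' to 1 and 'negative' to 0.
# Sketch: encode the labels as a numpy array of 1s and 0s
labels = np.array([1 if label == 'positive' else 0 for label in labels.split('\n')])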
End of explanation
"""
from collections import Counter
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
"""
Explanation: If you built labels correctly, you should see the next output.
End of explanation
"""
# Filter out that review with 0 length
reviews_ints =
"""
Explanation: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 words.
Exercise: First, remove the review with zero length from the reviews_ints list.
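One way to sketch this: keep only the indices of non-empty reviews and filter reviews_ints and labels together so they stay aligned.
# Sketch: drop zero-length reviews and their labels
non_zero_idx = [i for i, review in enumerate(reviews_ints) if len(review) > 0]
reviews_ints = [reviews_ints[i] for i in non_zero_idx]
labels = np.array([labels[i] for i in non_zero_idx])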
End of explanation
"""
seq_len = 200
features =
"""
Explanation: Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from reviews_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use only the first 200 words as the feature vector.
This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.
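A possible sketch (assuming zero-length reviews have already been removed): left-pad each encoded review with zeros, truncating at seq_len.
# Sketch: build a (num_reviews, seq_len) integer array, left-padded with zeros
features = np.zeros((len(reviews_ints), seq_len), dtype=int)
for i, review in enumerate(reviews_ints):
    features[i, -len(review):] = np.array(review)[:seq_len]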
End of explanation
"""
features[:10,:100]
"""
Explanation: If you build features correctly, it should look like that cell output below.
End of explanation
"""
split_frac = 0.8
train_x, val_x =
train_y, val_y =
val_x, test_x =
val_y, test_y =
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}".format(val_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
"""
Explanation: Training, Validation, Test
With our data in nice shape, we'll split it into training, validation, and test sets.
Exercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data.
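A sketch of one straightforward split, keeping the first split_frac of the data for training and halving the remainder into validation and test sets:
# Sketch: split features/labels into train, validation and test sets
split_idx = int(len(features) * split_frac)
train_x, val_x = features[:split_idx], features[split_idx:]
train_y, val_y = labels[:split_idx], labels[split_idx:]
half_idx = int(len(val_x) * 0.5)
val_x, test_x = val_x[:half_idx], val_x[half_idx:]
val_y, test_y = val_y[:half_idx], val_y[half_idx:]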
End of explanation
"""
lstm_size = 256
lstm_layers = 1
batch_size = 500
learning_rate = 0.001
"""
Explanation: With train, validation, and test fractions of 0.8, 0.1, 0.1, the final shapes should look like:
Feature Shapes:
Train set: (20000, 200)
Validation set: (2500, 200)
Test set: (2501, 200)
Build the graph
Here, we'll build the graph. First up, defining the hyperparameters.
lstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.
lstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting.
batch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.
learning_rate: Learning rate
End of explanation
"""
n_words = len(vocab)
# Create the graph object
graph = tf.Graph()
# Add nodes to the graph
with graph.as_default():
inputs_ =
labels_ =
keep_prob =
"""
Explanation: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.
Exercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder.
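One possible sketch of the three placeholders (these lines would sit inside the with graph.as_default(): block); note that keep_prob gets no shape because it is a scalar.
# Sketch: placeholders for inputs, labels and the dropout keep probability
inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')
labels_ = tf.placeholder(tf.int32, [None, None], name='labels')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')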
End of explanation
"""
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300
with graph.as_default():
embedding =
embed =
"""
Explanation: Embedding
Now we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.
Exercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. So, if the embedding layer has 200 units, the function will return a tensor with size [batch_size, seq_len, 200].
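One possible sketch of the embedding layer (again inside the with graph.as_default(): block): a trainable matrix of shape (n_words, embed_size) that is looked up with the integer review vectors.
# Sketch: embedding matrix and lookup
embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))
embed = tf.nn.embedding_lookup(embedding, inputs_)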
End of explanation
"""
with graph.as_default():
# Your basic LSTM cell
lstm =
# Add dropout to the cell
drop =
# Stack up multiple LSTM layers, for deep learning
cell =
# Getting an initial state of all zeros
initial_state = cell.zero_state(batch_size, tf.float32)
"""
Explanation: LSTM cell
<img src="assets/network_diagram.png" width=400px>
Next, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.
To create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation:
tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=<function tanh at 0x109f1ef28>)
you can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
to create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like
drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
Most of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell:
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
Here, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list.
So the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an architectural viewpoint, just a more complicated graph in the cell.
Exercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell.
Here is a tutorial on building RNNs that will help you out.
End of explanation
"""
with graph.as_default():
outputs, final_state =
"""
Explanation: RNN forward pass
<img src="assets/network_diagram.png" width=400px>
Now we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network.
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)
Above I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.
Exercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed.
End of explanation
"""
with graph.as_default():
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
cost = tf.losses.mean_squared_error(labels_, predictions)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
"""
Explanation: Output
We only care about the final output, and we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], then calculate the cost from that and labels_.
End of explanation
"""
with graph.as_default():
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
"""
Explanation: Validation accuracy
Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass.
End of explanation
"""
def get_batches(x, y, batch_size=100):
n_batches = len(x)//batch_size
x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
for ii in range(0, len(x), batch_size):
yield x[ii:ii+batch_size], y[ii:ii+batch_size]
"""
Explanation: Batching
This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].
End of explanation
"""
epochs = 10
with graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
iteration = 1
for e in range(epochs):
state = sess.run(initial_state)
for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 0.5,
initial_state: state}
loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
if iteration%5==0:
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Train loss: {:.3f}".format(loss))
if iteration%25==0:
val_acc = []
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
for x, y in get_batches(val_x, val_y, batch_size):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: val_state}
batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
val_acc.append(batch_acc)
print("Val acc: {:.3f}".format(np.mean(val_acc)))
iteration +=1
saver.save(sess, "checkpoints/sentiment.ckpt")
"""
Explanation: Training
Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists.
End of explanation
"""
test_acc = []
with tf.Session(graph=graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
test_state = sess.run(cell.zero_state(batch_size, tf.float32))
for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: test_state}
batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
test_acc.append(batch_acc)
print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
"""
Explanation: Testing
End of explanation
"""
|
LSSTC-DSFP/LSSTC-DSFP-Sessions
|
Sessions/Session01/Day4/SGRandForestSolutions.ipynb
|
mit
|
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Supervised Machine Learning Break Out:
Separating Stars and Galaxies from SDSS
Version 0.1
Many (nearly all?) of the science applications for LSST data will rely on the accurate separation of stars and galaxies in the LSST imaging data. As an example, imagine measuring galaxy clustering without knowing which sources are galaxies and which are stars. During this exercise, we will utilize supervised machine-learning methods to separate extended (galaxies) and point sources (stars, QSOs) in imaging data. These methods are highly flexible, and as a result can classify sources at higher fidelity than methods that simply make cuts in a low-dimensional space.
By AA Miller (c) 2016 Jul 11
End of explanation
"""
# execute this cell
from astroquery.sdss import SDSS # enables direct queries to the SDSS database
"""
Explanation: Problem 1) Obtain and Examine Training Data
As a reminder, for supervised-learning problems we use a training set, sources with known labels, i.e. they have been confirmed as normal stars, QSOs, or galaxies, to build a model to classify new observations where we do not know the source label.
The training set for this exercise will be pulled from SDSS. For features, we will start with each of the magnitude measurements in the $r$-band. This results in a total of 8 features (more than the Iris data set, but significantly fewer than everything in SDSS).
The first step when pursuing a machine learning problem is to examine the potential training set. A machine-learning model is only as good as its training set. This point cannot be emphasized enough. Machine-learning models are data-driven; they do not capture any physical theory, and thus it is essential that the training set satisfy several criteria. Two of the most important criteria for a good training set are:
the training set should be unbiased [this is actually really hard to achieve in astronomy since most surveys are magnitude limited]
the training set should be representative of the (unobserved or field) population of sources [a training set with no stars will yield a model incapable of discovering point sources]
As a first step (and this is always a good idea), we are going to examine the training set to see if anything suspicious is going on. We will use astroquery to directly access the SDSS database, and store the results in an astropy Table.
Note The SDSS API for astroquery is not standard for the package, which leads to a warning. This is not, however, a problem for our purposes.
End of explanation
"""
TSquery = """SELECT TOP 10000
p.psfMag_r, p.fiberMag_r, p.fiber2Mag_r, p.petroMag_r,
p.deVMag_r, p.expMag_r, p.modelMag_r, p.cModelMag_r,
s.class
FROM PhotoObjAll AS p JOIN specObjAll s ON s.bestobjid = p.objid
WHERE p.mode = 1 AND s.sciencePrimary = 1 AND p.clean = 1 AND s.class != 'QSO'
ORDER BY p.objid ASC
"""
SDSSts = SDSS.query_sql(TSquery)
SDSSts
"""
Explanation: While it is possible to look up each of the names of the $r$-band magnitudes in the SDSS PhotoObjAll schema, the schema list is long, and thus there is a chance of missing one column. Better to identify the desired columns using the database itself:
SELECT COLUMN_NAME
FROM INFORMATION_SCHEMA.Columns
WHERE table_name = 'PhotoObjAll' AND
COLUMN_NAME LIKE '%Mag/_r' ESCAPE '/'
which returns the following list of columns: psfMag_r, fiberMag_r, fiber2Mag_r, petroMag_r, deVMag_r, expMag_r, modelMag_r, cModelMag_r.
We now select these magnitude measurements for 10000 stars and galaxies from SDSS. Additionally, we join these results with the SpecObjAll table to obtain their spectroscopic classifications, which will serve as labels for the machine-learning model.
Note - the SDSS database contains duplicate observations, flagged observations, and non-detections, which we condition the query to exclude (as explained further below). We also exclude quasars, as the precise photometric classification of these objects is ambiguous: low-$z$ AGN have resolvable host galaxies, while high-$z$ QSOs are point-sources. Query conditions:
p.mode = 1 select only the primary photometric detection of a source
s.sciencePrimary = 1 select only the primary spectroscopic detection of a source (together with above, prevents duplicates)
p.clean = 1 the SDSS clean flag excludes flagged observations and sources with non-detections
s.class != 'QSO' removes potentially ambiguous QSOs from the training set
End of explanation
"""
import seaborn as sns
sns.pairplot(SDSSts.to_pandas(), hue = 'class', diag_kind = 'kde')
"""
Explanation: To reiterate a point from above: data-driven models are only as good as the training set. Now that we have a potential training set, it is essential to inspect the data for any peculiarities.
Problem 1a Visualize the training set (an intentionally open-ended question, think back to yesterday's exercises) to inspect the features and whether or not they may be useful for a machine-learning model.
Hint astropy Tables can be converted to pandas DataFrames with the .to_pandas() operator, which may be helpful.
End of explanation
"""
from sklearn.cross_validation import train_test_split
rs = 23 # we are in Chicago after all
feats = list(SDSSts.columns)
feats.remove('class')
X = np.array(SDSSts[feats].to_pandas())
y = np.array(SDSSts['class'])
train_X, val_X, train_y, val_y = train_test_split( X, y, test_size = 0.3, random_state = rs)
"""
Explanation: Problem 1b Can the acquired training set be used to separate stars and galaxies? [Write your reasoning below]
Type response to 1b here
To test the efficacy of the machine-learning model we need to separate the spectroscopic sample into an independent training and validation set. There is no set number for the precise fraction of the data to include in the validation set, and typical choices vary between $\sim{0.2}-0.4$. For this problem we will adopt 0.3.
sklearn.cross_validation has a handy function train_test_split, which will simplify this process.
Problem 1c Split the 10k spectroscopic sources 70-30 into training and validation sets. Save the results in arrays called: train_X, train_y, val_X, val_y, respectively. Use rs for the random_state when selecting a random portion of the spectroscopic set.
Hint - recall that sklearn utilizes X, a 2D np.array(), and y as the features and labels arrays, respectively.
End of explanation
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
RFmod = RandomForestClassifier(n_estimators = 50)
RFmod.fit(train_X, train_y)
val_preds = RFmod.predict(val_X)
print("The raw features produce a model with accuracy ~{:.4f}".format(accuracy_score(val_y, val_preds)))
"""
Explanation: Problem 2) Feature Engineering
It has been said that all machine learning is an exercise in feature engineering. In other words, all machine-learning models are worthless without the correct set of features. Here we will quickly examine the set of features selected above to test how well they separate stars and galaxies. We will use the Random Forest (RF) algorithm Breiman 2001 as implemented by sklearn. RandomForestClassifier is part of the sklearn.ensemble module.
Problem 2a Train a RF model on your training set, and determine the accuracy of the model using the validation set. Set n_estimators, the number of trees in the forest, to 25 in the model.
Hint - you may find sklearn.metrics.accuracy_score useful for this exercise.
End of explanation
"""
SDSSpred = SDSSts['psfMag_r'] - SDSSts['cModelMag_r'] > 0.145
SDSSphot_class = np.empty(len(SDSSpred), dtype = '|S6')
SDSSphot_class[SDSSpred] = 'GALAXY'
SDSSphot_class[SDSSpred == False] = 'STAR'
print("The SDSS phot model produces an accuracy ~{:.4f}".format(accuracy_score(SDSSts['class'], SDSSphot_class)))
"""
Explanation: Not bad for a model with no optimization!
For reference, the SDSS photometric classifier uses a single hard cut to separate stars and galaxies in imaging data:
$$\mathtt{psfMag} - \mathtt{cmodelMag} > 0.145,$$
sources that satisfy this criteria are considered galaxies.
Problem 2b Determine the accuracy of the SDSS photometric model on the 10k spectroscopic sources obtained above. This accuracy will represent the "benchmark" upon which we hope to improve with the machine learning model.
End of explanation
"""
train_Xnorm = train_X[:,0][:,np.newaxis] - train_X[:,1:]
val_Xnorm = val_X[:,0][:,np.newaxis] - val_X[:,1:]
RFmod = RandomForestClassifier(n_estimators = 50)
RFmod.fit(train_Xnorm, train_y)
val_predsNorm = RFmod.predict(val_Xnorm)
print("The normalized features produce a model with accuracy ~{:.4f}".format(accuracy_score(val_y, val_predsNorm)))
"""
Explanation: One advantage of the SDSS model is that the magnitude measurements have been normalized. The SDSS spectroscopic training set is biased towards stars at the bright end, and towards galaxies at the faint end. (This can be quickly confirmed by plotting histograms, or better yet KDEs, of each.) While the universe is biased in this way, it is not clear that the SDSS targeting function properly captured this bias. Thus, we will attempt to engineer the features to remove this bias (similar to SDSS).
Problem 2c Normalize the feature vector relative to psfMag_r, and refit the RF model to see if the accuracy improves when predicting the class of sources in the validation set.
Hint - be sure you apply the exact same normalization to both the training and validation set
End of explanation
"""
print(RFmod.feature_importances_) # print the importances
indicies = np.argsort(RFmod.feature_importances_)[::-1] # sort the features most imp. --> least imp.
# recall that all features are normalized relative to psfMag_r
featStr = ", \n".join(['psfMag_r - {:s}'.format(x) for x in list(np.array(feats)[1:][indicies])])
print("The relative importance of the features is: \n{:s}".format(featStr))
"""
Explanation: Problem 2d How does the accuracy of each of these 3 models compare?
If normalizing the features did not improve the accuracy of the model, can you think of any reasons why?
Hint - think about the name of the algorithm.
Type response to 2d here
While there are additional feature engineering steps one could take:
remove correlated features
add contextual features (e.g., galactic latitude? distance to nearest neighbor?)
create new features (ellipticity measurements?)
we will stop at this stage. Generally speaking, RF is immune to correlated features (this is not true of all machine-learning algorithms), contextual features, especially galactic latitude, can bias the model due to the manner in which the training data were obtained.
Finally, we exploit one of the most novel aspects of RF - the ability to measure the relative importance of each feature. This is accomplished by randomly shuffling the values of a particular feature, and examining the decrease in the model's overall accuracy. The relative feature importances can be accessed using the .feature_importances_ attribute associated with the RandomForestClassifier() class. The higher the value, the more important the feature.
Problem 2e Calculate the relative importance of each feature. Which feature is most important? Can you make sense of the feature ordering?
Hint - do not dwell too long on the final ordering of the features.
End of explanation
"""
from sklearn.cross_validation import cross_val_score
CVscores = cross_val_score(RFmod, train_Xnorm, train_y, cv = 5)
print(CVscores)
print("The CV accuracy for the training set is {:.4f}".format(np.mean(CVscores)))
"""
Explanation: In some cases, the final step in feature engineering is identifying the useful features and removing those that are noisy and/or provide no information. Again, RF is relatively immune to these problems, and in this case (7 features) there are few features. Thus, we do not need to remove any features. Nevertheless, we see that the psfMag_r -fiber2Mag_r feature has very little importance. This is due to the high correlation between fiberMag_r and fiber2Mag_r. It is likely we could remove one of these features without harming the model. We could even measure the model improvement (if any) via cross-validation or the validation set.
Problem 3) Optimize Model Tuning Parameters
All machine-learning models have tuning parameters. In brief, these parameters capture the smoothness of the model in the multidimensional feature space. Whether the model is smooth or coarse is an application-dependent problem, though one should always be wary of over-fitting or under-fitting the data. Generally speaking, RF (and most tree-based methods) have 3 flavors of tuning parameter:
$N_\mathrm{tree}$ - the number of trees in the forest n_estimators (default: 10) in sklearn
$m_\mathrm{try}$ - the number of (random) features to explore as splitting criteria at each node max_features (default: sqrt(n_features)) in sklearn
Pruning criteria - defined stopping criteria for ending continued growth of the tree, there are many choices for this in sklearn (My preference is min_samples_leaf (default: 1) which sets the minimum number of sources allowed in a terminal node, or leaf, of the tree)
An important lesson at this stage: to avoid overfitting the validation set cannot be used to optimize the tuning parameters. This leaves two potential options: further split the training set into a training and test set or via cross validation (CV). As a reminder, in CV, the training set is partitioned, and sources outside each individual partition are used to predict the class of sources within the partition. This procedure is then repeated for each partition, so that every source in the training set has exactly one prediction, allowing a measurement of the cross-validation error. The most common form of CV is known as K-fold, where the training set is divided into $k$ partitions of equal size. In order of increasing computational time, typical choices for $k$ are 2, 3, 10, or $N$, where $N$ is the total number of sources in the training set, this is known as leave-one-out CV.
Fortunately, scikit-learn has a cross validation module that simplifies CV. The CV accuracy can be obtained via cross_validation.cross_val_score(), which requires the model, training features, and training classes as arguments. The default is $k = 3$ folds. Finally, note that this method uses Stratified K-folds, which is slightly different from the procedure described above.
Problem 3a Determine the 5-fold CV accuracy for the training set.
End of explanation
"""
CVscores1 = cross_val_score(RandomForestClassifier(n_estimators = 1), train_Xnorm, train_y, cv = 5)
CVscores10 = cross_val_score(RandomForestClassifier(n_estimators = 10), train_Xnorm, train_y, cv = 5)
CVscores100 = cross_val_score(RandomForestClassifier(n_estimators = 100), train_Xnorm, train_y, cv = 5)
print("The CV accuracy for 1, 10, 100 trees is {:.4f}, {:.4f}, {:.4f}".format(np.mean(CVscores1), np.mean(CVscores10), np.mean(CVscores100)))
"""
Explanation: Now, let's try to develop some intuition for the effects of changing the tuning parameters.
Problem 3b Determine the 5-fold CV accuracy for models with $N_\mathrm{tree}$ = 1, 10, 100.
End of explanation
"""
from sklearn.grid_search import GridSearchCV
grid_results = GridSearchCV(RandomForestClassifier(),
{'n_estimators': [30, 100, 300], 'max_features': [1, 3, 7], 'min_samples_leaf': [1,10]},
cv = 5)
grid_results.fit(train_Xnorm, train_y)
print("The optimal parameters are:")
for key, item in grid_results.best_params_.items(): # warning - slightly different meanings in Py2 & Py3
print("{}: {}".format(key, item))
"""
Explanation: From the above results, we can see that 1 tree is likely too few, while the results seem to be stabilizing with $N_\mathrm{tree} \gtrsim 10$. So - how does one actually determine the optimal set of tuning parameters? Brute force. At least in our case we will use brute force, as the data set and number of tuning parameters is small (there are alternative approaches that are less time consuming when this isn't the case). We will perform a grid search over the three tuning parameters, measuring the CV accuracy at each point within the 3D grid, and picking the point with the highest accuracy. Two general rules of thumb: (i) if the highest accuracy occurs at the edge of the grid, it is best to refit a new grid centered on that point, and (ii) the results should be stable in the vicinity of the grid maximum. If this is not the case, then it is likely that the model has been overfit.
Once again, scikit-learn has made our lives significantly easier by providing a convenience function GridSearchCV, which will enable a single-line call to optimize the model. One thing to keep in mind (though it likely won't be a problem here): depending on how GridSearchCV is called, it is possible to run into memory issues.
Problem 3c Perform a 3-fold CV grid search to optimize the RF star-galaxy model. Remember the rules of thumb. What are the optimal tuning parameters for the model?
Hint - think about the computational runtime based on the number of points in the grid. Do not start with a very dense or large grid.
End of explanation
"""
RFmod = RandomForestClassifier(n_estimators=300, max_features=3, min_samples_leaf=10, n_jobs=-1).fit(train_Xnorm, train_y)
val_predsNorm = RFmod.predict(val_Xnorm)
print("The accuracy of the optimized model is ~{:.4f}".format(accuracy_score(val_y, val_predsNorm)))
"""
Explanation: Problem 4) Evaluating the Accuracy of the Model
Now that the model has been fully optimized, we can estimate its accuracy with the validation set. This estimate provides insight for the model accuracy expected for photometric data where we do not have spectroscopy, and thus cannot validate the predictions.
Problem 4a Train a RF model using the optimized tuning parameters from 3c, and estimate the classification accuracy of the model using the validation set.
End of explanation
"""
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(val_y, val_predsNorm)
print(cm)
"""
Explanation: A standard method for evaluating classification models is the confusion matrix, which summarizes both the accuracy for each individual class, as well the most-likely misclassifications for each class. [For the 2 class problem, such as ours, these quantities are essentially one and the same, but for multi-class problems this is highly helpful.] In examining the matrix, "confusion" for the classifier is summarized. In a confusion matrix, one axis shows the true class and the other shows the predicted class. For a perfect classifier all of the power will be along the diagonal, while confusion is represented by off-diagonal signal.
Like almost everything else we have encountered during this exercise, scikit-learn makes it easy to compute a confusion_matrix within the sklearn.metrics module.
Problem 4b Calculate (and examine) the confusion matrix for the training set from Part 1.
End of explanation
"""
cmNorm = cm.astype(float)/cm.sum(axis = 1)[:,np.newaxis]
plt.imshow(cmNorm, interpolation = 'Nearest', cmap = 'Blues', vmin = 0, vmax = 1)
plt.colorbar()
tick_names = ["GALAXY", "STAR"]
tick_marks = np.arange(len(tick_names))
plt.xticks(tick_marks, tick_names, rotation=45)
plt.yticks(tick_marks, tick_names)
plt.ylabel('True label')
plt.xlabel('Predicted label')
"""
Explanation: Visual representations are often the most useful means for interpreting the confusion matrix (this is especially true for multiclass problems).
Problem 4c Plot the normalized confusion matrix for the validation set.
Hint - you might find the sklearn confusion matrix tutorial helpful.
End of explanation
"""
from sklearn.metrics import roc_curve
train_yInt = np.zeros(len(train_y), dtype = int)
train_yInt[train_y == 'STAR'] = 1
val_yInt = np.zeros(len(val_y), dtype = int)
val_yInt[val_y == 'STAR'] = 1
RFmod = RandomForestClassifier(n_estimators=300, max_features=3, min_samples_leaf=10)
RFmod.fit(train_Xnorm, train_yInt)
val_predsProba = RFmod.predict_proba(val_Xnorm)
fpr, tpr, thresh = roc_curve(val_yInt, val_predsProba[:,1])
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
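# Hedged extra (not requested by the problem): the area under the ROC curve gives a
# single-number summary of the curve plotted above.
from sklearn.metrics import roc_auc_score
print("ROC AUC = {:.4f}".format(roc_auc_score(val_yInt, val_predsProba[:,1])))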
"""
Explanation: Note - if your plot looks a little funny, that is likely because you imported seaborn earlier. Remember, importing seaborn also changes the plotting defaults, but you can change seaborn's default appearance.
For this problem, where the accuracy is very high and there are only two classes, the confusion matrix is not necessarily that insightful. Another common machine-learning evaluation tool is the Receiver Operating Characteristic (ROC) curve, which is especially useful for threshold tuning.
One of the useful properties of RF models, which has not been mentioned yet, is that RF provides probabilistic predictions. In short, a source that is classified as a star in 95% of trees is much more likely to be a star than a source classified as such in only 53% of trees. [It is better to think of these probabilities as relative rankings based on the training set rather than true probabilities. Biases in the training set, which will almost certainly always exist in astronomy, result in "probabilities" that are not properly calibrated.] These "probabilities" are highly useful because they allow us to tune the purity and completeness of the final model, and they can be accessed for some (not all) sklearn models using the .predict_proba() function.
The model output is tuned via the ROC curve (or the somewhat related precision-recall curve), which plots the true positive rate (TPR) against the false positive rate (FPR):
$$ \mathrm{TPR} = \frac{\mathrm{TP}}{\mathrm{TP} + \mathrm{FN}},$$
$$ \mathrm{FPR} = \frac{\mathrm{FP}}{\mathrm{FP} + \mathrm{TN}},$$
where $\mathrm{TP}$ is the number of true positives, $\mathrm{FN}$ is the number of false negatives, $\mathrm{FP}$ is the number of false positives, and $\mathrm{TN}$ is the number of true negatives. The ideal model has high TPR and low FPR. The ROC curve comes from varying the classification decision threshold from 1 to 0.
Once again, scikit-learn comes to the rescue by making it easy to calculate ROC curves with roc_curve in the sklearn.metrics module.
Problem 4d Calculate and plot the ROC curve for stars as determined by the validation set.
Hint - for this problem, you'll want to recast y as an integer array, so that the definition of TP and FP is unambiguous.
End of explanation
"""
fpr01_idx = (np.abs(fpr-0.01)).argmin()
tpr01 = tpr[fpr01_idx]
thresh01 = thresh[fpr01_idx]
print("At FPR = 0.01, the TPR = {:.3f} corresponding to decision threshold = {:.3f}".format(tpr01, thresh01))
"""
Explanation: The ROC curve should come very close to (0, 1), which is the sign of a good model (and with ~97% accuracy we already knew we had a pretty good model). We will now explore the results of thresholding. Suppose you are searching for supernovae in a time-domain survey, and it is essential that you examine transients in as many galaxies as possible. While searching, there will be many false positives in the form of stars; thus, you need a model that rejects stars (high TPR) with a very low FPR (i.e., one that does not misclassify galaxies).
Problem 4e Determine the classification threshold and TPR corresponding to a FPR = 0.01.
End of explanation
"""
QSOquery = """SELECT TOP 10000
p.psfMag_r, p.fiberMag_r, p.fiber2Mag_r, p.petroMag_r,
p.deVMag_r, p.expMag_r, p.modelMag_r, p.cModelMag_r,
s.class
FROM PhotoObjAll AS p JOIN specObjAll s ON s.bestobjid = p.objid
WHERE p.mode = 1 AND s.sciencePrimary = 1 AND p.clean = 1 AND s.class = 'QSO'
ORDER BY s.specobjid ASC
"""
QSOts = SDSS.query_sql(QSOquery)
"""
Explanation: That's it! You now have the basic outline necessary to create a full-blown, publication-ready machine-learning model.
Now, before you run off importing sklearn left and right and re-writing all your code to use machine-learning, remember: you should always, always, always, always (how many times can I copy paste the word "always"?), always be extremely concerned about the bias present in your training set. (Even after the paper is accepted, the community loves it, and you've discovered some new great thing - you should be worried about the training set.) In my (AAM) personal experience, the vast majority of my time as a machine-learning practitioner is spent chasing down unusual things in the training set. So, proceed with caution.
If you have time - try the challenge problem below, which should reinforce my words of caution.
Challenge Problem) Taking the Plunge
Applying the model to field data
QSOs are unresolved sources that look like stars in optical imaging data. We will now download photometric measurements for 10k QSOs from SDSS and see how accurate the RF model performs for these sources.
End of explanation
"""
qso_X = np.array(QSOts[feats].to_pandas())
qso_y = np.ones(len(QSOts)) # we are defining QSOs as stars for this exercise
qso_Xnorm = qso_X[:,0][:,np.newaxis] - qso_X[:,1:]
qso_preds = RFmod.predict(qso_Xnorm)
print("The RF model correctly classifies ~{:.4f} of the QSOs".format(accuracy_score(qso_y, qso_preds)))
"""
Explanation: Challenge 1 Calculate the accuracy with which the model classifies QSOs based on the 10k QSOs selected with the above command. How does that accuracy compare to that estimated by the validation set?
End of explanation
"""
# As discussed above, low-z AGN have resolved host galaxies which will confuse the classifier,
# this can be resolved by only selecting high-z QSOs (z > 1.5)
QSOquery = """SELECT TOP 10000
p.psfMag_r, p.fiberMag_r, p.fiber2Mag_r, p.petroMag_r,
p.deVMag_r, p.expMag_r, p.modelMag_r, p.cModelMag_r,
s.class
FROM PhotoObjAll AS p JOIN specObjAll s ON s.bestobjid = p.objid
WHERE p.mode = 1 AND s.sciencePrimary = 1 AND p.clean = 1 AND s.class = 'QSO'
AND s.z > 1.5
ORDER BY s.specobjid ASC
"""
QSOts = SDSS.query_sql(QSOquery)
qso_X = np.array(QSOts[feats].to_pandas())
qso_y = np.ones(len(QSOts)) # we are defining QSOs as stars for this exercise
qso_Xnorm = qso_X[:,0][:,np.newaxis] - qso_X[:,1:]
qso_preds = RFmod.predict(qso_Xnorm)
print("The RF model correctly classifies ~{:.4f} of the QSOs".format(accuracy_score(qso_y, qso_preds)))
"""
Explanation: Challenge 2 Can you think of any reasons why the performance would be so much worse for the QSOs than it is for the stars? Can you obtain a ~.97 accuracy when classifying QSOs?
End of explanation
"""
# TODO: import what you need and build your 3-class (star/galaxy/QSO) model here
"""
Explanation: Uber Challenge Problem) The 3-class problem
Uber Challenge Create a 3 class RF model that separates stars, galaxies, and QSOs in SDSS imaging data. Limit the size of your training set to $< 25000$ sources, but that's it as far as suggestions and hints are concerned. Can you achieve a classification accuracy around 97%?
End of explanation
"""
|
locationtech/geowave
|
examples/data/notebooks/jupyter/geowave-gpx.ipynb
|
apache-2.0
|
#!pip install --user --upgrade pixiedust
import pixiedust
import geowave_pyspark
"""
Explanation: Geowave GPX Demo
This Demo runs KMeans on the GPX dataset consisting of approximately 285 million point locations. We use a cql filter to reduce the KMeans set to a bounding box over Berlin, Germany. Simply focus a cell and use [SHIFT + ENTER] to run the code.
Import pixiedust
Start by importing pixiedust, which should succeed if all bootstrap and install steps were run correctly.
You should see output below indicating that the pixiedust database was opened successfully, with no errors.
Depending on the version of pixiedust that gets installed, it may ask you to update.
If so, run this first cell.
End of explanation
"""
pixiedust.enableJobMonitor()
"""
Explanation: Pixiedust also allows us to monitor spark job progress directly from the notebook. Simply run the cell below and anytime a spark job is run from the notebook you should see incremental progress shown in the output below.
NOTE If this function fails or produces an error, it is often just a linking issue between pixiedust and Python the first time pixiedust is imported. Restart the kernel and rerun the cells to fix the error.
End of explanation
"""
# Print Spark info and create sql_context
print('Spark Version: {0}'.format(sc.version))
print('Python Version: {0}'.format(sc.pythonVer))
print('Application Name: {0}'.format(sc.appName))
print('Application ID: {0}'.format(sc.applicationId))
print('Spark Master: {0}'.format( sc.master))
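# A minimal sketch of the sql_context mentioned in the comment above (our addition;
# with Spark 2.x the SparkSession available as "spark" already covers most SQL needs).
from pyspark.sql import SQLContext
sql_context = SQLContext(sc)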
"""
Explanation: Creating the SQLContext and inspecting pyspark Context
Pixiedust imports pyspark and the SparkContext + SparkSession should be already available through the "sc" and "spark" variables respectively.
End of explanation
"""
%%bash
s3-dist-cp -D mapreduce.task.timeout=60000000 --src=s3://geowave-gpx-data/gpx --dest=hdfs://$HOSTNAME:8020/tmp/
%%bash
/opt/accumulo/bin/accumulo shell -u root -p secret -e "importtable geowave.germany_gpx_SPATIAL_IDX /tmp/spatial"
/opt/accumulo/bin/accumulo shell -u root -p secret -e "importtable geowave.germany_gpx_GEOWAVE_METADATA /tmp/metadata"
"""
Explanation: Download and ingest the GPX data
NOTE Depending on cluster size, the copy can sometimes fail. This appears to be a race-condition error with the copy command when downloading the files from S3, which may cause the following Accumulo import commands to fail. You can check the Accumulo tables by looking at port 9995 of the EMR cluster. There should be 5 tables after importing.
End of explanation
"""
%%bash
# clear out potential old runs
geowave store clear kmeans_gpx
geowave store rm kmeans_gpx
geowave store clear germany_gpx_accumulo
geowave store rm germany_gpx_accumulo
# configure geowave connection params for name stores "germany_gpx_accumulo" and "kmeans_gpx"
geowave store add germany_gpx_accumulo --gwNamespace geowave.germany_gpx -t accumulo --zookeeper $HOSTNAME:2181 --instance accumulo --user root --password secret
geowave store add kmeans_gpx --gwNamespace geowave.kmeans -t accumulo --zookeeper $HOSTNAME:2181 --instance accumulo --user root --password secret
"""
Explanation: Setup Datastores
End of explanation
"""
%%bash
geowave store clear kmeans_gpx
import os  # needed below for os.environ['HOSTNAME']

# Pull core GeoWave datastore classes
hbase_options_class = sc._jvm.org.locationtech.geowave.datastore.hbase.cli.config.HBaseRequiredOptions
accumulo_options_class = sc._jvm.org.locationtech.geowave.datastore.accumulo.cli.config.AccumuloRequiredOptions
query_options_class = sc._jvm.org.locationtech.geowave.core.store.query.QueryOptions
byte_array_class = sc._jvm.org.locationtech.geowave.core.index.ByteArrayId
# Pull core GeoWave Spark classes from jvm
geowave_rdd_class = sc._jvm.org.locationtech.geowave.analytic.spark.GeoWaveRDD
rdd_loader_class = sc._jvm.org.locationtech.geowave.analytic.spark.GeoWaveRDDLoader
rdd_options_class = sc._jvm.org.locationtech.geowave.analytic.spark.RDDOptions
sf_df_class = sc._jvm.org.locationtech.geowave.analytic.spark.sparksql.SimpleFeatureDataFrame
kmeans_runner_class = sc._jvm.org.locationtech.geowave.analytic.spark.kmeans.KMeansRunner
datastore_utils_class = sc._jvm.org.locationtech.geowave.core.store.util.DataStoreUtils
spatial_encoders_class = sc._jvm.org.locationtech.geowave.analytic.spark.sparksql.GeoWaveSpatialEncoders
spatial_encoders_class.registerUDTs()
#setup input datastore
input_store = accumulo_options_class()
input_store.setInstance('accumulo')
input_store.setUser('root')
input_store.setPassword('secret')
input_store.setZookeeper(os.environ['HOSTNAME'] + ':2181')
input_store.setGeowaveNamespace('geowave.germany_gpx')
#Setup output datastore
output_store = accumulo_options_class()
output_store.setInstance('accumulo')
output_store.setUser('root')
output_store.setPassword('secret')
output_store.setZookeeper(os.environ['HOSTNAME'] + ':2181')
output_store.setGeowaveNamespace('geowave.kmeans')
#Create a instance of the runner
kmeans_runner = kmeans_runner_class()
input_store_plugin = input_store.createPluginOptions()
output_store_plugin = output_store.createPluginOptions()
#set the appropriate properties
#We want it to execute using the existing JavaSparkContext wrapped by python.
kmeans_runner.setSparkSession(sc._jsparkSession)
kmeans_runner.setAdapterId('gpxpoint')
kmeans_runner.setNumClusters(8)
kmeans_runner.setInputDataStore(input_store_plugin)
kmeans_runner.setOutputDataStore(output_store_plugin)
kmeans_runner.setCqlFilter("BBOX(geometry, 13.3, 52.45, 13.5, 52.5)")
kmeans_runner.setCentroidTypeName('mycentroids')
kmeans_runner.setHullTypeName('myhulls')
kmeans_runner.setGenerateHulls(True)
kmeans_runner.setComputeHullData(True)
#execute the kmeans runner
kmeans_runner.run()
"""
Explanation: Run KMeans
Run Kmeans on the reduced dataset over Berlin, Germany. Once the spark job begins running you should be able to monitor its progress from the cell with pixiedust, or you can monitor the progress from the spark history server on the emr cluster.
End of explanation
"""
# Create the dataframe and get a rdd for the output of kmeans
# Grab adapter and setup query options for rdd load
adapter_id = byte_array_class('mycentroids')
query_adapter = datastore_utils_class.getDataAdapter(output_store_plugin, adapter_id)
query_options = query_options_class(query_adapter)
# Create RDDOptions for loader
rdd_options = rdd_options_class()
rdd_options.setQueryOptions(query_options)
output_rdd = rdd_loader_class.loadRDD(sc._jsc.sc(), output_store_plugin, rdd_options)
# Create a SimpleFeatureDataFrame from the GeoWaveRDD
sf_df = sf_df_class(spark._jsparkSession)
sf_df.init(output_store_plugin, adapter_id)
df = sf_df.getDataFrame(output_rdd)
# Convert Java DataFrame to Python DataFrame
import pyspark.mllib.common as convert
py_df = convert._java2py(sc, df)
py_df.createOrReplaceTempView('mycentroids')
df = spark.sql("select * from mycentroids")
display(df)
"""
Explanation: Load Centroids into DataFrame and display
End of explanation
"""
# Convert the string point information into lat long columns and create a new dataframe for those.
import pyspark
def parseRow(row):
lat=row.geom.y
lon=row.geom.x
return pyspark.sql.Row(lat=lat,lon=lon,ClusterIndex=row.ClusterIndex)
row_rdd = df.rdd
new_rdd = row_rdd.map(lambda row: parseRow(row))
new_df = new_rdd.toDF()
display(new_df)
"""
Explanation: Parse DataFrame data into lat/lon columns and display centroids on map
Using pixiedust's built in map visualization we can display data on a map assuming it has the following properties.
- Keys: put your latitude and longitude fields here. They must be floating values. These fields must be named latitude, lat or y and longitude, lon or x.
- Values: the field you want to use to thematically color the map. Only one field can be used.
Also you will need a access token from whichever map renderer you choose to use with pixiedust (mapbox, google).
Follow the instructions in the token help on how to create and use the access token.
End of explanation
"""
# Create the dataframe and get a rdd for the output of kmeans
# Grab adapter and setup query options for rdd load
adapter_id = byte_array_class('myhulls')
query_adapter = datastore_utils_class.getDataAdapter(output_store_plugin, adapter_id)
query_options = query_options_class(query_adapter)
# Use GeoWaveRDDLoader to load an RDD
rdd_options = rdd_options_class()
rdd_options.setQueryOptions(query_options)
output_rdd_hulls = rdd_loader_class.loadRDD(sc._jsc.sc(), output_store_plugin, rdd_options)
# Create a SimpleFeatureDataFrame from the GeoWaveRDD
sf_df_hulls = sf_df_class(spark._jsparkSession)
sf_df_hulls.init(output_store_plugin, adapter_id)
df_hulls = sf_df_hulls.getDataFrame(output_rdd_hulls)
# Convert Java DataFrame to Python DataFrame
import pyspark.mllib.common as convert
py_df_hulls = convert._java2py(sc, df_hulls)
# Create a sql table view of the hulls data
py_df_hulls.createOrReplaceTempView('myhulls')
# Run SQL Query on Hulls data
df_hulls = spark.sql("select * from myhulls order by Density")
display(df_hulls)
"""
Explanation: Export KMeans Hulls to DataFrame
If you have more complex data to visualize, pixiedust may not be the best option.
The KMeans hull generation outputs polygons that would be difficult for pixiedust to display without
creating a special plugin.
Instead, we can use another map renderer to visualize our data. For the KMeans hulls we will use folium. Folium allows us to easily add WMS layers to our notebook, and we can combine that with GeoWave's GeoServer functionality to render the hulls and centroids.
End of explanation
"""
%%bash
# set up geoserver
geowave config geoserver "$HOSTNAME:8000"
# add the centroids layer
geowave gs layer add kmeans_gpx -id mycentroids
geowave gs style set mycentroids --styleName point
# add the hulls layer
geowave gs layer add kmeans_gpx -id myhulls
geowave gs style set myhulls --styleName line
import owslib
from owslib.wms import WebMapService
url = "http://" + os.environ['HOSTNAME'] + ":8000/geoserver/geowave/wms"
web_map_services = WebMapService(url)
#print layers available wms
print('\n'.join(web_map_services.contents.keys()))
import folium
#grab wms info for centroids
layer = 'mycentroids'
wms = web_map_services.contents[layer]
#build center of map off centroid bbox
lon = (wms.boundingBox[0] + wms.boundingBox[2]) / 2.
lat = (wms.boundingBox[1] + wms.boundingBox[3]) / 2.
center = [lat, lon]
m = folium.Map(location = center,zoom_start=10)
name = wms.title
centroids = folium.raster_layers.WmsTileLayer(
url=url,
name=name,
fmt='image/png',
transparent=True,
layers=layer,
overlay=True,
COLORSCALERANGE='1.2,28',
)
centroids.add_to(m)
layer = 'myhulls'
wms = web_map_services.contents[layer]
name = wms.title
hulls = folium.raster_layers.WmsTileLayer(
url=url,
name=name,
fmt='image/png',
transparent=True,
layers=layer,
overlay=True,
COLORSCALERANGE='1.2,28',
)
hulls.add_to(m)
m
"""
Explanation: Visualize results using geoserver and wms
Folium provides an easy way to visualize Leaflet maps in Jupyter notebooks. When the data is too complicated or too big for the simple map-display framework pixiedust provides, we can instead turn to GeoServer and WMS to render our layers. First we configure GeoServer, then set up WMS layers for folium to display the KMeans results on the map.
End of explanation
"""
|
kit-cel/lecture-examples
|
mloc/ch4_Deep_Learning/pytorch/pytorch_tutorial_1.ipynb
|
gpl-2.0
|
import torch
import numpy as np
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("We are using the following device for learning:",device)
"""
Explanation: PyTorch Tutorial - Part 1
This code is provided as supplementary material of the lecture Machine Learning and Optimization in Communications (MLOC).<br>
This code illustrates
* Get accustomed to the basics of pytorch
* Do simple operations
End of explanation
"""
a = torch.tensor([1,2,3]).float().to(device)
b = torch.tensor([5,6,7]).float().to(device)
result = a * b
print(a)
print(b)
print(result)
"""
Explanation: Generate some tensors, convert to float, and copy them to the device (possibly the GPU). Then carry out an operation with the tensors and print the result.
End of explanation
"""
# generate a random number vector
d = np.random.randn(2,4)
# construct a pytorch tensor from numpy array
e = torch.from_numpy(d).float().to(device)
print(e)
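# Small follow-up sketch (our addition, not part of the original lecture code):
# move the tensor back to the CPU and convert it to a numpy array.
e_numpy = e.cpu().numpy()
print(type(e_numpy))
print(e_numpy)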
"""
Explanation: We can see that a, b, and result are PyTorch tensors, so the outcome of the operation is itself carried as a tensor. We have built a computation graph linking the variables a and b to result via the (element-wise) multiplication, and the result is immediately available.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/hammoz-consortium/cmip6/models/sandbox-2/toplevel.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'hammoz-consortium', 'sandbox-2', 'toplevel')
"""
Explanation: ES-DOC CMIP6 Model Properties - Toplevel
MIP Era: CMIP6
Institute: HAMMOZ-CONSORTIUM
Source ID: SANDBOX-2
Sub-Topics: Radiative Forcings.
Properties: 85 (42 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:03
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Flux Correction
3. Key Properties --> Genealogy
4. Key Properties --> Software Properties
5. Key Properties --> Coupling
6. Key Properties --> Tuning Applied
7. Key Properties --> Conservation --> Heat
8. Key Properties --> Conservation --> Fresh Water
9. Key Properties --> Conservation --> Salt
10. Key Properties --> Conservation --> Momentum
11. Radiative Forcings
12. Radiative Forcings --> Greenhouse Gases --> CO2
13. Radiative Forcings --> Greenhouse Gases --> CH4
14. Radiative Forcings --> Greenhouse Gases --> N2O
15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
17. Radiative Forcings --> Greenhouse Gases --> CFC
18. Radiative Forcings --> Aerosols --> SO4
19. Radiative Forcings --> Aerosols --> Black Carbon
20. Radiative Forcings --> Aerosols --> Organic Carbon
21. Radiative Forcings --> Aerosols --> Nitrate
22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
24. Radiative Forcings --> Aerosols --> Dust
25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
27. Radiative Forcings --> Aerosols --> Sea Salt
28. Radiative Forcings --> Other --> Land Use
29. Radiative Forcings --> Other --> Solar
1. Key Properties
Key properties of the model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Top level overview of coupled model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of coupled model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Flux Correction
Flux correction properties of the model
2.1. Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how flux corrections are applied in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Genealogy
Genealogy and history of the model
3.1. Year Released
Is Required: TRUE Type: STRING Cardinality: 1.1
Year the model was released
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. CMIP3 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP3 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. CMIP5 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP5 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Previous Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Previously known as
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of model
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.4. Components Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.5. Coupler
Is Required: FALSE Type: ENUM Cardinality: 0.1
Overarching coupling framework for model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Coupling
**
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of coupling in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.2. Atmosphere Double Flux
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.3. Atmosphere Fluxes Calculation Grid
Is Required: FALSE Type: ENUM Cardinality: 0.1
Where are the air-sea fluxes calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Atmosphere Relative Winds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and comment on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics/diagnostics of the global mean state used in tuning model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics/diagnostics used in tuning model/component (such as 20th century)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.5. Energy Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. Fresh Water Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Conservation --> Heat
Global heat conservation properties of the model
7.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.6. Land Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the land/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation --> Fresh Water
Global fresh water conservation properties of the model
8.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh_water is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh water is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Runoff
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how runoff is distributed and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Iceberg Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how iceberg calving is modeled and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Endoreic Basins
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how endoreic basins (no ocean access) are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Snow Accumulation
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how snow accumulation over land and over sea-ice is treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Key Properties --> Conservation --> Salt
Global salt conservation properties of the model
9.1. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how salt is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Key Properties --> Conservation --> Momentum
Global momentum conservation properties of the model
10.1. Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how momentum is conserved in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Radiative Forcings
Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)
11.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative forcings (GHG and aerosols) implementation in model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Carbon dioxide forcing
12.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Methane forcing
13.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Nitrous oxide forcing
14.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Tropospheric ozone forcing
15.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Stratospheric ozone forcing
16.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Ozone-depleting and non-ozone-depleting fluorinated gases forcing
17.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Equivalence Concentration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Details of any equivalence concentrations used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiative Forcings --> Aerosols --> SO4
SO4 aerosol forcing
18.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiative Forcings --> Aerosols --> Black Carbon
Black carbon aerosol forcing
19.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Organic carbon aerosol forcing
20.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiative Forcings --> Aerosols --> Nitrate
Nitrate forcing
21.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Cloud albedo effect forcing (RFaci)
22.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Cloud lifetime effect forcing (ERFaci)
23.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.3. RFaci From Sulfate Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative forcing from aerosol cloud interactions from sulfate aerosol only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiative Forcings --> Aerosols --> Dust
Dust forcing
24.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Tropospheric volcanic forcing
25.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Stratospheric volcanic forcing
26.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiative Forcings --> Aerosols --> Sea Salt
Sea salt forcing
27.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiative Forcings --> Other --> Land Use
Land use forcing
28.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28.2. Crop Change Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Land use change represented via crop change only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 29. Radiative Forcings --> Other --> Solar
Solar forcing
29.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How solar forcing is provided
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
|
ARM-software/bart
|
docs/notebooks/sched/SchedDeadline.ipynb
|
apache-2.0
|
from trappy.stats.Topology import Topology
from bart.sched.SchedMultiAssert import SchedMultiAssert
from bart.sched.SchedAssert import SchedAssert
import trappy
import os
import operator
import json
#Define a CPU Topology (for multi-cluster systems)
BIG = [1, 2]
LITTLE = [0, 3, 4, 5]
CLUSTERS = [BIG, LITTLE]
topology = Topology(clusters=CLUSTERS)
BASE_PATH = "/Users/kapileshwarsingh/AnalysisRawData/LPC/sched_deadline/"
THRESHOLD = 10.0
def between_threshold(a, b):
return abs(((a - b) * 100.0) / b) < THRESHOLD
"""
Explanation: Setup
End of explanation
"""
TRACE_FILE = os.path.join(BASE_PATH, "yield")
ftrace = trappy.FTrace(TRACE_FILE, "cpuhog")
# Assert Period
s = SchedMultiAssert(ftrace, topology, execnames="periodic_yield")
if s.assertPeriod(30, between_threshold, rank=1):
print "PASS: Period"
print json.dumps(s.getPeriod(), indent=3)
print ""
# Assert DutyCycle
if s.assertDutyCycle(1, operator.lt, window=(0,4), rank=2):
print "PASS: DutyCycle"
print json.dumps(s.getDutyCycle(window=(0,4)), indent=3)
"""
Explanation: Periodic Yield
The thread periodic_yield is woken up at 30ms intervals, where it calls sched_yield and relinquishes its time-slice.
The expectation is that the task will have a duty cycle < 1% and a period of 30ms.
There are two such threads, and rank=1 conveys that the condition must hold for one of the threads with the name "periodic_yield".
End of explanation
"""
TRACE_FILE = os.path.join(BASE_PATH, "cpuhog")
ftrace = trappy.FTrace(TRACE_FILE, "cpuhog")
s = SchedMultiAssert(ftrace, topology, execnames="cpuhog")
s.plot().view()
# Assert DutyCycle
if s.assertDutyCycle(10, between_threshold, window=(0, 5), rank=1):
print "PASS: DutyCycle"
print json.dumps(s.getDutyCycle(window=(0, 5)), indent=3)
"""
Explanation: CPU Hog
The reservation of a CPU-hogging task is set to 10ms for every 100ms. The assertion ensures a duty cycle of 10%.
End of explanation
"""
TRACE_FILE = os.path.join(BASE_PATH, "cancel_dl_timer")
ftrace = trappy.FTrace(TRACE_FILE, "cpuhog")
s = SchedAssert(ftrace, topology, execname="cpuhog")
s.plot().view()
NUM_PHASES = 10
PHASE_DURATION = 2
start = s.getStartTime()
DUTY_CYCLE_FACTOR = 10
for phase in range(NUM_PHASES + 1):
window = (start + (phase * PHASE_DURATION),
start + ((phase + 1) * PHASE_DURATION))
if phase % 2 == 0:
DUTY_CYCLE = (phase + 2) * DUTY_CYCLE_FACTOR / 2
else:
DUTY_CYCLE = 100
print "WINDOW -> [{:.2f}, {:.2f}]".format(window[0],
window[1])
if s.assertDutyCycle(DUTY_CYCLE, between_threshold, window=window):
print "PASS: Expected={} Actual={:.2f} THRESHOLD={}".format(DUTY_CYCLE,
s.getDutyCycle(window=window),
THRESHOLD)
else:
print "FAIL: Expected={} Actual={:.2f} THRESHOLD={}".format(DUTY_CYCLE,
s.getDutyCycle(window=window),
THRESHOLD)
print ""
"""
Explanation: Changing Reservations
A CPU-hogging task has its reservations set in increasing order starting from 10%, with each reservation phase followed by a 2s period of normal (unrestricted) execution.
End of explanation
"""
|
gargraghav/tensorflow
|
Learning Tensorflow/Working_of_TensorFlow.ipynb
|
mit
|
import tensorflow as tf
"""
Explanation: <div style="text-align:center"><img src = "https://www.tensorflow.org/_static/images/tensorflow/logo.png"></div>
<a id="ref2"></a>
How does TensorFlow work?
TensorFlow defines computations as Graphs, and these are made with operations (also known as "ops"). So, when we work with TensorFlow, it is the same as defining a series of operations in a Graph.
To execute these operations as computations, we must launch the Graph into a Session. The session translates and passes the operations represented in the graph to the device you want to execute them on, be it a GPU or CPU.
For example, the image below represents a graph in TensorFlow. W, x and b are tensors over the edges of this graph. MatMul is an operation over the tensors W and x; after that, Add is called and adds the result of the previous operation to b.
<img src='https://ibm.box.com/shared/static/a94cgezzwbkrq02jzfjjljrcaozu5s2q.png'>
Importing TensorFlow
<p>To use TensorFlow, we need to import the library. We import it and optionally give it the name "tf", so its modules can be accessed as __tf.module-name__:
End of explanation
"""
a = tf.constant([2])
b = tf.constant([3])
"""
Explanation: <a id="ref3"></a>
Building a Graph
As we said before, TensorFlow works as a graph computational model. Let's create our first graph.
To create two source operations which will output numbers we will define two constants:
End of explanation
"""
c = tf.add(a,b)
"""
Explanation: After that, let's make an operation over these variables. The function tf.add() adds two elements (you could also use c = a + b).
End of explanation
"""
session = tf.Session()
"""
Explanation: Then TensorFlow needs to initialize a session to run our code. Sessions are, in a way, a context for creating a graph inside TensorFlow. Let's define our session:
End of explanation
"""
result = session.run(c)
print(result)
"""
Explanation: Let's run the session to get the result from the previous defined 'c' operation:
End of explanation
"""
session.close()
"""
Explanation: Close the session to release resources:
End of explanation
"""
with tf.Session() as session:
result = session.run(c)
print(result)
"""
Explanation: To avoid having to close sessions every time, we can define them in a with block, so after running the with block the session will close automatically:
End of explanation
"""
Scalar = tf.constant([2])
Vector = tf.constant([5,6,2])
Matrix = tf.constant([[1,2,3],[2,3,4],[3,4,5]])
Tensor = tf.constant( [ [[1,2,3],[2,3,4],[3,4,5]] , [[4,5,6],[5,6,7],[6,7,8]] , [[7,8,9],[8,9,10],[9,10,11]] ] )
with tf.Session() as session:
result = session.run(Scalar)
print ("Scalar (1 entry):\n %s \n" % result)
result = session.run(Vector)
print ("Vector (3 entries) :\n %s \n" % result)
result = session.run(Matrix)
print ("Matrix (3x3 entries):\n %s \n" % result)
result = session.run(Tensor)
print ("Tensor (3x3x3 entries) :\n %s \n" % result)
"""
Explanation: Even this silly example of adding 2 constants to reach a simple result defines the basis of TensorFlow. Define your edges (in this case our constants, which flow as tensors), include nodes (operations, like tf.add), and start a session to run the graph.
<a id="ref2"></a>
TensorFlow Basic Elements
Tensor
Variable
Operation
Session
Placeholder
Tensorboard
What is the meaning of Tensor?
<div class="alert alert-success alertsuccess" style="margin-top: 20px">
<font size = 3><strong>In TensorFlow all data is passed between operations in a computation graph, and these are passed in the form of Tensors, hence the name of TensorFlow.</strong></font>
<br>
<br>
The word __tensor__ from New Latin means "that which stretches". It is a mathematical object that is named __tensor__ because an early application of tensors was the study of materials stretching under tension. The contemporary meaning of tensors can be taken as multidimensional arrays.
<p></p>
</div>
What are multidimensional arrays here?
<table style="width:100%">
<tr>
<td><b>Dimension</b></td>
<td><b>Physical Representation</b></td>
<td><b>Mathematical Object</b></td>
<td><b>In Code</b></td>
</tr>
<tr>
<td>Zero </td>
<td>Point</td>
<td>Scalar (Single Number)</td>
<td>[ 1 ]</td>
</tr>
<tr>
<td>One</td>
<td>Line</td>
<td>Vector (Series of Numbers) </td>
<td>[ 1,2,3,4,... ]</td>
</tr>
<tr>
<td>Two</td>
<td>Surface</td>
<td>Matrix (Table of Numbers)</td>
<td>[ [1,2,3,4,...], [1,2,3,4,...], [1,2,3,4,...],... ]</td>
</tr>
<tr>
<td>Three</td>
<td>Volume</td>
<td>Tensor (Cube of Numbers)</td>
<td>[ [[1,2,...], [1,2,...], [1,2,...],...], [[1,2,...], [1,2,...], [1,2,...],...], [[1,2,...], [1,2,...], [1,2,...] ,...]... ]</td>
</tr>
</table>
<a id="ref4"></a>
Defining multidimensional arrays using TensorFlow
Now we will try to define such arrays using TensorFlow:
End of explanation
"""
state = tf.Variable(0)
"""
Explanation: <a id="ref5"></a>
Why Tensors?
The Tensor structure helps us by giving the freedom to shape the dataset the way we want.
And it is particularly helpful when dealing with images, due to the nature of how the information in images is encoded.
Thinking about images, it's easy to understand that they have a height and a width, so it would make sense to represent the information contained in them with a two-dimensional structure (a matrix)... until you remember that images have colors, and to add information about the colors we need another dimension, and that's when Tensors become particularly helpful.
Images are encoded into color channels; the image data is represented as the intensity of each color channel at a given point, the most common encoding being RGB, which means Red, Green and Blue. The information contained in an image is the intensity of each channel color over the width and height of the image, just like this:
<img src='https://ibm.box.com/shared/static/xlpv9h5xws248c09k1rlx7cer69y4grh.png'>
So the intensity of the red channel at each point over the width and height can be represented in a matrix; the same goes for the green and blue channels, so we end up with three matrices, and when these are combined they form a tensor.
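As a small, purely illustrative sketch of this idea (not part of the original example), a 2x2 "image" with 3 color channels is naturally a rank-3 tensor of shape [height, width, channels]:
tiny_image = tf.constant([[[255, 0, 0], [0, 255, 0]],
                          [[0, 0, 255], [255, 255, 255]]])
with tf.Session() as session:
    print("shape: %s" % tiny_image.get_shape())  # (2, 2, 3)
    print(session.run(tiny_image))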
<a id="ref6"></a>
Variables
Now that we are more familiar with the structure of data, we will take a look at how TensorFlow handles variables.
To define variables we use the command tf.Variable().
To be able to use variables in a computation graph it is necessary to initialize them before running the graph in a session. This is done by running tf.global_variables_initializer().
To update the value of a variable, we simply run an assign operation that assigns a value to the variable:
End of explanation
"""
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)
"""
Explanation: Let's first create a simple counter, a variable that increases one unit at a time:
End of explanation
"""
init = tf.global_variables_initializer()
"""
Explanation: Variables must be initialized by running an initialization operation after having launched the graph. We first have to add the initialization operation to the graph:
End of explanation
"""
with tf.Session() as session:
session.run(init)
print(session.run(state))
for i in range(3):
session.run(update)
print(session.run(state))
"""
Explanation: We then start a session to run the graph, first initialize the variables, then print the initial value of the state variable, and then run the operation of updating the state variable and printing the result after each update:
End of explanation
"""
a=tf.placeholder(tf.float32)
"""
Explanation: <a id="ref7"></a>
Placeholders
Now we know how to manipulate variables inside TensorFlow, but what about feeding data outside of a TensorFlow model?
If you want to feed data to a TensorFlow model from outside a model, you will need to use placeholders.
So what are these placeholders and what do they do?
Placeholders can be seen as "holes" in your model, "holes" through which you will pass the data. You can create them using <br/> <b>tf.placeholder(datatype)</b>, where <b>datatype</b> specifies the type of data (integers, floating points, strings, booleans) along with its precision in bits (8, 16, 32, 64).
The definition of each data type, with the respective Python syntax, is as follows:
|Data type |Python type|Description|
| --------- | --------- | --------- |
|DT_FLOAT |tf.float32 |32 bits floating point.|
|DT_DOUBLE |tf.float64 |64 bits floating point.|
|DT_INT8 |tf.int8 |8 bits signed integer.|
|DT_INT16 |tf.int16 |16 bits signed integer.|
|DT_INT32 |tf.int32 |32 bits signed integer.|
|DT_INT64 |tf.int64 |64 bits signed integer.|
|DT_UINT8 |tf.uint8 |8 bits unsigned integer.|
|DT_STRING |tf.string |Variable length byte arrays. Each element of a Tensor is a byte array.|
|DT_BOOL |tf.bool |Boolean.|
|DT_COMPLEX64 |tf.complex64 |Complex number made of two 32 bits floating points: real and imaginary parts.|
|DT_COMPLEX128 |tf.complex128 |Complex number made of two 64 bits floating points: real and imaginary parts.|
|DT_QINT8 |tf.qint8 |8 bits signed integer used in quantized Ops.|
|DT_QINT32 |tf.qint32 |32 bits signed integer used in quantized Ops.|
|DT_QUINT8 |tf.quint8 |8 bits unsigned integer used in quantized Ops.|
<div style="text-align:center">[[Table Source]](https://www.tensorflow.org/versions/r0.9/resources/dims_types.html)</div>
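As an extra hedged sketch (the variable names here are illustrative), a placeholder can also carry an explicit dtype and shape; leaving the first dimension as None lets batches of any size be fed, each row being a 3-component float32 vector:
x_batch = tf.placeholder(tf.float32, shape=[None, 3])
doubled = x_batch * 2
with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={x_batch: [[1.0, 2.0, 3.0],
                                                 [4.0, 5.0, 6.0]]}))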
So we create a placeholder:
End of explanation
"""
b=a*2
"""
Explanation: And define a simple multiplication operation:
End of explanation
"""
with tf.Session() as sess:
result = sess.run(b,feed_dict={a:3.5})
print (result)
"""
Explanation: Now we need to define and run the session, but since we created a "hole" in the model to pass the data in, when we run the session we are obligated to pass an argument with the data; otherwise we would get an error.
To pass the data to the model we call the session with an extra argument <b>feed_dict</b>, in which we should pass a dictionary with each placeholder name followed by its respective data, just like this:
End of explanation
"""
dictionary={a: [ [ [1,2,3],[4,5,6],[7,8,9],[10,11,12] ] , [ [13,14,15],[16,17,18],[19,20,21],[22,23,24] ] ] }
with tf.Session() as sess:
result = sess.run(b,feed_dict=dictionary)
print (result)
"""
Explanation: Since data in TensorFlow is passed in the form of multidimensional arrays, we can pass any kind of tensor through the placeholders to get the answer to the simple multiplication operation:
End of explanation
"""
a = tf.constant([5])
b = tf.constant([2])
c = tf.add(a,b)
d = tf.subtract(a,b)
with tf.Session() as session:
result = session.run(c)
print ('c =: %s' % result)
result = session.run(d)
print ('d =: %s' % result)
"""
Explanation: <a id="ref8"></a>
Operations
Operations are nodes that represent mathematical operations over the tensors of a graph. These operations can be any kind of function, like adding and subtracting tensors, or applying an activation function.
tf.matmul, tf.add and tf.nn.sigmoid are some of the operations in TensorFlow. They are like functions in Python, but operate directly over tensors, and each one does a specific thing.
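As a short additional sketch (not part of the original notebook), tf.matmul and tf.nn.sigmoid can be used in exactly the same way as tf.add above:
m1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
m2 = tf.constant([[5.0], [6.0]])
product = tf.matmul(m1, m2)         # matrix product, shape (2, 1)
activated = tf.nn.sigmoid(product)  # element-wise sigmoid of the product
with tf.Session() as session:
    print('product =: %s' % session.run(product))
    print('activated =: %s' % session.run(activated))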
<div class="alert alert-success alertsuccess" style="margin-top: 20px">Other operations can be easily found in: https://www.tensorflow.org/versions/r0.9/api_docs/python/index.html</div>
End of explanation
"""
import tensorflow as tf
with tf.name_scope("Operations"):
with tf.name_scope("Scope_a"):
a = tf.add(1, 2, name="a")
b = tf.multiply(a, 3, name="b")
with tf.name_scope("Scope_b"):
c = tf.add(4, 5, name="c")
d = tf.multiply(c, 6, name="d")
with tf.name_scope("Scope_c"):
e = tf.multiply(4, 5, name="e")
f = tf.div(c, 6, name="f")
g = tf.add(b, d, name="g")
h = tf.multiply(g, f, name="h")
with tf.Session() as sess:
print(sess.run(h))
with tf.Session() as sess:
writer = tf.summary.FileWriter("/home/raghav/TECH/output4", sess.graph)
print(sess.run(h))
writer.close()
"""
Explanation: <a id="ref8"></a>
Tensorboard
TensorBoard is a suite of web applications for inspecting and understanding your TensorFlow runs and graphs. TensorBoard currently supports five visualizations: scalars, images, audio, histograms, and graphs. The computations you will use in TensorFlow, for things such as training a massive deep neural network, can be fairly complex and confusing; TensorBoard makes it a lot easier to understand, debug, and optimize your TensorFlow programs.
This is what TensorBoard looks like:
<img src='https://learningtensorflow.com/images/ezgif.com-video-to-gif.gif'>
End of explanation
"""
|
tylere/docker-tmpnb-ee
|
notebooks/1 - IPython Notebook Examples/IPython Project Examples/Interactive Widgets/Custom Widget - Hello World.ipynb
|
apache-2.0
|
from __future__ import print_function # For py 2.7 compat
"""
Explanation: Index - Back
End of explanation
"""
from IPython.html import widgets
from IPython.utils.traitlets import Unicode
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView', sync=True)
"""
Explanation: Building a Custom Widget
The widget framework is built on top of the Comm framework (short for communication). The Comm framework allows you to send and receive JSON messages to and from the front-end (as seen below).
To create a custom widget, you need to define the widget both in the back-end and in the front-end.
Building a Custom Widget
To get started, you'll create a simple hello world widget. Later you'll build on this foundation to make more complex widgets.
Back-end (Python)
DOMWidget and Widget
To define a widget, you must inherit from the Widget or DOMWidget base class. If you intend for your widget to be displayed in the IPython notebook, you'll need to inherit from the DOMWidget. The DOMWidget class itself inherits from the Widget class. The Widget class is useful for cases in which the Widget is not meant to be displayed directly in the notebook, but instead as a child of another rendering environment. For example, if you wanted to create a three.js widget (a popular WebGL library), you would implement the rendering window as a DOMWidget and any 3D objects or lights meant to be rendered in that window as Widgets.
_view_name
Inheriting from the DOMWidget does not tell the widget framework what front-end widget to associate with your back-end widget. Instead, you must tell it yourself by defining a specially named Traitlet, _view_name (as seen below).
End of explanation
"""
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
});
"""
Explanation: sync=True traitlets
Traitlets is an IPython library for defining type-safe properties on configurable objects. For this tutorial you do not need to worry about the configurable piece of the traitlets machinery. The sync=True keyword argument tells the widget framework to handle synchronizing that value to the front-end. Without sync=True, the front-end would have no knowledge of _view_name.
Other traitlet types
Unicode, used for _view_name, is not the only Traitlet type, there are many more some of which are listed below:
Any
Bool
Bytes
CBool
CBytes
CComplex
CFloat
CInt
CLong
CRegExp
CUnicode
CaselessStrEnum
Complex
Dict
DottedObjectName
Enum
Float
FunctionType
Instance
InstanceType
Int
List
Long
Set
TCPAddress
Tuple
Type
Unicode
Union
Not all of these traitlets can be synchronized across the network, only the JSON-able traits and Widget instances will be synchronized.
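As a hedged Python-side sketch (the class name and traits below are hypothetical), several of these traitlet types can be synchronized at once; every trait declared with sync=True becomes an attribute of the front-end model:
from IPython.utils.traitlets import Int, Bool
class StatefulWidget(widgets.DOMWidget):
    _view_name = Unicode('HelloView', sync=True)
    count = Int(0, sync=True)        # an integer kept in sync with the front-end
    enabled = Bool(True, sync=True)  # a boolean kept in sync with the front-end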
Front-end (JavaScript)
Models and views
The IPython widget framework front-end relies heavily on Backbone.js. Backbone.js is an MVC (model view controller) framework. Widgets defined in the back-end are automatically synchronized with generic Backbone.js models in the front-end. The traitlets are added to the front-end instance automatically on first state push. The _view_name trait that you defined earlier is used by the widget framework to create the corresponding Backbone.js view and link that view to the model.
Import the WidgetManager
You first need to import the widget and manager modules. You will use the manager later to register your view by name (the same name you used in the back-end). To import the modules, use the require method of require.js (as seen below).
End of explanation
"""
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
// Define the HelloView
var HelloView = widget.DOMWidgetView.extend({
});
// Register the HelloView with the widget manager.
manager.WidgetManager.register_widget_view('HelloView', HelloView);
});
"""
Explanation: Define the view
Next define your widget view class. Inherit from the DOMWidgetView by using the .extend method. Register the view class with the widget manager by calling .register_widget_view. The first parameter is the widget view name (_view_name that you defined earlier in Python) and the second is a handle to the class type.
End of explanation
"""
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var HelloView = widget.DOMWidgetView.extend({
// Render the view.
render: function(){
this.$el.text('Hello World!');
},
});
manager.WidgetManager.register_widget_view('HelloView', HelloView);
});
"""
Explanation: Render method
Lastly, override the base render method of the view to define custom rendering logic. A handle to the widget's default div element can be acquired via this.$el. The $el property is a jQuery object handle (which can be thought of as a supercharged version of the normal DOM element's handle).
End of explanation
"""
HelloWidget()
"""
Explanation: Test
You should be able to display your widget just like any other widget now.
End of explanation
"""
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView', sync=True)
value = Unicode('Hello World!', sync=True)
"""
Explanation: Making the widget stateful
There is not much that you can do with the above example that you can't do with the IPython display framework. To change this, you will make the widget stateful. Instead of displaying a static "hello world" message, it will display a string set by the back-end. First you need to add a traitlet in the back-end. Use the name of value to stay consistent with the rest of the widget framework and to allow your widget to be used with interact.
End of explanation
"""
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var HelloView = widget.DOMWidgetView.extend({
render: function(){
this.$el.text(this.model.get('value'));
},
});
manager.WidgetManager.register_widget_view('HelloView', HelloView);
});
"""
Explanation: Accessing the model from the view
To access the model associated with a view instance, use the model property of the view. The get and set methods are used to interact with the Backbone model. get is trivial; however, you have to be careful when using set. After calling the model's set you need to call the view's touch method. This associates the set operation with a particular view so output will be routed to the correct cell. The model also has an on method which allows you to listen to events triggered by the model (like value changes).
Rendering model contents
By replacing the string literal with a call to model.get, the view will now display the value stored in the back-end when it is first rendered. However, it will not update itself to a new value when the value changes.
End of explanation
"""
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var HelloView = widget.DOMWidgetView.extend({
render: function(){
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
this.$el.text(this.model.get('value'));
},
});
manager.WidgetManager.register_widget_view('HelloView', HelloView);
});
"""
Explanation: Dynamic updates
To get the view to update itself dynamically, register a function to update the view's value when the model's value property changes. This can be done using the model.on method. The on method takes three parameters, an event name, callback handle, and callback context. The Backbone event named change will fire whenever the model changes. By appending :value to it, you tell Backbone to only listen to the change event of the value property (as seen below).
End of explanation
"""
w = HelloWidget()
w
w.value = 'test'
"""
Explanation: Test
End of explanation
"""
from IPython.utils.traitlets import CInt
class SpinnerWidget(widgets.DOMWidget):
_view_name = Unicode('SpinnerView', sync=True)
value = CInt(0, sync=True)
"""
Explanation: Finishing
Bidirectional communication
The examples above dump the value directly into the DOM. There is no way for you to interact with this dumped data in the front-end. To create an example that accepts input, you will have to do something more than blindly dumping the contents of value into the DOM. In this part of the tutorial, you will use a jQuery spinner to display and accept input in the front-end. IPython currently lacks a spinner implementation so this widget will be unique.
Update the Python code
You will need to change the type of the value traitlet to an integer type (here CInt). It also makes sense to change the name of the widget to something more appropriate, like SpinnerWidget.
End of explanation
"""
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var SpinnerView = widget.DOMWidgetView.extend({
render: function(){
// jQuery code to create a spinner and append it to $el
this.$input = $('<input />');
this.$el.append(this.$input);
this.$spinner = this.$input.spinner({
change: function( event, ui ) {}
});
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
},
});
manager.WidgetManager.register_widget_view('SpinnerView', SpinnerView);
});
"""
Explanation: Updating the Javascript code
The jQuery docs for the spinner control say to use .spinner to create a spinner in an element. Calling .spinner on $el will create a spinner inside $el. Make sure to update the widget name here too so it's the same as _view_name in the back-end.
End of explanation
"""
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var SpinnerView = widget.DOMWidgetView.extend({
render: function(){
var that = this;
this.$input = $('<input />');
this.$el.append(this.$input);
this.$spinner = this.$input.spinner({
change: function( event, ui ) {
that.handle_spin();
},
spin: function( event, ui ) {
that.handle_spin();
}
});
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
this.$spinner.spinner('value', this.model.get('value'));
},
handle_spin: function() {
this.model.set('value', this.$spinner.spinner('value'));
this.touch();
},
});
manager.WidgetManager.register_widget_view('SpinnerView', SpinnerView);
});
"""
Explanation: Getting and setting the value
To set the value of the spinner on update from the back-end, you need to use jQuery's spinner API. spinner.spinner('value', new) will set the value of the spinner. Add that code to the value_changed method to make the spinner update with the value stored in the back-end. Using jQuery's spinner API, you can add a function to handle the spinner change event by passing it in when constructing the spinner. Inside the change event, call model.set to set the value and then touch to inform the framework that this view was the view that caused the change to the model. Note: the var that = this; is a JavaScript trick to pass the current context into closures.
End of explanation
"""
w = SpinnerWidget(value=5)
w
w.value
w.value = 20
"""
Explanation: Test
End of explanation
"""
from IPython.display import display
w1 = SpinnerWidget(value=0)
w2 = widgets.IntSlider()
display(w1,w2)
from IPython.utils.traitlets import link
mylink = link((w1, 'value'), (w2, 'value'))
"""
Explanation: Trying to use the spinner with another widget.
End of explanation
"""
|
oemof/examples
|
oemof_examples/oemof.solph/v0.2.x/sector_coupling/sector_coupling.ipynb
|
gpl-3.0
|
from oemof.solph import EnergySystem
import pandas as pd
# initialize energy system
energysystem = EnergySystem(timeindex=pd.date_range('1/1/2016',
periods=168,
freq='H'))
"""
Explanation: Multisectoral energy system with oemof
General description:
This Jupyter notebook gives a simple example of how to couple the power, heat and mobility sectors.
Installation requirements:
This example requires the latest oemof version and jupyter. Install by:
pip install oemof jupyter
Create a simple energy system
Initialize energy system
End of explanation
"""
# import example data with scaled demands and feedin timeseries of renewables
# as dataframe
data = pd.read_csv("data/example_data.csv", sep=",", index_col='timeindex', parse_dates=['timeindex'])
"""
Explanation: Import input data
End of explanation
"""
from oemof.solph import Bus, Flow, Sink, Source, Transformer
### BUS
# create electricity bus
b_el = Bus(label="b_el")
energysystem.add(b_el)
# add excess sink to help avoid infeasible problems
energysystem.add(Sink(label="excess_el",
inputs={b_el: Flow()}))
energysystem.add(Source(label="shortage_el",
outputs={b_el: Flow(variable_costs=1000)}))
### DEMAND
# add electricity demand
energysystem.add(Sink(label="demand_el",
inputs={b_el: Flow(nominal_value=85,
actual_value=data['demand_el'],
fixed=True)}))
### SUPPLY
# add wind and pv feedin
energysystem.add(Source(label="wind",
outputs={b_el: Flow(actual_value=data['wind'],
nominal_value=60,
fixed=True)}))
energysystem.add(Source(label="pv",
outputs={b_el: Flow(actual_value=data['pv'],
nominal_value=200,
fixed=True)}))
"""
Explanation: Add entities to energy system
End of explanation
"""
from oemof.solph import Model
from oemof.outputlib import processing, views
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def optimize(energysystem):
### optimize
# create operational model
om = Model(energysystem=energysystem)
# solve using the cbc solver
om.solve(solver='cbc',
solve_kwargs={'tee': False})
# save LP-file
om.write('sector_coupling.lp', io_options={'symbolic_solver_labels': True})
# generic result object
#results = processing.results(om=om)
return om
def plot(om, bus_label):
"""
    Plot the flows to and from a bus as two stacked bar charts.
Parameters
----------
om : oemof.solph.models.OperationalModel
bus_label : String
Label of bus to be plotted.
"""
#ToDo: remove this once #387 is solved
    def get_flows_to_and_from_bus(node_results_flows):
        "Function to divide flows into ingoing and outgoing flows."
flows = list(node_results_flows.columns)
to_flows = []
from_flows = []
for flow in flows:
if flow[0][0] == bus_label:
from_flows.append(flow)
elif flow[0][1] == bus_label:
to_flows.append(flow)
else:
print("{} is neither from nor to bus.".format(flow))
return (to_flows, from_flows)
# node_results is a dictionary keyed by 'scalars' and 'sequences'
# holding respective data in a pandas Series and DataFrame.
node_results = views.node(om.results(), bus_label)
node_results_flows = node_results['sequences']
to_flows, from_flows = get_flows_to_and_from_bus(node_results_flows)
# set up plot
f, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)
node_results_flows[to_flows].plot(kind='bar', stacked=True, ax=ax1)
node_results_flows[from_flows].plot(kind='bar', stacked=True, ax=ax2)
ax1.set_title('Flows to and from {}'.format(bus_label))
ax1.legend(loc='upper right', bbox_to_anchor=(1.7, 1.0))
ax2.legend(loc='upper right', bbox_to_anchor=(1.7, 1.0))
ax1.set_ylabel('Power')
ax2.set_xlabel('Time')
ax2.set_ylabel('Power')
# set x-tick-labels
dates = node_results_flows.index
tick_distance = int(len(dates) / 7) - 1
ax2.set_xticks(range(0, len(dates), tick_distance), minor=False)
ax2.set_xticklabels([item.strftime('%d-%m-%Y')
for item in dates.tolist()[0::tick_distance]],
rotation=90, minor=False)
plt.show()
om = optimize(energysystem)
plot(om, 'b_el')
"""
Explanation: Optimize energy system and plot results
End of explanation
"""
# add gas bus
b_gas = Bus(label="b_gas",
balanced=False)
energysystem.add(b_gas)
# add gas power plant
energysystem.add(Transformer(label="pp_gas",
inputs={b_gas: Flow(summed_max_flow=200)},
outputs={b_el: Flow(nominal_value=40,
variable_costs=40)},
conversion_factors={b_el: 0.50}));
om = optimize(energysystem)
plot(om, 'b_el')
"""
Explanation: Adding the gas sector
In order to add a gas power plant, a gas resource bus is needed. The gas power plant connects the gas and electricity buses and thereby couples the gas and electricity sectors.
End of explanation
"""
# add heat bus
b_heat = Bus(label="b_heat",
balanced=True)
energysystem.add(b_heat)
# add heat demand
energysystem.add(Sink(label="demand_th",
inputs={b_heat: Flow(nominal_value=60,
actual_value=data['demand_th'],
fixed=True)}))
# add heater rod
energysystem.add(Transformer(label="heater_rod",
inputs={b_el: Flow()},
outputs={b_heat: Flow(variable_costs=10)},
conversion_factors={b_heat: 0.98}));
om = optimize(energysystem)
plot(om, 'b_el')
plot(om, 'b_heat')
"""
Explanation: Adding the heat sector
The heat sector is added and coupled to the electricity sector similarly to the gas sector. The same component, the Transformer, is used to couple the two sectors; only through its parametrisation does it become a heater rod or a heat pump.
End of explanation
"""
# COP can be calculated beforehand, assuming the heat reservoir temperature is infinite
# random timeseries for COP
import numpy as np
COP = np.random.uniform(low=3.0, high=5.0, size=(168,))
# add heater rod
#Transformer(label="heater_rod",
# inputs={b_el: Flow()},
# outputs={b_heat: Flow(variable_costs=10)},
# conversion_factors={b_heat: 0.98});
# add heat pump
energysystem.add(Transformer(label="heat_pump",
inputs={b_el: Flow()},
outputs={b_heat: Flow(nominal_value=20,
variable_costs=10)},
conversion_factors={b_heat: COP}));
om = optimize(energysystem)
plot(om, 'b_heat')
"""
Explanation: Adding a heat pump
There are different ways to model a heat pump. Here the approach of precalculating a COP and using it as a conversion factor for the Transformer is used. Another approach is to use a Transformer with two inputs - electricity and heat from a low-temperature heat source (formerly the LinearN1Transformer). See the solph example "simple_dispatch".
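A hedged sketch of that alternative (shown here for illustration only and not executed as part of this notebook; the bus and label names are hypothetical) could look like this, with heat output = COP * electricity input:
b_heat_source = Bus(label="b_heat_source")
energysystem.add(b_heat_source)
energysystem.add(Source(label="ambient_heat", outputs={b_heat_source: Flow()}))
cop = 3.0
energysystem.add(Transformer(label="heat_pump_2in",
                             inputs={b_el: Flow(), b_heat_source: Flow()},
                             outputs={b_heat: Flow(nominal_value=10)},
                             conversion_factors={b_el: 1/cop,
                                                 b_heat_source: (cop - 1)/cop}))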
End of explanation
"""
# add CHP with fixed ratio of heat and power (back-pressure turbine)
energysystem.add(Transformer(label='pp_chp',
inputs={b_gas: Flow()},
outputs={b_el: Flow(nominal_value=30,
variable_costs=42),
b_heat: Flow(nominal_value=40)},
conversion_factors={b_el: 0.3,
b_heat: 0.4}));
from oemof.solph.components import ExtractionTurbineCHP
# add CHP with variable ratio of heat and power (extraction turbine)
energysystem.add(ExtractionTurbineCHP(label='pp_chp_extraction',
inputs={b_gas: Flow()},
outputs={b_el: Flow(nominal_value=30,
variable_costs=42),
b_heat: Flow(nominal_value=40)},
conversion_factors={b_el: 0.3,
b_heat: 0.4},
conversion_factor_full_condensation={b_el: 0.5}));
om = optimize(energysystem)
plot(om, 'b_el')
om = optimize(energysystem)
plot(om, 'b_heat')
"""
Explanation: Adding a combined heat and power plant
The combined heat and power plant couples the gas, electricity and heat sector.
End of explanation
"""
from oemof.solph.components import GenericStorage as Storage
charging_power = 20
bev_battery_cap = 50
# add mobility bus
b_bev = Bus(label="b_bev",
balanced=True)
energysystem.add(b_bev)
# add transformer to transport electricity from grid to mobility sector
energysystem.add(Transformer(label="transport_el_bev",
inputs={b_el: Flow()},
outputs={b_bev: Flow(variable_costs=10,
nominal_value=charging_power,
max=data['bev_charging_power'])},
conversion_factors={b_bev: 1.0}))
# add BEV storage
energysystem.add(Storage(label='bev_storage',
inputs={b_bev: Flow()},
outputs={b_bev: Flow()},
nominal_capacity=bev_battery_cap,
capacity_min=data['bev_cap_min'],
capacity_max=data['bev_cap_max'],
capacity_loss=0.00,
initial_capacity=None,
inflow_conversion_factor=1.0,
outflow_conversion_factor=1.0,
nominal_input_capacity_ratio=1.0,
nominal_output_capacity_ratio=1.0))
# add sink for leaving vehicles
energysystem.add(Sink(label="leaving_bev",
inputs={b_bev: Flow(nominal_value=bev_battery_cap,
actual_value=data['bev_sink'],
fixed=True)}))
# add source for returning vehicles
energysystem.add(Source(label="returning_bev",
outputs={b_bev: Flow(nominal_value=bev_battery_cap,
actual_value=data['bev_source'],
fixed=True)}));
om = optimize(energysystem)
plot(om, 'b_bev')
plot(om, 'b_el')
"""
Explanation: Adding the mobility sector
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.22/_downloads/5514ea6c90dde531f8026904a417527e/plot_10_evoked_overview.ipynb
|
bsd-3-clause
|
import os
import mne
"""
Explanation: The Evoked data structure: evoked/averaged data
This tutorial covers the basics of creating and working with :term:evoked
data. It introduces the :class:~mne.Evoked data structure in detail,
including how to load, query, subselect, export, and plot data from an
:class:~mne.Evoked object. For info on creating an :class:~mne.Evoked
object from (possibly simulated) data in a :class:NumPy array
<numpy.ndarray>, see tut_creating_data_structures.
As usual we'll start by importing the modules we need:
End of explanation
"""
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
# we'll skip the "face" and "buttonpress" conditions, to save memory:
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4}
epochs = mne.Epochs(raw, events, tmin=-0.3, tmax=0.7, event_id=event_dict,
preload=True)
evoked = epochs['auditory/left'].average()
del raw # reduce memory usage
"""
Explanation: Creating Evoked objects from Epochs
:class:~mne.Evoked objects typically store an EEG or MEG signal that has
been averaged over multiple :term:epochs, which is a common technique for
estimating stimulus-evoked activity. The data in an :class:~mne.Evoked
object are stored in an :class:array <numpy.ndarray> of shape
(n_channels, n_times) (in contrast to an :class:~mne.Epochs object,
which stores data of shape (n_epochs, n_channels, n_times)). Thus to
create an :class:~mne.Evoked object, we'll start by epoching some raw data,
and then averaging together all the epochs from one condition:
End of explanation
"""
evoked.plot()
"""
Explanation: Basic visualization of Evoked objects
We can visualize the average evoked response for left-auditory stimuli using
the :meth:~mne.Evoked.plot method, which yields a butterfly plot of each
channel type:
End of explanation
"""
print(evoked.data[:2, :3]) # first 2 channels, first 3 timepoints
"""
Explanation: Like the plot() methods for :meth:Raw <mne.io.Raw.plot> and
:meth:Epochs <mne.Epochs.plot> objects,
:meth:evoked.plot() <mne.Evoked.plot> has many parameters for customizing
the plot output, such as color-coding channel traces by scalp location, or
plotting the :term:global field power <GFP> alongside the channel traces.
See tut-visualize-evoked for more information about visualizing
:class:~mne.Evoked objects.
Subselecting Evoked data
.. sidebar:: Evokeds are not memory-mapped
:class:~mne.Evoked objects use a :attr:~mne.Evoked.data attribute
rather than a :meth:~mne.Epochs.get_data method; this reflects the fact
that the data in :class:~mne.Evoked objects are always loaded into
memory, never memory-mapped_ from their location on disk (because they
are typically much smaller than :class:~mne.io.Raw or
:class:~mne.Epochs objects).
Unlike :class:~mne.io.Raw and :class:~mne.Epochs objects,
:class:~mne.Evoked objects do not support selection by square-bracket
indexing. Instead, data can be subselected by indexing the
:attr:~mne.Evoked.data attribute:
End of explanation
"""
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
print(evoked_eeg.ch_names)
new_order = ['EEG 002', 'MEG 2521', 'EEG 003']
evoked_subset = evoked.copy().reorder_channels(new_order)
print(evoked_subset.ch_names)
"""
Explanation: To select based on time in seconds, the :meth:~mne.Evoked.time_as_index
method can be useful, although beware that depending on the sampling
frequency, the number of samples in a span of given duration may not always
be the same (see the time-as-index section of the
tutorial about Raw data <tut-raw-class> for details).
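As a hedged sketch (not part of the original tutorial), :meth:~mne.Evoked.time_as_index converts times in seconds into sample indices that can be used to index the data array directly:
sample_indices = evoked.time_as_index([0.05, 0.1, 0.2])
print(sample_indices)
print(evoked.data[:2, sample_indices])  # first 2 channels at those 3 time points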
Selecting, dropping, and reordering channels
By default, when creating :class:~mne.Evoked data from an
:class:~mne.Epochs object, only the "data" channels will be retained:
eog, ecg, stim, and misc channel types will be dropped. You
can control which channel types are retained via the picks parameter of
:meth:epochs.average() <mne.Epochs.average>, by passing 'all' to
retain all channels, or by passing a list of integers, channel names, or
channel types. See the documentation of :meth:~mne.Epochs.average for
details.
If you've already created the :class:~mne.Evoked object, you can use the
:meth:~mne.Evoked.pick, :meth:~mne.Evoked.pick_channels,
:meth:~mne.Evoked.pick_types, and :meth:~mne.Evoked.drop_channels methods
to modify which channels are included in an :class:~mne.Evoked object.
You can also use :meth:~mne.Evoked.reorder_channels for this purpose; any
channel names not provided to :meth:~mne.Evoked.reorder_channels will be
dropped. Note that channel selection methods modify the object in-place, so
in interactive/exploratory sessions you may want to create a
:meth:~mne.Evoked.copy first.
End of explanation
"""
sample_data_evk_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis-ave.fif')
evokeds_list = mne.read_evokeds(sample_data_evk_file, verbose=False)
print(evokeds_list)
print(type(evokeds_list))
"""
Explanation: Similarities among the core data structures
:class:~mne.Evoked objects have many similarities with :class:~mne.io.Raw
and :class:~mne.Epochs objects, including:
They can be loaded from and saved to disk in .fif format, and their
data can be exported to a :class:NumPy array <numpy.ndarray> (but through
the :attr:~mne.Evoked.data attribute, not through a get_data()
method). :class:Pandas DataFrame <pandas.DataFrame> export is also
available through the :meth:~mne.Evoked.to_data_frame method.
You can change the name or type of a channel using
:meth:evoked.rename_channels() <mne.Evoked.rename_channels> or
:meth:evoked.set_channel_types() <mne.Evoked.set_channel_types>.
Both methods take :class:dictionaries <dict> where the keys are existing
channel names, and the values are the new name (or type) for that channel.
Existing channels that are not in the dictionary will be unchanged.
:term:SSP projector <projector> manipulation is possible through
:meth:~mne.Evoked.add_proj, :meth:~mne.Evoked.del_proj, and
:meth:~mne.Evoked.plot_projs_topomap methods, and the
:attr:~mne.Evoked.proj attribute. See tut-artifact-ssp for more
information on SSP.
Like :class:~mne.io.Raw and :class:~mne.Epochs objects,
:class:~mne.Evoked objects have :meth:~mne.Evoked.copy,
:meth:~mne.Evoked.crop, :meth:~mne.Evoked.time_as_index,
:meth:~mne.Evoked.filter, and :meth:~mne.Evoked.resample methods.
Like :class:~mne.io.Raw and :class:~mne.Epochs objects,
:class:~mne.Evoked objects have evoked.times,
:attr:evoked.ch_names <mne.Evoked.ch_names>, and :class:info <mne.Info>
attributes.
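As a hedged sketch of the dictionary-based renaming/retyping methods and the DataFrame export (not part of the original tutorial; the new channel name is arbitrary and the channel labels come from the sample dataset):
evoked_mod = evoked.copy()
evoked_mod.rename_channels({'EEG 002': 'EEG 002_custom'})
evoked_mod.set_channel_types({'EEG 003': 'eog'})
df = evoked_mod.to_data_frame()  # channels as columns, time points as rows
print(df.shape)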
Loading and saving Evoked data
Single :class:~mne.Evoked objects can be saved to disk with the
:meth:evoked.save() <mne.Evoked.save> method. One difference between
:class:~mne.Evoked objects and the other data structures is that multiple
:class:~mne.Evoked objects can be saved into a single .fif file, using
:func:mne.write_evokeds. The example data <sample-dataset>
includes just such a .fif file: the data have already been epoched and
averaged, and the file contains separate :class:~mne.Evoked objects for
each experimental condition:
End of explanation
"""
for evok in evokeds_list:
print(evok.comment)
"""
Explanation: Notice that :func:mne.read_evokeds returned a :class:list of
:class:~mne.Evoked objects, and each one has an evoked.comment
attribute describing the experimental condition that was averaged to
generate the estimate:
End of explanation
"""
right_vis = mne.read_evokeds(sample_data_evk_file, condition='Right visual')
print(right_vis)
print(type(right_vis))
"""
Explanation: If you want to load only some of the conditions present in a .fif file,
:func:~mne.read_evokeds has a condition parameter, which takes either a
string (matched against the comment attribute of the evoked objects on disk),
or an integer selecting the :class:~mne.Evoked object based on the order
it's stored in the file. Passing lists of integers or strings is also
possible. If only one object is selected, the :class:~mne.Evoked object
will be returned directly (rather than a length-one list containing it):
End of explanation
"""
evokeds_list[0].plot(picks='eeg')
"""
Explanation: Above, when we created an :class:~mne.Evoked object by averaging epochs,
baseline correction was applied by default when we extracted epochs from the
~mne.io.Raw object (the default baseline period is (None, 0),
which assured zero mean for times before the stimulus event). In contrast, if
we plot the first :class:~mne.Evoked object in the list that was loaded
from disk, we'll see that the data have not been baseline-corrected:
End of explanation
"""
evokeds_list[0].apply_baseline((None, 0))
evokeds_list[0].plot(picks='eeg')
"""
Explanation: This can be remedied by either passing a baseline parameter to
:func:mne.read_evokeds, or by applying baseline correction after loading,
as shown here:
End of explanation
"""
left_right_aud = epochs['auditory'].average()
print(left_right_aud)
"""
Explanation: Notice that :meth:~mne.Evoked.apply_baseline operated in-place. Similarly,
:class:~mne.Evoked objects may have been saved to disk with or without
:term:projectors <projector> applied; you can pass proj=True to the
:func:~mne.read_evokeds function, or use the :meth:~mne.Evoked.apply_proj
method after loading.
Combining Evoked objects
One way to pool data across multiple conditions when estimating evoked
responses is to do so prior to averaging (recall that MNE-Python can select
based on partial matching of /-separated epoch labels; see
tut-section-subselect-epochs for more info):
End of explanation
"""
left_aud = epochs['auditory/left'].average()
right_aud = epochs['auditory/right'].average()
print([evok.nave for evok in (left_aud, right_aud)])
"""
Explanation: This approach will weight each epoch equally and create a single
:class:~mne.Evoked object. Notice that the printed representation includes
(average, N=145), indicating that the :class:~mne.Evoked object was
created by averaging across 145 epochs. In this case, the event types were
fairly close in number:
End of explanation
"""
left_right_aud = mne.combine_evoked([left_aud, right_aud], weights='nave')
assert left_right_aud.nave == left_aud.nave + right_aud.nave
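# A hedged extra illustration (not in the original tutorial): explicit weights can
# be passed as well, e.g. a difference wave contrasting the two auditory conditions.
aud_diff = mne.combine_evoked([left_aud, right_aud], weights=[1, -1])
print(aud_diff.nave)  # effective number of averages for this contrast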
"""
Explanation: However, this may not always be the case; if for statistical reasons it is
important to average the same number of epochs from different conditions,
you can use :meth:~mne.Epochs.equalize_event_counts prior to averaging.
Another approach to pooling across conditions is to create separate
:class:~mne.Evoked objects for each condition, and combine them afterward.
This can be accomplished by the function :func:mne.combine_evoked, which
computes a weighted sum of the :class:~mne.Evoked objects given to it. The
weights can be manually specified as a list or array of float values, or can
be specified using the keyword 'equal' (weight each ~mne.Evoked object
by $\frac{1}{N}$, where $N$ is the number of ~mne.Evoked
objects given) or the keyword 'nave' (weight each ~mne.Evoked object
proportional to the number of epochs averaged together to create it):
End of explanation
"""
for ix, trial in enumerate(epochs[:3].iter_evoked()):
channel, latency, value = trial.get_peak(ch_type='eeg',
return_amplitude=True)
latency = int(round(latency * 1e3)) # convert to milliseconds
value = int(round(value * 1e6)) # convert to µV
print('Trial {}: peak of {} µV at {} ms in channel {}'
.format(ix, value, latency, channel))
"""
Explanation: Note that the nave attribute of the resulting ~mne.Evoked object will
reflect the effective number of averages, and depends on both the nave
attributes of the contributing ~mne.Evoked objects and the weights at
which they are combined. Keeping track of effective nave is important for
inverse imaging, because nave is used to scale the noise covariance
estimate (which in turn affects the magnitude of estimated source activity).
See minimum_norm_estimates for more information (especially the
whitening_and_scaling section). Note that mne.grand_average does
not adjust nave to reflect effective number of averaged epochs; rather
it simply sets nave to the number of evokeds that were averaged
together. For this reason, it is best to use mne.combine_evoked rather than
mne.grand_average if you intend to perform inverse imaging on the resulting
:class:~mne.Evoked object.
Other uses of Evoked objects
Although the most common use of :class:~mne.Evoked objects is to store
averages of epoched data, there are a couple other uses worth noting here.
First, the method :meth:epochs.standard_error() <mne.Epochs.standard_error>
will create an :class:~mne.Evoked object (just like
:meth:epochs.average() <mne.Epochs.average> does), but the data in the
:class:~mne.Evoked object will be the standard error across epochs instead
of the average. To indicate this difference, :class:~mne.Evoked objects
have a :attr:~mne.Evoked.kind attribute that takes values 'average' or
'standard error' as appropriate.
Another use of :class:~mne.Evoked objects is to represent a single trial
or epoch of data, usually when looping through epochs. This can be easily
accomplished with the :meth:epochs.iter_evoked() <mne.Epochs.iter_evoked>
method, and can be useful for applications where you want to do something
that is only possible for :class:~mne.Evoked objects. For example, here
we use the :meth:~mne.Evoked.get_peak method (which isn't available for
:class:~mne.Epochs objects) to get the peak response in each trial:
End of explanation
"""
|
PBrockmann/ipython_ferretmagic
|
notebooks/ferretmagic_06_InteractWidget.ipynb
|
mit
|
%load_ext ferretmagic
"""
Explanation: <hr>
Patrick BROCKMANN - LSCE (Climate and Environment Sciences Laboratory)<br>
<img align="left" width="40%" src="http://www.lsce.ipsl.fr/Css/img/banniere_LSCE_75.png" ><br><br>
<hr>
Updated: 2019/11/13
Load the ferret extension
End of explanation
"""
%%ferret -s 600,400
set text/font=arial
use monthly_navy_winds.cdf
show data/full
plot uwnd[i=@ave,j=@ave,l=@sbx:12]
"""
Explanation: "Classic" use with cell magic
End of explanation
"""
from ipywidgets import interact
@interact(var=['uwnd','vwnd'], smooth=(1, 20), vrange=(0.5,5,0.5))
def plot(var='uwnd', smooth=5, vrange=1) :
%ferret_run -s 600,400 'ppl color 6, 70, 70, 70; plot/grat=(dash,color=6)/vlim=-%(vrange)s:%(vrange)s %(var)s[i=@ave,j=@ave], %(var)s[i=@ave,j=@ave,l=@sbx:%(smooth)s]' % locals()
"""
Explanation: Explore interactive widgets
End of explanation
"""
# The line of code to make interactive
%ferret_run -q -s 600,400 'cancel mode logo; \
ppl color 6, 70, 70, 70; \
shade/grat=(dash,color=6) %(var)s[l=%(lstep)s] ; \
go land' % {'var':'uwnd','lstep':'3'}
import ipywidgets as widgets
from ipywidgets import interact
play = widgets.Play(
value=1,
min=1,
max=10,
step=1,
description="Press play",
disabled=False
)
slider = widgets.IntSlider(
min=1,
max=10
)
widgets.jslink((play, 'value'), (slider, 'value'))
a=widgets.HBox([play, slider])
@interact(var=['uwnd','vwnd'], lstep=slider, lstep1=play)
def plot(var='uwnd', lstep=1, lstep1=1) :
%ferret_run -q -s 600,400 'cancel mode logo; \
ppl color 6, 70, 70, 70; \
shade/grat=(dash,color=6)/lev=(-inf)(-10,10,2)(inf)/pal=mpl_Div_PRGn.spk %(var)s[l=%(lstep)s] ; \
go land' % locals()
"""
Explanation: Another example with a map
End of explanation
"""
|
pagutierrez/tutorial-sklearn
|
notebooks-spanish/21-reduccion_dimensionalidad_no_lineal.ipynb
|
cc0-1.0
|
from sklearn.datasets import make_s_curve
X, y = make_s_curve(n_samples=1000)
from mpl_toolkits.mplot3d import Axes3D
ax = plt.axes(projection='3d')
ax.scatter3D(X[:, 0], X[:, 1], X[:, 2], c=y)
ax.view_init(10, -60);
"""
Explanation: Manifold learning
One of the weaknesses of PCA is that it cannot detect nonlinear features. A family of algorithms that avoid this problem are the manifold learning algorithms. A dataset often used in this context is the S-curve:
End of explanation
"""
from sklearn.decomposition import PCA
X_pca = PCA(n_components=2).fit_transform(X)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y);
"""
Explanation: This is actually a 2D dataset (the unrolled S), but it has been embedded in a 3D space in such a way that PCA is not able to recover the original dataset.
End of explanation
"""
from sklearn.manifold import Isomap
iso = Isomap(n_neighbors=15, n_components=2)
X_iso = iso.fit_transform(X)
plt.scatter(X_iso[:, 0], X_iso[:, 1], c=y);
"""
Explanation: As you can see, since it is a linear method, PCA has found the two directions of maximum variability, but it has lost a lot of the variance in the data by projecting the S directly onto a hyperplane. Manifold learning algorithms, available in the sklearn.manifold package, aim to discover the manifold that contains the data (in this case, a two-dimensional manifold). Let us apply, for example, the Isomap method:
End of explanation
"""
from sklearn.datasets import load_digits
digits = load_digits()
fig, axes = plt.subplots(2, 5, figsize=(10, 5),
subplot_kw={'xticks':(), 'yticks': ()})
for ax, img in zip(axes.ravel(), digits.images):
ax.imshow(img, interpolation="none", cmap="gray")
"""
Explanation: Manifold learning for the digits dataset
We can apply these kinds of algorithms to high-dimensional datasets, such as the handwritten digits dataset:
End of explanation
"""
# Build a PCA model
pca = PCA(n_components=2)
pca.fit(digits.data)
# Transform the digits onto the first two principal components
digits_pca = pca.transform(digits.data)
colors = ["#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525",
"#A83683", "#4E655E", "#853541", "#3A3120","#535D8E"]
plt.figure(figsize=(10, 10))
plt.xlim(digits_pca[:, 0].min(), digits_pca[:, 0].max() + 1)
plt.ylim(digits_pca[:, 1].min(), digits_pca[:, 1].max() + 1)
for i in range(len(digits.data)):
# Plot the digits as text
plt.text(digits_pca[i, 0], digits_pca[i, 1], str(digits.target[i]),
color = colors[digits.target[i]],
fontdict={'weight': 'bold', 'size': 9})
plt.xlabel("primera componente principal")
plt.ylabel("segunda componente principal");
"""
Explanation: If we visualize the dataset using a linear technique such as PCA, we already saw that we obtain some information about the structure of the data:
End of explanation
"""
from sklearn.manifold import TSNE
tsne = TSNE(random_state=42)
# we use fit_transform instead of fit:
digits_tsne = tsne.fit_transform(digits.data)
plt.figure(figsize=(10, 10))
plt.xlim(digits_tsne[:, 0].min(), digits_tsne[:, 0].max() + 1)
plt.ylim(digits_tsne[:, 1].min(), digits_tsne[:, 1].max() + 1)
for i in range(len(digits.data)):
# actually plot the digits as text instead of using scatter
plt.text(digits_tsne[i, 0], digits_tsne[i, 1], str(digits.target[i]),
color = colors[digits.target[i]],
fontdict={'weight': 'bold', 'size': 9})
"""
Explanation: However, we can use nonlinear techniques, which in this case will lead to a better visualization. Let us apply the t-SNE manifold learning method:
End of explanation
"""
|
googlesamples/mlkit
|
tutorials/mlkit_image_labeling_model_maker.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
!pip install git+https://github.com/tensorflow/examples.git#egg=tensorflow-examples[model_maker]
"""
Explanation: Create ML Kit Image labeling model with Tensorflow Lite Model Maker
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/tutorials/model_maker_image_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Model Maker library simplifies the process of adapting and converting a TensorFlow neural-network model to particular input data when deploying this model for on-device ML applications.
This notebook shows an end-to-end example that utilizes this Model Maker library to create an image labeling model for ML Kit custom Image Labeling and Object Detection and Tracking features.
Prerequisites
To run this example, we first need to install several required packages, including the Model Maker package from the GitHub repo.
End of explanation
"""
import numpy as np
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core.data_util.image_dataloader import ImageClassifierDataLoader
from tensorflow_examples.lite.model_maker.core.task import image_classifier
from tensorflow_examples.lite.model_maker.core.task.model_spec import ImageModelSpec
from tensorflow_examples.lite.model_maker.core.task import configs
from tensorflow_examples.lite.model_maker.core import compat
import matplotlib.pyplot as plt
"""
Explanation: Import the required packages.
End of explanation
"""
compat.setup_tf_behavior(tf_version=1)
"""
Explanation: Make sure to set tf_version to 1 so that the produced models have uint8 input and output types and are compatible with ML Kit.
End of explanation
"""
image_path = tf.keras.utils.get_file(
'flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
"""
Explanation: Get the data path
Let's get some images to play with this simple end-to-end example. Hundreds of images is a good start for Model Maker while more data could achieve better accuracy.
End of explanation
"""
train_data, test_data = ImageClassifierDataLoader.from_folder(image_path).split(0.9)
"""
Explanation: You could replace image_path with your own image folders. To upload data to Colab, use the upload button in the left sidebar, shown in the image below with the red rectangle. Just try uploading a zip file and unzipping it; the root file path is the current path.
<img src="https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_image_classification.png" alt="Upload File" width="800" hspace="100">
Make sure the file structure is correct. For example, the flower dataset contains 3670 images belonging to 5 classes.
The dataset has the following directory structure:
<pre>
<b>flower_photos</b>
|__ <b>daisy</b>
|______ 100080576_f52e8ee070_n.jpg
|______ 14167534527_781ceb1b7a_n.jpg
|______ ...
|__ <b>dandelion</b>
|______ 10043234166_e6dd915111_n.jpg
|______ 1426682852_e62169221f_m.jpg
|______ ...
|__ <b>roses</b>
|______ 102501987_3cdb8e5394_n.jpg
|______ 14982802401_a3dfb22afb.jpg
|______ ...
|__ <b>sunflowers</b>
|______ 12471791574_bb1be83df4.jpg
|______ 15122112402_cafa41934f.jpg
|______ ...
|__ <b>tulips</b>
|______ 13976522214_ccec508fe7.jpg
|______ 14487943607_651e8062a1_m.jpg
|______ ...
</pre>
If you prefer not to upload your images to the cloud, you could try to run the library locally following the guide in github.
Run the example
The example consists of just a few lines of code, as shown below, each of which represents one step of the overall process.
Step 1. Load input data specific to an on-device ML app. Split it to training data and testing data.
End of explanation
"""
model = image_classifier.create(train_data)
"""
Explanation: Step 2. Customize the TensorFlow model.
End of explanation
"""
loss, accuracy = model.evaluate(test_data)
"""
Explanation: Step 3. Evaluate the model.
End of explanation
"""
config = configs.QuantizationConfig.create_full_integer_quantization(
representative_data=test_data, is_integer_only=True)
"""
Explanation: Step 4. Set up the config for a quantized model with uint8 input and output types
End of explanation
"""
model.export(export_dir='.', quantization_config=config)
"""
Explanation: Step 5. Export to TensorFlow Lite model.
Here, we export the TensorFlow Lite model with metadata, which provides a standard for model descriptions.
You can download it from the left sidebar (in the same place as the upload button) for your own use.
End of explanation
"""
|
detcitty/intro-numerical-methods
|
1_intro_to_python.ipynb
|
mit
|
2 + 2
32 - (4 + 2)**2
1 / 2
"""
Explanation: Discussion 1: Introduction to Python
So you want to code in Python? We will do some basic manipulations and demonstrate some of the basics of the notebook interface that we will be using extensively throughout the course.
Topics:
- Math
- Variables
- Lists
- Control flow
- Coding style
- Other data structures
- IPython/Jupyter notebooks
Other intros:
- Basic Python
- Software Carpentry - Programming in Python
Python Math
Let's start with some basic functions:
End of explanation
"""
1.0 / 2
"""
Explanation: Why do we get the answer above rather than what we would expect?
The answer has to do with the type of number being used. Python is a "dynamically" typed language and automatically determines what kind of number to allocate for us. Above, because we did not include a decimal, Python automatically treated the expression as integers (int type) and according to integer arithmetic, 1 / 2 = 0. Now if we include a decimal we get:
End of explanation
"""
4.0 + 4**(3/2)
4.0 + 4.0**(3.0 / 2.0)
"""
Explanation: Note that Python will make the output a float in this case. What happens for the following though?
End of explanation
"""
3+5j
"""
Explanation: It is good practice to add a decimal after any number you really want to be treated as a float.
Additional types of numbers include complex, Decimal and Fraction.
End of explanation
"""
import math
math.sqrt(4)
math.sin(math.pi / 2.0)
math.exp(-math.pi / 4.0)
"""
Explanation: Note that to use "named" functions such as sqrt or sin we need to import a module so that we have access to those functions. When you import a module (or package) in Python, we are asking Python to go look for the named code and make it available in our workspace (also called a namespace in more general parlance). Here is an example where we use Python's built-in math module:
End of explanation
"""
from math import *
sin(pi / 2.0)
"""
Explanation: Note that in order to access these functions we need to prepend the math. to the functions and the constant $\pi$. We can forgo this and import all of what math holds if we do the following:
End of explanation
"""
num_students = 80
room_capacity = 85
(room_capacity - num_students) / room_capacity * 100.0
"""
Explanation: Note that many of these functions always return a float number regardless of their input.
Variables
Assign variables like you would in any other language:
End of explanation
"""
float(room_capacity - num_students) / float(room_capacity) * 100.0
"""
Explanation: Note that this expression does not give the result we expect, based on what we saw above. What would we have to change to get this to work?
We could go back to change our initializations but we could also use the function float to force these values to be of float type:
End of explanation
"""
a = 10
b = a + 2
print b
"""
Explanation: Note here we have left the defined variables as integers as it makes sense that they remain that way (fractional students aside).
End of explanation
"""
grades = [90.0, 67.0, 85.0, 76.0, 98.0, 70.0]
"""
Explanation: Lists
One of the most useful data structures in Python is the list.
End of explanation
"""
grades[3]
"""
Explanation: Lists are defined with square brackets and delimited by commas. Note that there is another sequence type, the tuple, denoted by ( ), which is immutable (cannot be changed) once created; a short tuple example is included below. Let's try some list manipulations with our list of grades above.
Access a single value in a list
End of explanation
"""
len(grades)
"""
Explanation: Note that Python is 0-indexed, i.e. the first value in the list is accessed with index 0.
Find the length of a list
End of explanation
"""
grades = grades + [62.0, 82.0, 59.0]
print grades
"""
Explanation: Add values to a list
End of explanation
"""
grades[2:5]
grades[0:4]
grades[:4]
grades[4:]
"""
Explanation: Slicing is another important operation
End of explanation
"""
grades[4:11]
"""
Explanation: Note that the range of values does not include the last indexed! This is important to remember for more than lists but we will get to that later.
End of explanation
"""
remember = ["2", 2, 2.0]
remember[0] / 1
remember[1] / 1
remember[2] / 1
"""
Explanation: Another property of lists is that you can put different types in them at the same time. This can be important to remember if you have both int and float types.
End of explanation
"""
count = range(3,7)
print count
"""
Explanation: Finally, one of the more useful list creation functions is range which creates a list with the bounds requested
End of explanation
"""
x = 4
if x > 5:
print "x is greater than 5"
elif x < 5:
print "x is less than 5"
else:
print "x is equal to 5"
"""
Explanation: Control Flow
if
Most basic logical control
End of explanation
"""
for i in range(5):
print i
for i in range(3,7):
print i
for animal in ['cat', 'dog', 'chinchilla']:
print animal
"""
Explanation: for
The for statement provides the most common type of loop in Python (there is also a while construct; a short example is included below).
End of explanation
"""
for n in range(2, 10):
is_prime = True
for x in range(2, n):
if n % x == 0:
print n, 'equals', x, '*', n / x
is_prime = False
break
if is_prime:
print "%s is a prime number" % (n)
"""
Explanation: Related to the for statement are the control statements break and continue. Ideally we can create a loop with logic that can avoid these but sometimes code can be more readable with judicious use of these statements.
End of explanation
"""
def my_func(x):
# Remember to implement this later!
pass
"""
Explanation: The pass statement might appear fairly useless as it simply does nothing, but it can provide a stub reminding you to come back and implement something later.
End of explanation
"""
def my_print_function(x):
print x
my_print_function(3)
def my_add_function(a, b):
return a + b
my_add_function(3.0, 5.0)
def my_crazy_function(a, b, c=1.0):
d = a + b**c
return d
my_crazy_function(2.0, 3.0), my_crazy_function(2.0, 3.0, 2.0), my_crazy_function(2.0, 3.0, c=2.0)
def my_other_function(a, b, c=1.0):
return a + b, a + b**c, a + b**(3.0 / 7.0)
my_other_function(2.0, 3.0, c=2.0)
"""
Explanation: Defining Functions
The last statement above defines a function in Python with an argument called x. Functions can be defined and do lots of different things, here are a few examples.
End of explanation
"""
def fibonacci(n):
"""Return a list of the Fibonacci sequence up to n"""
values = [0, 1]
    while values[-1] + values[-2] <= n:
values.append(values[-1] + values[-2])
print values
return values
fibonacci(100)
"""
Explanation: Let's try writing a slightly more complex (and useful) function. The Fibonacci sequence is formed by adding the previous two numbers of the sequence to get the next value (starting with [0, 1]).
End of explanation
"""
|
statsmodels/statsmodels.github.io
|
v0.13.2/examples/notebooks/generated/statespace_news.ipynb
|
bsd-3-clause
|
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
macrodata = sm.datasets.macrodata.load_pandas().data
macrodata.index = pd.period_range('1959Q1', '2009Q3', freq='Q')
"""
Explanation: Forecasting, updating datasets, and the "news"
In this notebook, we describe how to use Statsmodels to compute the impacts of updated or revised datasets on out-of-sample forecasts or in-sample estimates of missing data. We follow the approach of the "Nowcasting" literature (see references at the end), by using a state space model to compute the "news" and impacts of incoming data.
Note: this notebook applies to Statsmodels v0.12+. In addition, it only applies to the state space models or related classes, which are: sm.tsa.statespace.ExponentialSmoothing, sm.tsa.arima.ARIMA, sm.tsa.SARIMAX, sm.tsa.UnobservedComponents, sm.tsa.VARMAX, and sm.tsa.DynamicFactor.
End of explanation
"""
# De-mean the inflation series
y = macrodata['infl'] - macrodata['infl'].mean()
"""
Explanation: Forecasting exercises often start with a fixed set of historical data that is used for model selection and parameter estimation. Then, the fitted selected model (or models) can be used to create out-of-sample forecasts. Most of the time, this is not the end of the story. As new data comes in, you may need to evaluate your forecast errors, possibly update your models, and create updated out-of-sample forecasts. This is sometimes called a "real-time" forecasting exercise (by contrast, a pseudo real-time exercise is one in which you simulate this procedure).
If all that matters is minimizing some loss function based on forecast errors (like MSE), then when new data comes in you may just want to completely redo model selection, parameter estimation and out-of-sample forecasting, using the updated datapoints. If you do this, your new forecasts will have changed for two reasons:
You have received new data that gives you new information
Your forecasting model or the estimated parameters are different
In this notebook, we focus on methods for isolating the first effect. The way we do this comes from the so-called "nowcasting" literature, and in particular Bańbura, Giannone, and Reichlin (2011), Bańbura and Modugno (2014), and Bańbura et al. (2014). They describe this exercise as computing the "news", and we follow them in using this language in Statsmodels.
These methods are perhaps most useful with multivariate models, since there multiple variables may update at the same time, and it is not immediately obvious what forecast change was created by what updated variable. However, they can still be useful for thinking about forecast revisions in univariate models. We will therefore start with the simpler univariate case to explain how things work, and then move to the multivariate case afterwards.
Note on revisions: the framework that we are using is designed to decompose changes to forecasts from newly observed datapoints. It can also take into account revisions to previously published datapoints, but it does not decompose them separately. Instead, it only shows the aggregate effect of "revisions".
Note on exog data: the framework that we are using only decomposes changes to forecasts from newly observed datapoints for modeled variables. These are the "left-hand-side" variables that in Statsmodels are given in the endog arguments. This framework does not decompose or account for changes to unmodeled "right-hand-side" variables, like those included in the exog argument.
Simple univariate example: AR(1)
We will begin with a simple autoregressive model, an AR(1):
$$y_t = \phi y_{t-1} + \varepsilon_t$$
The parameter $\phi$ captures the persistence of the series
We will use this model to forecast inflation.
To make it simpler to describe the forecast updates in this notebook, we will work with inflation data that has been de-meaned, but it is straightforward in practice to augment the model with a mean term.
End of explanation
"""
y_pre = y.iloc[:-5]
y_pre.plot(figsize=(15, 3), title='Inflation');
"""
Explanation: Step 1: fitting the model on the available dataset
Here, we'll simulate an out-of-sample exercise, by constructing and fitting our model using all of the data except the last five observations. We'll assume that we haven't observed these values yet, and then in subsequent steps we'll add them back into the analysis.
End of explanation
"""
mod_pre = sm.tsa.arima.ARIMA(y_pre, order=(1, 0, 0), trend='n')
res_pre = mod_pre.fit()
print(res_pre.summary())
"""
Explanation: To construct forecasts, we first estimate the parameters of the model. This returns a results object that we will be able to use to produce forecasts.
End of explanation
"""
# Compute the forecasts
forecasts_pre = res_pre.forecast(4)
# Plot the last 3 years of data and the four out-of-sample forecasts
y_pre.iloc[-12:].plot(figsize=(15, 3), label='Data', legend=True)
forecasts_pre.plot(label='Forecast', legend=True);
"""
Explanation: Creating the forecasts from the results object res_pre is easy - you can just call the forecast method with the number of forecasts you want to construct. In this case, we'll construct four out-of-sample forecasts.
End of explanation
"""
# Get the estimated AR(1) coefficient
phi_hat = res_pre.params[0]
# Get the last observed value of the variable
y_T = y_pre.iloc[-1]
# Directly compute the forecasts at the horizons h=1,2,3,4
manual_forecasts = pd.Series([phi_hat * y_T, phi_hat**2 * y_T,
phi_hat**3 * y_T, phi_hat**4 * y_T],
index=forecasts_pre.index)
# We'll print the two to double-check that they're the same
print(pd.concat([forecasts_pre, manual_forecasts], axis=1))
"""
Explanation: For the AR(1) model, it is also easy to manually construct the forecasts. Denoting the last observed variable as $y_T$ and the $h$-step-ahead forecast as $y_{T+h|T}$, we have:
$$y_{T+h|T} = \hat \phi^h y_T$$
Where $\hat \phi$ is our estimated value for the AR(1) coefficient. From the summary output above, we can see that this is the first parameter of the model, which we can access from the params attribute of the results object.
End of explanation
"""
# Get the next observation after the "pre" dataset
y_update = y.iloc[-5:-4]
# Print the forecast error
print('Forecast error: %.2f' % (y_update.iloc[0] - forecasts_pre.iloc[0]))
"""
Explanation: Step 2: computing the "news" from a new observation
Suppose that time has passed, and we have now received another observation. Our dataset is now larger, and we can evaluate our forecast error and produce updated forecasts for the subsequent quarters.
End of explanation
"""
# Create a new results object by passing the new observations to the `append` method
res_post = res_pre.append(y_update)
# Since we now know the value for 2008Q3, we will only use `res_post` to
# produce forecasts for 2008Q4 through 2009Q2
forecasts_post = pd.concat([y_update, res_post.forecast('2009Q2')])
print(forecasts_post)
"""
Explanation: To compute forecasts based on our updated dataset, we will create an updated results object res_post using the append method, to append on our new observation to the previous dataset.
Note that by default, the append method does not re-estimate the parameters of the model. This is exactly what we want here, since we want to isolate the effect on the forecasts of the new information only.
End of explanation
"""
# Compute the impact of the news on the four periods that we previously
# forecasted: 2008Q3 through 2009Q2
news = res_pre.news(res_post, start='2008Q3', end='2009Q2')
# Note: one alternative way to specify these impact dates is
# `start='2008Q3', periods=4`
"""
Explanation: In this case, the forecast error is quite large - inflation was more than 10 percentage points below the AR(1) models' forecast. (This was largely because of large swings in oil prices around the global financial crisis).
To analyse this in more depth, we can use Statsmodels to isolate the effect of the new information - or the "news" - on our forecasts. This means that we do not yet want to change our model or re-estimate the parameters. Instead, we will use the news method that is available in the results objects of state space models.
Computing the news in Statsmodels always requires a previous results object or dataset, and an updated results object or dataset. Here we will use the original results object res_pre as the previous results and the res_post results object that we just created as the updated results.
Once we have previous and updated results objects or datasets, we can compute the news by calling the news method. Here, we will call res_pre.news, and the first argument will be the updated results, res_post (however, if you have two results objects, the news method can be called on either one).
In addition to specifying the comparison object or dataset as the first argument, there are a variety of other arguments that are accepted. The most important specify the "impact periods" that you want to consider. These "impact periods" correspond to the forecasted periods of interest; i.e. these dates specify which periods will have forecast revisions decomposed.
To specify the impact periods, you must pass two of start, end, and periods (similar to the Pandas date_range method). If your time series was a Pandas object with an associated date or period index, then you can pass dates as values for start and end, as we do below.
End of explanation
"""
print(news.summary())
"""
Explanation: The variable news is an object of the class NewsResults, and it contains details about the updates to the data in res_post compared to res_pre, the new information in the updated dataset, and the impact that the new information had on the forecasts in the period between start and end.
One easy way to summarize the results are with the summary method.
End of explanation
"""
# Print the news, computed by the `news` method
print(news.news)
# Manually compute the news
print()
print((y_update.iloc[0] - phi_hat * y_pre.iloc[-1]).round(6))
# Print the total impacts, computed by the `news` method
# (Note: news.total_impacts = news.revision_impacts + news.update_impacts, but
# here there are no data revisions, so total and update impacts are the same)
print(news.total_impacts)
# Manually compute the impacts
print()
print(forecasts_post - forecasts_pre)
# Print the weights, computed by the `news` method
print(news.weights)
# Manually compute the weights
print()
print(np.array([1, phi_hat, phi_hat**2, phi_hat**3]).round(6))
"""
Explanation: Summary output: the default summary for this news results object printed four tables:
Summary of the model and datasets
Details of the news from updated data
Summary of the impacts of the new information on the forecasts between start='2008Q3' and end='2009Q2'
Details of how the updated data led to the impacts on the forecasts between start='2008Q3' and end='2009Q2'
These are described in more detail below.
Notes:
There are a number of arguments that can be passed to the summary method to control this output. Check the documentation / docstring for details.
Table (4), showing details of the updates and impacts, can become quite large if the model is multivariate, there are multiple updates, or a large number of impact dates are selected. It is only shown by default for univariate models.
First table: summary of the model and datasets
The first table, above, shows:
The type of model from which the forecasts were made. Here this is an ARIMA model, since an AR(1) is a special case of an ARIMA(p,d,q) model.
The date and time at which the analysis was computed.
The original sample period, which here corresponds to y_pre
The endpoint of the updated sample period, which here is the last date in y_post
Second table: the news from updated data
This table simply shows the forecasts from the previous results for observations that were updated in the updated sample.
Notes:
Our updated dataset y_post did not contain any revisions to previously observed datapoints. If it had, there would be an additional table showing the previous and updated values of each such revision.
Third table: summary of the impacts of the new information
Columns:
The third table, above, shows:
The previous forecast for each of the impact dates, in the "estimate (prev)" column
The impact that the new information (the "news") had on the forecasts for each of the impact dates, in the "impact of news" column
The updated forecast for each of the impact dates, in the "estimate (new)" column
Notes:
In multivariate models, this table contains additional columns describing the relevant impacted variable for each row.
Our updated dataset y_post did not contain any revisions to previously observed datapoints. If it had, there would be additional columns in this table showing the impact of those revisions on the forecasts for the impact dates.
Note that estimate (new) = estimate (prev) + impact of news
This table can be accessed independently using the summary_impacts method.
In our example:
Notice that in our example, the table shows the values that we computed earlier:
The "estimate (prev)" column is identical to the forecasts from our previous model, contained in the forecasts_pre variable.
The "estimate (new)" column is identical to our forecasts_post variable, which contains the observed value for 2008Q3 and the forecasts from the updated model for 2008Q4 - 2009Q2.
Fourth table: details of updates and their impacts
The fourth table, above, shows how each new observation translated into specific impacts at each impact date.
Columns:
The first three columns of the table describe the relevant update (an "update" is a new observation):
The first column ("update date") shows the date of the variable that was updated.
The second column ("forecast (prev)") shows the value that would have been forecasted for the update variable at the update date based on the previous results / dataset.
The third column ("observed") shows the actual observed value of that updated variable / update date in the updated results / dataset.
The last four columns describe the impact of a given update (an impact is a changed forecast within the "impact periods").
The fourth column ("impact date") gives the date at which the given update made an impact.
The fifth column ("news") shows the "news" associated with the given update (this is the same for each impact of a given update, but is just not sparsified by default)
The sixth column ("weight") describes the weight that the "news" from the given update has on the impacted variable at the impact date. In general, weights will be different between each "updated variable" / "update date" / "impacted variable" / "impact date" combination.
The seventh column ("impact") shows the impact that the given update had on the given "impacted variable" / "impact date".
Notes:
In multivariate models, this table contains additional columns to show the relevant variable that was updated and variable that was impacted for each row. Here, there is only one variable ("infl"), so those columns are suppressed to save space.
By default, the updates in this table are "sparsified" with blanks, to avoid repeating the same values for "update date", "forecast (prev)", and "observed" for each row of the table. This behavior can be overridden using the sparsify argument.
Note that impact = news * weight.
This table can be accessed independently using the summary_details method.
In our example:
For the update to 2008Q3 and impact date 2008Q3, the weight is equal to 1. This is because we only have one variable, and once we have incorporated the data for 2008Q3, there is no remaining ambiguity about the "forecast" for this date. Thus all of the "news" about this variable at 2008Q3 passes through to the "forecast" directly.
Addendum: manually computing the news, weights, and impacts
For this simple example with a univariate model, it is straightforward to compute all of the values shown above by hand. First, recall the formula for forecasting $y_{T+h|T} = \phi^h y_T$, and note that it follows that we also have $y_{T+h|T+1} = \phi^h y_{T+1}$. Finally, note that $y_{T|T+1} = y_T$, because if we know the value of the observations through $T+1$, we know the value of $y_T$.
News: The "news" is nothing more than the forecast error associated with one of the new observations. So the news associated with observation $T+1$ is:
$$n_{T+1} = y_{T+1} - y_{T+1|T} = y_{T+1} - \phi y_T$$
Impacts: The impact of the news is the difference between the updated and previous forecasts, $i_h \equiv y_{T+h|T+1} - y_{T+h|T}$.
The previous forecasts for $h=1, \dots, 4$ are: $\begin{pmatrix} \phi y_T & \phi^2 y_T & \phi^3 y_T & \phi^4 y_T \end{pmatrix}'$.
The updated forecasts for $h=1, \dots, 4$ are: $\begin{pmatrix} y_{T+1} & \phi y_{T+1} & \phi^2 y_{T+1} & \phi^3 y_{T+1} \end{pmatrix}'$.
The impacts are therefore:
$$\{ i_h \}_{h=1}^4 = \begin{pmatrix} y_{T+1} - \phi y_T \\ \phi (y_{T+1} - \phi y_T) \\ \phi^2 (y_{T+1} - \phi y_T) \\ \phi^3 (y_{T+1} - \phi y_T) \end{pmatrix}$$
Weights: To compute the weights, we just need to note that it is immediate that we can rewrite the impacts in terms of the forecast errors, $n_{T+1}$.
$$\{ i_h \}_{h=1}^4 = \begin{pmatrix} 1 \\ \phi \\ \phi^2 \\ \phi^3 \end{pmatrix} n_{T+1}$$
The weights are then simply $w = \begin{pmatrix} 1 \\ \phi \\ \phi^2 \\ \phi^3 \end{pmatrix}$
We can check that this is what the news method has computed.
End of explanation
"""
import pandas_datareader as pdr
levels = pdr.get_data_fred(['PCEPILFE', 'CPILFESL'], start='1999', end='2019').to_period('M')
infl = np.log(levels).diff().iloc[1:] * 1200
infl.columns = ['PCE', 'CPI']
# Remove two outliers and de-mean the series
infl['PCE'].loc['2001-09':'2001-10'] = np.nan
"""
Explanation: Multivariate example: dynamic factor
In this example, we'll consider forecasting monthly core price inflation based on the Personal Consumption Expenditures (PCE) price index and the Consumer Price Index (CPI), using a Dynamic Factor model. Both of these measures track prices in the US economy and are based on similar source data, but they have a number of definitional differences. Nonetheless, they track each other relatively well, so modeling them jointly using a single dynamic factor seems reasonable.
One reason that this kind of approach can be useful is that the CPI is released earlier in the month than the PCE. Once the CPI is released, therefore, we can update our dynamic factor model with that additional datapoint, and obtain an improved forecast for that month's PCE release. A more involved version of this kind of analysis is available in Knotek and Zaman (2017).
We start by downloading the core CPI and PCE price index data from FRED, converting them to annualized monthly inflation rates, removing two outliers, and de-meaning each series (the dynamic factor model does not
End of explanation
"""
# Previous dataset runs through 2017-02
y_pre = infl.loc[:'2017-01'].copy()
const_pre = np.ones(len(y_pre))
print(y_pre.tail())
# For the updated dataset, we'll just add in the
# CPI value for 2017-03
y_post = infl.loc[:'2017-03'].copy()
y_post.loc['2017-03', 'PCE'] = np.nan
const_post = np.ones(len(y_post))
# Notice the missing value for PCE in 2017-03
print(y_post.tail())
"""
Explanation: To show how this works, we'll imagine that it is April 14, 2017, which is the date of the March 2017 CPI release. So that we can show the effect of multiple updates at once, we'll assume that we haven't updated our data since the end of January, so that:
Our previous dataset will consist of all values for the PCE and CPI through January 2017
Our updated dataset will additionally incorporate the CPI for February and March 2017 and the PCE data for February 2017. But it will not yet include the March 2017 PCE (the March 2017 PCE price index was not released until May 1, 2017).
End of explanation
"""
# Plot the updated dataset
fig, ax = plt.subplots(figsize=(15, 3))
y_post.plot(ax=ax)
ax.hlines(0, '2009', '2017-06', linewidth=1.0)
ax.set_xlim('2009', '2017-06');
"""
Explanation: We chose this particular example because in March 2017, core CPI prices fell for the first time since 2010, and this information may be useful in forecast core PCE prices for that month. The graph below shows the CPI and PCE price data as it would have been observed on April 14th$^\dagger$.
$\dagger$ This statement is not entirely true, because both the CPI and PCE price indexes can be revised to a certain extent after the fact. As a result, the series that we're pulling are not exactly like those observed on April 14, 2017. This could be fixed by pulling the archived data from ALFRED instead of FRED, but the data we have is good enough for this tutorial.
End of explanation
"""
mod_pre = sm.tsa.DynamicFactor(y_pre, exog=const_pre, k_factors=1, factor_order=6)
res_pre = mod_pre.fit()
print(res_pre.summary())
"""
Explanation: To perform the exercise, we first construct and fit a DynamicFactor model. Specifically:
We are using a single dynamic factor (k_factors=1)
We are modeling the factor's dynamics with an AR(6) model (factor_order=6)
We have included a vector of ones as an exogenous variable (exog=const_pre), because the inflation series we are working with are not mean-zero.
End of explanation
"""
# Create the news results
# Note
const_post_plus1 = np.ones(len(y_post) + 1)
news = res_pre.news(y_post, exog=const_post_plus1, start='2017-03', end='2017-04')
"""
Explanation: With the fitted model in hand, we now construct the news and impacts associated with observing the CPI for March 2017. The updated data is for February 2017 and part of March 2017, and we'll examine the impacts on both March and April.
In the univariate example, we first created an updated results object, and then passed that to the news method. Here, we're creating the news by directly passing the updated dataset.
Notice that:
y_post contains the entire updated dataset (not just the new datapoints)
We also had to pass an updated exog array. This array must cover both:
The entire period associated with y_post
Any additional datapoints after the end of y_post through the last impact date, specified by end
Here, y_post ends in March 2017, so we needed our exog to extend one more period, to April 2017.
End of explanation
"""
# Show the summary of the news results
print(news.summary())
"""
Explanation: Note:
In the univariate example, above, we first constructed a new results object, and then passed that to the news method. We could have done that here too, although there is an extra step required. Since we are requesting an impact for a period beyond the end of y_post, we would still need to pass the additional value for the exog variable during that period to news:
python
res_post = res_pre.apply(y_post, exog=const_post)
news = res_pre.news(res_post, exog=[1.], start='2017-03', end='2017-04')
Now that we have computed the news, printing summary is a convenient way to see the results.
End of explanation
"""
print(news.summary_details())
"""
Explanation: Because we have multiple variables, by default the summary only shows the news from updated data along and the total impacts.
From the first table, we can see that our updated dataset contains three new data points, with most of the "news" from these data coming from the very low reading in March 2017.
The second table shows that these three datapoints substantially impacted the estimate for PCE in March 2017 (which was not yet observed). This estimate revised down by nearly 1.5 percentage points.
The updated data also impacted the forecasts in the first out-of-sample month, April 2017. After incorporating the new data, the model's forecasts for CPI and PCE inflation in that month revised down 0.29 and 0.17 percentage point, respectively.
While these tables show the "news" and the total impacts, they do not show how much of each impact was caused by each updated datapoint. To see that information, we need to look at the details tables.
One way to see the details tables is to pass include_details=True to the summary method. To avoid repeating the tables above, however, we'll just call the summary_details method directly.
End of explanation
"""
|
mbeyeler/opencv-machine-learning
|
notebooks/12.00-Wrapping-Up.ipynb
|
mit
|
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
class MyClassifier(BaseEstimator, ClassifierMixin):
"""An example classifier"""
def __init__(self, param1=1, param2=2):
"""Called when initializing the classifier
The constructor is used to define some optional
parameters of the classifier. Store them as class
attributes for future access.
Parameters
----------
param1 : int, optional, default: 1
The first parameter
param2 : int, optional, default: 2
The second parameter
"""
self.param1 = param1
self.param2 = param2
def fit(self, X, y=None):
"""Fits the classifier to data
This should fit the classifier to the training data.
All the "work" should be done here.
Parameters
----------
X : array-like
The training data, where the first dimension is
the number of training samples, and the second
dimension is the number of features.
y : array-like, optional, default: None
Vector of class labels
Returns
-------
The fit method returns the classifier object it
belongs to.
"""
return self
def predict(self, X):
"""Predicts target labels
This should predict the target labels of some data `X`.
Parameters
----------
X : array-like
Data samples for which to predict the target labels.
Returns
-------
y_pred : array-like
Target labels for every data sample in `X`
"""
return np.zeros(X.shape[0])
"""
Explanation: <!--BOOK_INFORMATION-->
<a href="https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv" target="_blank"><img align="left" src="data/cover.jpg" style="width: 76px; height: 100px; background: white; padding: 1px; border: 1px solid black; margin-right:10px;"></a>
This notebook contains an excerpt from the book Machine Learning for OpenCV by Michael Beyeler.
The code is released under the MIT license,
and is available on GitHub.
Note that this excerpt contains only the raw code - the book is rich with additional explanations and illustrations.
If you find this content useful, please consider supporting the work by
buying the book!
<!--NAVIGATION-->
< Chaining Algorithms Together to Form a Pipeline | Contents |
Wrapping Up
Congratulations! You have just made a big step toward becoming a machine learning
practitioner. Not only are you familiar with a wide variety of fundamental machine
learning algorithms, you also know how to apply them to both supervised and
unsupervised learning problems.
Before we part ways, I want to give you some final words of advice, point you toward some
additional resources, and give you some suggestions on how you can further improve your
machine learning and data science skills.
Approaching a machine learning problem
When you see a new machine learning problem in the wild, you might be tempted to jump
ahead and throw your favorite algorithm at the problem—perhaps the one you understood
best or had the most fun implementing. But knowing beforehand which algorithm will
perform best on your specific problem is not often possible.
Instead, you need to take a step back and look at the big picture.
Here the book provides an easy-to-follow outline on how to approach machine learning problems in the wild (p.331ff.).
Writing your own OpenCV based classifier in C++
Since OpenCV is one of those Python libraries that does not contain a single line of Python
code under the hood (I'm kidding, but it's close), you will have to implement your custom
estimator in C++.
The first step is to define a
file MyClass.cpp:
#include <opencv2/opencv.hpp>
#include <opencv2/ml/ml.hpp>
#include <stdio.h>
class MyClass : public cv::ml::StatModel
{
public:
MyClass()
{
print("MyClass constructor\n");
}
~MyClass() {}
int getVarCount() const
{
// returns the number of variables in the training samples
return 0;
}
bool empty() const
{
return true;
}
bool isTrained() const
{
// returns true if the model is trained
return false;
}
bool isClassifier() const
{
// returns true if the model is a classifier
return true;
}
bool train(const cv::Ptr<cv::ml::TrainData>& trainData, int flags=0) const
{
// trains the model
// trainData: training data that can be loaded from file using
// TrainData::loadFromCSV or created with TrainData::create.
// flags: optional flags, depending on the model. Some of the models
// can be updated with the new training samples, not completely
// overwritten (such as NormalBayesClassifier or ANN_MLP).
return false;
}
bool train(cv::InputArray samples, int layout, cv::InputArray responses)
{
// trains the model
// samples: training samples
// layout: see ml::SampleTypes
// responses: vector of responses associated with the training samples
return false;
}
float calcError(const cv::Ptr<cv::ml::TrainData>& data, bool test, cv::OutputArray resp)
{
// calculates the error on the training or test set
// data: the training data
// test: if true, the error is computed over the test subset of the data, otherwise
// it's computed over the training subset of the data.
return 0.0f;
}
float predict(cv::InputArray samples, cv::OutputArray results=cv::noArray(), int flags=0) const
{
// predicts responses for the provided samples
// samples: the input samples, floating-point matrix
// results: the optional matrix of results
// flags: the optional flags, model-dependent. see cv::ml::StatModel::Flags
return 0.0f;
}
};
int main()
{
MyClass myclass;
return 0;
}
Then create a file CMakeLists.txt:
cmake_minimum_required(VERSION 2.8)
project(MyClass)
find_package(OpenCV REQUIRED)
add_executable(MyClass MyClass.cpp)
target_link_libraries(MyClass ${OpenCV_LIBS})
Then you can compile the file from the command line via cmake and make:
$ cmake .
$ make
Then run the file:
$ ./MyClass
This should not generate any error, and print to console:
MyClass constructor
Writing your own Scikit-Learn based classifier in Python:
Alternatively, you can write your own classifier using the scikit-learn library.
You can do this by importing BaseEstimator and ClassifierMixin. The latter will
provide a corresponding score method, which works for all classifiers. Optionally, you can
overwrite the score method to provide your own.
The following mixins are available:
- ClassifierMixin if you are writing a classifier (will provide a basic score method)
- RegressorMixin if you are writing a regressor (will provide a basic score method)
- ClusterMixin if you are writing a clustering algorithm (will provide a basic fit_predict method)
- TransformerMixin if you are writing a transformer (will provide a basic fit_transform method)
End of explanation
"""
myclass = MyClassifier()
"""
Explanation: The classifier can be instantiated as follows:
End of explanation
"""
X = np.random.rand(10, 3)
myclass.fit(X)
"""
Explanation: You can then fit the model to some arbitrary data:
End of explanation
"""
myclass.predict(X)
"""
Explanation: And then you can proceed to predicting the target responses:
End of explanation
"""
|
arsenovic/galgebra
|
examples/ipython/Smith Sphere.ipynb
|
bsd-3-clause
|
#from IPython.display import SVG
#SVG('pics/smith_sphere.svg')
from galgebra.printer import Format, Fmt
from galgebra import ga
from galgebra.ga import Ga
from sympy import *
Format()
(o3d,er,ex,es) = Ga.build('e_r e_x e_s',g=[1,1,1])
(o2d,zr,zx) = Ga.build('z_r z_x',g=[1,1])
Bz = er^ex # impedance plane
Bs = es^ex # reflection coefficient plane
Bx = er^es
I = o3d.I()
def down(p, N):
'''
stereographically project a vector in G3 down to the bivector N
'''
n= -1*N.dual()
return -(n^p)*(n-n*(n|p)).inv()
def up(p):
'''
stereographically project a vector in G2 up to the space G3
'''
if (p^Bz).obj == 0:
N = Bz
elif (p^Bs).obj == 0:
N = Bs
n = -N.dual()
return n + 2*(p*p + 1).inv()*(p-n)
a,b,c,z,s,n = [o3d.mv(k,'vector') for k in ['a','b','c','z','s' ,'n']]
"""
Explanation: Smith Sphere
The Smith chart is a nomogram used frequently in RF/microwave engineering. Since its inception it has been recognised that the chart can be usefully projected onto the Riemann sphere [1].
[1] H. A. Wheeler, “Reflection Charts Relating to Impedance Matching,” IEEE Transactions on Microwave Theory and Techniques, vol. 32, no. 9, pp. 1008–1021, Sep. 1984.
End of explanation
"""
Bz.dual()
Bz.is_zero()
z = z.proj([er,ex])
z
"""
Explanation: Starting with an impedance vector $z$, defined by a vector in the impedance plane $B_z$, this vector has two scalar components ( $z^r$, $z^x$) known as resistance and reactance
End of explanation
"""
p = up(z)
p
simplify(p.norm2())
"""
Explanation: stereographically up-projecting this onto the sphere to point $p$,
End of explanation
"""
down(p, Bz)
down(p,Bs).simplify()
(z-er)*(z+er).inv()
p
R=((-pi/4)*Bx).exp()
R
R*p*R.rev()
down(R*p*R.rev(),Bz)
"""
Explanation: If we stereo-project this back onto the impedance plane
End of explanation
"""
|
ceos-seo/data_cube_notebooks
|
notebooks/training/ardc_training/Training_TaskA_Mosaics.ipynb
|
apache-2.0
|
import datacube
import utils.data_cube_utilities.data_access_api as dc_api
from datacube.utils.aws import configure_s3_access
configure_s3_access(requester_pays=True)
api = dc_api.DataAccessApi()
dc = datacube.Datacube(app = 'ardc_task_a')
api.dc = dc
"""
Explanation: ARDC Training: Python Notebooks
Task-A: Cloud-free Mosaics and K-means Clustering
Import the Datacube Configuration
End of explanation
"""
list_of_products = dc.list_products()
netCDF_products = list_of_products[list_of_products['format'] == 'NetCDF']
netCDF_products
"""
Explanation: Browse the available Data Cubes
End of explanation
"""
# Change the data platform and data cube here
platform = 'LANDSAT_7'
product = 'ls7_usgs_sr_scene'
"""
Explanation: Pick a product
Use the platform and product names from the previous block to select a Data Cube.
End of explanation
"""
from utils.data_cube_utilities.dc_time import _n64_to_datetime, dt_to_str
extents = api.get_full_dataset_extent(platform = platform, product = product, measurements=[])
latitude_extents = (min(extents['latitude'].values),max(extents['latitude'].values))
longitude_extents = (min(extents['longitude'].values),max(extents['longitude'].values))
time_extents = (min(extents['time'].values),max(extents['time'].values))
print("Latitude Extents:", latitude_extents)
print("Longitude Extents:", longitude_extents)
print("Time Extents:", list(map(dt_to_str, map(_n64_to_datetime, time_extents))))
"""
Explanation: Display Latitude-Longitude and Time Bounds of the Data Cube
End of explanation
"""
## The code below renders a map that can be used to orient yourself with the region.
from utils.data_cube_utilities.dc_display_map import display_map
display_map(latitude = latitude_extents, longitude = longitude_extents)
"""
Explanation: Visualize Data Cube Region
End of explanation
"""
## Vietnam - Central Lam Dong Province ##
# longitude_extents = (107.80, 108.00)
# latitude_extents = (11.70, 11.90)
## Zanzibar - Zanzibar City
latitude_extents = (-6.25, -6.07)
longitude_extents = (39.15, 39.29)
time_extents = ('2015-01-01', '2015-12-31')
display_map(latitude = latitude_extents, longitude = longitude_extents)
"""
Explanation: Pick a smaller analysis region and display that region
Try to keep your region to less than 0.2-deg x 0.2-deg for rapid processing.
Pick the time extents for your mosaic product (keep to 1 year or less).
End of explanation
"""
landsat_dataset = dc.load(latitude = latitude_extents,
longitude = longitude_extents,
platform = platform,
time = time_extents,
product = product,
measurements = ['red', 'green', 'blue', 'nir', 'swir1', 'swir2', 'pixel_qa'])
landsat_dataset
#view the dimensions and sample content from the cube
"""
Explanation: Load the dataset and the required spectral bands or other parameters
After loading, you will view the Xarray dataset. Notice the dimensions represent the number of pixels in your latitude and longitude dimension as well as the number of time slices (time) in your time series.
End of explanation
"""
acquisition_number = 10
# select an acquisition number from 1 to "time" using the array limits above
%matplotlib inline
# landsat_dataset.red.isel(time = acquisition_number).plot(cmap = "Greys")
landsat_dataset.green.isel(time = acquisition_number).plot(cmap = "Greys")
# landsat_dataset.blue.isel(time = acquisition_number).plot(cmap = "Greys")
#landsat_dataset.nir.isel(time = acquisition_number).plot(cmap = "Greys")
#landsat_dataset.swir1.isel(time = acquisition_number).plot(cmap = "Greys")
#landsat_dataset.swir2.isel(time = acquisition_number).plot(cmap = "Greys")
"""
Explanation: Display Example Images
Single band visualization
For a quick inspection, let's look at two images. The first image lets you select any single band (red, blue, green, nir, swir1, swir2) and display it as a grey-scale image. The second image will mask clouds with bright red on an RGB image.
Select the desired acquisition (time slice) in the block below. You can select from 1 to #, where the max value is the number of time slices noted in the block above. Change the comment statements below to select the bands for the first image.
End of explanation
"""
import numpy as np
def generate_cloud_mask(dataset, include_shadows = False):
#Create boolean Masks for clear and water pixels
clear_pixels = dataset.pixel_qa.values == 2 + 64
water_pixels = dataset.pixel_qa.values == 4 + 64
shadow_pixels= dataset.pixel_qa.values == 8 + 64
a_clean_mask = np.logical_or(clear_pixels, water_pixels)
if include_shadows:
a_clean_mask = np.logical_or(a_clean_mask, shadow_pixels)
return np.invert(a_clean_mask)
def remove_clouds(dataset, include_shadows = False):
#Create boolean Masks for clear and water pixels
clear_pixels = dataset.pixel_qa.values == 2 + 64
water_pixels = dataset.pixel_qa.values == 4 + 64
shadow_pixels= dataset.pixel_qa.values == 8 + 64
a_clean_mask = np.logical_or(clear_pixels, water_pixels)
if include_shadows:
a_clean_mask = np.logical_or(a_clean_mask, shadow_pixels)
return dataset.where(a_clean_mask)
"""
Explanation: Define Cloud Masking Function
Removes clouds and cloud shadows based on the Landsat pixel QA information
This is only for reference ... nothing to modify here
End of explanation
"""
cloud_mask = generate_cloud_mask(landsat_dataset)
cloudless = remove_clouds(landsat_dataset)
import matplotlib.pyplot as plt
from utils.data_cube_utilities.dc_rgb import rgb
rgb(landsat_dataset, time_index = acquisition_number)
plt.show()
red = [255,0,0]
rgb(landsat_dataset,time_index = acquisition_number,
paint_on_mask = [(cloud_mask, red)])
"""
Explanation: Mask clouds from your selected acquisition and visualize the scene and mask
Now we will look at two RGB images, where the second image includes the cloud, cloud shadow and no-data mask in RED. Also, the scene is the same as the acquisition selected above.
End of explanation
"""
from utils.data_cube_utilities.dc_mosaic import create_mosaic
def mrf_mosaic(dataset):
# The mask here is based on pixel_qa products. It comes bundled in with most Landsat Products.
cloud_free_boolean_mask = np.invert(generate_cloud_mask(dataset))
return create_mosaic(dataset, clean_mask = cloud_free_boolean_mask)
recent_composite = mrf_mosaic(landsat_dataset)
recent_composite.nir.plot(cmap = "Greys")
rgb(recent_composite)
"""
Explanation: Cleaning up the clouds and creating a cloud-free mosaic
Remember that this process will filter clouds from the entire time series stack
Most Recent Pixel Mosaic
Masks clouds from imagery and uses the most recent cloud-free pixels.
End of explanation
"""
from utils.data_cube_utilities.dc_mosaic import create_median_mosaic
def median_mosaic(dataset):
# The mask here is based on pixel_qa products. It comes bundled in with most Landsat Products.
cloud_free_boolean_mask = np.invert(generate_cloud_mask(dataset))
return create_median_mosaic(dataset, clean_mask = cloud_free_boolean_mask)
median_composite = median_mosaic(landsat_dataset)
median_composite.nir.plot(cmap = "Greys")
rgb(median_composite)
"""
Explanation: Median Mosaic
Masks clouds from imagery using the median valued cloud-free pixels in the time series
End of explanation
"""
cluster_bands = ['red', 'green', 'blue', 'swir1']
"""
Explanation: Select bands used for clustering
End of explanation
"""
def figure_ratio(ds, fixed_width = 10):
width = fixed_width
height = len(ds.latitude) * (fixed_width / len(ds.longitude))
return (width, height)
from utils.data_cube_utilities.dc_clustering import kmeans_cluster_dataset
# change the number of clusters in the line below, as desired
# this example uses the "median composite" image from above
classification_x = kmeans_cluster_dataset(median_composite, cluster_bands, n_clusters=8)
# plot the k-mean classification result
classification_x.plot(figsize = figure_ratio(classification_x))
"""
Explanation: Perform K-Means clustering and view the output
End of explanation
"""
|
radhikapc/foundation-homework
|
homework12/311 time series homework.ipynb
|
mit
|
# The full 311 file was giving loading and memory errors, so only the first 200,000 rows were saved to a smaller CSV.
import pandas as pd
import dateutil.parser
df = pd.read_csv("small-311-2015.csv")
df.head(5)
df.columns.values
dateutil.parser.parse("07/04/2015 03:33:09 AM")
df.info()
def parse_date(str_date):
return dateutil.parser.parse(str_date)
df['created_datetime'] = df['Created Date'].apply(parse_date)
df.head(2)
df.index = df['created_datetime']
df.head(5)
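# An added alternative sketch (not part of the original homework): instead of parsing each row with
# dateutil, pd.to_datetime can convert the whole 'Created Date' column at once; the format string
# matches timestamps like 07/04/2015 03:33:09 AM.
parsed_dates = pd.to_datetime(df['Created Date'], format="%m/%d/%Y %I:%M:%S %p", errors='coerce')
parsed_dates.head(2)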
"""
Explanation: First, I made a mistake naming the data set! It's 2015 data, not 2014 data. But yes, still use 311-2014.csv. You can rename it.
Importing and preparing your data
1.Import your data, but only the first 200,000 rows. You'll also want to change the index to be a datetime based on the Created Date column - you'll want to check if it's already a datetime, and parse it if not.
End of explanation
"""
df['Complaint Type'].value_counts().head(5)
"""
Explanation: 2.What was the most popular type of complaint, and how many times was it filed?
End of explanation
"""
df['Complaint Type'].value_counts().head(5).plot(kind='barh', y='Complaint Type')
"""
Explanation: 3.Make a horizontal bar graph of the top 5 most frequent complaint types.
End of explanation
"""
columns_to_show = ['Park Borough', 'Complaint Type']
complaints = df[columns_to_show]
complaints.head()
new_list = pd.DataFrame(complaints['Park Borough'].value_counts())
new_list.head(6)
new_comp = complaints.merge(new_list, left_on='Park Borough', right_index=True)
new_comp.head(3)
new_comp['count'] = new_comp['Park Borough_y']
new_comp.head()
new_complaints = new_comp.drop(['Park Borough_x','Park Borough_y'], axis=1)
new_complaints.head()
new_list.head(6)
# Borough populations: Manhattan 1,636,268; Bronx 1,438,159; Brooklyn 2,621,793; Queens 2,321,580; Staten Island 472,621
per_capita = {'BROOKLYN':2621793,'QUEENS': 2321580,'MANHATTAN':1636268,'BRONX':1438159, 'STATEN ISLAND':472621, 'Unspecified':0}
my_list = pd.DataFrame.from_dict(per_capita,orient='index')
my_list.head()
my_list.columns = ['population']
my_list
per_cap = new_list.merge(my_list, left_index=True, right_index=True)
per_cap.head(6)
per_cap['count'] = per_cap['Park Borough']
per_cap.drop('Park Borough', axis=1)
# finally calculating complaints per capita
per_cap['per_capita'] = per_cap['Park Borough'] / per_cap['population']
per_cap.head(6).sort_values(by='per_capita', ascending=False)
"""
Explanation: 4.Which borough has the most complaints per capita? Since it's only 5 boroughs, you can do the math manually.
End of explanation
"""
df.head(3)
march = df['2015-03']
total_march = pd.value_counts(march['Complaint Type'],sort=True)
total_march
print("The total number of complaints is", total_march.sum())
may = df['2015-05']
total_may = pd.value_counts(may['Complaint Type'],sort=True)
total_may
print("The total number of complaints is", total_may.sum())
"""
Explanation: 6.According to your selection of data, how many cases were filed in March? How about May?
End of explanation
"""
April = df['2015-04']
April.sort_values('created_datetime').head()
April[:"20150401"]
"""
Explanation: 7). I'd like to see all of the 311 complaints called in on April 1st.
Surprise! We couldn't do this in class, but it was just a limitation of our data set
End of explanation
"""
april_complaints = April[:"20150401"]
pd.value_counts(april_complaints['Complaint Type'],sort=True).head(1)
"""
Explanation: 8.What was the most popular type of complaint on April 1st?
End of explanation
"""
pd.value_counts(april_complaints['Complaint Type'],sort=True).head(3)
"""
Explanation: 9.What were the most popular three types of complaint on April 1st
End of explanation
"""
df.head(3)
df.resample('M').count().sort_values('Created Date', ascending=False).head(1)
ax = df.resample('M').count().plot(y='Created Date')
ax.set_title("Monthly Complaints")
"""
Explanation: 10) What month has the most reports filed? How many? Graph it.
End of explanation
"""
df.resample('W').count().sort_values('Created Date', ascending=False).head(1)
ax = df.resample('W').count().plot(y='Created Date')
ax.set_title("Weekly Complaints")
"""
Explanation: 11) What week of the year has the most reports filed? How many? Graph the weekly complaints.
End of explanation
"""
columns_req = ['Complaint Type']
find_noice= df[columns_req]
find_noice.head()
df_noice = find_noice[find_noice['Complaint Type'].str.contains("Noise", case=False)]
df_noice.head(3)
df_noice.resample('D').count().head(4)
ax = df_noice.resample('M').count().plot(y='Complaint Type')
ax.set_title("Noise Complaints in Year 2015")
ax = df_noice.resample('D').count().plot(y='Complaint Type')
ax.set_title("Daily Noise Complaints")
"""
Explanation: 12). Noise complaints are a big deal. Use .str.contains to select noise complaints, and make an chart of when they show up annually. Then make a chart about when they show up every day (cyclic).
End of explanation
"""
top_days = find_noice.resample('D').count().sort_values('Complaint Type', ascending=False)
top_days.head(5)
top_days['complaint count'] = top_days['Complaint Type']
t_days = top_days.drop('Complaint Type', axis=1).head(5)
ax = t_days['complaint count'].plot(kind='bar')
ax.set_title("Top Five Days with the Highest Number of Complaints")
"""
Explanation: 13) Which were the top five days of the year for filing complaints? How many on each of those days? Graph it.
End of explanation
"""
df['Unique Key'].groupby(by=df.index.hour).count()
ax = df['Unique Key'].groupby(by=df.index.hour).count().plot()
ax.set_title("Complaint Flow in a Typical Day")
"""
Explanation: 14) What hour of the day are the most complaints? Graph a day of complaints.
End of explanation
"""
df.head(3)
# hour 0 stands for 12 AM (midnight)
df[df.index.hour==0].head(3)
most_comp = df[df.index.hour==0]
# after filtering out, counted all the unique values in the Complaint Type.
most_comp['Complaint Type'].value_counts().head(3)
most_comp_before = df[df.index.hour==23]
most_comp_before['Complaint Type'].value_counts().head(3)
most_comp_after = df[df.index.hour==1]
most_comp_after['Complaint Type'].value_counts().head(3)
"""
Explanation: 15) . One of the hours has an odd number of complaints. What are the most common complaints at that hour, and what are the most common complaints the hour before and after?
End of explanation
"""
most_comp.head(3)
most_comp['Complaint Type'].groupby(by=most_comp.index.minute).value_counts()
"""
Explanation: 16 ) So odd. What's the per-minute breakdown of complaints between 12am and 1am? You don't need to include 1am.
End of explanation
"""
df['Agency'].value_counts().head(5)
agency = df[(df['Agency'] == 'NYPD') | (df['Agency'] == 'DOT') | (df['Agency'] == 'DPR') | (df['Agency'] == 'HPD') | (df['Agency'] == 'DOHMH')]
agency.head(3)
nypd=agency[(agency['Agency'] == 'NYPD')]
dot=agency[(agency['Agency'] == 'DOT')]
dpr=agency[(agency['Agency'] == 'DPR')]
hpd=agency[(agency['Agency'] == 'HPD')]
dohmh=agency[(agency['Agency'] == 'DOHMH')]
dohmh.head(3)
ax=nypd.groupby(by=nypd.index.hour).count().plot(y='Unique Key', label='NYPD')
dot.groupby(by=dot.index.hour).count().plot(y='Unique Key', ax=ax, label='DOT')
dpr.groupby(by=dpr.index.hour).count().plot(y='Unique Key',ax=ax, label='DPR')
hpd.groupby(by=hpd.index.hour).count().plot(y='Unique Key', ax=ax, label='HPD')
dohmh.groupby(by=dohmh.index.hour).count().plot(y='Unique Key', ax=ax, label='DOHMH')
"""
Explanation: 17) Looks like midnight is a little bit of an outlier. Why might that be? Take the 5 most common agencies and graph the times they file reports at (all day, not just midnight).
End of explanation
"""
ax=nypd.groupby(by=nypd.index.week).count().plot(y='Unique Key', label='NYPD')
dot.groupby(by=dot.index.week).count().plot(y='Unique Key', ax=ax, label='DOT')
dpr.groupby(by=dpr.index.week).count().plot(y='Unique Key',ax=ax, label='DPR')
hpd.groupby(by=hpd.index.week).count().plot(y='Unique Key', ax=ax, label='HPD')
dohmh.groupby(by=dohmh.index.week).count().plot(y='Unique Key', ax=ax, label='DOHMH')
"""
Explanation: 18) Graph those same agencies on an annual basis - make it weekly. When do people like to complain? When does the NYPD have an odd number of complaints?
End of explanation
"""
july_aug = nypd[(nypd.index.month==7)| (nypd.index.month==8)]
july_aug['Complaint Type'].value_counts().head(5)
may = nypd[(nypd.index.month==5)]
may['Complaint Type'].value_counts().head(5)
winter = hpd[(hpd.index.month==12)| (hpd.index.month==1)| (hpd.index.month==2)]
winter['Complaint Type'].value_counts().head(5)
summer = hpd[(hpd.index.month==6)| (hpd.index.month==7)| (hpd.index.month==8)]
summer['Complaint Type'].value_counts().head(5)
"""
Explanation: 19) Maybe the NYPD deals with different issues at different times? Check the most popular complaints in July and August vs the month of May. Also check the most common complaints for the Housing Preservation Bureau (HPD) in winter vs. summer.
End of explanation
"""
|
Benedicto/ML-Learning
|
Analyzing product sentiment.ipynb
|
gpl-3.0
|
import graphlab
"""
Explanation: Predicting sentiment from product reviews
Fire up GraphLab Create
End of explanation
"""
products = graphlab.SFrame('amazon_baby.gl/')
"""
Explanation: Read some product review data
Loading reviews for a set of baby products.
End of explanation
"""
products.head()
"""
Explanation: Let's explore this data together
Data includes the product name, the review text and the rating of the review.
End of explanation
"""
products['word_count'] = graphlab.text_analytics.count_words(products['review'])
products.head()
graphlab.canvas.set_target('ipynb')
products['name'].show()
"""
Explanation: Build the word count vector for each review
End of explanation
"""
giraffe_reviews = products[products['name'] == 'Vulli Sophie the Giraffe Teether']
len(giraffe_reviews)
giraffe_reviews['rating'].show(view='Categorical')
"""
Explanation: Examining the reviews for most-sold product: 'Vulli Sophie the Giraffe Teether'
End of explanation
"""
products['rating'].show(view='Categorical')
"""
Explanation: Build a sentiment classifier
End of explanation
"""
#ignore all 3* reviews
products = products[products['rating'] != 3]
#positive sentiment = 4* or 5* reviews
products['sentiment'] = products['rating'] >=4
products.head()
"""
Explanation: Define what's a positive and a negative sentiment
We will ignore all reviews with rating = 3, since they tend to have a neutral sentiment. Reviews with a rating of 4 or higher will be considered positive, while the ones with rating of 2 or lower will have a negative sentiment.
End of explanation
"""
train_data,test_data = products.random_split(.8, seed=0)
sentiment_model = graphlab.logistic_classifier.create(train_data,
target='sentiment',
features=['word_count'],
validation_set=test_data)
"""
Explanation: Let's train the sentiment classifier
End of explanation
"""
sentiment_model.evaluate(test_data, metric='roc_curve')
sentiment_model.show(view='Evaluation')
"""
Explanation: Evaluate the sentiment model
End of explanation
"""
giraffe_reviews['predicted_sentiment'] = sentiment_model.predict(giraffe_reviews, output_type='probability')
giraffe_reviews.head()
"""
Explanation: Applying the learned model to understand sentiment for Giraffe
End of explanation
"""
giraffe_reviews = giraffe_reviews.sort('predicted_sentiment', ascending=False)
giraffe_reviews.head()
"""
Explanation: Sort the reviews based on the predicted sentiment and explore
End of explanation
"""
giraffe_reviews[0]['review']
giraffe_reviews[1]['review']
"""
Explanation: Most positive reviews for the giraffe
End of explanation
"""
giraffe_reviews[-1]['review']
giraffe_reviews[-2]['review']
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']
def awesome_count(word_count):
return word_count.get('awesome', 0)
products['awesome'] = products['word_count'].apply(awesome_count)
def get_count(word_count, word):
return word_count.get(word, 0)
products['awesome'].head()
products['awesome'].sum()
for word in selected_words:
products[word] = products['word_count'].apply(lambda word_count: get_count(word_count, word))
products.head()
len(selected_words)
for word in selected_words:
print word, products[word].sum()
train_data, test_data = products.random_split(.8, seed=0)
simple_model = graphlab.logistic_classifier.create(train_data,
target='sentiment',
features=selected_words,
validation_set=test_data)
simple_model['coefficients'].sort('value').print_rows(15)
simple_model.evaluate(test_data)
sentiment_model.evaluate(test_data)
diaper_champ_reviews = products[products['name']=='Baby Trend Diaper Champ']
diaper_champ_reviews['predicted_sentiment'] = sentiment_model.predict(diaper_champ_reviews, output_type='probability')
diaper_champ_reviews = diaper_champ_reviews.sort('predicted_sentiment', ascending=False)
simple_model.predict(diaper_champ_reviews[0:1], output_type='probability')
diaper_champ_reviews[0]
test_data['sentiment'].show(view='Categorical')
products[['word_count']].stack('word_count',new_column_name=['word', 'count'])
"""
Explanation: Show most negative reviews for giraffe
End of explanation
"""
|
edosedgar/xs-pkg
|
blockchain/edgar_kaziakhmedov_HW1.ipynb
|
gpl-2.0
|
#instructor key info
n1 = 11 * 7
e1 = 37
d1 = 13
#student key info
n2 = 13 * 19
e2 = 41
d2 = 137
grade = 5
m = pow(grade, e2, n2)
signature = pow(m, d1, n1)
print(f'message|signature: {m}|{signature}')
# RSA verification: raise the signature to the public exponent and compare with the message;
# here m is larger than n1, so the check fails, illustrating the issue discussed in part 4.
if (pow(signature, e1, n1) != m):
    print("Failed to verify")
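# A small added check (not part of the original answer) of the Problem 1 arithmetic, using
# Python's built-in modular exponentiation with n = 55, e = 33, d = 17 from the problem statement.
print(pow(13, 33, 55), pow(41, 33, 55)) # verify the pairs 8||13 and 6||41 -> prints 8 6
print((33 * 17) % 40)                   # d = 17 is the inverse of e = 33 modulo phi(55) = 40 -> prints 1
print(pow(8, 17, 55))                   # forged signature for grade 8 -> prints 13
print(pow(0, 17, 55), pow(1, 17, 55))   # grades 0 and 1 sign to themselves -> prints 0 1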
"""
Explanation: Introduction to blockchain HW 1
Problem 1 (10 points)
Assume that at the end of the term an instructor uses an RSA Public Key Cryptosystem to sign the grades sent to students and to the Education Office. He signs each grade with his private key and transmits both the grade and the signature in a secure manner encrypted with a public key. After you decrypt the message you use his public key to decipher the signature and verify the grade. Assume the cryptosystem has the following features: n=55, e=33 and that the grading system features integer grades from 0 to 10. You have received the grade-signature pairs 8||13 and 6||41
1.Verify that the grades you received are indeed originating from the instructor showing just intermediate results of your calculation (2 points)
$ 8 \| 13: m = s^e\, mod\, n = [e = 33; s = 13; n = 55] = 13^{33}\, mod\, 55 = 8 $, so the signature is valid
$ 6 \| 41: m = s^e\, mod\, n = [e = 33; s = 41; n = 55] = 41^{33}\, mod\, 55 = 6 $, so the signature is valid
2.Given the information above only can you fabricate the instructors’ signature for another grade? If yes, for which grade and how? (3 points)
The question is not 100% clear. On the one hand, if I am given the information below (namely the instructor's public key), I have the following answer: <br>
- Yes, we can forge a signature for any grade. Given the instructor's public key we can calculate his private key $d$: <br>
$$ d = e ^ {-1} \, mod \, \varphi(n) = [e = 33; n = 55] = 33^{-1} \, mod \, \varphi(55) = [\varphi(55) = 40] = 17 $$
Then we can claim to be the instructor and sign messages, for example: <br>
$$ s = m^d \, mod\, n = [m = 8; d = 17; n = 55] = 8^{17} \, mod \, 55 = 13 $$
On the other hand, if we are not given the key parameters and assume the key is big enough, then the instructor still has to avoid signing the grades $0$ and $1$, because their signatures are identical to the grades themselves.
3.What would you advise the instructor to do so as to prevent such fabrication? (2 points)
Here the answer again depends on which case from the previous question we consider. In the first case the advice is the following:
- Use much bigger prime numbers $p$ and $q$ at the stage of computing $n$, so that the key length is 2048 bits. That makes factoring $n$, and hence recovering the private exponent, impractical.
In the second case:
- Shift the grade scale by 2, i.e. $0 \to 2$, $1 \to 3$, etc.
4.Discuss what can go wrong in case the instructor sends a grade secretly by first enciphering it with a student's public key, then signing it with his private key. Provide an example (3 points)
In this case it may be impossible to verify the signature, because the encrypted grade can be greater than the teacher's modulus $n_1$, so the signing step reduces it modulo $n_1$ and information is lost [for concreteness we consider the case when $n_1$ (teacher) is smaller than $n_2$ (student)]
End of explanation
"""
# Choose big prime number p
# Choose two random numbers g and x
# Compute y = g^x mod p
# public key - (y, g, p); private key - (x, g, p)
from math import gcd as bltin_gcd
from random import randint
import gmpy2
from functools import wraps
import math
import sympy
def is_prime(n, k):
if n == 1:
return False
if k >= n:
        k = n - 1
for i in range(k):
a = randint(1, n - 1)
if pow(a, n - 1, n) != 1:
return False
return True
def gen_prime(n, k=1000):
found_prime = False
while not found_prime:
p = randint(2 ** (n - 1), 2 ** n)
if is_prime(p, k):
return p
def gen_safe_prime(n, k=1000):
found_safe_prime = False
while not found_safe_prime:
p = gen_prime(n, k)
sp = 2 * p + 1
if (is_prime(sp, k)):
return sp
def prim_root(p):
found_prime_root = False
while not found_prime_root:
g = randint(0, p - 1)
first_exp = 2
second_exp = (p - 1) // 2
if (pow(g, first_exp, p) != 1 and pow(g, second_exp, p) != 1):
return g
def elg_key_gen(n):
p = gen_safe_prime(n)
g = prim_root(p)
x = randint(2 ** (n - 1), 2 ** n)
y = pow(g, x, p)
return (y, g, p), (x, g, p)
"""
Explanation: Problem 2 (10 points)
El-Gamal is a widely used cryptographic standard. In this task you will implement the El-Gamal encryption scheme using Python.
1.Implement a function for generating keys. The function must generate a big random prime number (the problem of generating big prime numbers was discussed during the seminar after lecture 3). (2 points)
End of explanation
"""
from math import gcd as bltin_gcd
from random import randint
import gmpy2
def elg_encrypt(msg, pub_key):
y, g, p = pub_key
k = randint(1, p - 1)
a = pow(g, k, p)
b = pow(y, k, p) * msg % p
return (a, b)
def elg_decrypt(emsg, priv_key):
a, b = emsg
x, g, p = priv_key
msg = b * gmpy2.invert(pow(a, x, p), p) % p
return msg
"""
Explanation: 2.Implement the encryption and decryption functions. (2 points)
End of explanation
"""
key_length_bits = 256
msg_orig = 0xdeadface
pub_key, priv_key = elg_key_gen(key_length_bits)
emsg = elg_encrypt(msg_orig, pub_key)
msg = elg_decrypt(emsg, priv_key)
if (msg_orig == msg):
print("Success")
else:
print("Something went wrong")
"""
Explanation: 3.Test your functions on random values and show that your implementation works correctly (1 point)
End of explanation
"""
from math import gcd as bltin_gcd
from random import randint
import gmpy2
def find_coprime(p):
    # for El-Gamal signing, k must be invertible modulo p - 1, i.e. gcd(k, p - 1) == 1
    while True:
        k = randint(1, p - 1)
        if bltin_gcd(k, p - 1) == 1:
            return k
def elg_sign(msg, priv_key):
while True:
x, g, p = priv_key
k = find_coprime(p)
a = pow(g, k, p)
try:
b = (msg - x * a) * (gmpy2.invert(k, p - 1)) % (p - 1)
break
except ZeroDivisionError:
pass
return (a, b)
def elg_verify_sign(msg, sign, pub_key):
a, b = sign
y, g, p = pub_key
left_op = pow(y, a, p) * pow(a, b, p) % p
right_op = pow(g, msg, p)
if (left_op == right_op):
return "OK"
else:
return "ERR"
"""
Explanation: 4.Implement functions that perform creation and verification of digital signature (2 points)
End of explanation
"""
key_length_bits = 256
msg_orig = 0xdeadface
pub_key, priv_key = elg_key_gen(key_length_bits)
signature = elg_sign(msg_orig, priv_key)
sign_verificaton = elg_verify_sign(msg_orig, signature, pub_key)
print("Signature verification result:", sign_verificaton)
"""
Explanation: 5.Test your functions on random values and show that your algorithm works correctly (1 point)
End of explanation
"""
def sha256(data):
# Set of helper functions
def chunks(data, block_size):
return [data[i:i + block_size] for i in range(0, len(data), block_size)]
def ror(x, n):
return (x >> n) | (x << 32 - n) & 0xffffffff
bytes = ""
# Initialize hash values, such that
# first 32 bits of the fractional
# parts of the square roots of the first 8 primes 2..19:
h0 = 0x6a09e667
h1 = 0xbb67ae85
h2 = 0x3c6ef372
h3 = 0xa54ff53a
h4 = 0x510e527f
h5 = 0x9b05688c
h6 = 0x1f83d9ab
h7 = 0x5be0cd19
# Initialize array of round constants such that
# first 32 bits of the fractional parts of the cube
# roots of the first 64 primes 2..311:
k = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2]
# Pre-processing (Padding):
# begin with the original message of length L bits
# append a single '1' bit
for n in range(len(data)):
bytes += '{0:08b}'.format(ord(data[n]))
bits = bytes + "1"
pBits = bits
# append K '0' bits, where K is the minimum number >= 0
# such that L + 1 + K + 64 is a multiple of 512
while (len(pBits) + 64) % 512 != 0:
pBits += "0"
# append L as a 64-bit big-endian integer, making
# the total post-processed length a multiple of 512 bits
pBits += '{0:064b}'.format(len(bits) - 1)
# Process the message in successive 512-bit chunks:
for block in chunks(pBits, 512):
#print(block)
#print(len(block))
# create a 64-entry message schedule array w[0..63] of 32-bit words
w = [0 for x in range(64)]
# copy chunk into first 16 words w[0..15] of the message schedule array
words = chunks(block, 32)
for n in range(len(words)):
w[n] = int(words[n], 2)
# extend the first 16 words into the remaining 48 words
# w[16..63] of the message schedule array:
for i in range(16, 64):
s0 = ror(w[i - 15], 7) ^ ror(w[i - 15], 18) ^ (w[i - 15] >> 3)
s1 = ror(w[i - 2], 17) ^ ror(w[i - 2], 19) ^ (w[i - 2] >> 10)
w[i] = w[i - 16] + s0 + w[i - 7] + s1 & 0xffffffff
# Initialize working variables to current hash value:
a, b, c, d, e, f, g, h = h0, h1, h2, h3, h4, h5, h6, h7
# Compression function main loop
for i in range(0, 64):
S1 = ((ror(e, 6) ^ ror(e, 11) ^ ror(e, 25)))
ch = (e & f) ^ ((~e) & g)
temp1 = ((h + S1 + ch + k[i] + w[i] ))
S0 = (ror(a, 2) ^ ror(a, 13) ^ ror(a, 22))
maj = ((a & b) ^ (a & c) ^ (b & c))
temp2 = (S0 + maj)
h, g, f, e, d, c, b, a = g, f, e, d + temp1 & 0xffffffff, c, b, a, temp1 + temp2 & 0xffffffff
# Add the compressed chunk to the current hash value:
h0 = h0 + a & 0xffffffff
h1 = h1 + b & 0xffffffff
h2 = h2 + c & 0xffffffff
h3 = h3 + d & 0xffffffff
h4 = h4 + e & 0xffffffff
h5 = h5 + f & 0xffffffff
h6 = h6 + g & 0xffffffff
h7 = h7 + h & 0xffffffff
return '%08x%08x%08x%08x%08x%08x%08x%08x' % (h0, h1, h2, h3, h4, h5, h6, h7)
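# A quick added sanity check: compare the hand-rolled sha256() above with Python's hashlib on the
# same input; the two digests should agree if the implementation is correct.
import hashlib
check_string = 'abc'
print(sha256(check_string))
print(hashlib.sha256(check_string.encode()).hexdigest())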
"""
Explanation: Problem 3 (15 points)
1.Implement SHA256 (https://en.wikipedia.org/wiki/SHA-2) hashing algorithm using the pseudo-code below. Note the great increase in mixing between bits of the w[16..63] words compared to SHA-1 (10 points)
End of explanation
"""
string_small = 'This is a very small string with a few characters.'
string_larger = 'This is a larger string that contains more characters.'
string_big = 'This is a larger string that contains more characters. This demonstrates that no matter how big the input stream is, the generated hash is the same size (but of course, not the same value). If two files have a different hash, they surely contain different data.'
string_empty = ''
print(sha256(string_small))
print(sha256(string_larger))
print(sha256(string_big))
print(sha256(string_empty))
"""
Explanation: 2.Calculate hashes of the texts below (1 point)
End of explanation
"""
# 64 hex values, each hex value represents 4 bits
print(4 * len(sha256(string_small)))
print(4 * len(sha256(string_larger)))
print(4 * len(sha256(string_big)))
print(4 * len(sha256(string_empty)))
"""
Explanation: 3.What is a bit length of each hash? (1 point)
End of explanation
"""
def bitwise_distance(str1, str2):
bits1 = ''
bits2 = ''
diff = 0
if (len(str1) == len(str2) == 0):
return 0
for n in range(len(str1)):
bits1 += '{0:08b}'.format(ord(str1[n]))
for n in range(len(str2)):
bits2 += '{0:08b}'.format(ord(str2[n]))
n = max(len(bits1), len(bits2))
while (len(bits1)) % n != 0 or len(bits1) == 0:
bits1 += "0"
while (len(bits2)) % n != 0 or len(bits2) == 0:
bits2 += "0"
for i in range(len(bits1)):
if (bits1[i] != bits2[i]):
diff += 1
return diff
def hamming2(str1, str2):
bits1 = ''
bits2 = ''
for n in range(len(str1)):
bits1 += '{0:04b}'.format(int(str1[n], 16))
for n in range(len(str2)):
bits2 += '{0:04b}'.format(int(str2[n], 16))
"""Calculate the Hamming distance between two bit strings"""
return sum(c1 != c2 for c1, c2 in zip(bits1, bits2))
string = [string_small, string_larger, string_big, string_empty]
for i, substr1 in enumerate(string):
for substr2 in string[i:len(string)]:
if (substr1 == substr2):
continue
print("--------------------------------------------------------------------------------------------")
print("String1: %.70s" % substr1)
print("String2: %.70s" % substr2)
print("Bitwise difference between strings: %5d" % bitwise_distance(substr1, substr2))
print("Bitwise difference between hashes: %5d" % hamming2(sha256(substr1), sha256(substr2)))
"""
Explanation: 4.What is the bitwise distance between them? What is bitwise distance between their hashes? (1 point)
End of explanation
"""
def str2num(data):
res = 0
for n in range(len(data)):
res += (2 ** (n * 8)) * ord(data[n])
return res
msg = "easy-peasy"
msg_num = str2num(msg)
hash_val = int(sha256(msg), 16)
key_length_bits = 256
pub_key, priv_key = elg_key_gen(key_length_bits)
print("Calculate the signature of hashed message")
sign_hash = elg_sign(hash_val, priv_key)
print(sign_hash)
print("Calculate the signature of message itself")
sign_msg = elg_sign(msg_num, priv_key)
print(sign_msg)
"""
Explanation: 5.Typically use apply hash function to our passwords and texts that we want to digitally sign. Implement digital signature of hashed string using El-Gamal digital signature. Compare the digital signature of plain text and hashed text. (2 points)
End of explanation
"""
import matplotlib
import networkx as nx
%matplotlib qt5
%matplotlib inline
from networkx import balanced_tree, draw_networkx, draw
"""
Explanation: Problem 4 (15 points)
Merkle hash trees play an important role in forming transaction blocks in blockchain. In this assignment we ask you to plot your own Merkle hash tree and check its' properties. Below we provide you with some code fragment what you can use in your assignment
End of explanation
"""
G = nx.Graph()
positions = {}
coordinates = [
[0, 4],
[-2, 3],
[2, 3],
[-3, 2],
[-1, 2],
[1, 2],
[3, 2],
[-3, 1],
[-1, 1],
[1, 1],
[3, 1]
]
parents = [0, 0, 0, 1, 1, 2, 2, 3, 4, 5, 6]
for index in range(11):
G.add_node(index)
G.add_edge(index, parents[index])
positions[index] = coordinates[index]
nx.draw(G, coordinates, node_size = 1000)
labels = {
0: b'0',
1: b'1',
2: b'2',
3: b'3',
4: b'4',
5: b'5',
6: b'6',
7: b'tx1',
8: b'tx2',
9: b'tx3',
10: b'tx4',
}
nx.draw_networkx_labels(G, positions, labels = labels)
"""
Explanation: Let us plot graph basis for Merkle hash tree
End of explanation
"""
import hashlib
first_hash = hashlib.sha256(b"hello") # "b" stands for binary representation
second_hash = hashlib.sha256()
print('First hash represented as a hexadecimal number:', first_hash.hexdigest())
second_hash.update(first_hash.digest())
print('Second hash represented as a hexadecimal number:', second_hash.hexdigest())
"""
Explanation: In Bitcoin, a double SHA-256 hashing scheme is used. Here is an example.
End of explanation
"""
import matplotlib.pyplot as plt
labels[3] = hashlib.sha256(hashlib.sha256(b"tx1").digest()).hexdigest()
# and plot the graph again
plt.figure(figsize=(10,10))
nx.draw(G, positions, node_size = 2000)
nx.draw_networkx_labels(G, positions, labels = labels, font_size = 8)
print(labels[3])
"""
Explanation: Now we can easily change vertices' labels to hashes of corresponding messages and plot new graph
End of explanation
"""
#labels[3] = hashlib.sha256(hashlib.sha256(b"tx1").digest()).hexdigest()
# and plot the graph again
def do_merkle_tree(parents, labels):
marks = [0 for x in range(len(parents))]
for node, parent in reversed(list(enumerate(parents))):
if node not in parents:
labels[parents[node]] = hashlib.sha256(labels[node]).hexdigest()
marks[parents[node]] = 1
continue
if (marks[node] == 0):
if (len(labels[node]) == 64):
continue
index = sorted([i for i, x in enumerate(parents) if (x == node)])
str_to_be_hashed = ''
fail = False
for children in index:
if (children == 0):
continue
if (len(labels[children]) < 64):
fail = True
break
str_to_be_hashed += labels[children]
if (fail == True):
continue
else:
labels[node] = hashlib.sha256(str_to_be_hashed.encode('utf-8')).hexdigest()
marks[node] = 1
return labels
labels = do_merkle_tree(parents, labels)
labels_short = {}
for key in labels:
labels_short[key] = labels[key][:11]
plt.figure(figsize=(7,7))
nx.draw(G, positions, node_size = 3000)
nx.draw_networkx_labels(G, positions, labels = labels_short, font_size = 8)
"""
Explanation: 1.Construct Merkle hash tree using previously constructed graph by finding corresponding SHA256 hashes on vertices (2 points). Plot obtained Merkle hash tree (1 point)
End of explanation
"""
hash1 = hashlib.sha256(b'tx2').hexdigest() # hash of tx2
hash0 = labels[3] # hash of tx1
hash2 = labels[2] # hash of the right part of tree
hash3 = labels[0] # root hash
hash01 = hashlib.sha256((hash0 + hash1).encode('utf-8')).hexdigest()
hash012 = hashlib.sha256((hash01 + hash2).encode('utf-8')).hexdigest()
if (hash012 == hash3):
print("Success")
"""
Explanation: 2.Provide a proof of correctness of leaf tx2 (2 points).
End of explanation
"""
hash0 = hashlib.sha256(b'tx3').hexdigest() # hash of tx3
hash1 = hashlib.sha256(b'tx4').hexdigest() # hash of tx4
hash2 = labels[1] # hash of the left part of tree
hash3 = labels[0] # root hash
hash01 = hashlib.sha256((hash0 + hash1).encode('utf-8')).hexdigest()
hash012 = hashlib.sha256((hash2 + hash01).encode('utf-8')).hexdigest()
if (hash012 == hash3):
print("Success")
"""
Explanation: 3.Provide a proof of correctness for set of leafs (tx3-tx4) (2 points)
End of explanation
"""
labels = {
0: b'0',
1: b'1',
2: b'2',
3: b'3',
4: b'4',
5: b'5',
6: b'6',
7: b'tx12',
8: b'tx2',
9: b'tx3',
10: b'tx4',
}
labels = do_merkle_tree(parents, labels)
labels_short = {}
for key in labels:
labels_short[key] = labels[key][:11]
plt.figure(figsize=(7,7))
nx.draw(G, positions, node_size = 3000)
nx.draw_networkx_labels(G, positions, labels = labels_short, font_size = 8)
"""
Explanation: 4.Change the value on leaf tx1 and recompute corresponding hashes. Plot newly obtained Merkle hash tree (2 points)
End of explanation
"""
G = nx.Graph()
positions = {}
coordinates = [
[0, 4],
[-2, 3],
[0, 3],
[+2, 3],
[-3, 2],
[-1, 2],
[0, 2],
[+2, 2],
[-3, 1],
[-1, 1],
[0, 1],
[+2,1]
]
parents = [0, 0, 0, 0, 1, 1, 2, 3, 4, 5, 6, 7]
for index in range(12):
G.add_node(index)
G.add_edge(index, parents[index])
positions[index] = coordinates[index]
plt.figure(figsize=(7,7))
nx.draw(G, coordinates, node_size = 3000)
labels = {
0: b'0',
1: b'1',
2: b'2',
3: b'3',
4: b'4',
5: b'5',
6: b'6',
7: b'7',
8: b'tx1',
9: b'tx2',
10: b'tx3',
11: b'tx4'
}
nx.draw_networkx_labels(G, positions, labels = labels)
"""
Explanation: 5.Nodes in Merkle hash trees may have arbitrary fanout. In the previous items we considered the case of fanout equal to two. But what will change if we set the fanout equal to three? Construct a Merkle hash tree with fanout 3 to sign 9 values. Construct a hash tree with fanout 2 to sign the same set of values. Plot the obtained trees (4 points)
If we change the fanout to 3 in the previous case, the number of hash calculations does not change.
"""
G = nx.Graph()
positions = {}
coordinates = [
[0, 4],
[-3, 3], [0, 3], [+3, 3],
[-4, 2], [-3, 2], [-2, 2], [-1, 2], [0, 2], [1, 2], [2, 2], [3, 2], [4, 2],
[-4, 1], [-3, 1], [-2, 1], [-1, 1], [0, 1], [1, 1], [2, 1], [3, 1], [4, 1]
]
parents = [0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
for index in range(22):
G.add_node(index)
G.add_edge(index, parents[index])
positions[index] = coordinates[index]
plt.figure(figsize=(10,10))
nx.draw(G, coordinates, node_size = 3000)
labels = {
0: b'0',
1: b'1',
2: b'2',
3: b'3',
4: b'4',
5: b'5',
6: b'6',
7: b'7',
8: b'8',
9: b'9',
10: b'10',
11: b'11',
12: b'12',
13: b'tx1',
14: b'tx2',
15: b'tx3',
16: b'tx4',
17: b'tx5',
18: b'tx6',
19: b'tx7',
20: b'tx8',
21: b'tx9'
}
nx.draw_networkx_labels(G, positions, labels = labels)
"""
Explanation: Now let's construct a Merkle tree with fanout 3 and 9 values.
End of explanation
"""
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
G = nx.Graph()
parents = [0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10] + list(range(11, 20))
for index in range(29):
G.add_node(index)
G.add_edge(index, parents[index])
positions = graphviz_layout(G, prog='dot')
plt.figure(figsize=(10,10))
nx.draw(G, positions, node_size = 3000)
labels = [str(i).encode('utf-8') for i in range(0,29)]
for i in range(20, 29):
labels[i] = ('tx' + str(i - 20)).encode('utf-8')
labels_dict = {i : item for i, item in enumerate(labels)}
nx.draw_networkx_labels(G, positions, labels = labels_dict, font_size = '8')
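# A small added illustration of the verification cost in this scheme: checking one leaf takes one
# leaf hash plus one hash per internal level, and needs (fanout - 1) sibling hashes per internal
# level plus the root.
def proof_cost(n_leaves, fanout):
    levels = 0
    covered = 1
    while covered < n_leaves: # number of internal levels needed to cover all leaves
        covered *= fanout
        levels += 1
    return levels + 1, (fanout - 1) * levels + 1 # (hash calculations, requested hashes)
print(proof_cost(9, 3)) # -> (3, 5)
print(proof_cost(9, 2)) # -> (5, 5)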
"""
Explanation: In that configuration, to check any txX we hash it, concatenate the result with its two neighbouring hashes and hash again, then concatenate that with the two neighbouring hashes at the next level and compare against the root hash. So here we need to perform 3 hash calculations and make 5 requests (including the root).
If we store the same 9 values in a tree with fanout 2, we need 5 hash calculations.
End of explanation
"""
|
bioe-ml-w18/bioe-ml-winter2018
|
homeworks/Week2-Statistics.ipynb
|
mit
|
# This line tells matplotlib to include plots here
% matplotlib inline
import numpy as np # We'll need numpy later
from scipy.stats import kstest, ttest_ind, ks_2samp, zscore
import matplotlib.pyplot as plt # This lets us access the pyplot functions
"""
Explanation: Week 2 - Implementation of Shaffer et al
Due January 25 at 8 PM
End of explanation
"""
# Initial code here
numRepeats = 1000
mu, sigma = 5.0, 1.0
n = 50
sampleMean = np.empty((n, numRepeats))
nVec = np.array(range(1, n+1))
for i in range(numRepeats):
for j in range(n):
sampleMean[j, i] = np.mean(np.random.normal(loc=mu, scale=sigma, size=(j + 1, )))
"""
Explanation: (1) Estimation of a sample mean from a normally distributed variable.
Let us assume that a true distribution of a process is described by the normal distribution with $\mu=5$ and $\sigma=1$. You have a measurement technique that allows you to sample n points from this distribution. In Matlab this is a random number generator whose numbers will be chosen from the desired normal distribution by using the call normrnd(mu, sigma, [1, n]). Sample from this normal distribution from n=1 to 50 (I.e. n=1:50). Create a plot for the standard deviation of the calculated mean from each n when you repeat the sampling 1000 times each. (i.e. You will repeat your n observations 1000 times and will calculate the sample mean for each of the 1000 trials).
End of explanation
"""
# Answer to 1a here
plt.plot(nVec, np.std(sampleMean, axis=1), label='stddev sample mean')
plt.plot(nVec, 1./np.sqrt(nVec), 'r', label='1/sqrt(n)');
plt.title('Stdev of mean estimate v. n, 1000 trials');
plt.ylabel('Stdev');
plt.xlabel('n');
"""
Explanation: (1a) Plot the standard deviation of the estimate of the sample mean versus n. Add a second line which is 1/sqrt(n). Describe what this tells you about the relationship between n and your power to estimate the underlying mean.
End of explanation
"""
# Answer to 1b here
plt.boxplot(np.transpose(sampleMean));
plt.ylabel('Values');
plt.xlabel('n');
"""
Explanation: This shows that the standard deviation of the sample mean (also called the standard error) follows a $1/\sqrt{n}$ relationship.
1b. Plot the boxplot for the sample means for all values n. Using words, interpret what the boxplot view of the 1000 trials for n=1 means and what the trends in the boxplot demonstrate compared to the plot in 1a (I.e. What information do you gain or lose in the two different plotting schemes)?
End of explanation
"""
# Answer to 1c here
sampleVec = sampleMean[4, :]
plt.hist(sampleVec)
plt.xlabel('Value')
plt.ylabel('Density')
# Normalize sample mean and stdev
P = kstest(zscore(sampleVec), 'norm')[1]
print(P)
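# Also reporting the sample mean and standard deviation of the sample means, as asked in 1c (added for completeness).
print("Sample mean:", np.mean(sampleVec), "Sample stdev:", np.std(sampleVec))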
"""
Explanation: The box plot shows that the values with higher n converge on the true mean (5 in this case). The box plot shows the overall distribution of values in greater detail, while the standard plot is easier to read for a single value plotted over the x-axis.
1c. For n=3, plot the histogram of the mean for the 1000 trials. Use the Kolmogorov-Smirnov test to see if this sample distribution is normal (hint you will need to translate this to the standard normal distribution). Report the sample mean and sample standard deviation, the p-value from the test, and whether you would reject the null hypothesis.
End of explanation
"""
# Answer to 1d here
sampleVec = sampleMean[21, :]
plt.hist(sampleVec)
plt.xlabel('Value')
plt.ylabel('Density')
# Normalize sample mean and stdev
P = kstest(zscore(sampleVec), 'norm')[1]
print(P)
"""
Explanation: We do not reject the null hypothesis.
1d. Repeat 1c but for n=20. What changes when the number of samples increases?
End of explanation
"""
# Answer 2a here
Wvec = np.random.weibull(1, size=(1000, ))
plt.hist(Wvec);
"""
Explanation: Nothing changes in this case. For both low and high n the sample mean is normally distributed.
(2) Weibull distribution. Now we will explore sampling from an alternate distribution type.
(2a) Sample the Weibull distribution with parameters a = 1, 1000 times. Plot the histogram of these values. Describe the shape of this histogram in words. Is it anything like the normal distribution?
End of explanation
"""
# Answer 2b here
sampleMean = np.empty((1000, n))
for i in range(sampleMean.shape[0]):
for j in range(n):
sampleMean[i, j] = np.mean(np.random.weibull(1.0, size=(j + 1, )))
plt.boxplot(sampleMean);
nVec = np.arange(1, n+1)
plt.plot(nVec, np.std(sampleMean, axis=0), label='stddev sample mean')
plt.plot(nVec, 1./np.sqrt(nVec), 'r', label='1/sqrt(n)');
plt.title('Stdev of mean estimate v. n, 1000 trials');
plt.ylabel('Stdev');
plt.xlabel('n');
"""
Explanation: Doesn't look anything like a normal distribution. Much heavier right tail.
(2b) As in problem 1, plot a boxplot of the sample distribution of the Weibull with A=1,B=1 from n=1:50. How does this differ from the plot in 1b and why? Plot the standard deviations of the sample means versus n. Is this any different?
End of explanation
"""
sampleVec = sampleMean[2, :]
plt.hist(sampleVec)
plt.xlabel('Value')
plt.ylabel('Density')
# Normalize sample mean and stdev
Pnorm = kstest(zscore(sampleVec), 'norm')[1]
Pweib = kstest(sampleVec, 'expon')[1]
# Scipy and numpy's weibull distributions are different, but a weibull(a=1)
# is the same as an exponential distribution.
print(Pnorm)
print(Pweib)
"""
Explanation: Doesn't differ from 1b.
(2c) For n=3, plot the histogram of the sample means. What is this distribution, is it Weibull or normal? Report your test results.
End of explanation
"""
sampleVec = sampleMean[19, :]
plt.hist(sampleVec)
plt.xlabel('Value')
plt.ylabel('Density')
# Normalize sample mean and stdev
Pnorm = kstest(zscore(sampleVec), 'norm')[1]
Pweib = kstest(zscore(sampleVec), 'expon')[1]
print(Pnorm)
print(Pweib)
"""
Explanation: This distribution is closer to normal.
2d. Repeat 2c and 2d for n=20 (don’t include the plots, but do include the test result for normality and explain the impact of the number of samples n, on normality).
End of explanation
"""
# Answer to 2f
# The underlying distribution changes shape, but the same outcome holds: the sampling
# distribution of the mean looks increasingly normal as N increases.
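# A sketch (added for clarity) of the A=10, B=2 experiment: MATLAB's wblrnd(A, B) uses scale A and
# shape B, which with numpy corresponds to A * np.random.weibull(B, size).
sample_means_2e = np.array([np.mean(10.0 * np.random.weibull(2.0, size=3)) for _ in range(1000)])
plt.hist(sample_means_2e)
plt.xlabel('Sample mean, n=3, Weibull A=10, B=2')
plt.ylabel('Freq');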
"""
Explanation: Also looks normally distributed.
2e. Repeat 2c but with A=10 and B=2 (I.e plot the histogram of the calculated sample means for 1000 trials of n=3). What is this distribution, Weibull or normal? Why does it look different than in 1c?
End of explanation
"""
dOne = lambda n: np.random.normal(loc=1.0, scale=1.0, size=(n, ))
dTwo = lambda n: np.random.normal(loc=3.0, scale=1.0, size=(n, ))
"""
Explanation: (3) Differential expression . In this problem you will use the two-sample t-test to explore what differential hypothesis testing looks like in known standards and how multiple hypothesis correction effects the number of false positives and negatives from these tests.
Distribution 1, normal with mu=1, sigma=1
Distribution 2, normal with mu=3, sigma=1
End of explanation
"""
def falseNeg(n=3, nTrial=100, p=0.05):
compare = np.empty((nTrial, ))
for ii, _ in enumerate(compare):
compare[ii] = ttest_ind(dOne(n), dTwo(n), equal_var=False)[1]
return sum(compare > p)
print(falseNeg())
"""
Explanation: 3a. False Negative: Using n=3, perform 100 comparisons of distribution 1 versus distribution 2 with an alpha=0.05. Anytime you fail to reject the hypothesis it is a false negative. Why is this a false negative? Report the number of false negatives from your 100 tests.
Hint: It'd be helpful to define a function that does this for you at this point.
End of explanation
"""
def falsePos(n=3, nTrial=100, p=0.05):
compare = np.empty((nTrial, ))
for ii in range(len(compare)):
compare[ii] = ttest_ind(dOne(n), dOne(n), equal_var=False)[1]
return sum(compare < p)
print(falsePos())
"""
Explanation: In reality these are two distributions, so the test has missed an occasion when we should have rejected the null hypothesis.
3b. False Positives: Using n=3, perform 100 comparisons of distribution 1 versus distribution 1 with an alpha=0.05. Anytime you reject the hypothesis this is a false positive. Why is this a false positive? Report the number of false positives from your 100 tests.
End of explanation
"""
print(falsePos(nTrial=1000))
print(falsePos(nTrial=10000))
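# A short added calculation: under the null hypothesis the expected number of false positives is
# roughly alpha times the number of comparisons, which is how the 10,000-comparison case can be predicted.
alpha = 0.05
for nComparisons in (100, 1000, 10000):
    print(nComparisons, "comparisons -> about", alpha * nComparisons, "false positives expected")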
"""
Explanation: These are the same distribution, so the null hypothesis is true.
3c. Repeat 3b but 1000 times. What is the number of false positives? Predict the number of false positives you would get if you compared samples from the same distribution 10,000 times and explain why.
End of explanation
"""
nVec = np.array(range(3, 31))
fPos = np.empty(nVec.shape)
fNeg = np.empty(nVec.shape)
for nn, nItem in enumerate(nVec):
fPos[nn] = falsePos(n=nItem)
fNeg[nn] = falseNeg(n=nItem)
plt.plot(nVec, fPos);
plt.plot(nVec, fNeg);
"""
Explanation: The expected number of false positives is the significance threshold (alpha) times the number of comparisons, so it is proportional to the number of trials run.
3d. Now sweep n from 3 to 30 and report the number of false positives and false negatives for each n when you run 100 comparisons. (Provide this in a table format). Please explain the trend you see and interpret its meaning.
End of explanation
"""
dThree = lambda n: np.random.normal(loc=3.0, scale=2.0, size=(n, ))
def falseNegB(n=3, nTrial=100):
compare = np.empty((nTrial, ))
for ii, _ in enumerate(compare):
compare[ii] = ttest_ind(dOne(n), dThree(n), equal_var=False)[1]
return sum(compare > 0.05)
print(falseNegB())
"""
Explanation: Number of false positives is not dependent upon $n$, while the number of false negatives is.
3e. For n=3, suggest how the number of false negatives changes according to sigma for the two distributions and test this. Report your new values and sigma and the number of false negatives in 100 tests.
End of explanation
"""
nVec = np.array(range(3, 31))
fPos = np.empty(nVec.shape)
fNeg = np.empty(nVec.shape)
for nn, nItem in enumerate(nVec):
fPos[nn] = falsePos(n=nItem, p=0.01)
fNeg[nn] = falseNeg(n=nItem, p=0.01)
plt.plot(nVec, fPos);
plt.plot(nVec, fNeg);
"""
Explanation: The number of false negatives increases with sigma.
(3f) Lastly, perform 3d for p < 0.01 instead of p < 0.05. How does this influence the rate of false positives and negatives? How might you use this when performing many tests?
End of explanation
"""
repOne = np.loadtxt("data/wk2/expt_rep1.csv")
repTwo = np.loadtxt("data/wk2/expt_rep2.csv")
"""
Explanation: This decreases the number of false positives but increases the number of false negatives.
(5) Shaffer et al
In this excercise we're going to explore some basic concepts of statistics, and use them to build up to some more advanced ideas. To examine these ideas we're going to consider a classic of molecular biology—the Luria-Delbrück experiment.
End of explanation
"""
# simLuriaDelbruck simulates a single culture; CVofNRuns below repeats it many times to build the distribution of mutant counts
def simLuriaDelbruck(cultureSize, mutationRate):
nCells, nMuts = 1, 0 # Start with 1 non-resistant cell
    for _ in range(int(np.floor(np.log2(cultureSize)))): # num of gens
nCells = 2 * nCells # Double the number of cells, simulating division
newMuts = np.random.poisson(nCells * mutationRate) # de novo
nMuts = 2 * nMuts + newMuts # Previous mutants divide and add
nCells = nCells - newMuts # Non-resistant pop goes down by newMuts
return nMuts
def CVofNRuns(N, cultureSize, mutationRate):
    return np.fromiter((simLuriaDelbruck(cultureSize, mutationRate) for x in range(N)), dtype=int)
cvs = CVofNRuns(3000, 120000, 0.0001)
plt.hist(cvs, bins=30);
"""
Explanation: (5a) First, we need to build up a distribution of outcomes for what an experiment would look like if it followed the Luria-Delbruck process.
Fill in the function below keeping track of normal and mutant cells. Then, make a second function, CVofNRuns, that runs the experiment 3000 times. You can assume a culture size of 120000 cells, and mutation rate of 0.0001 per cell per generation. What does the distribution of outcomes look like?
End of explanation
"""
ks_2samp(repOne/np.mean(repOne), repTwo/np.mean(repTwo))
"""
Explanation: (5b) Compare the distribution of outcomes between the two replicates of the experiment using the 2-sample KS test. Are they consistent with one another?
Hint: Each experiment varies slightly in the amount of time it was run. The absolute values of the numbers doesn't matter, so much as the variation of them. You'll need to correct for this by dividing by the mean of the results.
End of explanation
"""
ks_2samp(repOne/np.mean(repOne), cvs/np.mean(cvs))
"""
Explanation: (5c) Compare the distribution of outcomes between the experiment and model. Are our results consistent with resistance arising through a Luria-Delbruck related process?
End of explanation
"""
|
termoshtt/ndarray-odeint
|
CLV.ipynb
|
mit
|
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
"""
Explanation: Covariant Lyapunov Vectors
ndarray-odeint includes a calculator for Covariant Lyapunov Vectors (CLVs).
The algorithm for computing CLVs was introduced in Ginelli et al., PRL (2007), to analyze collective motions.
End of explanation
"""
! cargo run --release --example clv > clv.csv
"""
Explanation: A sample script that calculates the CLVs of the Lorenz 63 model is located at examples/clv.rs
End of explanation
"""
df = np.arccos(pd.read_csv("clv.csv"))
for col in df.columns:
plt.figure()
plt.title(col)
df[col].hist(bins=100)
plt.xlim(0, np.pi)
plt.yscale("log")
"""
Explanation: Tangency of CLVs
End of explanation
"""
|
Mayurji/Machine-Learning
|
Statistics/Pandas and ThinkStat.ipynb
|
gpl-3.0
|
############ First we import pandas ############
import pandas as pd
import numpy as np
import math
from collections import Counter, defaultdict
import matplotlib.pyplot as plt
import scipy.stats as stat
import random
from IPython.display import Image
%matplotlib inline
############ Declaration of Series ############
numSeries = pd.Series([-1,52,33,64,15])
############ Viewing Series with Default index ############
numSeries
############ Finding the type of the Object ############
type(numSeries)
############ Getting the values of Series Object ############
numSeries.values
############ Getting the value of the Series Index ############
numSeries.index
############ Customizing the Index ############
numSeries2 = pd.Series([23,45,32,23],index=['a','b','c','d'])
############ viewing customized index ############
numSeries2
############ Checking the customized index ############
numSeries2.index
############ Accessing particular element of the index of a Series Object ############
numSeries2['a']
############ Modifying thr particular element of the Series with index to access it ############
numSeries2['a'] = 56
############ validating the modification in the series ############
numSeries2['a']
############ Creating DataFrame ############
############ A dict's Key is considered as column for a table in DataFrame and values of Dict is value of ############
############ column in a table. ############
data = {'empid':['E1','E2','E3'],
'Salary':[10000,25000,40000],
'Name':['Jack','Joe','Jackie']}
############ Dict object ############
type(data)
############ converting Dict to DataFrame ############
dataframe = pd.DataFrame(data)
############ Simple DataFrame ############
dataframe
############ Accessing Dataframe column ############
dataframe['Name']
############ New Dataframe ############
data2 = {'empid':['E1','E2','E3'],
'position':['Junior consultant','consultant','Senior consultant'],
}
dataframe2 = pd.DataFrame(data2)
############ merging two dataframe ############
df = pd.merge(dataframe,dataframe2,on='empid',how='inner')
############ View the dataframe ############
df.head()
#### Storing DF as CSV ############
df.to_csv('Sample_df.csv')
############ Reading a CSV ############
sample_df = pd.read_csv("Sample_df.csv")
############ Duplicate Indexes are created if index is not mentioned while Saving ############
sample_df.head()
############ Keeping default Index ############
# We can create different column as your index by mentioning the column name as index='empid'
df.to_csv('Sample_df.csv',index=None)
############ Reading a CSV ############
sample_df = pd.read_csv("Sample_df.csv")
sample_df.head()
############ Fetching record based on EMP ID ############
sample_df[sample_df["empid"]=='E1']
############ Filtering records based on Salary ############
sample_df[sample_df["Salary"]>=25000] #sample_df[sample_df.Salary>10000]
############ FIltering using String values of the Columns ############
sample_df[sample_df.position.str.contains('junior',case=False)]
"""
Explanation: Pandas and ThinkStat
End of explanation
"""
### Social Network Ads
social_network = pd.read_csv("Social_Network_Ads.csv")
social_network.head()
social_network.shape
"""
Explanation: Valuable Functions in Pandas
End of explanation
"""
social_network["Gender"].value_counts()
"""
Explanation: value_counts() - The Series class provides a method, value_counts, that counts the number of times each value appears.
End of explanation
"""
social_network.isnull().sum()
"""
Explanation: isnull - It finds the number of values in each column with value as null.
End of explanation
"""
social_network["Age"].value_counts().sort_index()
"""
Explanation: sort_index() - It sorts the Series by index, so the values appear in order.
End of explanation
"""
social_network["EstimatedSalary"].value_counts(sort=False)
"""
Explanation: value_counts(sort=False) - It counts the number of times each value appears with least frequent value on top in an increasing order.
End of explanation
"""
social_network["EstimatedSalary"].describe()
"""
Explanation: describe() - It gives the basic statistical metrics like mean etc.
End of explanation
"""
social_network.loc[social_network["EstimatedSalary"] > 140000, "EstimatedSalary"] = np.nan
social_network["EstimatedSalary"].describe()
"""
Explanation: If we consider salaries above 140,000 to be errors or outliers, we can eliminate them by replacing them with np.nan.
The attribute loc provides several ways to select rows and columns from a DataFrame. In this example, the first expression in brackets is the row indexer; the second expression selects the column.
The expression social_network["EstimatedSalary"] > 140000 yields a Series of type bool, where True indicates that the condition is true. When a boolean Series is used as an index, it selects only the elements that satisfy the condition.
End of explanation
"""
#Convert series into a list
age = list(social_network["Age"])
# Create dict format for age, it helps in building histogram easily.
# The result is a dictionary that maps from values to frequencies.
hist = {}
for a in age:
hist[a] = hist.get(a,0) + 1
#Same as dict format above.
counter = Counter(age)
#The result is a Counter object, which is a subclass of dictionary.
#To loop through the values in order, you can use the built-in function sorted:
for val in sorted(counter):
print(val, counter[val])
# Use items() to iterate over the dict/counter.
for value, freq in counter.items():
print(value, freq)
"""
Explanation: Histogram
One of the best ways to describe a variable is to report the values that appear in the dataset and how many times each value appears. This description is called the distribution of the variable.
The most common representation of a distribution is a histogram, which is a graph that shows the frequency of each value. In this context, “frequency” means the number of times the value appears.
End of explanation
"""
plt.hist(age)
plt.xlabel("Age")
plt.ylabel("Freq")
purchased_customer = social_network[social_network["Purchased"]==1]
plt.hist(purchased_customer["Age"])
plt.xlabel("Age")
plt.ylabel("Freq")
social_network.head()
no_purchase = social_network[social_network["Purchased"]==0]
no_purchase.Age.mean()
purchased_customer.Age.mean()
"""
Explanation: Plotting
End of explanation
"""
print("Variance: ",purchased_customer.Age.var())
print("Standard Deviation: ",purchased_customer.Age.std())
print("Variance: ",no_purchase.Age.var())
print("Standard Deviation:",no_purchase.Age.std())
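# A quick numerical check (added for clarity) of the pumpkin example in the notes: the mean of the
# six weights is 100 pounds even though no single pumpkin weighs anything close to that.
pumpkin_weights = [1, 1, 1, 3, 3, 591]
print(np.mean(pumpkin_weights)) # -> 100.0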
"""
Explanation: Some of the characteristics we might want to report are:
central tendency: Do the values tend to cluster around a particular point?
modes: Is there more than one cluster?
spread: How much variability is there in the values?
tails: How quickly do the probabilities drop off as we move away from the modes?
outliers: Are there extreme values far from the modes?
Why and Why not "Mean" can be used for central tendency
Sometimes the mean is a good description of a set of values. For example, apples are all pretty much the same size (at least the ones sold in supermarkets). So if I buy 6 apples and the total weight is 3 pounds, it would be a reasonable summary to say they are about a half pound each.
But pumpkins are more diverse. Suppose I grow several varieties in my garden, and one day I harvest three decorative pumpkins that are 1 pound each, two pie pumpkins that are 3 pounds each, and one Atlantic Giant® pumpkin that weighs 591 pounds. The mean of this sample is 100 pounds, but if I told you “The average pumpkin in my garden is 100 pounds,” that would be misleading. In this example, there is no meaningful average because there is no typical pumpkin.
End of explanation
"""
purchased_customer.Age.mean() - no_purchase.Age.mean()
purchased_customer["EstimatedSalary"].mean() - no_purchase["EstimatedSalary"].mean()
def CohenEffectSize(group1, group2):
diff = group1.mean() - group2.mean()
var1 = group1.var()
var2 = group2.var()
n1, n2 = len(group1), len(group2)
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
CohenEffectSize(purchased_customer.Age, no_purchase.Age)
# Effect size: the mean age difference (about 13.6 years) expressed as a percentage of the maximum age
13.6/max(social_network["Age"])*100
"""
Explanation: Effect Size
An effect size is a summary statistic intended to describe (wait for it) the size of an effect. For example, to describe the difference between two groups, one obvious choice is the difference in the means.
https://en.wikipedia.org/wiki/Effect_size
End of explanation
"""
pmf_age_purchased = {}
for age in purchased_customer["Age"].value_counts().index:
pmf_age_purchased[age] = purchased_customer[purchased_customer["Age"]==age]["Age"].count() / purchased_customer["Age"].shape[0]
#The Pmf is normalized so total probability is 1.
sum(list(pmf_age_purchased.values()))
"""
Explanation: PMF - Probability Mass Function
Purchased
End of explanation
"""
pmf_age_no_purchased = {}
for age in no_purchase["Age"].value_counts().index:
pmf_age_no_purchased[age] = no_purchase[no_purchase["Age"]==age]["Age"].count() / no_purchase["Age"].shape[0]
sum(list(pmf_age_no_purchased.values()))
"""
Explanation: Not Purchased
End of explanation
"""
plt.bar(pmf_age_no_purchased.keys(), pmf_age_no_purchased.values())
plt.bar(pmf_age_purchased.keys(), pmf_age_purchased.values())
#27-41
ages = range(27, 41)
diffs = []
for age in ages:
p1 = pmf_age_purchased[age]
p2 = pmf_age_no_purchased[age]
diff = 100 * (p1 - p2)
diffs.append(diff)
plt.bar(ages, diffs)
"""
Explanation: Difference between Hist and PMF
The biggest difference is that a Hist maps from values to integer counters; a Pmf maps from values to floating-point probabilities.
End of explanation
"""
### Create Dataframe from array
array = np.random.randn(4, 2)
df = pd.DataFrame(array)
df
columns = ['A', 'B']
df = pd.DataFrame(array, columns=columns)
index = ['a', 'b', 'c', 'd']
df = pd.DataFrame(array, columns=columns, index=index)
df
"""
Explanation: Dataframe Indexing
End of explanation
"""
df.loc['a']
"""
Explanation: To select a row by label, you can use the loc attribute, which returns a Series
End of explanation
"""
df.iloc[0]
indices = ['a', 'c']
df.loc[indices]
df['a':'c']
df[0:2]
"""
Explanation: If the integer position of a row is known, rather than its label, you can use the iloc attribute, which also returns a Series.
End of explanation
"""
def PercentileRank(scores, your_score):
count = 0
for score in scores:
if score <= your_score:
count += 1
percentile_rank = 100.0 * count / len(scores)
return percentile_rank
social_network.dropna(inplace=True)
salary = list(social_network["EstimatedSalary"])
my_sal = 100000
PercentileRank(salary, my_sal)
def Percentile(scores, percentile_rank):
scores.sort()
for score in scores:
if PercentileRank(scores, score) >= percentile_rank:
return score
Percentile(salary, 50)
def Percentile2(scores, percentile_rank):
scores.sort()
index = percentile_rank * (len(scores)-1) // 100
return scores[index]
Percentile2(salary, 50)
"""
Explanation: Above the result in either case is a DataFrame, but notice that the first result includes the end of the slice; the second doesn’t.
Limits of PMFs
PMFs work well if the number of values is small. But as the number of values increases, the probability associated with each value gets smaller and the effect of random noise increases.
For example, if we are interested in the distribution of age.
The parts of this figure are hard to interpret. There are many spikes and valleys, and some apparent differences between the distributions. It is hard to tell which of these features are meaningful. Also, it is hard to see overall patterns; for example, which distribution do you think has the higher mean?
These problems can be mitigated by binning the data; that is, dividing the range of values into non-overlapping intervals and counting the number of values in each bin. Binning can be useful, but it is tricky to get the size of the bins right. If they are big enough to smooth out noise, they might also smooth out useful information.
An alternative that avoids these problems is the cumulative distribution function (CDF), which is the subject of this chapter. But before I can explain CDFs, I have to explain percentiles.
Cumulative Distribution Function
The difference between “percentile” and “percentile rank” can be confusing, and people do not always use the terms precisely. To summarize, PercentileRank takes a value and computes its percentile rank in a set of values; Percentile takes a percentile rank and computes the corresponding value.
End of explanation
"""
# This function is almost identical to PercentileRank, except that the result is a probability in the range 0–1
# rather than a percentile rank in the range 0–100.
def EvalCdf(sample, x):
count = 0.0
for value in sample:
if value <= x:
count += 1
prob = count / len(sample)
return prob
sample = [1, 2, 2, 3, 5]
EvalCdf(sample, 2)
EvalCdf(sample, 3)
EvalCdf(sample, 0)
no_purchase_prob = []
for age in sorted(no_purchase["Age"]):
no_purchase_prob.append(EvalCdf(no_purchase["Age"], age))
purchase_prob = []
for age in sorted(purchased_customer["Age"]):
purchase_prob.append(EvalCdf(purchased_customer["Age"], age))
def step_plot(values, probabilities, xlabel, ylabel = "CDF probability"):
plt.step(values, probabilities)
plt.grid()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
"""
Explanation: CDF
The CDF is the function that maps from a value to its percentile rank.
The CDF is a function of x, where x is any value that might appear in the distribution. To evaluate CDF(x) for a particular value of x, we compute the fraction of values in the distribution less than or equal to x.
End of explanation
"""
step_plot(sorted(no_purchase["Age"]), no_purchase_prob, "Age")
"""
Explanation: One way to read a CDF is to look up percentiles. For example, among the customers who didn't make a purchase, about 90% are aged less than 40 years. The CDF also provides a visual representation of the shape of the distribution. Common values appear as steep or vertical sections of the CDF; in this example, the mode at 35 years is apparent.
End of explanation
"""
step_plot(sorted(purchased_customer["Age"]), purchase_prob, "Age")
"""
Explanation: Among the customers who made a purchase, only about 30% are aged less than 40 years; the remaining 70% are aged above 40.
End of explanation
"""
no_purchase_prob = []
for sal in sorted(no_purchase["EstimatedSalary"]):
no_purchase_prob.append(EvalCdf(no_purchase["EstimatedSalary"], sal))
purchase_prob = []
for sal in sorted(purchased_customer["EstimatedSalary"]):
purchase_prob.append(EvalCdf(purchased_customer["EstimatedSalary"], sal))
"""
Explanation: Estimated Salary (Purchase vs No Purchase)
End of explanation
"""
step_plot(sorted(no_purchase["EstimatedSalary"]), no_purchase_prob, "Estimated Salary")
step_plot(sorted(purchased_customer["EstimatedSalary"]), purchase_prob, "Estimated Salary")
"""
Explanation: Under the no-purchase curve (blue), the CDF stays nearly flat after 90K, with only minor blips beyond that. Under the purchase curve (orange), the steps keep increasing even after 90K, which suggests that people with higher salaries have more purchasing power; among purchasers, everyone above the 50th percentile earns about 90K or more.
End of explanation
"""
Percentile(list(purchased_customer["EstimatedSalary"]),75)
"""
Explanation: Quantiles: https://en.wikipedia.org/wiki/Quantile
End of explanation
"""
def PositionToPercentile(position, field_size):
beat = field_size - position + 1
percentile = 100.0 * beat / field_size
return percentile
"""
Explanation: Percentile ranks are useful for comparing measurements across different groups. For example, people who compete in foot races are usually grouped by age and gender. To compare people in different age groups, you can convert race times to percentile ranks.
A few years ago I ran the James Joyce Ramble 10K in Dedham MA; I finished in 42:44, which was 97th in a field of 1633. I beat or tied 1537 runners out of 1633, so my percentile rank in the field is 94%.
More generally, given position and field size, we can compute percentile rank:
End of explanation
"""
def PercentileToPosition(percentile, field_size):
beat = percentile * field_size / 100.0
position = field_size - beat + 1
return position
"""
Explanation: In my age group, denoted M4049 for “male between 40 and 49 years of age”, I came in 26th out of 256. So my percentile rank in my age group was 90%.
If I am still running in 10 years (and I hope I am), I will be in the M5059 division. Assuming that my percentile rank in my division is the same, how much slower should I expect to be?
I can answer that question by converting my percentile rank in M4049 to a position in M5059. Here’s the code:
End of explanation
"""
babyboom = pd.read_csv('babyboom.dat',sep=" ", header=None)
babyboom.columns = ["time", "gender", "weight", "minutes"]
diffs = list(babyboom.minutes.diff())
e_cdf = []
l = 0.5
def exponential_distribution(x):
e_cdf.append(1 - math.exp(-1* l * x))
"""
Explanation: There were 171 people in M5059, so I would have to come in between 17th and 18th place to have the same percentile rank. The finishing time of the 17th runner in M5059 was 46:05, so that’s the time I will have to beat to maintain my percentile rank.
Modeling Distributions
Exponential Distribution
The CDF of the exponential distribution is
CDF(x) = 1 − e^(−λx)
The parameter, λ, determines the shape of the distribution. In the real world, exponential distributions come up when we look at a series of events and measure the times between events, called interarrival times. If the events are equally likely to occur at any time, the distribution of interarrival times tends to look like an exponential distribution.
End of explanation
"""
def EvalNormalCdf(x, mu=0, sigma=1):
return stat.norm.cdf(x, loc=mu, scale=sigma)
mu = social_network["Age"].mean()
sigma = social_network["Age"].std()
step_plot(sorted(social_network["Age"]),EvalNormalCdf(sorted(social_network["Age"]), mu=mu, sigma=sigma), "Age")
"""
Explanation: Normal Distribution
The normal distribution, also called Gaussian, is commonly used because it describes many phenomena, at least approximately. It turns out that there is a good reason for its ubiquity.
The normal distribution is characterized by two parameters: the mean, μ, and standard deviation σ. The normal distribution with μ = 0 and σ = 1 is called the standard normal distribution. Its CDF is defined by an integral that does not have a closed form solution, but there are algorithms that evaluate it efficiently.
End of explanation
"""
Image('PDF.png')
"""
Explanation: Pareto Distribution https://en.wikipedia.org/wiki/Pareto_distribution
Preferential Attachment https://en.wikipedia.org/wiki/Preferential_attachment
Probability Distribution Function
The derivative of CDF is PDF
End of explanation
"""
Xs = sorted(social_network["Age"])
mean, std = social_network["Age"].mean(), social_network["Age"].std()
PDF = stat.norm.pdf(Xs, mean, std)
step_plot(Xs, PDF, "Age", ylabel="Density")
"""
Explanation: Evaluating a PDF for a particular value of x is usually not useful. The result
is not a probability; it is a probability density.
In physics, density is mass per unit of volume; in order to get a mass, you have to multiply by volume or, if the density is not constant, you have to integrate over volume.
Similarly, probability density measures probability per unit of x. In order to get a probability mass, you have to integrate over x.
End of explanation
"""
sample = [random.gauss(mean, std) for i in range(500)]
Kernel_density_estimate = stat.gaussian_kde(sample)
sample_pdf = Kernel_density_estimate.evaluate(sorted(social_network["Age"]))
step_plot(Xs, PDF, "Age", ylabel="Density")
step_plot(Xs, sample_pdf, "Age", ylabel="Density")
"""
Explanation: Kernel Density Estimation
Kernel density estimation (KDE) is an algorithm that takes a sample and finds an appropriately smooth PDF that fits the data.
https://en.wikipedia.org/wiki/Kernel_density_estimation
End of explanation
"""
Image('distributions.png')
"""
Explanation: Estimating a density function with KDE is useful for several purposes:
Visualization: During the exploration phase of a project, CDFs are usually the best visualization of a distribution. After you look at a CDF, you can decide whether an estimated PDF is an appropriate model of the distribution. If so, it can be a better choice for presenting the distribution to an audience that is unfamiliar with CDFs.
Interpolation: An estimated PDF is a way to get from a sample to a model of the population. If you have reason to believe that the population distribution is smooth, you can use KDE to interpolate the density for values that don’t appear in the sample.
Simulation: Simulations are often based on the distribution of a sample. If the sample size is small, it might be appropriate to smooth the sample distribution using KDE, which allows the simulation to explore more possible outcomes, rather than replicating the observed data.
The distribution framework
We started with PMFs, which represent the probabilities for a discrete set of values. To get from a PMF to a CDF, you
add up the probability masses to get cumulative probabilities. To get from a CDF back to a PMF, you compute differences in cumulative probabilities.
A PDF is the derivative of a continuous CDF; or, equivalently, a CDF is the integral of a PDF. Remember that a PDF maps from values to probability densities; to get a probability, you have to integrate.
To get from a discrete to a continuous distribution, you can perform various kinds of smoothing. One form of smoothing is to assume that the data come from an analytic continuous distribution (like exponential or normal) and to estimate the parameters of that distribution. Another option is kernel density estimation.
The opposite of smoothing is discretizing, or quantizing. If you evaluate a PDF at discrete points, you can generate a PMF that is an approximation of the PDF. You can get a better approximation using numerical integration.
To distinguish between continuous and discrete CDFs, it might be better for a discrete CDF to be a “cumulative mass function,” but as far as I can tell no one uses that term.
End of explanation
"""
mall_customer = pd.read_csv("Mall_Customers.csv")
mall_customer.isnull().sum()
"""
Explanation: Pmf and Hist are almost the same thing, except that a Pmf maps values to floating-point probabilities, rather than integer frequencies. If the sum of the probabilities is 1, the Pmf is normalized. Pmf provides Normalize, which computes the sum of the probabilities and divides through by a factor
https://en.wikipedia.org/wiki/Moment_of_inertia
Skewness
Skewness is a property that describes the shape of a distribution. If the distribution is symmetric around its central tendency, it is unskewed. If the values extend farther to the right, it is “right skewed” and if the values extend left, it is “left skewed.”
This use of “skewed” does not have the usual connotation of “biased.” Skewness only describes the shape of the distribution; it says nothing about whether the sampling process might have been biased.
A way to evaluate the asymmetry of a distribution is to look at the relationship between the mean and median. Extreme values have more effect on the mean than the median, so in a distribution that skews left, the mean is less than the median. In a distribution that skews right, the mean is greater.
Pearson’s median skewness coefficient is a measure of skewness based on the difference between the sample mean and median:
$g_p = 3(\bar{x} - m)/S$
where $\bar{x}$ is the sample mean, m is the median, and S is the standard deviation. A short sketch of this computation appears in the code cell after this explanation.
The sign of the skewness coefficient indicates whether the distribution skews left or right, but other than that, it is hard to interpret. Sample skewness is less robust; that is, it is more susceptible to outliers. As a result it is less reliable when applied to skewed distributions, exactly when it would be most relevant.
Pearson’s median skewness is based on a computed mean and variance, so it is also susceptible to outliers, but since it does not depend on a third moment, it is somewhat more robust.
Relationship between two variables
Two variables are related if knowing one gives you information about the other. For example, height and weight are related; people who are taller tend to be heavier. Of course, it is not a perfect relationship: there are short heavy people and tall light ones. But if you are trying to guess someone’s weight, you will be more accurate if you know their height than if you don’t.
Scatter Plot
End of explanation
"""
plt.scatter(mall_customer["Age"], mall_customer["Annual Income (k$)"],alpha=0.2)
plt.grid()
plt.ylabel("Annual Income")
plt.xlabel("Age")
"""
Explanation: Overlapping data points look darker, so darkness is proportional to density. In this version of the plot we can see details that were not apparent before: vertical clusters of points at an annual income of about 57k$.
Jittering (adding a small amount of random noise before plotting) is one way to reduce overplotting; a small sketch follows this cell. See https://blogs.sas.com/content/iml/2011/07/05/jittering-to-prevent-overplotting-in-statistical-graphics.html
End of explanation
"""
mall_customer.Age.describe()
"""
Explanation: HexBin for large Dataset
To handle larger datasets, another option is a hexbin plot, which divides the graph into hexagonal bins and colors each bin according to how many data points fall in it. An advantage of a hexbin is that it shows the shape of the relationship well, and it is efficient for large datasets, both in time and in the size of the file it generates. A drawback is that it makes the outliers invisible. A small hexbin sketch follows this cell.
Characterizing the Relationship
Scatter plots provide a general impression of the relationship between variables, but there are other visualizations that provide more insight into the nature of the relationship. One option is to bin one variable and plot percentiles of the other.
End of explanation
"""
bins = np.arange(18, 75, 5)
indices = np.digitize(mall_customer.Age, bins)
"""
Explanation: np.digitize computes the index of the bin that contains each value in mall_customer.Age. The result is a NumPy array of integer indices. Values that fall below the lowest bin are mapped to index 0. Values above the highest bin are mapped to len(bins).
End of explanation
"""
groups = mall_customer.groupby(indices)
"""
Explanation: groupby is a DataFrame method that returns a GroupBy object; used in a for loop, groups iterates the names of the groups and the DataFrames that represent them.
End of explanation
"""
for i, group in groups:
print(i, len(group))
ages = [group.Age.mean() for i, group in groups]
#heights
cdf_group_income = defaultdict(list)
for i, grp in groups:
for income in grp["Annual Income (k$)"]:
cdf_group_income[i].append(EvalCdf(grp["Annual Income (k$)"], income))
for percent in [75, 50, 25]:
    # Percentiles of the income values themselves, one point per age bin.
    incomes = [Percentile(list(grp["Annual Income (k$)"]), percent) for i, grp in groups]
    label = '%dth' % percent
    plt.plot(ages, incomes, label=label)
plt.xlabel("Age")
plt.ylabel("Annual Income")
plt.legend()
"""
Explanation: So, for example, we can print the number of rows in each group like this:
End of explanation
"""
def Cov(xs, ys, meanx=None, meany=None):
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
"""
Explanation: Correlation
A correlation is a statistic intended to quantify the strength of the relationship between two variables.
A challenge in measuring correlation is that the variables we want to compare are often not expressed in the same units. And even if they are in the same units, they come from different distributions.
There are two common solutions to these problems:
1. Transform each value to a standard score, which is the number of standard deviations from the mean. This transform leads to the “Pearson product-moment correlation coefficient.”
2. Transform each value to its rank, which is its index in the sorted list of values. This transform leads to the “Spearman rank correlation coefficient.”
If X is a series of n values, xi, we can convert to standard scores by subtracting the mean and dividing by the standard deviation: zi = (xi − μ)/σ.
The numerator is a deviation: the distance from the mean. Dividing by σ standardizes the deviation, so the values of Z are dimensionless (no units) and their distribution has mean 0 and variance 1.
If X is normally distributed, so is Z. But if X is skewed or has outliers, so does Z; in those cases, it is more robust to use percentile ranks. If we compute a new variable, R, so that ri is the rank of xi, the distribution of R is uniform from 1 to n, regardless of the distribution of X.
Covariance
Covariance is a measure of the tendency of two variables to vary together.
If we have two series, X and Y, their deviations from the mean are
$dx_i = x_i - \bar{x}$
$dy_i = y_i - \bar{y}$
where $\bar{x}$ is the sample mean of X and $\bar{y}$ is the sample mean of Y. If X and Y vary together, their deviations tend to have the same sign.
If we multiply them together, the product is positive when the deviations have the same sign and negative when they have the opposite sign. So adding up the products gives a measure of the tendency to vary together.
Covariance is the mean of these products:
$Cov(X,Y) = \frac{1}{n} \sum_i dx_i \, dy_i$
where n is the length of the two series (they have to be the same length).
If you have studied linear algebra, you might recognize that Cov is the dot product of the deviations, divided by their length. So the covariance is maximized if the two vectors are identical, 0 if they are orthogonal, and negative if they point in opposite directions.
End of explanation
"""
def Corr(xs, ys):
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = np.mean(xs), np.var(xs)
meany, vary = np.mean(ys), np.var(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
"""
Explanation: By default Cov computes deviations from the sample means, or you can provide known means. If xs and ys are Python sequences, np.asarray converts them to NumPy arrays. If they are already NumPy arrays, np.asarray does nothing.
This implementation of covariance is meant to be simple for purposes of explanation. NumPy and pandas also provide implementations of covariance, but both of them apply a correction for small sample sizes that we have not covered yet, and np.cov returns a covariance matrix, which is more than we need for now.
Pearson Correlation
Covariance is useful in some computations, but it is seldom reported as a summary statistic because it is hard to interpret. Among other problems, its units are the product of the units of X and Y .
One solution to this problem is to divide the deviations by the standard deviations, which yields standard scores, and compute the product of standard scores:
$p_i = \frac{(x_i - \bar{x})(y_i - \bar{y})}{S_X S_Y}$
where $S_X$ and $S_Y$ are the standard deviations of X and Y. The mean of these products is
$\rho = \frac{1}{n} \sum_i p_i$
Or we can rewrite ρ by factoring out $S_X$ and $S_Y$:
$\rho = \frac{Cov(X,Y)}{S_X S_Y}$
This value is called Pearson’s correlation after Karl Pearson, an influential early statistician. It is easy to compute and easy to interpret. Because standard scores are dimensionless, so is ρ.
End of explanation
"""
def SpearmanCorr(xs, ys):
xranks = pd.Series(xs).rank()
yranks = pd.Series(ys).rank()
return Corr(xranks, yranks)
"""
Explanation: Here the means and variances are computed with separate calls to np.mean and np.var (a combined MeanVar helper could compute both slightly more efficiently).
Pearson’s correlation is always between -1 and +1 (including both). If ρ is positive, we say that the correlation is positive, which means that when one variable is high, the other tends to be high. If ρ is negative, the correlation is negative, so when one variable is high, the other is low.
The magnitude of ρ indicates the strength of the correlation. If ρ is 1 or -1, the variables are perfectly correlated, which means that if you know one, you can make a perfect prediction about the other.
Most correlation in the real world is not perfect, but it is still useful. The correlation of height and weight is 0.51, which is a strong correlation compared to similar human-related variables.
Nonlinear Relationship
If Pearson’s correlation is near 0, it is tempting to conclude that there is no relationship between the variables, but that conclusion is not valid. Pearson’s correlation only measures linear relationships. If there’s a nonlinear relationship, ρ understates its strength.
https://wikipedia.org/wiki/Correlation_and_dependence
Look at a scatter plot of your data before blindly computing a correlation coefficient.
Spearman’s rank correlation
Pearson’s correlation works well if the relationship between variables is linear and if the variables are roughly normal. But it is not robust in the presence of outliers. Spearman’s rank correlation is an alternative that mitigates the effect of outliers and skewed distributions.
To compute Spearman’s correlation, we have to compute the rank of each value, which is its index in the sorted sample. For example, in the sample [1, 2, 5, 7] the rank of the value 5 is 3, because it appears third in the sorted list. Then we compute Pearson’s correlation for the ranks.
End of explanation
"""
def SpearmanCorr(xs, ys):
xs = pd.Series(xs)
ys = pd.Series(ys)
return xs.corr(ys, method='spearman')
SpearmanCorr(mall_customer["Age"], mall_customer["Annual Income (k$)"])
SpearmanCorr(mall_customer["Annual Income (k$)"], mall_customer["Spending Score (1-100)"])
SpearmanCorr(mall_customer["Age"], mall_customer["Spending Score (1-100)"])
SpearmanCorr(social_network["Age"], social_network["EstimatedSalary"])
Corr(social_network["Age"], social_network["EstimatedSalary"])
"""
Explanation: I convert the arguments to pandas Series objects so I can use rank, which computes the rank for each value and returns a Series. Then I use Corr to compute the correlation of the ranks.
I could also use Series.corr directly and specify Spearman’s method:
End of explanation
"""
def RMSE(estimates, actual):
e2 = [(estimate-actual)**2 for estimate in estimates]
mse = np.mean(e2)
return math.sqrt(mse)
def Estimate1(n=7, m=1000):
mu = 0
sigma = 1
means = []
medians = []
for _ in range(m):
xs = [random.gauss(mu, sigma) for i in range(n)]
xbar = np.mean(xs)
median = np.median(xs)
means.append(xbar)
medians.append(median)
print('rmse xbar', RMSE(means, mu))
print('rmse median', RMSE(medians, mu))
Estimate1()
"""
Explanation: The Spearman rank correlation for the BRFSS data is 0.54, a little higher than the Pearson correlation, 0.51. There are several possible reasons for the difference, including:
If the relationship is nonlinear, Pearson’s correlation tends to underestimate the strength of the relationship, and
Pearson’s correlation can be affected (in either direction) if one of the distributions is skewed or contains outliers. Spearman’s rank correlation is more robust.
Correlation and causation
If variables A and B are correlated, there are three possible explanations: A causes B, or B causes A, or some other set of factors causes both A and B. These explanations are called “causal relationships”.
Correlation alone does not distinguish between these explanations, so it does not tell you which ones are true. This rule is often summarized with the phrase “Correlation does not imply causation,” which is so pithy it has its own Wikipedia page: http://wikipedia.org/wiki/Correlation_does_not_imply_causation.
So what can you do to provide evidence of causation?
1. Use time. If A comes before B, then A can cause B but not the other way around (at least according to our common understanding of causation). The order of events can help us infer the direction of causation, but it does not preclude the possibility that something else causes both A and B.
2. Use randomness. If you divide a large sample into two groups at random and compute the means of almost any variable, you expect the difference to be small. If the groups are nearly identical in all variables but one, you can eliminate spurious relationships.
This works even if you don’t know what the relevant variables are, but it works even better if you do, because you can check that the groups are identical.
These ideas are the motivation for the randomized controlled trial, in which subjects are assigned randomly to two (or more) groups: a treatment group that receives some kind of intervention, like a new medicine, and a control group that receives no intervention, or another treatment whose effects are known.
A randomized controlled trial is the most reliable way to demonstrate a causal relationship, and the foundation of science-based medicine (see http://wikipedia.org/wiki/Randomized_controlled_trial).
Unfortunately, controlled trials are only possible in the laboratory sciences, medicine, and a few other disciplines.
In the social sciences, controlled experiments are rare, usually because they are impossible or unethical.
An alternative is to look for a natural experiment, where different “treatments” are applied to groups that are otherwise similar. One danger of natural experiments is that the groups might differ in ways that are not apparent. You can read more about this topic at http://wikipedia.org/wiki/Natural_experiment.
Estimation
Let’s play a game. I think of a distribution, and you have to guess what it is. I’ll give you two hints: it’s a normal distribution, and here’s a random sample drawn from it:
[-0.441, 1.774, -0.101, -1.138, 2.975, -2.138]
What do you think is the mean parameter, μ, of this distribution?
One choice is to use the sample mean, x̄, as an estimate of μ. In this example, x̄ is 0.155, so it would be reasonable to guess μ = 0.155. This process is called estimation, and the statistic we used (the sample mean) is called an estimator.
Using the sample mean to estimate μ is so obvious that it is hard to imagine a reasonable alternative. But suppose we change the game by introducing outliers.
Estimation if Outlier exists
I’m thinking of a distribution. It’s a normal distribution, and here’s a sample that was collected by an unreliable surveyor who occasionally puts the decimal point in the wrong place.
[-0.441, 1.774, -0.101, -1.138, 2.975, -213.8]
Now what’s your estimate of μ? If you use the sample mean, your guess is -35.12. Is that the best choice? What are the alternatives?
One option is to identify and discard outliers, then compute the sample mean of the rest. Another option is to use the median as an estimator.
Which estimator is best depends on the circumstances (for example, whether there are outliers) and on what the goal is. Are you trying to minimize errors, or maximize your chance of getting the right answer?
If there are no outliers, the sample mean minimizes the mean squared error (MSE).
That is, if we play the game many times, and each time compute the error $\bar{x} - \mu$, the sample mean minimizes
$MSE = \frac{1}{m} \sum (\bar{x} - \mu)^2$
where m is the number of times you play the estimation game, not to be confused with n, which is the size of the sample used to compute x̄.
Here is a function that simulates the estimation game and computes the root mean squared error (RMSE), which is the square root of MSE:
End of explanation
"""
Image('unbiased_estimator.png')
"""
Explanation: estimates is a list of estimates; actual is the actual value being estimated. In practice, of course, we don’t know actual; if we did, we wouldn’t have to estimate it. The purpose of this experiment is to compare the performance of the two estimators.
When I ran this code, the RMSE of the sample mean was 0.38, which means that if we use x̄ to estimate the mean of this distribution, based on a sample with n = 7, we should expect to be off by 0.38 on average. Using the median to estimate the mean yields RMSE 0.45, which confirms that x̄ yields lower RMSE, at least for this example.
Minimizing MSE is a nice property, but it’s not always the best strategy. For example, suppose we are estimating the distribution of wind speeds at a building site. If the estimate is too high, we might overbuild the structure, increasing its cost. But if it’s too low, the building might collapse. Because cost as a function of error is not symmetric, minimizing MSE is not the best strategy.
As another example, suppose I roll three six-sided dice and ask you to predict the total. If you get it exactly right, you get a prize; otherwise you get nothing. In this case the value that minimizes MSE is 10.5, but that would be a bad guess, because the total of three dice is never 10.5. For this game, you want an estimator that has the highest chance of being right, which is a maximum likelihood estimator (MLE). If you pick 10 or 11, your chance of winning is 1 in 8, and that’s the best you can do.
Estimate Variance
I’m thinking of a distribution. It’s a normal distribution, and here’s a (familiar) sample:
[-0.441, 1.774, -0.101, -1.138, 2.975, -2.138]
What do you think is the variance, σ2, of my distribution? Again, the obvious choice is to use the sample variance, S^2, as an estimator.
$S^2 = \frac{1}{n} \sum_i (x_i - \bar{x})^2$
For large samples, S^2 is an adequate estimator, but for small samples it tends to be too low. Because of this unfortunate property, it is called a biased estimator. An estimator is unbiased if the expected total (or mean) error, after many iterations of the estimation game, is 0.
Fortunately, there is another simple statistic that is an unbiased estimator of σ2:
End of explanation
"""
def Estimate2(n=7, m=1000):
mu = 0
sigma = 1
estimates1 = []
estimates2 = []
for _ in range(m):
xs = [random.gauss(mu, sigma) for i in range(n)]
biased = np.var(xs)
unbiased = np.var(xs, ddof=1)
estimates1.append(biased)
estimates2.append(unbiased)
print('mean error biased', MeanError(estimates1, sigma**2))
print('mean error unbiased', MeanError(estimates2, sigma**2))
"""
Explanation: For an explanation of why S^2 is biased, and a proof that (Sn−1)^2 is unbiased, http://wikipedia.org/wiki/Bias_of_an_estimator.
The biggest problem with this estimator is that its name and symbol are used inconsistently. The name “sample variance” can refer to either S^2 or (Sn−1)^2, and the symbol S^2 is used for either or both.
Here is a function that simulates the estimation game and tests the performance of S^2 and (Sn−1)^2:
End of explanation
"""
def MeanError(estimates, actual):
errors = [estimate-actual for estimate in estimates]
return np.mean(errors)
"""
Explanation: Again, n is the sample size and m is the number of times we play the game. np.var computes S^2 by default and (Sn−1)^2 if you provide the argument ddof=1, which stands for “delta degrees of freedom.”
DOF: http://en.wikipedia.org/wiki/Degrees_of_freedom_(statistics).
Mean Error
MeanError computes the mean difference between the estimates and the actual value:
End of explanation
"""
Estimate2()
"""
Explanation: When I ran this code, the mean error for S^2 was -0.13. As expected, this biased estimator tends to be too low. For (Sn−1)^2, the mean error was 0.014, about 10 times smaller. As m increases, we expect the mean error for (Sn−1)^2 to approach 0.
Properties like MSE and bias are long-term expectations based on many iterations of the estimation game.
But when you apply an estimator to real data, you just get one estimate. It would not be meaningful to say that the estimate is unbiased; being unbiased is a property of the estimator, not the estimate.
After you choose an estimator with appropriate properties, and use it to generate an estimate, the next step is to characterize the uncertainty of the estimate.
End of explanation
"""
def SimulateSample(mu=90, sigma=7.5, n=9, m=1000):
means = []
for j in range(m):
xs = np.random.normal(mu, sigma, n)
xbar = np.mean(xs)
means.append(xbar)
return sorted(means)
"""
Explanation: Sampling Distributions
Suppose you are a scientist studying gorillas in a wildlife preserve. You want to know the average weight of the adult female gorillas in the preserve. To weigh them, you have to tranquilize them, which is dangerous, expensive, and possibly harmful to the gorillas. But if it is important to obtain this information, it might be acceptable to weigh a sample of 9 gorillas. Let’s assume that the population of the preserve is well known, so we can choose a representative sample of adult females. We could use the sample mean, x̄, to estimate the unknown population mean, μ.
Having weighed 9 female gorillas, you might find x̄ = 90 kg and sample standard deviation, S = 7.5 kg. The sample mean is an unbiased estimator of μ, and in the long run it minimizes MSE. So if you report a single estimate that summarizes the results, you would report 90 kg.
But how confident should you be in this estimate? If you only weigh n = 9 gorillas out of a much larger population, you might be unlucky and choose the 9 heaviest gorillas (or the 9 lightest ones) just by chance. Variation in the estimate caused by random selection is called sampling error.
Sampling Error
To quantify sampling error, we can simulate the sampling process with hypothetical values of μ and σ, and see how much x ̄ varies.
Since we don’t know the actual values of μ and σ in the population, we’ll use the estimates x̄ and S. So the question we answer is: “If the actual values of μ and σ were 90 kg and 7.5 kg, and we ran the same experiment many times, how much would the estimated mean, x̄, vary?”
End of explanation
"""
means = SimulateSample()
cdfs = [EvalCdf(means,m) for m in means]
plt.step(sorted(means),cdfs)
ci_5 = Percentile(means, 5)
ci_95 = Percentile(means, 95)
print(ci_5, ci_95)
stderr = RMSE(means, 90)
stderr
"""
Explanation: mu and sigma are the hypothetical values of the parameters. n is the sample size, the number of gorillas we measured. m is the number of times we run the simulation.
End of explanation
"""
def Estimate3(n=7, m=1000):
lam = 2
means = []
medians = []
for _ in range(m):
xs = np.random.exponential(1.0/lam, n)
L = 1 / np.mean(xs)
Lm = math.log(2) / pd.Series(xs).median()
means.append(L)
medians.append(Lm)
print('rmse L', RMSE(means, lam))
print('rmse Lm', RMSE(medians, lam))
print('mean error L', MeanError(means, lam))
print('mean error Lm', MeanError(medians, lam))
"""
Explanation: In each iteration, we choose n values from a normal distribution with the given parameters, and compute the sample mean, xbar. We run 1000 simulations and then compute the distribution, cdf, of the estimates. The result is shown in Figure. This distribution is called the sampling distribution of the estimator. It shows how much the estimates would vary if we ran the experiment over and over.
The mean of the sampling distribution is pretty close to the hypothetical value of μ, which means that the experiment yields the right answer, on average. After 1000 tries, the lowest result is 82 kg, and the highest is 98 kg. This range suggests that the estimate might be off by as much as 8 kg.
There are two common ways to summarize the sampling distribution:
* Standard error (SE) is a measure of how far we expect the estimate to be off, on average. For each simulated experiment, we compute the error, x̄ − μ, and then compute the root mean squared error (RMSE). In this example, it is roughly 2.5 kg.
* A confidence interval (CI) is a range that includes a given fraction of the sampling distribution. For example, the 90% confidence interval is the range from the 5th to the 95th percentile. In this example, the 90% CI is (86, 94) kg.
Standard errors and confidence intervals are the source of much confusion:
People often confuse standard error and standard deviation. Remember that standard deviation describes variability in a measured quantity; in this example, the standard deviation of gorilla weight is 7.5 kg. Standard error describes variability in an estimate. In this example, the standard error of the mean, based on a sample of 9 measurements, is 2.5 kg.
One way to remember the difference is that, as sample size increases, standard error gets smaller; standard deviation does not.
* People often think that there is a 90% probability that the actual parameter, μ, falls in the 90% confidence interval. Sadly, that is not true. If you want to make a claim like that, you have to use Bayesian methods (see my book, Think Bayes).
The sampling distribution answers a different question: it gives you a sense of how reliable an estimate is by telling you how much it would vary if you ran the experiment again.
It is important to remember that confidence intervals and standard errors only quantify sampling error; that is, error due to measuring only part of the population. The sampling distribution does not account for other sources of error, notably sampling bias and measurement error.
Sampling Bias
Suppose that instead of the weight of gorillas in a nature preserve, you want to know the average weight of women in the city where you live. It is unlikely that you would be allowed to choose a representative sample of women and weigh them.
A simple alternative would be “telephone sampling;” that is, you could choose random numbers from the phone book, call and ask to speak to an adult woman, and ask how much she weighs.
Telephone sampling has obvious limitations. For example, the sample is limited to people whose telephone numbers are listed, so it eliminates people without phones (who might be poorer than average) and people with unlisted numbers (who might be richer). Also, if you call home telephones during the day, you are less likely to sample people with jobs. And if you only sample the person who answers the phone, you are less likely to sample people who share a phone line.
If factors like income, employment, and household size are related to weight (and it is plausible that they are), the results of your survey would be affected one way or another. This problem is called sampling bias because it is a property of the sampling process.
This sampling process is also vulnerable to self-selection, which is a kind of sampling bias. Some people will refuse to answer the question, and if the tendency to refuse is related to weight, that would affect the results.
Finally, if you ask people how much they weigh, rather than weighing them, the results might not be accurate. Even helpful respondents might round up or down if they are uncomfortable with their actual weight. And not all respondents are helpful. These inaccuracies are examples of measurement error.
When you report an estimated quantity, it is useful to report standard error, or a confidence interval, or both, in order to quantify sampling error. But it is also important to remember that sampling error is only one source of error, and often it is not the biggest.
Exponential distributions
Let’s play one more round of the estimation game. I’m thinking of a distribution. It’s an exponential distribution, and here’s a sample:
[5.384, 4.493, 19.198, 2.790, 6.122, 12.844]
What do you think is the parameter, λ, of this distribution?
In general, the mean of an exponential distribution is 1/λ, so working backwards, we might choose
$L = 1 / \bar{x}$
L is an estimator of λ. And not just any estimator; it is also the maximum likelihood estimator (see http://wikipedia.org/wiki/Exponential_distribution#Maximum_likelihood). So if you want to maximize your chance of guessing λ exactly, L is the way to go.
But we know that x̄ is not robust in the presence of outliers, so we expect L to have the same problem.
We can choose an alternative based on the sample median. The median of an exponential distribution is ln(2)/λ,
so working backwards again, we can define an estimator
Lm = ln(2)/m
where m is the sample median. To test the performance of these estimators, we can simulate the sampling process:
End of explanation
"""
Estimate3()
"""
Explanation: When I run this experiment with λ = 2, the RMSE of L is 1.1. For the median-based estimator Lm, RMSE is 2.2. We can’t tell from this experiment whether L minimizes MSE, but at least it seems better than Lm.
Sadly, it seems that both estimators are biased. For L the mean error is 0.39; for Lm it is 0.54. And neither converges to 0 as m increases. It turns out that x̄ is an unbiased estimator of the mean of the distribution, 1/λ, but L is not an unbiased estimator of λ. The values change with each call to the function.
End of explanation
"""
data = (140, 110)
heads, tails = data[0], data[1]
actual = abs(heads - tails)
def test_statistic(data):
heads, tails = data["H"], data["T"]
test_stat = abs(heads - tails)
return test_stat
def generate_sample(data):
heads, tails = data[0], data[1]
n = data[0] + data[1]
toss_sample = {}
sample = [random.choice('HT') for _ in range(n)]
for toss, count in zip(pd.Series(sample).value_counts().index, pd.Series(sample).value_counts().values):
toss_sample[toss] = count
return toss_sample
def calculate_pvalue(data, iters=1000):
test_stats = [test_statistic(generate_sample(data))
for _ in range(iters)]
count = sum(1 for x in test_stats if x >= actual)
return count / iters
calculate_pvalue(data)
"""
Explanation: Hypothesis Testing
The fundamental question we want to address is whether the effects we see in a sample are likely to appear in the larger population. For example, in the Social Network ads sample we see a difference in mean age between customers who purchased and those who didn't. We would like to know if that effect reflects a real difference in the larger population of customers, or if it might appear in the sample by chance.
There are several ways we could formulate this question, including Fisher null hypothesis testing, Neyman-Pearson decision theory, and Bayesian inference. What I present here is a subset of all three that makes up most of what people use in practice, which I will call classical hypothesis testing.
The goal of classical hypothesis testing is to answer the question, “Given a sample and an apparent effect, what is the probability of seeing such an effect by chance?” Here’s how we answer that question:
The first step is to quantify the size of the apparent effect by choosing a test statistic. In the Social Network ads example, the apparent effect is a difference in age between purchasing customers and others, so a natural choice for the test statistic is the difference in means between the two groups.
The second step is to define a null hypothesis, which is a model of the system based on the assumption that the apparent effect is not real. In the Social Network ads example, the null hypothesis is that there is no difference between purchasing customers and others; that is, that age has the same distribution in both groups.
The third step is to compute a p-value, which is the probability of seeing the apparent effect if the null hypothesis is true. In the Social Network ads example, we would compute the actual difference in means, then compute the probability of seeing a difference as big, or bigger, under the null hypothesis.
The last step is to interpret the result. If the p-value is low, the effect is said to be statistically significant, which means that it is unlikely to have occurred by chance. In that case we infer that the effect is more likely to appear in the larger population.
The logic of this process is similar to a proof by contradiction. To prove a mathematical statement, A, you assume temporarily that A is false. If that assumption leads to a contradiction, you conclude that A must actually be true.
Similarly, to test a hypothesis like, “This effect is real,” we assume, temporarily, that it is not. That’s the null hypothesis. Based on that assumption, we compute the probability of the apparent effect. That’s the p-value. If the p-value is low, we conclude that the null hypothesis is unlikely to be true.
Implement Hypothesis Testing
As a simple example, suppose we toss a coin 250 times and see 140 heads and 110 tails. Based on this result, we might suspect that the coin is biased; that is, more likely to land heads. To test this hypothesis, we compute the probability of seeing such a difference if the coin is actually fair:
End of explanation
"""
def TestStatistic(data):
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(data):
group1, group2 = data
n, m = len(group1), len(group2)
pool = np.hstack((group1, group2))
#print(pool.shape)
return pool, n
def RunModel(pool, n):
np.random.shuffle(pool)
data = pool[:n], pool[n:]
return data
def sample_generator(data):
pool, n = MakeModel(data)
return RunModel(pool, n)
"""
Explanation: The result is about 0.059, which means that if the coin is fair, we expect to see a difference as big as 30 about 5.9% of the time.
Interpreting the Results
How should we interpret this result? By convention, 5% is the threshold of statistical significance. If the p-value is less than 5%, the effect is considered significant; otherwise it is not.
But the choice of 5% is arbitrary, and (as we will see later) the p-value depends on the choice of the test statistic and the model of the null hypothesis. So p-values should not be considered precise measurements.
I recommend interpreting p-values according to their order of magnitude: if the p-value is less than 1%, the effect is unlikely to be due to chance; if it is greater than 10%, the effect can plausibly be explained by chance. P-values between 1% and 10% should be considered borderline. So in this example I conclude that the data do not provide strong evidence either way about whether the coin is biased.
DiffMeansPermute
Testing a difference in means
One of the most common effects to test is a difference in means between two groups. In the Social Network ads data, we saw that the mean age of purchasing customers is slightly higher, and the mean estimated salary of purchasing customers is higher than that of the others. Now we will see if those effects are statistically significant.
For these examples, the null hypothesis is that the distributions for the two groups are the same. One way to model the null hypothesis is by permutation; that is, we can take the values for purchasing customers and others and shuffle them, treating the two groups as one big group:
End of explanation
"""
purchased_customer.dropna(inplace=True)
no_purchase.dropna(inplace=True)
data = purchased_customer.Age.values, no_purchase.Age.values
ht = sample_generator(data)
actual_diff = TestStatistic(data)
def calculate_pvalue(data, iters=1000):
test_stats = [TestStatistic(sample_generator(data))
for _ in range(iters)]
count = sum(1 for x in test_stats if x >= actual_diff)
return sorted(test_stats),count / iters
test_stats, pval = calculate_pvalue(ht)
cdfs = [EvalCdf(test_stats,ts) for ts in test_stats]
plt.step(sorted(test_stats),cdfs)
plt.xlabel("test statistics")
plt.ylabel("CDF")
"""
Explanation: data is a pair of sequences, one for each group.
The test statistic is the absolute difference in the means.
MakeModel records the sizes of the groups, n and m, and combines the groups into one NumPy array, pool.
RunModel simulates the null hypothesis by shuffling the pooled values and splitting them into two groups with sizes n and m. As always, the return value from RunModel has the same format as the observed data.
End of explanation
"""
def TestStatistic(data):
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
def MakeModel(data):
group1, group2 = data
n, m = len(group1), len(group2)
pool = np.hstack((group1, group2))
#print(pool.shape)
return pool, n
def RunModel(pool, n):
np.random.shuffle(pool)
data = pool[:n], pool[n:]
return data
"""
Explanation: The resulting p-value is about 0.0, which means that the simulation essentially never produces a difference as big as the observed effect under the null hypothesis. So this effect is statistically significant.
If we run the same analysis with estimated salary, the computed p-value is 0; after 1000 attempts, the simulation never yields an effect as big as the observed difference, 18564.80. So we would report p < 0.001, and conclude that the difference in estimated salary is statistically significant.
Other Test Statistics
Choosing the best test statistic depends on what question you are trying to address. For example, if the relevant question is whether ages are different for purchasing customers, then it makes sense to test the absolute difference in means, as we did in the previous section.
If we had some reason to think that purchasing customers are likely to be older, then we would not take the absolute value of the difference; instead we would use this test statistic:
DiffMeansOneSided
End of explanation
"""
def TestStatistic(data):
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
def MakeModel(data):
group1, group2 = data
n, m = len(group1), len(group2)
pool = np.hstack((group1, group2))
#print(pool.shape)
return pool, n
def RunModel(pool, n):
np.random.shuffle(pool)
data = pool[:n], pool[n:]
return data
def sample_generator(data):
pool, n = MakeModel(data)
return RunModel(pool, n)
"""
Explanation: DiffMeansOneSided inherits MakeModel and RunModel from the testing technique above; the only difference is that TestStatistic does not take the absolute value of the difference. This kind of test is called one-sided because it only counts one side of the distribution of differences. The previous test, using both sides, is two-sided.
For this version of the test, the p-value is about half of the previous one. In general the p-value for a one-sided test is about half the p-value for a two-sided test, depending on the shape of the distribution.
The one-sided hypothesis, that purchasing customers are older, is more specific than the two-sided hypothesis, so the p-value is smaller.
We can use the same framework to test for a difference in standard deviation; for example, we might hypothesize that the standard deviation is higher for purchasing customers. Here’s how we can test that:
DiffStdPermute
End of explanation
"""
data = purchased_customer.Age.values, no_purchase.Age.values
ht = sample_generator(data)
actual_diff = TestStatistic(data)
def calculate_pvalue(data, iters=1000):
test_stats = [TestStatistic(sample_generator(data))
for _ in range(iters)]
count = sum(1 for x in test_stats if x >= actual_diff)
return sorted(test_stats),count / iters
actual_diff
test_stats, pval = calculate_pvalue(ht)
cdfs = [EvalCdf(test_stats,ts) for ts in test_stats]
plt.step(sorted(test_stats),cdfs)
plt.xlabel("test statistics")
plt.ylabel("CDF")
pval
"""
Explanation: This is a one-sided test because the hypothesis is that the standard deviation for purchasing customers is higher, not just different. The p-value is 0.23, which is not statistically significant.
End of explanation
"""
Corr(social_network["Age"], social_network["EstimatedSalary"])
def TestStatistic(data):
xs, ys = data
test_stat = abs(Corr(xs, ys))
return test_stat
def RunModel(data):
xs, ys = data
xs = np.random.permutation(xs)
return xs, ys
"""
Explanation: Testing Correlation
This framework can also test correlations. For example, in the NSFG data set, the correlation between customer's Age and his estimated salary is about 0.11. It seems like older customers have more salary. But could this effect be due to chance?
For the test statistic, I use Pearson’s correlation, but Spearman’s would work as well. If we had reason to expect positive correlation, we would do a one-sided test. But since we have no such reason, I’ll do a two-sided test using the absolute value of correlation.
The null hypothesis is that there is no correlation between customers age and his salary. By shuffling the observed values, we can simulate a world where the distributions of age and salary are the same, but where the variables are unrelated:
End of explanation
"""
data = social_network.Age.values, social_network.EstimatedSalary.values
actual_diff = TestStatistic(data)
def calculate_pvalue(data, iters=1000):
test_stats = [TestStatistic(RunModel(data))
for _ in range(iters)]
count = sum(1 for x in test_stats if x >= actual_diff)
return sorted(test_stats),count / iters
test_stats, pval = calculate_pvalue(data)
pval
"""
Explanation: data is a pair of sequences. TestStatistic computes the absolute value of Pearson’s correlation. RunModel shuffles the xs and returns simulated data.
End of explanation
"""
def TestStatistic(data):
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(data):
    n = sum(data)
    values = [1, 2, 3, 4, 5, 6]
    rolls = np.random.choice(values, n, replace=True)
    # Count each face explicitly so the frequencies line up with `values`
    # and every face is represented (possibly with a count of 0).
    freqs = [np.sum(rolls == v) for v in values]
    return freqs
"""
Explanation: The actual correlation is 0.11. The computed p-value is 0.019; after 1000 iterations the largest simulated correlation is 0.16. So although the observed correlation is small, it is statistically significant.
This example is a reminder that “statistically significant” does not always mean that an effect is important, or significant in practice. It only means that it is unlikely to have occurred by chance.
Testing Proportions
Suppose you run a casino and you suspect that a customer is using a crooked die; that is, one that has been modified to make one of the faces more likely than the others. You apprehend the alleged cheater and confiscate the die, but now you have to prove that it is crooked. You roll the die 60 times and get the following results:
| Value     | 1 | 2 | 3  | 4 | 5 | 6  |
|-----------|---|---|----|---|---|----|
| Frequency | 8 | 9 | 19 | 5 | 8 | 11 |

On average you expect each value to appear 10 times. In this dataset, the value 3 appears more often than expected, and the value 4 appears less often. But are these differences statistically significant?
To test this hypothesis, we can compute the expected frequency for each value, the difference between the expected and observed frequencies, and the total absolute difference. In this example, we expect each side to come up 10 times out of 60; the deviations from this expectation are -2, -1, 9, -5, -2, and 1; so the total absolute difference is 20.
How often would we see such a difference by chance?
End of explanation
"""
data = [8, 9, 19, 5, 8, 11]
actual_diff = TestStatistic(data)
def calculate_pvalue(data, iters=1000):
test_stats = [TestStatistic(RunModel(data))
for _ in range(iters)]
count = sum(1 for x in test_stats if x >= actual_diff)
return sorted(test_stats),count / iters
test_stats, pval = calculate_pvalue(data)
pval
"""
Explanation: The data are represented as a list of frequencies: the observed values are [8, 9, 19, 5, 8, 11]; the expected frequencies are all 10. The test statistic is the sum of the absolute differences.
The null hypothesis is that the die is fair, so we simulate that by drawing random samples from values. RunModel uses Counter to compute and return the list of frequencies.
End of explanation
"""
def TestStatistic(data):
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
return test_stat
"""
Explanation: The p-value for this data is 0.13, which means that if the die is fair we expect to see the observed total deviation, or more, about 13% of the time. So the apparent effect is not statistically significant.
Chi-squared tests
In the previous section we used total deviation as the test statistic. But for testing proportions it is more common to use the chi-squared statistic:
χ² = Σᵢ (Oᵢ − Eᵢ)² / Eᵢ
Where Oi are the observed frequencies and Ei are the expected frequencies. Here’s the Python code:
End of explanation
"""
def resample(xs):
return np.random.choice(xs, len(xs), replace=True)
def TestStatistic(data):
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(data):
group1, group2 = data
n, m = len(group1), len(group2)
pool = np.hstack((group1, group2))
#print(pool.shape)
return pool, n
def RunModel(pool, n):
np.random.shuffle(pool)
data = pool[:n], pool[n:]
return data
def sample_generator(data):
pool, n = MakeModel(data)
return RunModel(pool, n)
data = purchased_customer.Age.values, no_purchase.Age.values
ht = sample_generator(data)
actual_diff = TestStatistic(data)
def calculate_pvalue(data, iters=1000):
test_stats = [TestStatistic(sample_generator(data))
for _ in range(iters)]
count = sum(1 for x in test_stats if x >= actual_diff)
return sorted(test_stats),count / iters
def FalseNegRate(data, num_runs=100):
group1, group2 = data
count = 0
for i in range(num_runs):
sample1 = resample(group1)
sample2 = resample(group2)
ht = sample_generator((sample1,sample2))
test_stats, pval = calculate_pvalue(ht)
if pval > 0.05:
count += 1
return count / num_runs
"""
Explanation: Squaring the deviations (rather than taking absolute values) gives more weight to large deviations. Dividing through by expected standardizes the deviations, although in this case it has no effect because the expected frequencies are all equal.
The p-value using the chi-squared statistic is 0.04, substantially smaller than what we got using total deviation, 0.13. If we take the 5% threshold seriously, we would consider this effect statistically significant. But considering the two tests together, I would say that the results are borderline. I would not rule out the possibility that the die is crooked, but I would not convict the accused cheater.
This example demonstrates an important point: the p-value depends on the choice of test statistic and the model of the null hypothesis, and sometimes these choices determine whether an effect is statistically significant or not.
Errors
In classical hypothesis testing, an effect is considered statistically significant if the p-value is below some threshold, commonly 5%. This procedure raises two questions:
If the effect is actually due to chance, what is the probability that we will wrongly consider it significant? This probability is the false positive rate.
If the effect is real, what is the chance that the hypothesis test will fail? This probability is the false negative rate.
The false positive rate is relatively easy to compute: if the threshold is 5%, the false positive rate is 5%. Here’s why:
If there is no real effect, the null hypothesis is true, so we can compute the distribution of the test statistic by simulating the null hypothesis. Call this distribution CDF_T.
Each time we run an experiment, we get a test statistic, t, which is drawn from CDF_T. Then we compute a p-value, which is the probability that a random value from CDF_T exceeds t, so that's 1 − CDF_T(t).
The p-value is less than 5% if CDF_T(t) is greater than 95%; that is, if t exceeds the 95th percentile. And how often does a value chosen from CDF_T exceed the 95th percentile? 5% of the time.
So if you perform one hypothesis test with a 5% threshold, you expect a false positive 1 time in 20.
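As a quick check, here is a minimal sketch (assumed normal data, not from the original notebook) that estimates the false positive rate directly: draw two groups from the same distribution, run a permutation test of the difference in means, and count how often p falls below 5%.

import numpy as np

def perm_pvalue(group1, group2, iters=200):
    actual = abs(group1.mean() - group2.mean())
    pool = np.hstack((group1, group2))
    n = len(group1)
    count = 0
    for _ in range(iters):
        np.random.shuffle(pool)
        if abs(pool[:n].mean() - pool[n:].mean()) >= actual:
            count += 1
    return count / iters

runs, false_pos = 100, 0
for _ in range(runs):
    a = np.random.normal(0, 1, 50)   # both groups come from the same distribution,
    b = np.random.normal(0, 1, 50)   # so the null hypothesis is true by construction
    if perm_pvalue(a, b) < 0.05:
        false_pos += 1
print(false_pos / runs)              # expected to be roughly 0.05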
Power
The false negative rate is harder to compute because it depends on the actual effect size, and normally we don’t know that. One option is to compute a rate conditioned on a hypothetical effect size.
For example, if we assume that the observed difference between groups is accurate, we can use the observed samples as a model of the population and run hypothesis tests with simulated data:
End of explanation
"""
data = purchased_customer.Age.values, no_purchase.Age.values
neg_rate = FalseNegRate(data)
neg_rate
"""
Explanation: FalseNegRate takes data in the form of two sequences, one for each group. Each time through the loop, it simulates an experiment by drawing a random sample from each group and running a hypothesis test. Then it checks the result and counts the number of false negatives.
resample takes a sequence and draws a sample with the same length, with replacement:
End of explanation
"""
#Implementation of Linear Least square
def LeastSquares(xs, ys):
meanx, varx = pd.Series(xs).mean(), pd.Series(xs).var()
meany = pd.Series(ys).mean()
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
"""
Explanation: Replication
The hypothesis testing process I demonstrated above is not, strictly speaking, good practice.
First, I performed multiple tests. If you run one hypothesis test, the chance of a false positive is about 1 in 20, which might be acceptable. But if you run 20 tests, you should expect at least one false positive, most of the time.
Second, I used the same dataset for exploration and testing. If you explore a large dataset, find a surprising effect, and then test whether it is significant, you have a good chance of generating a false positive.
To compensate for multiple tests, you can adjust the p-value threshold (see https://en.wikipedia.org/wiki/Holm-Bonferroni_method). Or you can address both problems by partitioning the data, using one set for exploration and the other for testing.
In some fields these practices are required or at least encouraged. But it is also common to address these problems implicitly by replicating published results. Typically the first paper to report a new result is considered exploratory. Subsequent papers that replicate the result with new data are considered confirmatory.
Linear Least Squares
Correlation coefficients measure the strength and sign of a relationship, but not the slope. There are several ways to estimate the slope; the most common is a linear least squares fit. A “linear fit” is a line intended to model the relationship between variables. A “least squares” fit is one that minimizes the mean squared error (MSE) between the line and the data.
Suppose we have a sequence of points, ys, that we want to express as a function of another sequence xs. If there is a linear relationship between xs and ys with intercept inter and slope slope,
we expect each y[i] to be inter + slope * x[i].
But unless the correlation is perfect, this prediction is only approximate. The vertical deviation from the line, or residual, is
res = ys - (inter + slope * xs)
The residuals might be due to random factors like measurement error, or non-random factors that are unknown. For example, if we are trying to predict salary as a function of experience, unknown factors might include the initial package, responsibilities, role, and so on.
If we get the parameters inter and slope wrong, the residuals get bigger, so it makes intuitive sense that the parameters we want are the ones that minimize the residuals.
We might try to minimize the absolute value of the residuals, or their squares, or their cubes; but the most common choice is to minimize the sum of squared residuals, sum(res^2).
Why? There are three good reasons and one less important one:
Squaring has the feature of treating positive and negative residuals the same, which is usually what we want.
Squaring gives more weight to large residuals, but not so much weight that the largest residual always dominates.
If the residuals are uncorrelated and normally distributed with mean 0 and constant (but unknown) variance, then the least squares fit is also the maximum likelihood estimator of inter and slope. See https://en.wikipedia.org/wiki/Linear_regression.
The values of inter and slope that minimize the squared residuals can be computed efficiently.
The last reason made sense when computational efficiency was more important than choosing the method most appropriate to the problem at hand. That’s no longer the case, so it is worth considering whether squared residuals are the right thing to minimize.
For example, if you are using xs to predict values of ys, guessing too high might be better (or worse) than guessing too low. In that case you might want to compute some cost function for each residual, and minimize total cost, sum(cost(res)). However, computing a least squares fit is quick, easy and often good enough.
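As a hedged sketch (the asymmetric penalty and the data below are made up, not part of the notebook), such a fit can be computed with a general-purpose optimizer:

import numpy as np
from scipy.optimize import minimize

def total_cost(params, xs, ys):
    inter, slope = params
    res = ys - (inter + slope * xs)
    # assume over-predicting (negative residual) is twice as costly as under-predicting
    return np.sum(np.where(res < 0, 2.0 * res**2, res**2))

xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
ys = np.array([2.1, 3.9, 6.2, 8.1, 9.8])
fit = minimize(total_cost, x0=[0.0, 1.0], args=(xs, ys))
inter_c, slope_c = fit.x   # parameters that minimize the asymmetric cost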
End of explanation
"""
def FitLine(xs, inter, slope):
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
"""
Explanation: LeastSquares takes sequences xs and ys and returns the estimated parameters inter and slope. For details on how it works, see http://wikipedia.org/wiki/Numerical_methods_for_linear_least_squares.
FitLine takes inter and slope and returns the fitted line for a sequence of xs.
End of explanation
"""
regression_data = pd.read_csv("Salary_Data.csv")
inter, slope = LeastSquares(regression_data["YearsExperience"], regression_data["Salary"])
fit_xs, fit_ys = FitLine(regression_data["YearsExperience"], inter, slope)
print("intercept: ", inter)
print("Slope: ", slope)
"""
Explanation: Least square fit between salary and experience
End of explanation
"""
plt.scatter(regression_data["YearsExperience"], regression_data["Salary"])
plt.plot(fit_xs, fit_ys)
plt.xlabel("Experience")
plt.ylabel("Salary")
"""
Explanation: The estimated intercept and slope are 27465.89 and 9134.96 dollars of salary per year of experience, respectively. The intercept is the expected salary of an employee with zero years of experience, i.e., the starting salary of a fresher; the slope is the expected raise per additional year of experience.
End of explanation
"""
def Residuals(xs, ys, inter, slope):
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
"""
Explanation: It’s a good idea to look at a figure like this to assess whether the relationship is linear and whether the fitted line seems like a good model of the relationship.
Another useful test is to plot the residuals. A Residuals function is defined below:
End of explanation
"""
from collections import defaultdict
residuals = list(Residuals(regression_data["YearsExperience"], regression_data["Salary"], inter, slope))
regression_data["Residuals"] = residuals
bins = np.arange(0, 15, 2)
indices = np.digitize(regression_data.YearsExperience, bins)
groups = regression_data.groupby(indices)
for i, group in groups:
print(i, len(group))
year_exps = [group.YearsExperience.mean() for i, group in groups]
age_residuals = defaultdict(list)
for i, grp in groups:
for res in grp["Residuals"]:
age_residuals[i].append(EvalCdf(grp["Residuals"], res))
age_residuals
for percent in [75, 50, 25]:
residue = [Percentile(age_residuals[k], percent) for k,v in age_residuals.items()]
label = '%dth' %percent
plt.plot(year_exps, residue)
plt.xlabel("Experience Year")
plt.ylabel("Residuals")
"""
Explanation: Residuals takes sequences xs and ys and estimated parameters inter and slope. It returns the differences between the actual values and the fitted line.
End of explanation
"""
def CoefDetermination(ys, res):
return 1 - pd.Series(res).var() / pd.Series(ys).var()
"""
Explanation: Ideally these lines should be flat, indicating that the residuals are random, and parallel, indicating that the variance of the residuals is the same for all experience groups. In fact, the lines are close to parallel, so that's good; but they have some curvature, indicating that the relationship is nonlinear. Nevertheless, the linear fit is a simple model that is probably good enough for some purposes.
Estimation
The parameters slope and inter are estimates based on a sample; like other estimates, they are vulnerable to sampling bias, measurement error, and sampling error. Sampling bias is caused by non-representative sampling, measurement error is caused by errors in collecting and recording data, and sampling error is the result of measuring a sample rather than the entire population.
To assess sampling error, we ask, “If we run this experiment again, how much variability do we expect in the estimates?” We can answer this question by running simulated experiments and computing sampling distributions of the estimates.
Goodness of Fit
There are several ways to measure the quality of a linear model, or goodness
of fit. One of the simplest is the standard deviation of the residuals.
If you use a linear model to make predictions, Std(res) is the root mean squared error (RMSE) of your predictions.
Another way to measure goodness of fit is the coefficient of determination, usually denoted R2 and called “R-squared”:
End of explanation
"""
#IQ scores are normalized with Std(ys) = 15, so
import math
var_ys = 15**2
rho = 0.72
r2 = rho**2
var_res = (1 - r2) * var_ys
std_res = math.sqrt(var_res)
print(std_res)
"""
Explanation: Var(res) is the MSE of your guesses using the model, Var(ys) is the MSE without it. So their ratio is the fraction of MSE that remains if you use the model, and R2 is the fraction of MSE the model eliminates.
There is a simple relationship between the coefficient of determination and Pearson’s coefficient of correlation: R2 = ρ2. For example, if ρ is 0.8 or -0.8, R2 = 0.64.
Although ρ and R2 are often used to quantify the strength of a relationship, they are not easy to interpret in terms of predictive power. In my opinion, Std(res) is the best representation of the quality of prediction, especially if it is presented in relation to Std(ys).
For example, when people talk about the validity of the SAT (a standardized test used for college admission in the U.S.) they often talk about correlations between SAT scores and other measures of intelligence.
According to one study, there is a Pearson correlation of ρ = 0.72 between total SAT scores and IQ scores, which sounds like a strong correlation. But R2 = ρ2 = 0.52, so SAT scores account for only 52% of variance in IQ.
End of explanation
"""
Corr(regression_data["YearsExperience"], regression_data["Salary"])
def TestStatistic(data):
exp, sal = data
_, slope = LeastSquares(exp, sal)
return slope
def MakeModel(data):
_, sals = data
ybar = sals.mean()
res = sals - ybar
return ybar, res
def RunModel(data):
    exp, _ = data
    # ybar and res are computed by MakeModel and shared as module-level values
    sals = ybar + np.random.permutation(res)
    return exp, sals
ybar, res = MakeModel(data)
"""
Explanation: So using SAT score to predict IQ reduces RMSE from 15 points to 10.4 points. A correlation of 0.72 yields a reduction in RMSE of only 31%.
If you see a correlation that looks impressive, remember that R2 is a better indicator of reduction in MSE, and reduction in RMSE is a better indicator of predictive power.
Testing a linear model
The effect of years of experience is high on salary. So is it possible that the apparent relationship is due to chance? There are several ways we might test the results of a linear fit.
One option is to test whether the apparent reduction in MSE is due to chance. In that case, the test statistic is R2 and the null hypothesis is that there is no relationship between the variables. We can simulate the null hypothesis by permutation. In fact, because R2 = ρ2, a one-sided test of R2 is equivalent to a two-sided test of ρ. We’ve already done that test, and found p < 0.001, so we conclude that the apparent relationship between experience and salary is statistically significant.
Another approach is to test whether the apparent slope is due to chance. The null hypothesis is that the slope is actually zero; in that case we can model the salary as random variations around their mean. Try out hypothesis as before!
End of explanation
"""
data = regression_data.YearsExperience.values, regression_data.Salary.values
ybar, res = MakeModel(data)  # recompute the null-model components for this data
actual_diff = TestStatistic(data)
def calculate_pvalue(data, iters=1000):
test_stats = [TestStatistic(RunModel(data))
for _ in range(iters)]
count = sum(1 for x in test_stats if x >= actual_diff)
return sorted(test_stats),count / iters
test_stats, pval = calculate_pvalue(data)
pval
"""
Explanation: The data are represented as sequences of exp and sals. The test statistic is the slope estimated by LeastSquares. The model of the null hypothesis is represented by the mean sals of all employees and the deviations from the mean. To generate simulated data, we permute the deviations and add them to the mean.
End of explanation
"""
import statsmodels.formula.api as smf
formula = 'Salary ~ YearsExperience'
model = smf.ols(formula, data=regression_data)
results = model.fit()
results
"""
Explanation: The p-value is less than 0.001, so although the estimated slope is small, it is unlikely to be due to chance.
Weighted Resampling
As an example, if you survey 100,000 people in a country of 300 million, each respondent represents 3,000 people. If you oversample one group by a factor of 2, each person in the oversampled group would have a lower weight, about 1500.
To correct for oversampling, we can use resampling; that is, we can draw samples from the survey using probabilities proportional to sampling weights. Then, for any quantity we want to estimate, we can generate sampling distributions, standard errors, and confidence intervals.
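A minimal sketch of that idea (the weights column name is hypothetical, not from this dataset):

import numpy as np

def resample_rows_weighted(df, weight_col='finalwgt'):
    weights = df[weight_col].values.astype(float)
    p = weights / weights.sum()                 # selection probability proportional to weight
    rows = np.random.choice(df.index.values, size=len(df), replace=True, p=p)
    return df.loc[rows]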
Regression
The linear least squares fit is an example of regression, which is the more general problem of fitting any kind of model to any kind of data. This use of the term “regression” is a historical accident; it is only indirectly related to the original meaning of the word.
The goal of regression analysis is to describe the relationship between one set of variables, called the dependent variables, and another set of variables, called independent or explanatory variables.
Previously we used an employee's experience as an explanatory variable to predict salary as a dependent variable. When there is only one dependent and one explanatory variable, that's simple regression. Here, we move on to multiple regression, with more than one explanatory variable. If there is more than one dependent variable, that's multivariate regression.
If the relationship between the dependent and explanatory variable is linear, that’s linear regression. For example, if the dependent variable is y and the explanatory variables are x1 and x2, we would write the following linear regression model:
y = β0 + β1x1 + β2x2 + ε
where β0 is the intercept, β1 is the parameter associated with x1, β2 is the parameter associated with x2, and ε is the residual due to random variation or other unknown factors.
Given a sequence of values for y and sequences for x1 and x2, we can find the parameters, β0 , β1 , and β2 , that minimize the sum of ε2 . This process is called ordinary least squares. The computation is similar to LeastSquare, but generalized to deal with more than one explanatory variable. You can find the details at https://en.wikipedia.org/wiki/Ordinary_least_squares
Linear Regression using statsmodel
For multiple regression we’ll switch to StatsModels, a Python package that provides several forms of regression and other analyses. If you are using Anaconda, you already have StatsModels; otherwise you might have to install it.
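As a preview, here is a hedged sketch (synthetic data and made-up column names, not from the notebook) of a multiple regression using the formula API described below:

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(0)
df = pd.DataFrame({'x1': rng.normal(size=100), 'x2': rng.normal(size=100)})
df['y'] = 1.0 + 2.0 * df.x1 - 0.5 * df.x2 + rng.normal(scale=0.1, size=100)
print(smf.ols('y ~ x1 + x2', data=df).fit().params)   # estimates of beta0, beta1, beta2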
End of explanation
"""
inter = results.params['Intercept']
slope = results.params['YearsExperience']
slope_pvalue = results.pvalues['YearsExperience']
print(slope_pvalue)
"""
Explanation: statsmodels provides two interfaces (APIs); the “formula” API uses strings to identify the dependent and explanatory variables. It uses a syntax called patsy; in this example, the ~ operator separates the dependent variable on the left from the explanatory variables on the right.
smf.ols takes the formula string and the DataFrame, regression_data, and returns an OLS object that represents the model. The name ols stands for “ordinary least squares.”
The fit method fits the model to the data and returns a RegressionResults object that contains the results.
The results are also available as attributes. params is a Series that maps from variable names to their parameters, so we can get the intercept and slope like this:
End of explanation
"""
print(results.summary())
print(results.rsquared)
"""
Explanation: pvalues is a Series that maps from variable names to the associated p-values, so we can check whether the estimated slope is statistically significant:
The p-value associated with YearsExperience is far less than 0.001, as expected.
End of explanation
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
#And we start with the initial guesses beta0 = -1.5, beta1 = 2.8, beta2 = 1.1
beta = [-1.5, 2.8, 1.1]
#Then for each row we can compute log_o:
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
#convert from log odds to probabilities:
o = np.exp(log_o)
p = o / (o+1)
"""
Explanation: results.rsquared contains R2. results also provides f_pvalue, which is the p-value associated with the model as a whole, similar to testing whether R2 is statistically significant.
And results provides resid, a sequence of residuals, and fittedvalues, a sequence of fitted values corresponding to the observed Salary values.
The results object provides summary(), which represents the results in a readable format.
print(results.summary())
Betting Pool https://en.wikipedia.org/wiki/Betting_pool
Theory
Linear regression can be generalized to handle other kinds of dependent variables. If the dependent variable is boolean, the generalized model is called logistic regression. If the dependent variable is an integer count, it's called Poisson regression.
Suppose a friend of yours is pregnant and you want to predict whether the baby is a boy or a girl. You could use data from the NSFG to find factors that affect the “sex ratio”, which is conventionally defined to be the probability of having a boy.
If you encode the dependent variable numerically, for example 0 for a girl and 1 for a boy, you could apply ordinary least squares, but there would be problems. The linear model might be something like this:
y = β0 + β1x1 + β2x2 + ε
Where y is the dependent variable, and x1 and x2 are explanatory variables.
Then we could find the parameters that minimize the residuals.
The problem with this approach is that it produces predictions that are hard to interpret. Given estimated parameters and values for x1 and x2, the model might predict y = 0.5, but the only meaningful values of y are 0 and 1.
It is tempting to interpret a result like that as a probability; for example, we might say that a respondent with particular values of x1 and x2 has a 50% chance of having a boy. But it is also possible for this model to predict y = 1.1 or y = −0.1, and those are not valid probabilities.
Logistic regression avoids this problem by expressing predictions in terms of odds rather than probabilities. If you are not familiar with odds, “odds in favor” of an event is the ratio of the probability it will occur to the probability that it will not.
So if I think my team has a 75% chance of winning, I would say that the odds in their favor are three to one, because the chance of winning is three times the chance of losing.
Odds and probabilities are different representations of the same information. Given a probability, you can compute the odds like this:
o = p / (1-p)
Given odds in favor, you can convert to probability like this:
p = o / (o+1)
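As tiny helper functions (the names are my own, not from the notebook), these conversions look like this:

def odds(p):
    return p / (1 - p)

def probability(o):
    return o / (o + 1)

odds(0.75)        # 3.0: a 75% chance corresponds to odds of three to one
probability(3.0)  # 0.75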
Logistic regression is based on the following model: log o = β0 + β1x1 + β2x2 + ε
Where o is the odds in favor of a particular outcome; in the example, o would be the odds of having a boy.
Suppose we have estimated the parameters β0, β1, and β2 (I’ll explain how in a minute). And suppose we are given values for x1 and x2. We can compute the predicted value of log o, and then convert to a probability:
o = np.exp(log_o)
p = o / (o+1)
So in the office pool scenario we could compute the predictive probability of having a boy. But how do we estimate the parameters?
Estimating parameters
Unlike linear regression, logistic regression does not have a closed-form solution, so it is solved by guessing an initial solution and improving it iteratively.
The usual goal is to find the maximum-likelihood estimate (MLE), which is the set of parameters that maximizes the likelihood of the data. For example, suppose we have the following data:
End of explanation
"""
likes = y * p + (1-y) * (1-p)
print(likes)
#The overall likelihood of the data is the product of likes:
like = np.prod(likes)
"""
Explanation: Notice that when log_o is greater than 0, o is greater than 1 and p is greater than 0.5.
The likelihood of an outcome is p when y==1 and 1-p when y==0. For example, if we think the probability of a boy is 0.8 and the outcome is a boy, the likelihood is 0.8; if the outcome is a girl, the likelihood is 0.2. We can compute that like this:
End of explanation
"""
mj_clean = pd.read_csv('mj-clean.csv', engine='python', parse_dates=[5])
#parse_dates tells read_csv to interpret values in column 5 as dates and convert them to NumPy datetime64 objects.
"""
Explanation: For these values of beta, the likelihood of the data is 0.18. The goal of logistic regression is to find parameters that maximize this likelihood. To do that, most statistics packages use an iterative solver like Newton's method (see https://en.wikipedia.org/wiki/Logistic_regression#Model_fitting).
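For illustration only (a hedged sketch, not the notebook's method), the parameters for the tiny dataset above can be estimated by minimizing the negative log-likelihood with a general-purpose solver; with only four rows the estimates are unstable, but the mechanics are the same:

import numpy as np
from scipy.optimize import minimize

y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])

def neg_log_likelihood(beta):
    log_o = beta[0] + beta[1] * x1 + beta[2] * x2
    p = 1.0 / (1.0 + np.exp(-log_o))     # convert log odds to probabilities
    likes = y * p + (1 - y) * (1 - p)
    return -np.sum(np.log(likes))

result = minimize(neg_log_likelihood, x0=[-1.5, 2.8, 1.1])
print(result.x)                          # estimated beta0, beta1, beta2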
Note: I have skipped a few lessons on multiple regression and logistic regression, as they were straightforward.
The main difference between them is the dependent variable, which is binary in logistic regression and continuous in multiple linear regression. Refer to the ThinkStats book for implementation details.
Time Series Analysis
A time series is a sequence of measurements from a system that varies in time. One famous example is the “hockey stick graph” that shows global average temperature over time (see https://en.wikipedia.org/wiki/Hockey_stick_graph).
The example I work with in this chapter comes from Zachary M. Jones, a researcher in political science who studies the black market for cannabis in the U.S. (http://zmjones.com/marijuana). He collected data from a web site called “Price of Weed” that crowdsources market information by asking participants to report the price, quantity, quality, and location of cannabis transactions (http://www.priceofweed.com/). The goal of his project is to investigate the effect of policy decisions, like legalization, on markets. I find this project appealing because it is an example that uses data to address important political questions, like drug policy.
I hope you will find this chapter interesting, but I’ll take this opportunity to reiterate the importance of maintaining a professional attitude to data analysis. Whether and which drugs should be illegal are important and difficult public policy questions; our decisions should be informed by accurate data reported honestly.
End of explanation
"""
def GroupByQualityAndDay(transactions):
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
"""
Explanation: The DataFrame has a row for each reported transaction and the following
columns:
city: string city name.
state: two-letter state abbreviation.
price: price paid in dollars.
amount: quantity purchased in grams.
quality: high, medium, or low quality, as reported by the purchaser.
date: date of report, presumed to be shortly after date of purchase.
ppg: price per gram, in dollars.
state.name: string state name.
lat: approximate latitude of the transaction, based on city name.
lon: approximate longitude of the transaction.
Each transaction is an event in time, so we could treat this dataset as a time series. But the events are not equally spaced in time; the number of transactions reported each day varies from 0 to several hundred. Many methods used to analyze time series require the measurements to be equally spaced, or at least things are simpler if they are.
In order to demonstrate these methods, I divide the dataset into groups by reported quality, and then transform each group into an equally spaced series by computing the mean daily price per gram.
End of explanation
"""
def GroupByDay(transactions, func=np.mean):
grouped = transactions[['date', 'ppg']].groupby('date')
daily = grouped.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
"""
Explanation: groupby is a DataFrame method that returns a GroupBy object, groups; used in a for loop, it iterates the names of the groups and the DataFrames that represent them. Since the values of quality are low, medium, and high, we get three groups with those names.
The loop iterates through the groups and calls GroupByDay, which computes the daily average price and returns a new DataFrame:
End of explanation
"""
dailies = GroupByQualityAndDay(mj_clean)
plt.figure(figsize=(6,8))
plt.subplot(3, 1, 1)
for i, (k, v) in enumerate(dailies.items()):
plt.subplot(3, 1, i+1)
plt.title(k)
plt.scatter(dailies[k].index, dailies[k].ppg, s=10)
plt.xticks(rotation=30)
plt.ylabel("Price per gram")
plt.xlabel("Months")
plt.tight_layout()
"""
Explanation: The parameter, transactions, is a DataFrame that contains columns date
and ppg. We select these two columns, then group by date.
The result, grouped, is a map from each date to a DataFrame that contains prices reported on that date. aggregate is a GroupBy method that iterates through the groups and applies a function to each column of the group; in this case there is only one column, ppg. So the result of aggregate is a DataFrame with one row for each date and one column, ppg.
Dates in these DataFrames are stored as NumPy datetime64 objects, which are represented as 64-bit integers in nanoseconds. For some of the analyses coming up, it will be convenient to work with time in more human-friendly units, like years. So GroupByDay adds a column named date by copying the index, then adds years, which contains the number of years since the first transaction as a floating-point number.
The resulting DataFrame has columns ppg, date, and years.
End of explanation
"""
def RunLinearModel(daily):
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.items():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
print('\n')
except AttributeError:
print('R^2 %.4g' % results.prsquared)
print('\n')
#Then we can iterate through the qualities and fit a model to each:
for name, daily in dailies.items():
model, results = RunLinearModel(daily)
print(name)
SummarizeResults(results)
"""
Explanation: One apparent feature in these plots is a gap around November 2013. It’s possible that data collection was not active during this time, or the data might not be available. We will consider ways to deal with this missing data later.
Visually, it looks like the price of high quality cannabis is declining during this period, and the price of medium quality is increasing. The price of low quality might also be increasing, but it is harder to tell, since it seems to be more volatile. Keep in mind that quality data is reported by volunteers, so trends over time might reflect changes in how participants apply these labels.
Linear regression
Although there are methods specific to time series analysis, for many problems a simple way to get started is by applying general-purpose tools like linear regression. The following function takes a DataFrame of daily prices and computes a least squares fit, returning the model and results objects from StatsModels:
End of explanation
"""
#The following code plots the observed prices and the fitted values:
def PlotFittedValues(model, results, label=''):
years = model.exog[:,1]
values = model.endog
plt.scatter(years, values, s=15, label=label)
plt.plot(years, results.fittedvalues, label='model')
plt.xlabel("years")
plt.ylabel("ppg")
PlotFittedValues(model, results)
"""
Explanation: The estimated slopes indicate that the price of high quality cannabis dropped by about 71 cents per year during the observed interval; for medium quality it increased by 28 cents per year, and for low quality it increased by 57 cents per year. These estimates are all statistically significant with very small p-values.
The R2 value for high quality cannabis is 0.44, which means that time as an explanatory variable accounts for 44% of the observed variability in price. For the other qualities, the change in price is smaller, and variability in prices is higher, so the values of R2 are smaller (but still statistically significant).
End of explanation
"""
series = pd.Series(np.arange(10))
moving_avg = series.rolling(3).mean()
"""
Explanation: PlotFittedValues makes a scatter plot of the data points and a line plot of the fitted values. Plot shows the results for high quality cannabis. The model seems like a good linear fit for the data; nevertheless, linear regression is not the most appropriate choice for this data:
First, there is no reason to expect the long-term trend to be a line or any other simple function. In general, prices are determined by supply and demand, both of which vary over time in unpredictable ways.
Second, the linear regression model gives equal weight to all data, recent and past. For purposes of prediction, we should probably give more weight to recent data.
Finally, one of the assumptions of linear regression is that the residuals are uncorrelated noise. With time series data, this assumption is often false because successive values are correlated.
Moving Average
Most time series analysis is based on the modeling assumption that the observed series is the sum of three components:
* Trend: A smooth function that captures persistent changes.
* Seasonality: Periodic variation, possibly including daily, weekly, monthly, or yearly cycles.
* Noise: Random variation around the long-term trend.
Regression is one way to extract the trend from a series, as we saw in the previous section. But if the trend is not a simple function, a good alternative is a moving average. A moving average divides the series into overlapping regions, called windows, and computes the average of the values in each window.
One of the simplest moving averages is the rolling mean, which computes the mean of the values in each window. For example, if the window size is 3, the rolling mean computes the mean of values 0 through 2, 1 through 3, 2 through 4, etc.
pandas provides a rolling method on Series; rolling(window).mean() computes the rolling mean and returns a new Series.
End of explanation
"""
dates = pd.date_range(dailies["high"].index.min(), dailies["high"].index.max())
reindexed = dailies["high"].reindex(dates)
#dailies["high"].index
reindexed.shape
"""
Explanation: The first two values are nan; the next value is the mean of the first three elements, 0, 1, and 2. The next value is the mean of 1, 2, and 3. And so on.
Before we can apply rolling mean to the cannabis data, we have to deal with missing values. There are a few days in the observed interval with no reported transactions for one or more quality categories, and a period in 2013 when data collection was not active.
In the DataFrames we have used so far, these dates are absent; the index skips days with no data. For the analysis that follows, we need to represent this missing data explicitly. We can do that by “reindexing” the DataFrame:
End of explanation
"""
#Now we can plot the rolling mean like this:
#The window size is 30, so each value in roll_mean is the mean of 30 values from reindexed.ppg.
roll_mean = reindexed.ppg.rolling(30).mean()
plt.plot(roll_mean.index, roll_mean)
plt.xticks(rotation=30)
"""
Explanation: The first line computes a date range that includes every day from the beginning to the end of the observed interval. The second line creates a new DataFrame with all of the data from daily, but including rows for all dates, filled with nan.
End of explanation
"""
ewma = reindexed.ppg.ewm(span=30).mean()
plt.plot(ewma.index, ewma)
plt.xticks(rotation=30)
"""
Explanation: The rolling mean seems to do a good job of smoothing out the noise and extracting the trend. The first 29 values are nan, and wherever there’s a missing value, it’s followed by another 29 nans. There are ways to fill in these gaps, but they are a minor nuisance.
An alternative is the exponentially-weighted moving average (EWMA), which has two advantages. First, as the name suggests, it computes a weighted average where the most recent value has the highest weight and the weights for previous values drop off exponentially. Second, the pandas implementation of EWMA handles missing values better.
End of explanation
"""
reindexed.ppg.fillna(ewma, inplace=True)
"""
Explanation: The span parameter corresponds roughly to the window size of a moving average; it controls how fast the weights drop off, so it determines the number of points that make a non-negligible contribution to each average.
The plot above shows the EWMA for the same data. It is similar to the rolling mean, where they are both defined, but it has no missing values, which makes it easier to work with. The values are noisy at the beginning of the time series, because they are based on fewer data points.
Missing Value
Now that we have characterized the trend of the time series, the next step is to investigate seasonality, which is periodic behavior. Time series data based on human behavior often exhibits daily, weekly, monthly, or yearly cycles. Next, I present methods to test for seasonality, but they don’t work well with missing data, so we have to solve that problem first.
A simple and common way to fill missing data is to use a moving average. The Series method fillna does just what we want:
End of explanation
"""
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
"""
Explanation: Wherever reindexed.ppg is nan, fillna replaces it with the corresponding value from ewma. The inplace flag tells fillna to modify the existing Series rather than create a new one.
A drawback of this method is that it understates the noise in the series. We can solve that problem by adding in resampled residuals:
End of explanation
"""
def SerialCorr(series, lag=1):
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
"""
Explanation: resid contains the residual values, not including days when ppg is nan. fake_data contains the sum of the moving average and a random sample of residuals. Finally, fillna replaces nan with values from fake_data.
The filled data is visually similar to the actual values. Since the resampled residuals are random, the results are different every time; later we’ll see how to characterize the error created by missing values.
Serial correlation
As prices vary from day to day, you might expect to see patterns. If the price is high on Monday, you might expect it to be high for a few more days; and if it’s low, you might expect it to stay low. A pattern like this is called serial
correlation, because each value is correlated with the next one in the series.
To compute serial correlation, we can shift the time series by an interval called a lag, and then compute the correlation of the shifted series with the original:
End of explanation
"""
ewma = reindexed.ppg.ewm(span=30).mean()
resid = reindexed.ppg - ewma
corr = SerialCorr(resid, 1)
print(corr)
"""
Explanation: After the shift, the first lag values are nan, so I use a slice to remove them before computing Corr.
If we apply SerialCorr to the raw price data with lag 1, we find serial correlation 0.48 for the high quality category, 0.16 for medium and 0.10 for low. In any time series with a long-term trend, we expect to see strong serial correlations; for example, if prices are falling, we expect to see values above the mean in the first half of the series and values below the mean in the second half.
It is more interesting to see if the correlation persists if you subtract away the trend. For example, we can compute the residual of the EWMA and then compute its serial correlation:
End of explanation
"""
ewma = reindexed.ppg.ewm(span=30).mean()
resid = reindexed.ppg - ewma
corr = SerialCorr(resid, 7)
print(corr)
ewma = reindexed.ppg.ewm(span=30).mean()
resid = reindexed.ppg - ewma
corr = SerialCorr(resid, 30)
print(corr)
"""
Explanation: With lag=1, the serial correlations for the de-trended data are -0.022 for high quality, -0.015 for medium, and 0.036 for low. These values are small, indicating that there is little or no one-day serial correlation in this series.
End of explanation
"""
import statsmodels.tsa.stattools as smtsa
acf = smtsa.acf(resid, nlags=120, unbiased=True)
acf[0], acf[1], acf[45], acf[60]
"""
Explanation: At this point we can tentatively conclude that there are no substantial seasonal patterns in these series, at least not with these lags.
Autocorrelation
If you think a series might have some serial correlation, but you don’t know which lags to test, you can test them all! The autocorrelation function is a function that maps from lag to the serial correlation with the given lag. “Autocorrelation” is another name for serial correlation, used more often when the lag is not 1.
StatsModels, which we used for linear regression, also provides functions for time series analysis, including acf, which computes the autocorrelation function:
End of explanation
"""
plt.plot(range(len(acf)),acf)
"""
Explanation: With lag=0, acf computes the correlation of the series with itself, which is always 1.
End of explanation
"""
import scipy.stats

def EvalNormalCdfInverse(p, mu=0, sigma=1):
return scipy.stats.norm.ppf(p, loc=mu, scale=sigma)
"""
Explanation: Prediction
Time series analysis can be used to investigate, and sometimes explain, the behavior of systems that vary in time. It can also make predictions.
The linear regressions can be used for prediction. The RegressionResults class provides predict, which takes a DataFrame containing the explanatory variables and returns a sequence of predictions.
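For example (a hedged sketch, assuming results comes from RunLinearModel for one of the quality groups, so the formula is 'ppg ~ years'):

years = np.linspace(0, 5, 101)
predictions = results.predict(pd.DataFrame({'years': years}))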
If all we want is a single, best-guess prediction, we’re done. But for most purposes it is important to quantify error. In other words, we want to know how accurate the prediction is likely to be.
There are three sources of error we should take into account:
Sampling error: The prediction is based on estimated parameters, which depend on random variation in the sample. If we run the experiment again, we expect the estimates to vary.
Random variation: Even if the estimated parameters are perfect, the observed data varies randomly around the long-term trend, and we expect this variation to continue in the future.
Modeling error: We have already seen evidence that the long-term trend is not linear, so predictions based on a linear model will eventually fail.
Another source of error to consider is unexpected future events. Agricultural prices are affected by weather, and all prices are affected by politics and law. As I write this, cannabis is legal in two states and legal for medical purposes in 20 more. If more states legalize it, the price is likely to go down. But if the federal government cracks down, the price might go up.
Modeling errors and unexpected future events are hard to quantify.
Survival Analysis
Survival analysis is a way to describe how long things last. It is often used to study human lifetimes, but it also applies to “survival” of mechanical and electronic components, or more generally to intervals in time before an event.
If someone you know has been diagnosed with a life-threatening disease, you might have seen a “5-year survival rate,” which is the probability of surviving five years after diagnosis. That estimate and related statistics are the result of survival analysis.
Survival Curves
The fundamental concept in survival analysis is the survival curve, S(t), which is a function that maps from a duration, t, to the probability of surviving longer than t. If you know the distribution of durations, or “lifetimes”, finding the survival curve is easy; it's just the complement of the CDF:
S(t) = 1 − CDF(t)
where CDF(t) is the probability of a lifetime less than or equal to t.
Hazard function
From the survival curve we can derive the hazard function; for pregnancy lengths, the hazard function maps from a time, t, to the fraction of pregnancies that continue until t and then end at t. To be more precise:
λ(t) = (S(t) − S(t+1)) / S(t)
The numerator is the fraction of lifetimes that end at t, which is also PMF(t).
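A minimal sketch with made-up, fully observed lifetimes (not from the notebook) showing both quantities:

import pandas as pd

lifetimes = pd.Series([1, 2, 2, 3, 3, 3, 4, 5, 5, 6])
pmf = lifetimes.value_counts(normalize=True).sort_index()
cdf = pmf.cumsum()
survival = 1 - cdf              # S(t) = 1 - CDF(t)
at_risk = pmf + survival        # fraction with lifetime >= t
hazard = pmf / at_risk          # fraction of those reaching t that end at t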
Inferring survival curves
If someone gives you the CDF of lifetimes, it is easy to compute the survival and hazard functions. But in many real-world scenarios, we can’t measure the distribution of lifetimes directly. We have to infer it.
For example, suppose you are following a group of patients to see how long they survive after diagnosis. Not all patients are diagnosed on the same day, so at any point in time, some patients have survived longer than others. If some patients have died, we know their survival times. For patients who are still alive, we don’t know survival times, but we have a lower bound.
If we wait until all patients are dead, we can compute the survival curve, but if we are evaluating the effectiveness of a new treatment, we can’t wait that long! We need a way to estimate survival curves using incomplete information.
As a more cheerful example, I will use NSFG data to quantify how long respondents “survive” until they get married for the first time. The range of respondents’ ages is 14 to 44 years, so the dataset provides a snapshot of women at different stages in their lives.
For women who have been married, the dataset includes the date of their first marriage and their age at the time. For women who have not been married, we know their age when interviewed, but have no way of knowing when or if they will get married.
Since we know the age at first marriage for some women, it might be tempting to exclude the rest and compute the CDF of the known data. That is a bad idea. The result would be doubly misleading: (1) older women would be overrepresented, because they are more likely to be married when interviewed, and (2) married women would be overrepresented! In fact, this analysis would lead to the conclusion that all women get married, which is obviously incorrect.
Kaplan-Meier estimation
In this example it is not only desirable but necessary to include observations of unmarried women, which brings us to one of the central algorithms in survival analysis, Kaplan-Meier estimation.
The general idea is that we can use the data to estimate the hazard function, then convert the hazard function to a survival curve. To estimate the hazard function, we consider, for each age, (1) the number of women who got married at that age and (2) the number of women “at risk” of getting married, which includes all women who were not married at an earlier age.
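A hedged sketch of that idea (hypothetical inputs: complete holds known durations, ongoing holds censored ones; this mirrors, but is not, the ThinkStats implementation):

import pandas as pd
from collections import Counter

def estimate_hazard(complete, ongoing):
    hist_complete = Counter(complete)
    hist_ongoing = Counter(ongoing)
    ts = sorted(set(hist_complete) | set(hist_ongoing))
    at_risk = len(complete) + len(ongoing)
    lams = {}
    for t in ts:
        ended = hist_complete.get(t, 0)       # events observed at time t
        censored = hist_ongoing.get(t, 0)     # censored observations at time t
        lams[t] = ended / at_risk
        at_risk -= ended + censored
    return pd.Series(lams)

def survival_from_hazard(lams):
    return (1 - lams).cumprod()               # S(t) = prod over s <= t of (1 - lambda(s))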
Cohort Effects
One of the challenges of survival analysis is that different parts of the estimated curve are based on different groups of respondents. The part of the curve at time t is based on respondents whose age was at least t when they were interviewed. So the leftmost part of the curve includes data from all respondents, but the rightmost part includes only the oldest respondents.
If the relevant characteristics of the respondents are not changing over time, that’s fine, but in this case it seems likely that marriage patterns are different for women born in different generations. We can investigate this effect by grouping respondents according to their decade of birth. Groups like this, defined by date of birth or similar events, are called cohorts, and differences between the groups are called cohort effects.
Analytics Methods
Suppose you are a scientist studying gorillas in a wildlife preserve. Having weighed 9 gorillas, you find sample mean x̄ = 90 kg and sample standard deviation S = 7.5 kg. If you use x̄ to estimate the population mean, what is the standard error of the estimate?
To answer that question, we need the sampling distribution of x̄. We approximated this distribution by simulating the experiment (weighing 9 gorillas), computing x̄ for each simulated experiment, and accumulating the distribution of estimates.
The result is an approximation of the sampling distribution. Then we use the sampling distribution to compute standard errors and confidence intervals:
The standard deviation of the sampling distribution is the standard error of the estimate; in the example, it is about 2.5 kg.
The interval between the 5th and 95th percentile of the sampling distribution is a 90% confidence interval. If we run the experiment many times, we expect the estimate to fall in this interval 90% of the time. In the example, the 90% CI is (86, 94) kg.
Now we'll do the same calculation analytically. We take advantage of the fact that the weights of adult female gorillas are roughly normally distributed. Normal distributions have two properties that make them amenable for analysis: they are “closed” under linear transformation and addition. To explain what that means, I need some notation.
If the distribution of a quantity, X, is normal with parameters μ and σ, you can write
X ∼ N(μ, σ²)
where the symbol ∼ means “is distributed” and the script letter N stands for “normal.”
A linear transformation of X is something like X′ = aX + b, where a and b are real numbers. A family of distributions is closed under linear transformation if X′ is in the same family as X. The normal distribution has this property;
if X ∼ N(μ, σ²), then
X′ ∼ N(aμ + b, a²σ²)    (1)
Normal distributions are also closed under addition. If Z = X + Y with X ∼ N(μX, σX²) and Y ∼ N(μY, σY²), then
Z ∼ N(μX + μY, σX² + σY²)    (2)
In the special case Z = X + X, we have
Z ∼ N(2μX, 2σX²)
and in general, if we draw n values of X and add them up, we have
Z ∼ N(nμX, nσX²)    (3)
Sampling distributions
Now we have everything we need to compute the sampling distribution of x̄. Remember that we compute x̄ by weighing n gorillas, adding up the total weight, and dividing by n.
Assume that the distribution of gorilla weights, X, is approximately normal:
X ∼ N(μ, σ²)
If we weigh n gorillas, the total weight, Y, is distributed
Y ∼ N(nμ, nσ²)
using Equation 3. And if we divide by n, the sample mean, Z, is distributed
Z ∼ N(μ, σ²/n)
using Equation 1 with a = 1/n.
The distribution of Z is the sampling distribution of x̄. The mean of Z is μ, which shows that x̄ is an unbiased estimate of μ. The variance of the sampling distribution is σ²/n.
So the standard deviation of the sampling distribution, which is the standard error of the estimate, is σ/√n. In the example, σ is 7.5 kg and n is 9, so the standard error is 2.5 kg. That result is consistent with what we estimated by simulation, but much faster to compute!
We can also use the sampling distribution to compute confidence intervals. A 90% confidence interval for x ̄ is the interval between the 5th and 95th percentiles of Z. Since Z is normally distributed, we can compute percentiles by evaluating the inverse CDF.
There is no closed form for the CDF of the normal distribution or its inverse, but there are fast numerical methods and they are implemented in SciPy.
Given a probability, p, it returns the corresponding percentile from a normal distribution with parameters mu and sigma.
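For the gorilla example, a quick check using the function above (mu = 90, standard error = 7.5 / sqrt(9) = 2.5):

ci_low = EvalNormalCdfInverse(0.05, mu=90, sigma=2.5)
ci_high = EvalNormalCdfInverse(0.95, mu=90, sigma=2.5)
print(ci_low, ci_high)   # roughly (86, 94), matching the simulation-based interval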
End of explanation
"""
import scipy.stats
import thinkstats2  # Cdf comes from the ThinkStats support code, assumed available

def StudentCdf(n):
ts = np.linspace(-3, 3, 101)
ps = scipy.stats.t.cdf(ts, df=n-2)
rs = ts / np.sqrt(n - 2 + ts**2)
return thinkstats2.Cdf(rs, ps)
"""
Explanation: Central limit theorem
If we add values drawn from normal distributions, the distribution of the sum is normal. Most other distributions don’t have this property; if we add values drawn from other distributions, the sum does not generally have an analytic distribution.
But if we add up n values from almost any distribution, the distribution of the sum converges to normal as n increases.
More specifically, if the distribution of the values has mean and standard deviation μ and σ, the distribution of the sum is approximately N(nμ,nσ2).
This result is the Central Limit Theorem (CLT). It is one of the most useful tools for statistical analysis, but it comes with caveats:
* The values have to be drawn independently. If they are correlated, the CLT doesn’t apply (although this is seldom a problem in practice).
* The values have to come from the same distribution (although this requirement can be relaxed).
* The values have to be drawn from a distribution with finite mean and variance. So most Pareto distributions are out.
* The rate of convergence depends on the skewness of the distribution. Sums from an exponential distribution converge for small n. Sums from a lognormal distribution require larger sizes.
The Central Limit Theorem explains the prevalence of normal distributions in the natural world. Many characteristics of living things are affected by genetic and environmental factors whose effect is additive. The characteristics we measure are the sum of a large number of small effects, so their distribution tends to be normal.
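A minimal simulation sketch (the parameters are my own choice, not from the notebook): sums of 30 exponential values are already close to normal, with mean n·μ and standard deviation √n·σ.

import numpy as np

sums = np.random.exponential(scale=1.0, size=(10000, 30)).sum(axis=1)
print(sums.mean(), sums.std())   # close to 30 and sqrt(30), about 5.48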
Correlation test
we used a permutation test for the correlation between Age and Estimated Salary, and found that it is statistically significant, with p-value less than 0.001.
Now we can do the same thing analytically. The method is based on this mathematical result: given two variables that are normally distributed and uncorrelated, if we generate a sample with size n, compute Pearson's correlation, r, and then compute the transformed correlation
t = r * sqrt((n − 2) / (1 − r²))
the distribution of t is Student's t-distribution with parameter n − 2. The t-distribution is an analytic distribution; the CDF can be computed efficiently using gamma functions.
We can use this result to compute the sampling distribution of correlation under the null hypothesis; that is, if we generate uncorrelated sequences of normal values, what is the distribution of their correlation? StudentCdf takes the sample size, n, and returns the sampling distribution of correlation:
End of explanation
"""
def ChiSquaredCdf(n):
xs = np.linspace(0, 25, 101)
ps = scipy.stats.chi2.cdf(xs, df=n-1)
return thinkstats2.Cdf(xs, ps)
"""
Explanation: ts is a NumPy array of values for t, the transformed correlation. ps contains the corresponding probabilities, computed using the CDF of the Student’s t-distribution implemented in SciPy. The parameter of the t-distribution, df, stands for “degrees of freedom.” I won’t explain that term, but you can read about it at http://en.wikipedia.org/wiki/Degrees_of_freedom_(statistics).
To get from ts to the correlation coefficients, rs, we apply the inverse transform,
r = t / sqrt(n − 2 + t^2)
The result is the sampling distribution of r under the null hypothesis. By the Central Limit Theorem, these moment-based statistics are normally distributed even if the data are not.
Comparing the observed correlation with this distribution shows how unlikely it would be to occur if the variables were actually uncorrelated. Using the analytic distribution, we can compute just how unlikely:
t = r * math.sqrt((n-2) / (1-r**2))
p_value = 1 - scipy.stats.t.cdf(t, df=n-2)
We compute the value of t that corresponds to r=0.07, and then evaluate the t-distribution at t. The result is 2.9e-11. This example demonstrates an advantage of the analytic method: we can compute very small p-values. But in practice it usually doesn’t matter.
Chi-squared test
We previously used the chi-squared statistic to test whether a die is crooked. The chi-squared statistic measures the total normalized deviation from the expected values in a table:
$\chi^2 = \sum_i (O_i - E_i)^2 / E_i$
One reason the chi-squared statistic is widely used is that its sampling distribution under the null hypothesis is analytic; by a remarkable coincidence, it is called the chi-squared distribution. Like the t-distribution, the chi-squared CDF can be computed efficiently using gamma functions.
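For example, a minimal sketch (with hypothetical observed counts for 60 rolls of a six-sided die) of the statistic and its analytic p-value:
python
import numpy as np
import scipy.stats
observed = np.array([8, 9, 19, 5, 8, 11])           # hypothetical counts for 60 rolls
expected = np.full(6, observed.sum() / 6)            # 10 expected rolls per face
chi2 = np.sum((observed - expected)**2 / expected)
p_value = 1 - scipy.stats.chi2.cdf(chi2, df=len(observed) - 1)
print(chi2, p_value)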
SciPy provides an implementation of the chi-squared distribution, which we use to compute the sampling distribution of the chi-squared statistic:
End of explanation
"""
|
jehan60188/improved-octo-carnival
|
irisExample..ipynb
|
unlicense
|
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.decomposition import PCA
import seaborn as sns
import pandas as pd
import numpy as np
# import some data to play with
iris = datasets.load_iris()
dfX = pd.DataFrame(iris.data,columns = ['sepal_length','sepal_width','petal_length','petal_width'])
dfY = pd.DataFrame(iris.target,columns=['species'])
dfX['species'] = dfY
print(dfX.head())
sns.pairplot(dfX, hue="species")
"""
Explanation: Fisher's Iris data set is a collection of measurements commonly used to discuss various example algorithms. It is popular because it has multiple dimensions, enough samples to perform most basic statistics, and measurements that most people can understand.
Here, I will use the iris data set to discuss some basic machine learning algorithms. I will begin with some visuals to help understand the data, then perform some supervised algorithms to better characterize the data. I will conclude with a demonstration of a Support Vector Machine (SVM) classifier.
I start with some standard imports, by loading the iris data and shaping it into a Pandas dataframe for better manipulation.
End of explanation
"""
#find and print mean, median, 95% intervals
print('mean')
print(dfX.groupby('species').mean())
print('median')
print(dfX.groupby('species').median())
print('two-σ interval')
dfX_high = dfX.groupby('species').mean() + 2*dfX.groupby('species').std()
dfX_low = dfX.groupby('species').mean() - 2*dfX.groupby('species').std()
df = pd.DataFrame()
for C in dfX_high.columns:
df[C + '_hilo'] = dfX_high[C].astype(str) +'_' + dfX_low[C].astype(str)
print(df)
"""
Explanation: You should see that Species 0 (setosa) is quickly distinguishable from Species 1 (versicolor) and 2 (virginica).
I will now demonstrate how to calculate various descriptive statistics.
Before showing the code, I want to remind readers of the pitfall of relying entirely on descriptive statistics; Anscombe's quartet is a collection of 4 sets, each set consisting of eleven points. All of the sets have similar descriptive statistics but are visually very different.
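For instance, seaborn bundles a copy of the quartet (a quick sketch, assuming load_dataset can reach seaborn's data repository): the per-set means and standard deviations come out nearly identical even though the scatterplots look nothing alike.
python
ans = sns.load_dataset("anscombe")
# near-identical summary statistics for four visually very different sets
print(ans.groupby("dataset")[["x", "y"]].agg(["mean", "std"]))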
End of explanation
"""
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
"""
Explanation: We can see from both the scatterplots and the hi/low intervals that petal length is sufficient to discriminate Species 0 from the other two.
I will conclude with demonstrating how to use SVM to predict Species. I start with some helper functions
End of explanation
"""
dfX = pd.DataFrame(iris.data,columns = ['sepal_length','sepal_width','petal_length','petal_width'])
X = iris.data[:, :2]
C = 1.0 # SVM regularization parameter
clf = svm.SVC(kernel='rbf', gamma=0.7, C=C)
y = iris.target  # class labels; also used to color the scatter plot below
clf = clf.fit(X, y)
title = 'SVC with RBF kernel'
fig, ax = plt.subplots()
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, clf, xx, yy,cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel('Sepal length')
ax.set_ylabel('Sepal width')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
"""
Explanation: And we can now instantiate and train a model
End of explanation
"""
|
srnas/barnaba
|
examples/example_06_single_strand_motif.ipynb
|
gpl-3.0
|
import barnaba as bb
# find all GNRA tetraloops in H.Marismortui large ribosomal subunit (PDB 1S72)
query = "../test/data/GNRA.pdb"
target = "../test/data/1S72.pdb"
# call function.
results = bb.ss_motif(query,target,threshold=0.6,out='gnra_loops',bulges=1)
"""
Explanation: Search for single-stranded RNA motifs
We will now search for single-stranded motifs within a structure/trajectory.
This is performed by using the ss_motif function.
python
results = bb.ss_motif(query,target,threshold=0.6,out=None,bulges=0)
query is a PDB file with the structure you want to search for within the file target. If the keyword topology is specified, the query structure is searched in the target trajectory file.
threshold is the eRMSD threshold to consider a substructure in target to be significantly similar to query.
Typical relevant hits have eRMSD in the 0.6-0.9 range.
If you specify the optional string keyword out, PDB structures below the threshold are written with the specified prefix.
It is possible to specify the maximum number of allowed inserted or bulged bases with the option bulges.
The search is performed not considering the sequence. It is possible to specify a sequence with the sequence option. Abbreviations (i.e N/R/Y) are accepted.
The function returns a list of hits. Each element in this list is in turn a list containing the following information:
- element 0 is the frame index. This is relevant if the search is performed over a trajectory/multi model PDB.
- element 1 is the eRMSD distance from the query
- element 2 is the list of residues.
In the following example we search for structures similar to GNRA.pdb in a crystal structure of the H.Marismortui large ribosomal subunit (PDB 1S72).
End of explanation
"""
for j in range(len(results)):
#seq = "".join([r.split("_")[0] for r in results[j][2]])
print("%2d eRMSD:%5.3f " % (j,results[j][1]))
print(" Sequence: %s" % ",".join(results[j][2]))
print()
"""
Explanation: Now we print the fragment residues and their eRMSD
distance from the query structure.
End of explanation
"""
import glob
pdbs = glob.glob("gnra_loops*.pdb")
dists = [bb.rmsd(query,f)[0] for f in pdbs]
for j in range(len(results)):
seq = "".join([r.split("_")[0] for r in results[j][2]])
print("%2d eRMSD:%5.3f RMSD: %6.4f" % (j,results[j][1],10.*dists[j]), end="")
print(" Sequence: %s" % seq)
#print "%50s %6.4f AA" % (f,10.*dist[0])
"""
Explanation: We can also calculate RMSD distances for the different hits
End of explanation
"""
import py3Dmol
query_s = open(query,'r').read()
hit_0 = open(pdbs[0],'r').read()
p = py3Dmol.view(width=900,height=600,viewergrid=(1,2))
p.addModel(query_s,'pdb',viewer=(0,0))
p.addModel(hit_0,'pdb',viewer=(0,1))
p.setStyle({'stick':{}})
p.setBackgroundColor('0xeeeeee')
p.zoomTo()
p.show()
"""
Explanation: Note that the first hit has a low eRMSD, but no GNRA sequence. Let's have a look at this structure:
End of explanation
"""
hit_14 = open(pdbs[14],'r').read()
p = py3Dmol.view(width=900,height=600,viewergrid=(1,2))
p.addModel(query_s,'pdb',viewer=(0,0))
p.addModel(hit_14,'pdb',viewer=(0,1))
p.setStyle({'stick':{}})
p.setBackgroundColor('0xeeeeee')
p.zoomTo()
p.show()
"""
Explanation: We can also check hit 14, that has low eRMSD but (relatively) high RMSD
End of explanation
"""
|
intel-analytics/analytics-zoo
|
docs/docs/colab-notebook/chronos/chronos_nyc_taxi_tsdataset_forecaster.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Explanation: <a href="https://colab.research.google.com/github/intel-analytics/analytics-zoo/blob/master/docs/docs/colab-notebook/chronos/chronos_autots_nyc_taxi.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright 2018 Analytics Zoo Authors.
End of explanation
"""
# Install latest pre-release version of Analytics Zoo
# Installing Analytics Zoo from pip will automatically install pyspark, bigdl, and their dependencies.
!pip install --pre --upgrade analytics-zoo[automl]
exit() # restart the runtime to refresh installed pkg
"""
Explanation: Environment Preparation
Install Analytics Zoo
You can install the latest pre-release version with chronos support using pip install --pre --upgrade analytics-zoo[automl].
End of explanation
"""
# download the dataset
!wget https://raw.githubusercontent.com/numenta/NAB/v1.0/data/realKnownCause/nyc_taxi.csv
# load the dataset. The downloaded dataframe contains two columns, "timestamp" and "value".
import pandas as pd
df = pd.read_csv("nyc_taxi.csv", parse_dates=["timestamp"])
"""
Explanation: Step 0: Download & prepare dataset
We use the NYC taxi passengers dataset from the Numenta Anomaly Benchmark (NAB) for this demo, which contains 10320 records, each indicating the total number of taxi passengers in NYC at a corresponding time spot.
End of explanation
"""
from zoo.chronos.data import TSDataset
from sklearn.preprocessing import StandardScaler
"""
Explanation: Time series forecasting using Chronos Forecaster
Forecaster Step1. Data transformation and feature engineering using Chronos TSDataset
TSDataset is our abstract of time series dataset for data transformation and feature engineering. Here we use it to preprocess the data.
End of explanation
"""
tsdata_train, tsdata_valid, tsdata_test = TSDataset.from_pandas(df, dt_col="timestamp", target_col="value",
with_split=True, val_ratio=0.1, test_ratio=0.1)
"""
Explanation: Initialize train, valid and test tsdataset from raw pandas dataframe.
End of explanation
"""
lookback, horizon = 6, 1
scaler = StandardScaler()
for tsdata in [tsdata_train, tsdata_valid, tsdata_test]:
tsdata.deduplicate()\
.impute()\
.gen_dt_feature()\
.scale(scaler, fit=(tsdata is tsdata_train))\
.roll(lookback=lookback, horizon=horizon)
"""
Explanation: Preprocess the datasets. Here we perform:
- deduplicate: remove those identical data records
- impute: fill the missing values
- gen_dt_feature: generate feature from datetime (e.g. month, day...)
- scale: scale each feature to standard distribution.
- roll: sample the data with sliding window.
For the forecasting task, we will look back 3 hours' historical data (6 records) and predict the value of the next 30 minutes (1 record).
We perform the same transformation processes on train, valid and test set.
End of explanation
"""
from zoo.chronos.forecaster.tcn_forecaster import TCNForecaster
x, y = tsdata_train.to_numpy()
# x.shape = (num of sample, lookback, num of input feature)
# y.shape = (num of sample, horizon, num of output feature)
forecaster = TCNForecaster(past_seq_len=lookback, # number of steps to look back
future_seq_len=horizon, # number of steps to predict
input_feature_num=x.shape[-1], # number of feature to use
output_feature_num=y.shape[-1], # number of feature to predict
seed=0)
res = forecaster.fit((x, y), epochs=3)
"""
Explanation: Forecaster Step 2: Time series forecasting using Chronos Forecaster
After preprocessing the datasets. We can use Chronos Forecaster to handle the forecasting tasks.
Transform TSDataset to sampled numpy ndarray and feed them to forecaster.
End of explanation
"""
x_test, y_test = tsdata_test.to_numpy()
pred = forecaster.predict(x_test)
pred_unscale, groundtruth_unscale = tsdata_test.unscale_numpy(pred), tsdata_test.unscale_numpy(y_test)
import matplotlib.pyplot as plt
plt.figure(figsize=(24,6))
plt.plot(pred_unscale[:,:,0])
plt.plot(groundtruth_unscale[:,:,0])
plt.legend(["prediction", "ground truth"])
"""
Explanation: Forecaster Step 3: Further deployment with fitted forecaster
Use fitted forecaster to predict test data and plot the result
End of explanation
"""
forecaster.save("nyc_taxi.fxt")
forecaster.load("nyc_taxi.fxt")
"""
Explanation: Save & restore the forecaster.
End of explanation
"""
|
GoogleCloudPlatform/vertex-ai-samples
|
notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb
|
apache-2.0
|
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
"""
Explanation: Vertex SDK: AutoML training image classification model for batch prediction
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_batch.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_batch.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_batch.ipynb">
Open in Google Cloud Notebooks
</a>
</td>
</table>
<br/><br/><br/>
Overview
This tutorial demonstrates how to use the Vertex SDK to create image classification models and do batch prediction using a Google Cloud AutoML model.
Dataset
The dataset used for this tutorial is the Flowers dataset from TensorFlow Datasets. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts which of five flower classes an image belongs to: daisy, dandelion, rose, sunflower, or tulip.
Objective
In this tutorial, you create an AutoML image classification model from a Python script, and then do a batch prediction using the Vertex SDK. You can alternatively create and deploy models using the gcloud command-line tool or online using the Cloud Console.
The steps performed include:
Create a Vertex Dataset resource.
Train the model.
View the model evaluation.
Make a batch prediction.
There is one key difference between using batch prediction and using online prediction:
Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time.
Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready.
Costs
This tutorial uses billable components of Google Cloud:
Vertex AI
Cloud Storage
Learn about Vertex AI
pricing and Cloud Storage
pricing, and use the Pricing
Calculator
to generate a cost estimate based on your projected usage.
Set up your local development environment
If you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step.
Otherwise, make sure your environment meets this notebook's requirements. You need the following:
The Cloud Storage SDK
Git
Python 3
virtualenv
Jupyter notebook running in a virtual environment with Python 3
The Cloud Storage guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
Install and initialize the SDK.
Install Python 3.
Install virtualenv and create a virtual environment that uses Python 3. Activate the virtual environment.
To install Jupyter, run pip3 install jupyter on the command-line in a terminal shell.
To launch Jupyter, run jupyter notebook on the command-line in a terminal shell.
Open this notebook in the Jupyter Notebook Dashboard.
Installation
Install the latest version of Vertex SDK for Python.
End of explanation
"""
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
"""
Explanation: Install the latest GA version of google-cloud-storage library as well.
End of explanation
"""
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
"""
Explanation: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
End of explanation
"""
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
"""
Explanation: Before you begin
GPU runtime
This tutorial does not require a GPU runtime.
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.
If you are running this notebook locally, you will need to install the Cloud SDK.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $.
End of explanation
"""
REGION = "us-central1" # @param {type: "string"}
"""
Explanation: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas: us-central1
Europe: europe-west4
Asia Pacific: asia-east1
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about Vertex AI regions
End of explanation
"""
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
"""
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
End of explanation
"""
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
"""
Explanation: Authenticate your Google Cloud account
If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps:
In the Cloud Console, go to the Create service account key page.
Click Create service account.
In the Service account name field, enter a name, and click Create.
In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
"""
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
"""
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
End of explanation
"""
! gsutil mb -l $REGION $BUCKET_NAME
"""
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
"""
! gsutil ls -al $BUCKET_NAME
"""
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
"""
import google.cloud.aiplatform as aip
"""
Explanation: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
End of explanation
"""
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
"""
Explanation: Initialize Vertex SDK for Python
Initialize the Vertex SDK for Python for your project and corresponding bucket.
End of explanation
"""
IMPORT_FILE = (
"gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv"
)
"""
Explanation: Tutorial
Now you are ready to start creating your own AutoML image classification model.
Location of Cloud Storage training data.
Now set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage.
End of explanation
"""
if "IMPORT_FILES" in globals():
FILE = IMPORT_FILES[0]
else:
FILE = IMPORT_FILE
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $FILE | head
"""
Explanation: Quick peek at your data
This tutorial uses a version of the Flowers dataset that is stored in a public Cloud Storage bucket, using a CSV index file.
Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (wc -l) and then peek at the first few rows.
End of explanation
"""
dataset = aip.ImageDataset.create(
display_name="Flowers" + "_" + TIMESTAMP,
gcs_source=[IMPORT_FILE],
import_schema_uri=aip.schema.dataset.ioformat.image.single_label_classification,
)
print(dataset.resource_name)
"""
Explanation: Create the Dataset
Next, create the Dataset resource using the create method for the ImageDataset class, which takes the following parameters:
display_name: The human readable name for the Dataset resource.
gcs_source: A list of one or more dataset index files to import the data items into the Dataset resource.
import_schema_uri: The data labeling schema for the data items.
This operation may take several minutes.
End of explanation
"""
dag = aip.AutoMLImageTrainingJob(
display_name="flowers_" + TIMESTAMP,
prediction_type="classification",
multi_label=False,
model_type="CLOUD",
base_model=None,
)
print(dag)
"""
Explanation: Create and run training pipeline
To train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.
Create training pipeline
An AutoML training pipeline is created with the AutoMLImageTrainingJob class, with the following parameters:
display_name: The human readable name for the TrainingJob resource.
prediction_type: The type task to train the model for.
classification: An image classification model.
object_detection: An image object detection model.
multi_label: If a classification task, whether single (False) or multi-labeled (True).
model_type: The type of model for deployment.
CLOUD: Deployment on Google Cloud
CLOUD_HIGH_ACCURACY_1: Optimized for accuracy over latency for deployment on Google Cloud.
CLOUD_LOW_LATENCY_1: Optimized for latency over accuracy for deployment on Google Cloud.
MOBILE_TF_VERSATILE_1: Deployment on an edge device.
MOBILE_TF_HIGH_ACCURACY_1: Optimized for accuracy over latency for deployment on an edge device.
MOBILE_TF_LOW_LATENCY_1: Optimized for latency over accuracy for deployment on an edge device.
base_model: (optional) Transfer learning from existing Model resource -- supported for image classification only.
The instantiated object is the DAG (directed acyclic graph) for the training job.
End of explanation
"""
model = dag.run(
dataset=dataset,
model_display_name="flowers_" + TIMESTAMP,
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
budget_milli_node_hours=8000,
disable_early_stopping=False,
)
"""
Explanation: Run the training pipeline
Next, you run the DAG to start the training job by invoking the method run, with the following parameters:
dataset: The Dataset resource to train the model.
model_display_name: The human readable name for the trained model.
training_fraction_split: The percentage of the dataset to use for training.
test_fraction_split: The percentage of the dataset to use for test (holdout data).
validation_fraction_split: The percentage of the dataset to use for validation.
budget_milli_node_hours: (optional) Maximum training time specified in unit of millihours (1000 = hour).
disable_early_stopping: If True, training may be completed before using the entire budget if the service believes it cannot further improve on the model objective measurements.
The run method, when completed, returns the Model resource.
The execution of the training pipeline will take up to 20 minutes.
End of explanation
"""
# Get model resource ID
models = aip.Model.list(filter="display_name=flowers_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
"""
Explanation: Review model evaluation scores
After your model has finished training, you can review the evaluation scores for it.
First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you deployed the model or you can list all of the models in your project.
End of explanation
"""
test_items = !gsutil cat $IMPORT_FILE | head -n2
if len(str(test_items[0]).split(",")) == 3:
_, test_item_1, test_label_1 = str(test_items[0]).split(",")
_, test_item_2, test_label_2 = str(test_items[1]).split(",")
else:
test_item_1, test_label_1 = str(test_items[0]).split(",")
test_item_2, test_label_2 = str(test_items[1]).split(",")
print(test_item_1, test_label_1)
print(test_item_2, test_label_2)
"""
Explanation: Send a batch prediction request
Send a batch prediction to your deployed model.
Get test item(s)
Now do a batch prediction to your Vertex model. You will use arbitrary examples out of the dataset as test items. Don't be concerned that the examples were likely used in training the model -- we just want to demonstrate how to make a prediction.
End of explanation
"""
file_1 = test_item_1.split("/")[-1]
file_2 = test_item_2.split("/")[-1]
! gsutil cp $test_item_1 $BUCKET_NAME/$file_1
! gsutil cp $test_item_2 $BUCKET_NAME/$file_2
test_item_1 = BUCKET_NAME + "/" + file_1
test_item_2 = BUCKET_NAME + "/" + file_2
"""
Explanation: Copy test item(s)
For the batch prediction, copy the test items over to your Cloud Storage bucket.
End of explanation
"""
import json
import tensorflow as tf
gcs_input_uri = BUCKET_NAME + "/test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
data = {"content": test_item_1, "mime_type": "image/jpeg"}
f.write(json.dumps(data) + "\n")
data = {"content": test_item_2, "mime_type": "image/jpeg"}
f.write(json.dumps(data) + "\n")
print(gcs_input_uri)
! gsutil cat $gcs_input_uri
"""
Explanation: Make the batch input file
Now make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can be either CSV or JSONL. You will use JSONL in this tutorial. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:
content: The Cloud Storage path to the image.
mime_type: The content type. In our example, it is a jpeg file.
For example:
{'content': '[your-bucket]/file1.jpg', 'mime_type': 'jpeg'}
End of explanation
"""
batch_predict_job = model.batch_predict(
job_display_name="flowers_" + TIMESTAMP,
gcs_source=gcs_input_uri,
gcs_destination_prefix=BUCKET_NAME,
sync=False,
)
print(batch_predict_job)
"""
Explanation: Make the batch prediction request
Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:
job_display_name: The human readable name for the batch prediction job.
gcs_source: A list of one or more batch request input files.
gcs_destination_prefix: The Cloud Storage location for storing the batch prediction results.
sync: If set to True, the call will block while waiting for the asynchronous batch job to complete.
End of explanation
"""
batch_predict_job.wait()
"""
Explanation: Wait for completion of batch prediction job
Next, wait for the batch job to complete. Alternatively, one can set the parameter sync to True in the batch_predict() method to block until the batch prediction job is completed.
End of explanation
"""
import json
import tensorflow as tf
bp_iter_outputs = batch_predict_job.iter_outputs()
prediction_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("prediction"):
prediction_results.append(blob.name)
tags = list()
for prediction_result in prediction_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
line = json.loads(line)
print(line)
break
"""
Explanation: Get the predictions
Next, get the results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:
content: The prediction request.
prediction: The prediction response.
ids: The internal assigned unique identifiers for each prediction request.
displayNames: The class names for each class label.
confidences: The predicted confidence, between 0 and 1, per class label.
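As a rough follow-on (a sketch only; the field names follow the description above, so check one printed line first and adjust the keys if the nesting differs), you could pull out the top label per instance like this:
python
# `line` is one parsed prediction dict from the loop above
pred = line.get("prediction", line)                  # adjust if the keys are nested differently
best_label, best_conf = max(zip(pred["displayNames"], pred["confidences"]),
                            key=lambda pair: pair[1])
print(best_label, best_conf)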
End of explanation
"""
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline trainig job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom trainig job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
"""
Explanation: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
Dataset
Pipeline
Model
Endpoint
AutoML Training Job
Batch Job
Custom Job
Hyperparameter Tuning Job
Cloud Storage Bucket
End of explanation
"""
|
endlesspint8/endlesspint8.github.io
|
code/spence_garcia/spence_garcia.ipynb
|
mit
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
%matplotlib inline
from scipy.stats import binom, poisson, zscore
"""
Explanation: Spence/Garcia, What Were the Odds of That?
post @ endlesspint.com
End of explanation
"""
np.random.seed(8)
sim_cnt_poi = 10000
spence_tot_poi, spence_jab_poi, spence_pow_poi = np.random.poisson(64, sim_cnt_poi), \
np.random.poisson(31, sim_cnt_poi), \
np.random.poisson(33, sim_cnt_poi)
garcia_tot_poi, garcia_jab_poi, garcia_pow_poi = np.random.poisson(53, sim_cnt_poi), \
np.random.poisson(32, sim_cnt_poi), \
np.random.poisson(21, sim_cnt_poi)
plt.figure(figsize=(16,6))
# SPENCE
ax1 = plt.subplot(231)
plt.hist(spence_tot_poi, 14, density=True, color='green')
plt.title("Spence Total Punches/Rd")
ax2 = plt.subplot(232, sharey=ax1)
plt.hist(spence_jab_poi, 14, density=True, color='green')
plt.title("Spence Jabs/Rd")
ax3 = plt.subplot(233, sharey=ax1)
plt.hist(spence_pow_poi, 14, density=True, color='green')
plt.title("Spence Power Punches/Rd")
# GARCIA
plt.subplot(234, sharex=ax1, sharey=ax1)
plt.hist(garcia_tot_poi, 14, density=True)
plt.title("Garcia Total Punches/Rd")
plt.subplot(235, sharex=ax2, sharey=ax1)
plt.hist(garcia_jab_poi, 14, density=True)
plt.title("Garcia Jabs/Rd")
plt.subplot(236, sharex=ax3, sharey=ax1)
plt.hist(garcia_pow_poi, 14, density=True)
plt.title("Garcia Power Punches/Rd")
plt.tight_layout()
"""
Explanation: Coming into the Fight
source: https://www.boxingscene.com/errol-spence-vs-mikey-garcia-compubox-historical-review--137148
(mis)Using Poisson to Get an Idea of Expected Fighter Output
Spence could be expected to be busier, but by how much and where?
End of explanation
"""
spence_total_thrown = [46, 55, 75, 89, 86, 95, 70, 102, 121, 125, 118, 100]
spence_jabs_thrown = [39, 43, 53, 61, 62, 56, 50, 60, 56, 58, 45, 35]
spence_power_thrown = [7, 12, 22, 28, 24, 39, 20, 42, 65, 67, 73, 65]
garcia_total_thrown = [12, 28, 31, 35, 41, 43, 26, 41, 32, 62, 15, 40]
garcia_jabs_thrown = [9, 13, 18, 20, 17, 19, 10, 22, 17, 25, 2, 16]
garcia_power_thrown = [3, 15, 13, 15, 24, 24, 16, 19, 15, 37, 13, 24]
"""
Explanation: Fight Night
source: https://www.boxingscene.com/errol-spence-vs-mikey-garcia-compubox-punch-stats--137161
A look at actual thrown performance v expectations
End of explanation
"""
def chance_of_throwing_GE(sim, act):
return [np.sum(sim >= x)/len(sim) for x in act]
spence_comp_thrown = [(spence_tot_poi, spence_total_thrown), (spence_jab_poi, spence_jabs_thrown), (spence_pow_poi, spence_power_thrown)]
garcia_comp_thrown = [(garcia_tot_poi, garcia_total_thrown), (garcia_jab_poi, garcia_jabs_thrown), (garcia_pow_poi, garcia_power_thrown)]
print("SPENCE")
for spence in spence_comp_thrown:
spence_perf = chance_of_throwing_GE(spence[0], spence[1])
print(spence_perf, np.mean(spence_perf))
print("\nGARCIA")
for garcia in garcia_comp_thrown:
garcia_perf = chance_of_throwing_GE(garcia[0], garcia[1])
print(garcia_perf, np.mean(garcia_perf))
"""
Explanation: A tale of two jabs: Spence pumped out the lead left at a ridiculous rate, neutralizing Garcia while setting up the rest of his own offense.
End of explanation
"""
s_plot = 1
fig = plt.figure(figsize=(16,6))
for stat in ((406, .298, "Garcia total", 75), (188, .209, "Garcia jabs", 21), (218, .431, "Garcia power", 54)):
n, p, category, N = stat[0], stat[1], stat[2], stat[3]
x = np.arange(binom.ppf(0.025, n, p),
binom.ppf(0.975, n, p))
sim_bouts = 10000
print("95 percent range for %s: \t" % category, int(np.min(x)), " - ", int(np.max(x)))
print("\t %s actually landed: \t" % category, N)
print("\t prob of landing this %s count or less:" % category,
"{0:.0f}%".format(np.sum(np.random.binomial(n, p, sim_bouts) < N)/float(sim_bouts) * 100))
ax = fig.add_subplot(1,3,s_plot)
ax.plot(x, binom.pmf(x, n, p), 'ro', ms=5, label='binom pmf')
plt.title(category)
ax.vlines(x, 0, binom.pmf(x, n, p), colors='r', lw=5, alpha=0.5)
ax.vlines(N, 0, np.max(binom.pmf(x, n, p)), lw=3, linestyles=":")
s_plot += 1
plt.tight_layout()
plt.show()
"""
Explanation: What about contact? "Everyone's gotta plan until they get hit."
End of explanation
"""
s_plot = 1
fig = plt.figure(figsize=(16,6))
for stat in ((1082, .343, "Spence total", 345), (618, .198, "Spence jabs", 108), (464, .480, "Spence power", 237)):
n, p, category, N = stat[0], stat[1], stat[2], stat[3]
x = np.arange(binom.ppf(0.025, n, p),
binom.ppf(0.975, n, p))
sim_bouts = 10000
print("95 percent range for %s: \t" % category, int(np.min(x)), " - ", int(np.max(x)))
print("\t %s actually landed: \t" % category, N)
print("\t prob of landing this %s count or less:" % category,
"{0:.0f}%".format(np.sum(np.random.binomial(n, p, sim_bouts) < N)/float(sim_bouts) * 100))
ax = fig.add_subplot(1,3,s_plot)
ax.plot(x, binom.pmf(x, n, p), 'go', ms=5, label='binom pmf')
plt.title(category)
ax.vlines(x, 0, binom.pmf(x, n, p), colors='g', lw=5, alpha=0.5)
ax.vlines(N, 0, np.max(binom.pmf(x, n, p)), lw=3, linestyles=":")
s_plot += 1
plt.tight_layout()
plt.show()
"""
Explanation: Both fighters had their rates affected, but while Mikey fell off the chart (almost literally, and in a bad sense), Spence's superior activity with the jab allowed him to set up and deliver the power punches, especially in volume over the second half of the bout.
End of explanation
"""
def standardized_fight_night(sim_rds, poi_lambda, n_times, act):
act_rds = len(act)
fight_night_zscore = np.zeros(act_rds)
for i in range(n_times):
fight_night_zscore += zscore(np.append(np.random.poisson(poi_lambda, sim_rds), act))[-act_rds:]
return fight_night_zscore/n_times
spence_total_zscore = standardized_fight_night(60, 64, 100, spence_total_thrown)
spence_total_zscore
garcia_total_zscore = standardized_fight_night(60, 53, 100, garcia_total_thrown)
garcia_total_zscore
plt.plot(spence_total_zscore, garcia_total_zscore, 'go')
"""
Explanation: One more look at thrown punches
End of explanation
"""
|
brookthomas/GeneDive
|
preprocessing/AdjacencyMatrix.ipynb
|
mit
|
import sqlite3
import json
DATABASE = "data.sqlite"
conn = sqlite3.connect(DATABASE)
cursor = conn.cursor()
"""
Explanation: Build Adjacency Matrix
End of explanation
"""
# For getting the maximum row id
QUERY_MAX_ID = "SELECT id FROM interactions ORDER BY id DESC LIMIT 1"
# Get interaction data
QUERY_INTERACTION = "SELECT geneids1, geneids2, probability FROM interactions WHERE id = {}"
max_id = cursor.execute(QUERY_MAX_ID).fetchone()[0]
"""
Explanation: Queries
End of explanation
"""
matrix = {}
row_id = 0
while row_id <= max_id:
row_id+= 1
row = cursor.execute(QUERY_INTERACTION.format(row_id))
row = row.fetchone()
if row == None:
continue
id1 = row[0]
id2 = row[1]
prob = int(round(row[2],2) * 1000)
# Forward
if id1 not in matrix:
matrix[id1] = {}
if id2 not in matrix[id1]:
matrix[id1][id2] = []
if prob not in matrix[id1][id2]:
matrix[id1][id2].append(prob)
# Backwards
if id2 not in matrix:
matrix[id2] = {}
if id1 not in matrix[id2]:
matrix[id2][id1] = []
if prob not in matrix[id2][id1]:
matrix[id2][id1].append(prob)
with open("matrix.json", "w+") as file:
file.write(json.dumps( matrix ))
"""
Explanation: Step through every interaction.
If geneids1 not in matrix - insert it as dict.
If geneids2 not in matrix[geneids1] - insert it as []
If probability not in matrix[geneids1][geneids2] - insert it.
Perform the reverse.
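A minimal lookup sketch (assuming the matrix.json written above; note that JSON object keys are strings, so gene ids must be passed as strings):
python
import json

with open("matrix.json") as f:
    matrix = json.load(f)

def probabilities(gene_a, gene_b):
    # list of scaled probabilities linking two gene ids, or [] if they never interact
    return matrix.get(str(gene_a), {}).get(str(gene_b), [])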
End of explanation
"""
|
WyoARCC/arcc-106-python
|
ARCC+Bootcamp+Machine+Learning.ipynb
|
mit
|
#NumPy is the fundamental package for scientific computing with Python
import numpy as np
# Matplotlib is a Python 2D plotting library
import matplotlib.pyplot as plt
#Number of data points
n=50
x=np.random.randn(n)
y=np.random.randn(n)
#Create a figure and a set of subplots
fig, ax = plt.subplots()
#Find best fitting straight line
#This will return the coefficients of the best fitting straight line
#i.e. m and c in the slope intercept form of a line-> y=m*x+c
fit = np.polyfit(x, y, 1)
#Plot the straight line
ax.plot(x, fit[0] * x + fit[1], color='black')
#scatter plot the data set
ax.scatter(x, y)
plt.ylabel('y axis')
plt.xlabel('x axis')
plt.show()
#predict output for an input say x=5
x_input=5
predicted_value= fit[0] * x_input + fit[1]
print(predicted_value)
"""
Explanation: Machine Learning Using Python by ARCC
What is Machine Learning?
Machine Learning is a subfield of computer science, defined by Arthur Samuel as "giving computers the ability to learn without being explicitly programmed". Generally speaking, it is the ability of machines to learn to perform a task efficiently based on experience.
Two major types of Machine Learning problems
The two major types of Machine Learning problems are:
1. Supervised Learning : In this type of Machine Learning problem, the learning algorithms are provided with a data set that has a known label or result, such as classifying a bunch of emails as spam/not-spam emails.
2. Unsupervised Learning : In this type of Machine Learning problem, the learning algorithms are provided with a data set that has no known label or result. The algorithm, without any supervision, is expected to find some structure in the data by itself; search engines are a common example.
In order to limit the scope of this boot camp, as well as save us some time, we will focus on Supervised Learning today.
Machine Learning's "Hello World" programs
Supervised Learning
In this section we focus on three Supervised Learning algorithms namely Linear Regression, Linear Classifier and Support Vector Machines.
Linear Regression
In order to explain what Linear Regression is, let's write a program that performs Linear Regression. Our goal is to find the best fitting straight line through a data set of 50 random points.
The equation of a line in slope-intercept form is: $y=m*x+c$
End of explanation
"""
#Import the decision tree classifier class from the scikit-learn machine learning library
from sklearn import tree
#List of features
#Say we have 9 inputs each with two features i.e. [feature one=1:9, feature two=0 or 1]
features=[[1,1],[8,0],[5,1],[2,1],[6,0],[9,1],[3,1],[4,1],[7,0]]
#The 9 inputs are classified explicitly into three classes (0,1 and 2) by us
# For example input 1,1 belongs to class 0
# input 4,1 belongs to class 1
# input 8,1 belongs to class 2
labels=[0,0,0,1,1,1,2,2,2]
#Features are the inputs to the classifier and labels are the outputs
#Create decision tree classifier
clf=tree.DecisionTreeClassifier()
#Training algorithm, included in the object, is executed
clf=clf.fit(features,labels) # Fit is a synonym for "find patterns in data"
#Predict to which class does an input belong
#for example [2,1]
print (clf.predict([[2,1]]))
"""
Explanation: Using the best fitting straight line, we can predict the next expected value. Hence Regression is a Machine Learning technique which helps to predict the output in a model that takes continuous values.
Linear Classifier
A classifier, for now, can be thought of as a program that uses an object's characteristics to identify which class it belongs to. For example, classifying a fruit as an orange or an apple. The following program is a simple Supervised Learning classifier, that makes use of a decision tree classifier (An example of a decision tree is shown below).
End of explanation
"""
#import basic libraries for plotting and scientific computing
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
#emulates the aesthetics of ggplot in R
style.use("ggplot")
#import class svm from scikit-learn
from sklearn import svm
#input data
X = [1, 5, 1.5, 8, 1, 9]
Y = [2, 8, 1.8, 8, 0.6, 11]
#classes assigned to input data
y = [0,1,0,1,0,1]
#plot input data
#plt.scatter(X,Y)
#plt.show()
#Create the Linear Support Vector Classification
clf = svm.SVC(kernel='linear', C = 1.0)
#input data in 2-D
points=[[1,2],[5,8],[1.5,1.8],[8,8],[1,0.6],[9,11]]#,[2,2],[1,4]]
#Fit the data with Linear Support Vector Classification
clf.fit(points,y) #Fit is a synonym for "find patterns in data"
#Predict the class for the following two points depending on which side of the SVM they lie
print(clf.predict([[0.58,0.76]]))
print(clf.predict([[10.58,10.76]]))
#find coefficients of the linear svm
w = clf.coef_[0]
#print(w)
#find slope of the line we wish to draw between the two classes
#a=change in y/change in x
a = -w[0] / w[1]
#Draw the line
#x points for a line
#linspace ->Return evenly spaced numbers over a specified interval.
xx = np.linspace(0,12)
#equation our SVM hyperplane
yy = a * xx - clf.intercept_[0] / w[1]
#plot the hyperplane
h0 = plt.plot(xx, yy, 'k-', label="svm")
#plot the data points as a scatter plot
plt.scatter(X,Y)
plt.legend()
plt.show()
"""
Explanation: Support Vector Machines
“Support Vector Machine” (SVM) is a supervised machine learning algorithm which can be used for both classification or regression challenges. However, it is mostly used in classification problems. An example of SVM would be using a linear hyperplane to separate two clusters of data points. The following code implements the same.
End of explanation
"""
#import numpy library
import numpy as np
# Sigmoid function which maps any value to between 0 and 1
# This is the function which will our layers will comprise of
# It is used to convert numbers to probabilties
def sigmoid(x):
return 1/(1+np.exp(-x))
# input dataset
# 4 set of inputs
x = np.array([[0.45,0,0.21],[0.5,0.32,0.21],[0.6,0.5,0.19],[0.7,0.9,0.19]])
# output dataset corresponding to our set of inputs
# .T takes the transpose of the 1x4 matrix which will give us a 4x1 matrix
y = np.array([[0.5,0.4,0.6,0.9]]).T
#Makes our model deterministic for us to judge the outputs better
#Numbers will be randomly distributed but randomly distributed in exactly the same way each time the model is trained
np.random.seed(1)
# initialize weights randomly with mean 0, weights lie within -1 to 1
# dimensions are 3x1 because we have three inputs and one output
weights = 2*np.random.random((3,1))-1
#Network training code
#Train our neural network
for iter in range(1000):
#get input
input_var = x
#This is our prediction step
#first predict given input, then study how it performs and adjust to get better
#This line first multiplies the input by the weights and then passes it to the sigmoid function
output = sigmoid(np.dot(input_var,weights))
#now we have guessed an output based on the provided input
#subtract from the actual answer to see how much did we miss
error = y - output
#based on error update our weights
weights += np.dot(input_var.T,error)
#The best fit weights by our neural net is as following:
print("The weights that the neural network found was:")
print(weights)
#Predict with new inputs i.e. dot with weights and then send to our prediction function
predicted_output = sigmoid(np.dot(np.array([0.3,0.9,0.1]),weights))
print ("Predicted Output:")
print (predicted_output)
"""
Explanation: Basic workflow when using Machine Learning algorithms
Neural Networks
Neural Networks can be thought of as one large composite function built from smaller functions. Each layer is a function whose input is the output of the previous function.
For example:
The example above was rather rudimentary. Let us look at a case where we have more than one input, fed to a prediction function that maps them to an output. This can be depicted by the following graph.
Building a Neural Network
The function carried out by our layer is termed the sigmoid. It takes the form: $1/(1+e^{-x})$
Steps to follow to create our neural network:
<br>
1) Get set of input
<br>
2) dot with a set of weights, i.e. weight1*input1 + weight2*input2 + weight3*input3
<br>
3) send the dot product to our prediction function i.e. sigmoid
<br>
4) check how much we missed i.e. calculate error
<br>
5) adjust weights accordingly
<br>
6) Do this for all inputs and about 1000 times
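Concretely, steps 1-3 for a single training example look like this (a sketch with hypothetical starting weights; the full training loop repeats this for all inputs and then applies steps 4 and 5):
python
import numpy as np
sigmoid = lambda x: 1/(1+np.exp(-x))          # same prediction function as in the code cell
w0 = np.array([[0.2], [-0.4], [0.1]])         # hypothetical 3x1 starting weights
x0 = np.array([[0.45, 0.0, 0.21]])            # the first training example
prediction = sigmoid(np.dot(x0, w0))          # a single probability between 0 and 1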
End of explanation
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Feature set containing (x,y) values of 25 known/training data
trainData = np.random.randint(0,100,(25,2)).astype(np.float32)
# Labels each one either Red or Blue with numbers 0 and 1
responses = np.random.randint(0,2,(25,1)).astype(np.float32)
# Take Red families and plot them
red = trainData[responses.ravel()==0]
plt.scatter(red[:,0],red[:,1],80,'r','^')
# Take Blue families and plot them
blue = trainData[responses.ravel()==1]
plt.scatter(blue[:,0],blue[:,1],80,'b','s')
#New unknown data point
newcomer = np.random.randint(0,100,(1,2)).astype(np.float32)
#Make this unknown data point green
plt.scatter(newcomer[:,0],newcomer[:,1],80,'g','o')
#Carry out the K nearest neighbour classification
knn = cv2.ml.KNearest_create()
#Train the algorithm
#the second argument (0) is cv2.ml.ROW_SAMPLE, i.e. each row of trainData is one sample
knn.train(trainData, 0, responses)
#Find 3 nearest neighbours...also make sure the neighbours found belong to both classes
ret, results, neighbours ,dist = knn.findNearest(newcomer, 3)
print ("result: ", results,"\n")
print ("neighbours: ", neighbours,"\n")
print ("distance: ", dist)
plt.show()
"""
Explanation: Optional :
K-Nearest Neighbour
End of explanation
"""
|
fabm-model/code
|
src/models/bb/lorenz63/lorenz63.ipynb
|
gpl-2.0
|
import numpy
import scipy.integrate
"""
Explanation: The Lorenz63 model implemented in FABM
The equations read:
$ \frac{dx}{dt} = \sigma ( y - x )$
$ \frac{dy}{dt} = x ( \rho - z ) - y$
$ \frac{dz}{dt} = x y - \beta z$
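For reference, a standalone sketch of the same right-hand side (assuming the classic parameter choice $\sigma=10$, $\rho=28$, $\beta=8/3$; the values actually used by the FABM module come from the YAML file) can be integrated directly:
python
import numpy
import scipy.integrate

sigma, rho, beta = 10.0, 28.0, 8.0/3.0

def lorenz_rhs(state, t):
    x, y, z = state
    return [sigma*(y - x), x*(rho - z) - y, x*y - beta*z]

t = numpy.arange(0.0, 40.0, 0.01)
trajectory = scipy.integrate.odeint(lorenz_rhs, [1.0, 1.0, 1.0], t)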
For further information see
Import standard python packages and pyfabm
End of explanation
"""
import pyfabm
#pyfabm.get_version()
"""
Explanation: Import pyfabm - the python module that contains the Fortran based FABM
End of explanation
"""
yaml_file = 'fabm-bb-lorenz63.yaml'
model = pyfabm.Model(yaml_file)
model.findDependency('bottom_depth').value = 1.
model.checkReady(stop=True)
"""
Explanation: Configuration
The model configuration is done via a YAML-formatted file.
End of explanation
"""
def dy(y,t0):
model.state[:] = y
return model.getRates()
"""
Explanation: Model increment
End of explanation
"""
t = numpy.arange(0.0, 40.0, 0.01)
y = scipy.integrate.odeint(dy,model.state,t)
"""
Explanation: Time axis and model integration
End of explanation
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(y[:,0], y[:,1], y[:,2])
plt.show()
"""
Explanation: Plot the results
End of explanation
"""
|
queirozfcom/python-sandbox
|
python3/notebooks/boosting/effect-of-categories-credit-default.ipynb
|
mit
|
def fix_status(current_value):
if current_value == -2: return 'no_consumption'
elif current_value == -1: return 'paid_full'
elif current_value == 0: return 'revolving'
elif current_value in [1,2]: return 'delay_2_mths'
elif current_value in [3,4,5,6,7,8,9]: return 'delay_3+_mths'
else: return 'other'
for column_name in df.columns:
if column_name.startswith('status'):
df[column_name] = df[column_name].map(lambda x: fix_status(x)).astype(str)
"""
Explanation: turn payment status into categories
The author clarified the codes for the payment_status* columns:
-2: No consumption; -1: Paid in full; 0: The use of revolving credit; 1 = payment delay for one month; 2 = payment delay for two months; . . .; 8 = payment delay for eight months; 9 = payment delay for nine months and above.
So let's use these categories:
-2 => category 'no_consumption'
-1 => category 'paid_full'
0 => category 'revolving'
1 and 2 => 'delay_2_mths
3 to 9 => 'delay_3+_mths'
End of explanation
"""
df = pd.concat([df,pd.get_dummies(df['sex'], prefix='sex')],axis=1)
df.drop(['sex'],axis=1,inplace=True)
df = pd.concat([df,pd.get_dummies(df['education'], prefix='education')],axis=1)
df.drop(['education'],axis=1,inplace=True)
df = pd.concat([df,pd.get_dummies(df['marriage'], prefix='marriage')],axis=1)
df.drop(['marriage'],axis=1,inplace=True)
# also all status columns
for column_name in df.columns:
if column_name.startswith('status'):
df = pd.concat([df,pd.get_dummies(df[column_name], prefix=column_name)],axis=1)
df.drop([column_name],axis=1,inplace=True)
df.sample(10)
data = df.drop('default',axis=1)
target = df['default']
data.head()
X_train, X_test, y_train, y_test = train_test_split(
data.values,
target.values,
test_size=0.25)
clf = XGBClassifier()
clf.fit(X_train, y_train.ravel())
y_preds = clf.predict_proba(X_test)
# take the second column because the classifier outputs scores for
# the 0 class as well
preds = y_preds[:,1]
# fpr means false-positive-rate
# tpr means true-positive-rate
fpr, tpr, _ = metrics.roc_curve(y_test, preds)
auc_score = metrics.auc(fpr, tpr)
plt.title('ROC Curve')
plt.plot(fpr, tpr, label='AUC = {:.3f}'.format(auc_score))
# it's helpful to add a diagonal to indicate where chance
# scores lie (i.e. just flipping a coin)
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.1])
plt.ylim([-0.1,1.1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend(loc='lower right')
plt.show()
n_features=43
plt.clf()
d = dict(zip(data.columns, clf.feature_importances_))
d['marriage'] = 0
d['sex'] = 0
d['education'] = 0
for n in range(0,3):
d['marriage'] += d['marriage_{}'.format(n)]
del(d['marriage_{}'.format(n)])
for n in range(1,3):
d['sex'] += d['sex_{}'.format(n)]
del(d['sex_{}'.format(n)])
for n in range(0,7):
d['education'] += d['education_{}'.format(n)]
del(d['education_{}'.format(n)])
ss = sorted(d, key=d.get, reverse=True)
top_names = ss[0:n_features]
plt.title("Feature importances")
plt.bar(range(n_features), [d[i] for i in top_names], color="r", align="center")
plt.xlim(-1, n_features)
plt.xticks(range(n_features), top_names, rotation='vertical')
plt.yticks(np.arange(0, 0.12, 0.005))
plot_value_labels(plt.gca(),format='{:.3f}')
plt.gcf().set_size_inches(10,6)
plt.ylim(0.0,0.11)
plt.tight_layout()
plt.show()
"""
Explanation: one hot encoding where needed
End of explanation
"""
df['age'].describe()
"""
Explanation: can we do better by training a different model by subpopulation?
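A compact sketch of the idea first (assuming the df prepared above and the XGBClassifier, train_test_split and metrics imports already in use): fit one model per age band and compare test AUC, before walking through each group cell by cell below.
python
bands = {"young (<=30)":   df['age'] <= 30,
         "middle (31-50)": df['age'].between(31, 50),
         "older (>50)":    df['age'] > 50}
for name, mask in bands.items():
    sub = df[mask]
    X_tr, X_te, y_tr, y_te = train_test_split(sub.drop('default', axis=1).values,
                                              sub['default'].values, test_size=0.25)
    scores = XGBClassifier().fit(X_tr, y_tr).predict_proba(X_te)[:, 1]
    fpr, tpr, _ = metrics.roc_curve(y_te, scores)
    print(name, "AUC = {:.3f}".format(metrics.auc(fpr, tpr)))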
End of explanation
"""
data = df[df['age']<=30].drop('default',axis=1)
target = df[df['age']<=30]['default']
X_train, X_test, y_train, y_test = train_test_split(
data.values,
target.values,
test_size=0.25)
clf = XGBClassifier()
clf.fit(X_train, y_train.ravel())
y_preds = clf.predict_proba(X_test)
# take the second column because the classifier outputs scores for
# the 0 class as well
preds = y_preds[:,1]
# fpr means false-positive-rate
# tpr means true-positive-rate
fpr, tpr, _ = metrics.roc_curve(y_test, preds)
auc_score = metrics.auc(fpr, tpr)
plt.title('ROC Curve')
plt.plot(fpr, tpr, label='AUC = {:.3f}'.format(auc_score))
# it's helpful to add a diagonal to indicate where chance
# scores lie (i.e. just flipping a coin)
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.1])
plt.ylim([-0.1,1.1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend(loc='lower right')
plt.show()
"""
Explanation: young people (age<=30)
End of explanation
"""
data = df[df['age'].between(31,50)].drop('default',axis=1)
target = df[df['age'].between(31,50)]['default']
X_train, X_test, y_train, y_test = train_test_split(
data.values,
target.values,
test_size=0.25)
clf = XGBClassifier()
clf.fit(X_train, y_train.ravel())
y_preds = clf.predict_proba(X_test)
# take the second column because the classifier outputs scores for
# the 0 class as well
preds = y_preds[:,1]
# fpr means false-positive-rate
# tpr means true-positive-rate
fpr, tpr, _ = metrics.roc_curve(y_test, preds)
auc_score = metrics.auc(fpr, tpr)
plt.title('ROC Curve')
plt.plot(fpr, tpr, label='AUC = {:.3f}'.format(auc_score))
# it's helpful to add a diagonal to indicate where chance
# scores lie (i.e. just flipping a coin)
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.1])
plt.ylim([-0.1,1.1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend(loc='lower right')
plt.show()
"""
Explanation: solid gains here
middle age (30 < age <=50)
End of explanation
"""
data = df[df['age'] > 50].drop('default',axis=1)
target = df[df['age'] > 50]['default']
X_train, X_test, y_train, y_test = train_test_split(
data.values,
target.values,
test_size=0.25)
clf = XGBClassifier()
clf.fit(X_train, y_train.ravel())
y_preds = clf.predict_proba(X_test)
# take the second column because the classifier outputs scores for
# the 0 class as well
preds = y_preds[:,1]
# fpr means false-positive-rate
# tpr means true-positive-rate
fpr, tpr, _ = metrics.roc_curve(y_test, preds)
auc_score = metrics.auc(fpr, tpr)
plt.title('ROC Curve')
plt.plot(fpr, tpr, label='AUC = {:.3f}'.format(auc_score))
# it's helpful to add a diagonal to indicate where chance
# scores lie (i.e. just flipping a coin)
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.1])
plt.ylim([-0.1,1.1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend(loc='lower right')
plt.show()
"""
Explanation: older people (age > 50)
End of explanation
"""
data = df[df['sex_1'] == 1].drop('default',axis=1)
target = df[df['sex_1'] == 1]['default']
X_train, X_test, y_train, y_test = train_test_split(
data.values,
target.values,
test_size=0.25)
clf = XGBClassifier()
clf.fit(X_train, y_train.ravel())
y_preds = clf.predict_proba(X_test)
# take the second column because the classifier outputs scores for
# the 0 class as well
preds = y_preds[:,1]
# fpr means false-positive-rate
# tpr means true-positive-rate
fpr, tpr, _ = metrics.roc_curve(y_test, preds)
auc_score = metrics.auc(fpr, tpr)
plt.title('ROC Curve')
plt.plot(fpr, tpr, label='AUC = {:.3f}'.format(auc_score))
# it's helpful to add a diagonal to indicate where chance
# scores lie (i.e. just flipping a coin)
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.1])
plt.ylim([-0.1,1.1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend(loc='lower right')
plt.show()
"""
Explanation: men only
End of explanation
"""
data = df[df['sex_2'] == 1].drop('default',axis=1)
target = df[df['sex_2'] == 1]['default']
X_train, X_test, y_train, y_test = train_test_split(
data.values,
target.values,
test_size=0.25)
clf = XGBClassifier()
clf.fit(X_train, y_train.ravel())
y_preds = clf.predict_proba(X_test)
# take the second column because the classifier outputs scores for
# the 0 class as well
preds = y_preds[:,1]
# fpr means false-positive-rate
# tpr means true-positive-rate
fpr, tpr, _ = metrics.roc_curve(y_test, preds)
auc_score = metrics.auc(fpr, tpr)
plt.title('ROC Curve')
plt.plot(fpr, tpr, label='AUC = {:.3f}'.format(auc_score))
# it's helpful to add a diagonal to indicate where chance
# scores lie (i.e. just flipping a coin)
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.1])
plt.ylim([-0.1,1.1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend(loc='lower right')
plt.show()
"""
Explanation: women only
End of explanation
"""
|
lwahedi/CurrentPresentation
|
talks/MDI5/Scraping+Lecture.ipynb
|
mit
|
import pandas as pd
import numpy as np
import pickle
import statsmodels.api as sm
from sklearn import cluster
import matplotlib.pyplot as plt
%matplotlib inline
from bs4 import BeautifulSoup as bs
import requests
import time
# from ggplot import *
"""
Explanation: Collecting and Using Data in Python
Laila A. Wahedi, PhD
Massive Data Institute Postdoctoral Fellow <br>McCourt School of Public Policy<br>
Follow along: Wahedi.us, Current Presentation
Follow Along
Go to https://notebooks.azure.com/Laila/libraries/MDI-workshopFA18_pt2
Clone the directory
<img src='step1.png'>
Follow Along
Sign in with any Microsoft Account (Hotmail, Outlook, Azure, etc.)
Create a folder to put it in, mark as private or public
<img src='step2.png'>
Follow Along
Open a notebook
Open this notebook to have the code to play with
Open a blank notebook to follow along and try on your own.
<img src='step4.png'>
Do you get this error?
HTTP Error 400. The size of the request headers is too long
Clear your cookies then refresh the browser.
Your Environment
Jupyter Notebook Hosted in Azure
Want to install it at home?
Install the Anaconda distribution of Python
https://www.anaconda.com/download/
Install Jupyter Notebooks
http://jupyter.org/install
Your Environment
ctrl/apple+ enter runs a cell
<img src='notebook.png'>
Your Environment
Persistent memory
If you run a cell, results remain as long as the kernel
ORDER MATTERS!
<img src='persist.png'>
Agenda for today:
Collect data from APIs
Scrape data
Merge data into a data frame
Statsmodels package
SKLearn package
Packages to Import For Today
Should all be included with your Anaconda Python Distribution
Raise your hand for help if you have trouble
Our plots will use matplotlib, similar to plotting in matlab
%matplotlib inline tells Jupyter Notebooks to display your plots
from allows you to import part of a package
End of explanation
"""
base_url = "http://www.mywebsite.com/data/api"
attributes = ["key1=value1",
"key2=value2",
"API_KEY=39DC3727-09BD-XXXX-XXXX-XXXXXXXXXXXX"
]
post_url = '&'.join(attributes)
print(base_url+post_url)
"""
Explanation: Other Useful Packages (not used today)
ggplot: the familiar ggplot2 you know and love from R
seaborn: Makes your plots prettier
plotly: makes interactive visualizations, similar to shiny
gensim: package for doing natural language processing
scipy: used with numpy to do math. Generates random numbers from distributions, does matrix operations, etc.
Scraping
How the Internet Works
Code is stored on servers
Web addresses point to the location of that code
Going to an address or clicking a button sends requests to the server for data,
The server returns the requested content
Your web browser interprets the code to render the web page
<img src='Internet.png'>
Scraping:
Collect the website code by emulating the process:
Can haz cheezburger?
<img src='burger.png'>
Extract the useful information from the scraped code:
Where's the beef?
<img src='beef.png'>
API
Application Programming Interface
The set of rules that govern communication between two pieces of code
Code requires clear expected inputs and outputs
APIs define required inputs to get the outputs in a format you can expect.
Easier than scraping a website because it gives you exactly what you ask for
<img src="beef_direct.png">
API Keys
APIs often require identification
Go to https://docs.airnowapi.org
Register and get a key
Log in to the site
Select web services
DO NOT SHARE YOUR KEY
It will get stolen and used for malicious activity
Do Not Share Your Key
Make your notebook private:
<img src='settings.png'>
Do Not Share Your Key
Make your notebook private:
<img src='private.png'>
Requests to a Server
<div style="float: left;width:50%">
<h3> GET</h3>
<ul><li>Requests data from the server</li>
<li> Encoded into the URL</li></ul>
<img src='get.png'>
</div>
<div style="float: left;width:50%">
<h3>POST</h3>
<ul><li>Submits data to be processed by the server</li>
<li>For example, filter the data</li>
<li>Can attach additional data not directly in the url</li></ul>
<img src='post.png'>
</div>
Using an API
Change the parameters. What changes?
<img src='api.png'>
Requests encoded in the URL
Parsing a URL
<font color="blue">http://www.airnowapi.org/aq/observation/zipCode/historical/</font><font color="red">?</font><br><font color="green">format</font>=<font color="purple">application/json</font><font color="orange">&<br></font><font color="green">zipCode</font>=<font color="purple">20007</font><font color="orange">&</font><br><font color="green">date</font>=<font color="purple">2017-09-05T00-0000</font><font color="orange">&</font><br><font color="green">distance</font>=<font color="purple">25</font><font color="orange">&</font><br><font color="green">API_KEY</font>=<font color="purple">D9AA91E7-070D-4221-867CC-XXXXXXXXXXX</font>
The base URL or endpoint is:<br>
<font color="blue">http://www.airnowapi.org/aq/observation/zipCode/historical/</font>
<font color="red">?</font> tells us that this is a query.
<font color="orange">&</font> separates name, value pairs within the request.
Five <font color="green"><strong>name</strong></font>, <font color="purple"><strong>value</strong></font> pairs POSTED
format, zipCode, date, distance, API_KEY
Request from Python
prepare the url
List of attributes
Join them with "&" to form a string
End of explanation
"""
base_url = "http://www.airnowapi.org/aq/observation/zipCode/historical/"
attributes = ["format=application/json",
"zipCode=20007",
"date=2017-09-05T00-0000",
"distance=25",
"API_KEY=39DC3727-09BD-48C4-BBD8-XXXXXXXXXXXX"
]
post_url = '&'.join(attributes)
print(base_url+post_url)
"""
Explanation: Prepare another URL on your own
End of explanation
"""
ingredients=requests.get(base_url, post_url)
ingredients = ingredients.json()
print(ingredients[0])
"""
Explanation: Requests from Python
Use requests package
Requested json format
Returns list of dictionaries
Look at the returned keys
End of explanation
"""
for item in ingredients:
AQIType = item['ParameterName']
City=item['ReportingArea']
AQIValue=item['AQI']
print("For Location ", City, " the AQI for ", AQIType, "is ", AQIValue)
"""
Explanation: View Returned Data:
Each list gives a different parameter for zip code and date we searched
End of explanation
"""
time.sleep(1)
"""
Explanation: Ethics
Check the website's terms of use
Don't hit too hard:
Insert pauses in your code to act more like a human
Scraping can look like an attack
Server will block you without pauses
APIs often have rate limits
Use the time package to pause for a second between hits
End of explanation
"""
asthma_data = pd.read_csv('asthma-emergency-department-visit-rates-by-zip-code.csv')
asthma_data.head(2)
"""
Explanation: Collect Our Data
Python helps us automate repetitive tasks. Don't download each datapoint you want separately
Get a list of zip codes we want
take a subset to demo, so it doesn't take too long and so we don't all hit too hard from the same ip
California zip codes from asthma emergency room visits
Review from last week: Asthma Data
Load asthma data from csv
Display a few rows
Fix the zip codes
Pivot to have one row per zip code (repeated rows for children, adults, all)
Load from csv, display
End of explanation
"""
asthma_data[['zip','coordinates']] = asthma_data.loc[:,'ZIP code'].str.split(
pat='\n',expand=True)
asthma_data.drop('ZIP code', axis=1,inplace=True)
asthma_data.head(2)
"""
Explanation: Fix the zip codes
Fix zip codes
End of explanation
"""
asthma_unstacked = asthma_data.pivot_table(index = ['Year',
'zip',
'County',
'coordinates',
'County Fips code'],
columns = 'Age Group',
values = 'Number of Visits')
asthma_unstacked.reset_index(drop=False,inplace=True)
asthma_unstacked.head(2)
"""
Explanation: Pivot the data so age group are columns
End of explanation
"""
base_url = "http://www.airnowapi.org/aq/observation/zipCode/historical/"
zips = asthma_unstacked.zip.unique()
zips = zips[:450]
date ="date=2015-09-01T00-0000"
api_key = "API_KEY=39DC3727-09BD-48C4-BBD8-XXXXXXXXXXXX"
return_format = "format=application/json"
zip_str = "zipCode="
post_url = "&".join([date,api_key,return_format,zip_str])
data_dict = {}
for zipcode in zips:
time.sleep(1)
zip_post = post_url + str(zipcode)
ingredients = requests.get(base_url, zip_post)
ingredients = ingredients.json()
zip_data = {}
for data_point in ingredients:
AQIType = data_point['ParameterName']
AQIVal = data_point['AQI']
zip_data[AQIType] = AQIVal
data_dict[zipcode]= zip_data
"""
Explanation: Now we have some zip codes!
Automate Data Collection
Request the data for those zipcodes on a day in 2015 (you pick, fire season July-Oct)
Be sure to sleep between requests
Store that data as you go into a dictionary
Key: zip code
Value: Dictionary of the air quality parameters and their value
End of explanation
"""
ingredients = requests.get("https://en.wikipedia.org/wiki/Data_science")
soup = bs(ingredients.text)
print(soup.body.p)
"""
Explanation: Scraping: Parsing HTML
What about when you don't have an API that returns dictionaries?
HTML is a markup language that displays data (text, images, etc)
Puts content within nested tags to tell your browser how to display it
<Section_tag>
  <tag> Content </tag>
  <tag> Content </tag>
< /Section_tag>
<Section_tag>
  <tag> <font color="red">Beef</font> </tag>
< /Section_tag>
Find the tags that identify the content you want:
First paragraph of wikipedia article:
https://en.wikipedia.org/wiki/Data_science
Inspect the webpage:
Windows: ctrl+shift+i
Mac: ctrl+alt+i
<img src="wikipedia_scrape.png">
Parsing HTML with Beautiful Soup
Beautiful Soup takes the raw html and parses the tags so you can search through them.
text attribute returns raw html text from requests
Ignore the warning, default parser is fine
We know it's the first paragraph tag in the body tag, so:
Can find first tag of a type using <strong>.</strong>
What went wrong?
End of explanation
"""
parser_div = soup.find("div", class_="mw-parser-output")
wiki_content = parser_div.find_all('p')
print(wiki_content[1])
print('*****************************************')
print(wiki_content[1].text)
"""
Explanation: Use Find Feature to Narrow Your Search
Find the unique div we identified
Remember the underscore: "class_"
Find the p tag within the resulting html
Use an index to return just the first paragraph tag
Use the text attribute to ignore all the formatting and link tags
End of explanation
"""
parser_div = soup.find("div", id="toc")
wiki_content = parser_div.find_all('ul')
for item in wiki_content:
print(item.text)
"""
Explanation: 1. List the contents
2. Get all links in the history section
Hint: chrome's inspect replaces "&" with "&amp;" in links
List Contents
End of explanation
"""
wiki_content = soup.find_all('a',href=True)
in_hist = False
links = []
for l in wiki_content:
link = l['href']
    if link == '/w/index.php?title=Data_science&action=edit&section=2':
in_hist = False
if in_hist:
links.append(link)
if link =="/w/index.php?title=Data_science&action=edit§ion=1":
in_hist = True
print(links)
"""
Explanation: Get All Links in the History Section
Hint: chrome's inspect replaces "&" with "&amp;" in links
End of explanation
"""
topics = ['Data_scraping','Machine_learning','Statistics','Linear_algebra',
'Cluster_analysis','Scientific_modelling','Analysis','Linear_regression']
base_url = 'https://en.wikipedia.org/wiki/{}'
paragraphs = []
for topic in topics:
url = base_url.format(topic)
    ingredients = requests.get(url)
soup = bs(ingredients.text)
parser_div = soup.find("div", class_="mw-parser-output")
wiki_content = parser_div.find_all('p')
for p in range(10):
if len(wiki_content[p].text)>10:
paragraphs.append(wiki_content[p].text)
break
time.sleep(1)
print(dict(zip(topics,paragraphs)))
"""
Explanation: Use a for loop and scrape the first paragraph from a bunch of wikipedia articles
Add your own subjects
End of explanation
"""
pickle.dump(data_dict,open('AQI_data_raw.p','wb'))
"""
Explanation: Back To Our Data
If it's still running, go ahead and stop it by pushing the square at the top of the notebook:
<img src="interrupt.png">
Save what you collected, don't want to hit them twice!
End of explanation
"""
collected = list(data_dict.keys())
asthma_2015_sub = asthma_unstacked.loc[(asthma_unstacked.zip.isin(collected))&
(asthma_unstacked.Year == 2015),:]
"""
Explanation: Subset down to the data we have:
use the isin() method to include only those zip codes we've already collected
End of explanation
"""
aqi_data = pd.DataFrame.from_dict(data_dict, orient='index')
aqi_data.reset_index(drop=False,inplace=True)
aqi_data.rename(columns={'index':'zip'},inplace=True)
aqi_data.head()
"""
Explanation: Create a dataframe from the new AQI data
End of explanation
"""
asthma_aqi = asthma_2015_sub.merge(aqi_data,how='outer',on='zip')
asthma_aqi.rename(columns = {'Adults (18+)':'Adults',
'All Ages':'Incidents',
'Children (0-17)':'Children'},inplace=True)
asthma_aqi.head(2)
"""
Explanation: Combine The Data
https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.merge.html
* Types of merges:
* Left: Use only rows from the dataframe you are merging into
* Right: use only rows from the dataframe you are inserting, (the one in the parentheses)
* Inner: Use only rows that match between both
* Outer: Use all rows, even if they only appear in one of the dataframes
* On: The variables you want to compare
* Specify right_on and left_on if they have different names
End of explanation
"""
asthma_aqi.Incidents.plot.hist(bins=20)
"""
Explanation: Look At The Data: Histogram
20 bins
End of explanation
"""
asthma_aqi.loc[:,['Incidents','OZONE']].plot.density()
"""
Explanation: Look At The Data: Smoothed Distribution
End of explanation
"""
asthma_aqi.loc[:,['PM2.5','PM10']].plot.hist()
"""
Explanation: Look at particulates
There is a lot of missingness in 2015
Try other variables, such as comparing children and adults
End of explanation
"""
asthma_aqi.plot.scatter('OZONE','PM2.5')
"""
Explanation: Scatter Plot
Try some other combinations
Our data look clustered, but we'll ignore that for now
End of explanation
"""
y =asthma_aqi.loc[:,'Incidents']
x =asthma_aqi.loc[:,['OZONE','PM2.5']]
x['c'] = 1
ols_model1 = sm.OLS(y,x,missing='drop')
results = ols_model1.fit()
print(results.summary())
pickle.dump([results,ols_model1],open('ols_model_results.p','wb'))
"""
Explanation: Run a regression:
Note: statsmodels supports equation format like R <br>
http://www.statsmodels.org/dev/example_formulas.html
End of explanation
"""
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(results, fig=fig)
"""
Explanation: Evaluate the model with some regression plots
Learn more here: <br>
https://www.statsmodels.org/dev/examples/notebooks/generated/regression_plots.html
Partial Regressions to see effect of each variable
End of explanation
"""
ingredients = requests.get('https://www.california-demographics.com/zip_codes_by_population')
soup = bs(ingredients.text)
table = soup.find("table")
population = pd.read_html(str(table),flavor='html5lib')[0]
population.rename(columns=population.iloc[0],inplace=True)
population.drop(index=0,inplace=True)
population.head(2)
"""
Explanation: Population confound
Fires spread in less populated areas
Fewer people to have asthma attacks in less populated areas
Collect population data
Use pandas to read the html table directly
End of explanation
"""
population[['zip','zip2']]=population.loc[:,'Zip Code'].str.split(
pat =' and ',
expand=True)
population.Population = population.Population.astype(float)
# only the doubled-up rows (those with a second zip code) get their population split in half
has_second_zip = population.zip2.notnull()
population.loc[has_second_zip,'Population']=population.loc[has_second_zip,'Population']/2
temp_pop = population.loc[has_second_zip,['Population','zip2']].copy()
temp_pop.rename(columns={'zip2':'zip'},inplace=True)
population = pd.concat([population.loc[:,['Population','zip']],
temp_pop],axis=0)
population.head(2)
"""
Explanation: Fix zipcode column
Split doubled up zip codes into separate lines
End of explanation
"""
asthma_aqi = asthma_aqi.merge(population,how='left',on='zip')
y =asthma_aqi.loc[:,'Adults']
x =asthma_aqi.loc[:,['OZONE','Population']]
x['c'] = 1
glm_model = sm.GLM(y,x,missing='drop',family=sm.families.Poisson())
ols_model2 = sm.OLS(y,x,missing='drop')
glm_results = glm_model.fit()
results = ols_model2.fit()
print(glm_results.summary())
pickle.dump([glm_results,glm_model],open('glm_model_pop_results.p','wb'))
"""
Explanation: Re-run regression
With population
Without PM2.5
End of explanation
"""
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(results, fig=fig)
"""
Explanation: Partial Regressions
End of explanation
"""
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.influence_plot(results, ax=ax, criterion="cooks")
"""
Explanation: Influence plot for outsized-effect of any observations
End of explanation
"""
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_regress_exog(results, "OZONE", fig=fig)
"""
Explanation: Diagnostic plots
End of explanation
"""
model_df = asthma_aqi.loc[:,['OZONE','PM2.5','Incidents',]]
model_df.dropna(axis=0,inplace=True)
model_df = (model_df - model_df.mean()) / (model_df.max() - model_df.min())
"""
Explanation: SciKitLearn
Package for machine learning models
Structured like statsmodels:
Create a model
Train on data
Output results object
Very good documentation:
http://scikit-learn.org
Clustering
Learn more about clustering here: <br>
http://scikit-learn.org/stable/modules/clustering.html
Many algorithms, each good in different contexts
<img src="http://scikit-learn.org/stable/_images/sphx_glr_plot_cluster_comparison_0011.png">
<h1>K-Means</h1>
<img src="rand.png">
<br>
<h3> Randomly pick k initial centroids </h3>
<h1>K-Means</h1>
<img src="agg1.png"> <br>
<h3> Assign all points to nearest centroids</h3>
<h1>K-Means</h1>
<img src="new_cent.png"><br>
<h3> Calculate new centroids for each set</h3>
<h1>K-Means</h1>
<img src="agg2.png"><br>
<h3> Assign all points to nearest centroid</h3>
<h1>K-Means</h1>
<img src="cent2.png"><br>
<h3>Calculate new centroids, assign points, continue until no change</h3>
Prepare data
Statsmodels drops null values by default, but scikit-learn does not
Drop rows with missing values first
Standardize the data so they're all on the same scale
End of explanation
"""
asthma_air_clusters=cluster.KMeans(n_clusters = 3)
asthma_air_clusters.fit(model_df)
model_df['clusters3']=asthma_air_clusters.labels_
"""
Explanation: Create and train the model
Initialize a model with three clusters
fit the model
extract the labels
End of explanation
"""
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
labels = asthma_air_clusters.labels_
ax.scatter(model_df.loc[:, 'PM2.5'], model_df.loc[:, 'OZONE'], model_df.loc[:, 'Incidents'],
           c=labels.astype(float), edgecolor='k')
ax.set_xlabel('Particulates')
ax.set_ylabel('Ozone')
ax.set_zlabel('Incidents')
"""
Explanation: Look At Clusters
Our data are very closely clustered, OLS was probably not appropriate.
End of explanation
"""
|
miklevin/pipulate
|
examples/LESSON11_Formatting.ipynb
|
mit
|
'{}'.format(1) # String formatting is actually the best way to FORMAT NUMBERS.
'{0}'.format(1) # I'm putting the optional index placeholder in so that it's clearer as we build up the API.
'{0:}'.format(1) # After the placeholder, you can put an optional colon for a format_spec
'{:}'.format(1) # Because the number before colon is an optional placeholder, this works too.
'{:,}'.format(1000) # There's a whole mini-language here. Here's inserting commas.
"""
Explanation: This is the grammar for the replacement field (the part between the {curly brackets}).
replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"
field_name ::= arg_name ("." attribute_name | "[" element_index "]")*
arg_name ::= [identifier | digit+]
attribute_name ::= identifier
element_index ::= digit+ | index_string
index_string ::= <any source character except "]"> +
conversion ::= "r" | "s" | "a"
format_spec ::= <described in the next section>
End of explanation
"""
'{:}'.format(1/3) # Here's the unconstrained default decimal precision of floats.
'{:.1f}'.format(1/3) # This is floating point
'{:.1%}'.format(1/3) # Simply replacing f with % multiplies by 100 and adds a percent sign.
'{:.0%}'.format(1/3) # The number before the % specifies float precision just like with f
'{:8.0f}'.format(10) # A number IMMEDIATELY AFTER the colon specifies a minimum field width (padded if the value is narrower).
'{:*^8.0f}'.format(10) # Fill with * and center-align
'{:*<8.0f}'.format(10) # Fill with * and left-align
'{:0=8.0f}'.format(100) # Pad number with 0
import datetime
now = datetime.datetime.now() # You can get extreme control over date/times.
now
'{:%Y-%m-%d %H:%M:%S}'.format(now) # datetime data-types have yet more format_spec rules to learn.
print('Day: {:%A}'.format(now))
print('Month: {:%B}'.format(now))
print('Day of year: {:%j}'.format(now)) # Lots more are possible; documented at https://docs.python.org/3/library/datetime.html
"""
Explanation: These are the options for the format_spec (the mini-language following the colon)
format_spec ::= [[fill]align][sign][#][0][width][grouping_option][.precision][type]
fill ::= <any character>
align ::= "<" | ">" | "=" | "^"
sign ::= "+" | "-" | " "
width ::= digit+
grouping_option ::= "_" | ","
precision ::= digit+
type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"
End of explanation
"""
|
streety/biof509
|
Wk06-classification-and-clustering.ipynb
|
mit
|
digits = datasets.load_digits()
# X - how digits are handwritten
X = digits['data']
# y - what these digits actually are
y = digits['target']
print("Digits are classes:", set(y))
print("For instance this 64 pixel image is assigned class label", y[3])
plt.imshow(X[3].reshape((8,8)), cmap=plt.cm.gray)
plt.show()
"""
Explanation: Classification
Regression and classification methods are related. For instance, logistic regression is a classification method. However, in the case of classification, the predicted dependent variable is categorical, denoting a class.
In classification problems we distinguish:
* binary classification (predicting one of the two classes)
* multiclass or multivariate classification (predicting one of more than two classes)
* multilabel classification (predicting several class labels out of more than two classes)
Clustering
In clustering we attempt to group observations in such a way that observations assigned to the same cluster are more similar to each other than to observations in other clusters. Usually, the number of categories (clusters) is also unknown.
Digits dataset is an example of a classification problem
We can consider classification of digits as a multiclass classification or as 10 binary One-vs-All classification problems
In case of multilabel classification each image can be assigned more than one class, e.g. for a handwritten number (12) identify labels 1 and 2.
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Binarize the output
y_bin = label_binarize(y, classes=[0,1,2,3,4,5,6,7,8,9])
n_classes = y_bin.shape[1]
# now we have one binary column for each class instead of one column with many class names
print(y.shape)
print(y_bin.shape)
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y_bin, test_size=.5)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = {}
tpr = {}
roc_auc = {}
precision = {}
recall = {}
pr_auc = {}
# we calculate 10 curves, one for each class
for i in range(n_classes):
# ROC:
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Precision-recall:
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i], y_score[:, i])
pr_auc[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Plot only one ROC curve for selected class
plt.figure()
lw = 2
digit = 8
plt.plot(fpr[digit], tpr[digit], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[digit])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for digit ' + str(digit))
plt.legend(loc="lower right")
plt.show()
# Plot only one Precision-Recall curve for selected class
plt.figure()
lw = 2
digit = 8
plt.plot(recall[digit], precision[digit], color='green',
lw=lw, label='PR curve (area = %0.2f)' % pr_auc[digit])
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall for digit ' + str(digit))
plt.legend(loc="lower right")
plt.show()
# Classification Report
from sklearn.metrics import classification_report
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True))
classifier.fit(X_train, y_train)
y_predicted = classifier.predict(X_test)
print(y_test[0:10])
print(y_predicted[0:10])
print(classification_report(y_test, y_predicted))
"""
Explanation: An example of a classifier using a neural network is Google Quick Draw.
Evaluation of binary classifiers
Comparing predicted class labels to actual class labels
Consider two classes 0 and 1. For a given test dataset we obtain a vector of predicted class labels and compare it to the vector of actual class labels.
For instance,
Actual: [0, 1, 1, 0]
Predicted: [0, 0, 1, 1]
For class 0: [TP, FP, TN, FN]
For class 1: [TN, FN, TP, FP]
TP - True Positive
TN - True Negative
FP - False Positive (Type I error)
FN - False Negative (Type II error)
(see also Statistical Tests of Significance Type I and Type II Errors)
Although classes 0 and 1 look interchangeable, the interpretation of the results very much depends on the meaning of each class for your domain-specific problem. For instance, consider a hypothetical test where class 1 means disease and class 0 means healthy and interpret TP, TN, FP, FN.
Confusion matrix
Confusion matrix describes a binary classifier.
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
<table>
<tbody><tr>
<td colspan="2" rowspan="2"></td>
<td colspan="2">Predicted</td>
</tr>
<tr>
<td>Negative</td>
<td>Positive</td>
</tr>
<tr>
<td rowspan="2">Actual</td>
<td>Negative</td>
<td><b>TN</b></td>
<td><b>FP</b></td>
</tr>
<tr>
<td>Positive</td>
<td><b>FN</b></td>
<td><b>TP</b></td>
</tr>
</tbody>
</table>
considering Class 0 = Negative and Class 1 = Positive in our trivial case it will become:
<table>
<tbody><tr>
<td colspan="2" rowspan="2"></td>
<td colspan="2">Predicted</td>
</tr>
<tr>
<td>Class 0</td>
<td>Class 1</td>
</tr>
<tr>
<td rowspan="2">Actual</td>
<td>Class 0</td>
<td><b>1</b></td>
<td><b>1</b></td>
</tr>
<tr>
<td>Class 1</td>
<td><b>1</b></td>
<td><b>1</b></td>
</tr>
</tbody>
</table>
From confusion matrix we can calculate a number of useful metrics:
Model evaluation in Scikit Learn
Classification metrics in Scikit Learn
Imbalanced classification package
Accuracy
$$Accuracy = \frac{TP+TN}{TP+FP+FN+TN}$$
How often is the classifier correct?
Accuracy score
Accuracy for binary classifiers is the [Jaccard similarity](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.jaccard_similarity_score.html#sklearn.metrics.jaccard_similarity_score)
Error rate (Misclassification rate)
$$ Error = 1 - Accuracy = \frac{FP+FN}{TP+FP+FN+TN} $$
How often is the classifier wrong?
True positive rate, also Sensitivity or Recall
Out of all actual positive cases, how many do we predict as positive?
$$ Sensitivity = \frac{TP}{TP + FN} $$
False positive rate
Out of all actual negative cases, how many are predicted as positive?
$$ FPR = \frac{FP}{TN + FP} $$
Specificity
Out of all actual negative cases, how many are predicted as negative?
$$ Specificity = 1-FPR = \frac{TN}{TN + FP} $$
Precision (Positive predictive value)
Out of all predicted positive, how many are actually positive?
$$Precision = \frac{TP}{TP + FP}$$
Prevalence
Fraction of actually positive cases in the dataset? Shows if there is any imbalance between positive and negative cases.
$$Prevalence = \frac{TP + FN}{TP+FP+FN+TN}$$
Positive Predictive Value
Positive Predictive Value PPV is similar to precision but takes into account imbalance of the dataset:
$$PPV = \frac{Sensitivity * Prevalence}{Sensitivity * Prevalence + (1 - Specificity) * (1 - Prevalence)}$$
False Discovery Rate
Sometimes False Discovery Rate -- a complement of Positive Predictive Value is reported:
$$ FDR = 1 - PPV $$
Prediction baseline
Null Error Rate is a baseline metric that shows how often a classifier would be wrong if it always predicted the class with highest prevalence (be it Positive or Negative).
For instance, if the positive class is prevalent (so the null classifier always predicts positive, turning every actual negative into a false positive):
$$ Null = \frac{FP}{FP + TP} $$
Or, if the negative class is in the majority (always predict negative, so every actual positive becomes a false negative):
$$ Null = \frac{FN}{FN + TN} $$
Cohen's kappa statistic calculates agreement between annotators see more
In principle it can be used to compare observed accuracy (of a given classifier) to expected accuracy (random chance classifier). see example with explanations
F score ($F_1$ score, F-measure)
F score is a combination of precision and recall:
$$F_1 = \frac{2TP}{2TP + FP + FN}$$
Receiver operating characteristic curve (ROC curve)
The ROC curve is a commonly used graph that summarizes the performance of a classifier over all possible thresholds. It is generated by plotting the True Positive Rate (y-axis) against the False Positive Rate (x-axis) as you vary the threshold for assigning observations to a given class. (More details about ROC Curves.)
ROC curve illustrates the performance of a binary classifier system as its discrimination threshold is varied by plotting True Positive rate vs False Positive rate. It can also be referred to as a Sensitivity vs (1-Specificity) plot.
In order to make the plot you need to obtain a list of scores for each classified data point. They can be typically obtained with .predict_proba() or in some cases with .decision_function().
https://en.wikipedia.org/wiki/Receiver_operating_characteristic
http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
Area under ROC curve AUC is a quantitative characteristic of a binary classifier.
In a cross-validation setting the mean and variance of ROC AUC are useful measures of classifier robustness (example)
Precision-Recall curve
In some cases Precision and Recall plots are used for characterizing classifier performance as its discrimination threshold is varied.
http://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
As in the case of ROC, the area under the Precision-Recall curve can be calculated.
End of explanation
"""
# An intuitive 1-D classification example
# The task is to classify pixels in an image as object and background classes
# We calculate a histogram of pixel intensities as a 1-D parameter
# and introduce a threshold separating object from background
import matplotlib
import matplotlib.pyplot as plt
from skimage.data import camera
from skimage.filters import threshold_otsu
matplotlib.rcParams['font.size'] = 9
image = camera()
thresh = threshold_otsu(image)
binary = image > thresh
#fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 2.5))
fig = plt.figure(figsize=(8, 2.5))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2)
ax3 = plt.subplot(1, 3, 3, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.hist(image.ravel(), bins=256)
ax2.set_title('Histogram')
ax2.axvline(thresh, color='r')
ax3.imshow(binary, cmap=plt.cm.gray)
ax3.set_title('Thresholded')
ax3.axis('off')
plt.show()
"""
Explanation: Unsupervised classification methods
Clustering
In clustering we attempt to group observations in such a way that observations assigned to the same cluster are more similar to each other than to observations in other clusters.
Although labels may be known, clustering is usually performed on unlabeled data as a step in exploratory data analysis.
The best method to use will vary depending on the particular problem.
Evaluation of clustering
How to measure success of clustering without true class labels?
How to find the optimal number of clusters?
Which clustering method to choose with what parameters?
Model evaluation
Several approaches have been developed for evaluating clustering models but are generally limited in requiring the true clusters to be known. In the general use case for clustering this is not known with the goal being exploratory.
Ultimately, a model is just a tool to better understand the structure of our data. If we are able to gain insight from using a clustering algorithm then it has served its purpose.
The metrics available are Adjusted Rand Index, Mutual Information based scores, Homogeneity, completeness, v-measure, and silhouette coefficient. Of these, only the silhouette coefficient does not require the true clusters to be known.
Although the silhouette coefficient can be useful it takes a very similar approach to k-means, favoring convex clusters over more complex, equally valid, clusters.
How to determine number of clusters
One important use for the model evaluation algorithms is in choosing the number of clusters. The clustering algorithms take as parameters either the number of clusters to partition a dataset into or other scaling factors that ultimately determine the number of clusters. It is left to the user to determine the correct value for these parameters.
As the number of clusters increases the fit to the data will always improve until each point is in a cluster by itself. As such, classical optimization algorithms searching for a minimum or maximum score will not work. Often, the goal is to find an inflection point.
If the cluster parameter is too low adding an additional cluster will have a large impact on the evaluation score. The gradient will be high at numbers of clusters less than the true value. If the cluster parameter is too high adding an additional cluster will have a small impact on the evaluation score. The gradient will be low at numbers of clusters higher than the true value.
At the correct number of clusters the gradient should suddenly change, this is an inflection point.
End of explanation
"""
from sklearn import cluster, datasets
dataset, true_labels = datasets.make_blobs(n_samples=200, n_features=2, random_state=0,
centers=3, cluster_std=1.5)
# fig, ax = plt.subplots(1,1)
# ax.scatter(dataset[:,0], dataset[:,1], c=true_labels)
# plt.show()
# Clustering algorithm can be used as a class
means = cluster.KMeans(n_clusters=3)
prediction = means.fit_predict(dataset)
# print(prediction)
fig, ax = plt.subplots(1,1)
ax.scatter(dataset[:,0], dataset[:,1], c=prediction)
plt.show()
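# Sketch of the "inflection point" idea from the evaluation notes above: plot the
# within-cluster sum of squares (KMeans inertia_) against the number of clusters
# and look for the elbow, here on the blob dataset just created.
inertias = []
ks = range(1, 10)
for k in ks:
    km = cluster.KMeans(n_clusters=k).fit(dataset)
    inertias.append(km.inertia_)
plt.plot(list(ks), inertias, 'o-')
plt.xlabel('Number of clusters')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.show()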
"""
Explanation: Different clustering algorithms
Cluster comparison
The following algorithms are provided by scikit-learn
K-means
Affinity propagation
Mean Shift
Spectral clustering
Ward
Agglomerative Clustering
DBSCAN
Birch
K-means clustering divides samples between clusters by attempting to minimize the within-cluster sum of squares. It is an iterative algorithm repeatedly updating the position of the centroids (cluster centers), re-assigning samples to the best cluster and repeating until an optimal solution is reached. The clusters will depend on the starting position of the centroids so k-means is often run multiple times with random initialization and then the best solution chosen.
Affinity Propagation operates by passing messages between the samples updating a record of the exemplar samples. These are samples that best represent other samples. The algorithm functions on an affinity matrix that can be either user supplied or computed by the algorithm. Two matrices are maintained. One matrix records how well each sample represents other samples in the dataset. When the algorithm finishes the highest scoring samples are chosen to represent the clusters. The second matrix records which other samples best represent each sample so that the entire dataset can be assigned to a cluster when the algorithm terminates.
Mean Shift iteratively updates candidate centroids to represent the clusters. The algorithm attempts to find areas of higher density.
Spectral clustering operates on an affinity matrix that can be user supplied or computed by the model. The algorithm functions by minimizing the value of the links cut in a graph created from the affinity matrix. By focusing on the relationships between samples this algorithm performs well for non-convex clusters.
Agglomerative clustering starts all the samples in their own cluster and then progressively joins clusters together minimizing some performance measure. In addition to minimizing the variance as seen with Ward other options are, 1) minimizing the average distance between samples in each cluster, and 2) minimizing the maximum distance between observations in each cluster.
Ward is a type of agglomerative clustering using minimization of the within-cluster sum of squares to join clusters together until the specified number of clusters remain.
DBSCAN is another algorithm that attempts to find regions of high density and then expands the clusters from there.
Birch is a tree based clustering algorithm assigning samples to nodes on a tree
End of explanation
"""
from sklearn.metrics import pairwise_distances  # used for the distance examples below

X = np.array([[1, 5],
[2, 4]])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.patch.set_facecolor('white')
ax.scatter(X[...,0], X[...,1], c=("red", "green"), s=120, edgecolors='none')
ax.set_autoscale_on(False)
ax.axis('square')
print(X)
D = pairwise_distances(X, metric = 'euclidean')
print("Euclidean\n", D)
print()
D = pairwise_distances(X, metric = 'manhattan')
print("Manhattan\n", D)
X = np.array([[6, 0.0, 1.0],
[2, 1, 3.0]])
print(X)
print()
D = pairwise_distances(X, metric = 'euclidean')
print("Euclidean\n", D)
print()
D = pairwise_distances(X + 1, metric = 'cosine')
print("Cosine\n", D)
D = pairwise_distances(X, metric = 'manhattan')
print("Manhattan\n", D)
D = pairwise_distances(X + 20, metric = 'correlation')
print("Correlation is invariant\n", D)
"""
Explanation: Distances and similarities between data points
End of explanation
"""
## Binary data
binary = np.array([[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 0]], dtype=np.bool)
print(binary)
D = pairwise_distances(binary, metric = 'jaccard')
print("\nJaccard\n", D)
D = pairwise_distances(binary, metric = 'hamming')
print("\nHamming\n", D)
"""
Explanation: Minkowski distance: $\left(\sum_{i=1}^n |x_i-y_i|^p\right)^{1/p}$
for Euclidean: p=2
for Manhattan: p=1
End of explanation
"""
import seaborn as sns  # used for the heatmaps below

X = np.array([[0, 0, 0, 0], [5, 5, 5, 5], [5, 4, -10, 20]])
# Pairwise distances
D = pairwise_distances(X, metric = 'euclidean')
sns.heatmap(D, square=True)
plt.show()
"""
Explanation: Pairwise distance matrix
End of explanation
"""
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=30, centers=3, n_features=4, random_state=0, cluster_std=1.5)
# print(X)
print("Visualizing data points in the matrix as a heatmap")
sns.heatmap(X, robust=True, square=False, yticklabels=True, xticklabels=True, cbar=True)
plt.show()
print("Visualizing pairwise distances between the data points")
D = pairwise_distances(X, metric='euclidean', n_jobs=-1)
sns.heatmap(D, robust=True, square=True, yticklabels=True, xticklabels=True, cbar=True)
plt.show()
print("Distribution of pairwise distances")
plt.hist(np.hstack(D), 20, facecolor='orange', alpha=0.75)
plt.xlabel('Pairwise distances')
plt.ylabel('Frequency')
plt.grid(True)
plt.show()
# Clustering is often used as an accessory
# Quick hierarchical clustering for reordering of data points according to distances between them
sns.clustermap(X) # requires fastcluster package: !pip install fastcluster
plt.show()
from sklearn.metrics.cluster import v_measure_score, adjusted_rand_score
# Clustering our 4D dataset with KMeans k=3
kmeans = cluster.KMeans(n_clusters=3, random_state=0).fit(X)
print("In case we know a reference assignment of data points into clusters (labeled dataset), we can compare how well our clustering matches it")
print(y)
print(kmeans.labels_)
print("V measure", v_measure_score(y, kmeans.labels_))
print("Adj. Rand score", adjusted_rand_score(y, kmeans.labels_))
# Silhouette is used for assessing the performance of an unlabeled dataset
from sklearn.metrics.cluster import silhouette_score
import pprint
def calc_silhouette(data, n):
"""Runs Kmeans clustering and returns average silhouette coefficient"""
kmeans = cluster.KMeans(n_clusters=n).fit(data)
score = silhouette_score(data, kmeans.labels_, metric='l2')
return score
scores = {n: calc_silhouette(X, n) for n in range(2, 11)}
pprint.pprint(scores)
plt.plot(list(scores.keys()), list(scores.values()))
plt.xlabel('Number of clusters')
plt.ylabel('Average silhouette coefficient')
plt.show()
# Showing silhouette coefficient for each sample in each
# cluster is a powerful diagnostic tool
from sklearn.metrics.cluster import silhouette_samples
n_clusters = 4
# Compute the silhouette scores for each sample
kmeans = cluster.KMeans(n_clusters=n_clusters).fit(X)
lbls = kmeans.labels_
values = silhouette_samples(X, lbls)
g, ax = plt.subplots(figsize=(8, 6))
color_scale = np.linspace(0, 1, n_clusters)
y_lower = 1
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to cluster i
v = sorted(values[lbls == i])
cluster_size = len(v)
y_upper = y_lower + cluster_size
# color mapping:
c = plt.cm.Set1(color_scale[i])
ax.fill_betweenx(np.arange(y_lower, y_upper), 0, v, facecolor=c, edgecolor=c, alpha=0.8)
# Label the silhouette plots with their cluster numbers at the middle
ax.text(-0.05, y_lower + 0.5 * cluster_size, str(i))
y_lower = y_upper + 1
ax.set_xlabel("Silhouette coefficient")
ax.set_ylabel("Cluster label")
# Red dashed line shows an average silhouette score across all samples in all clusters
score = silhouette_score(X, lbls, metric='l2')
ax.axvline(x=score, color="red", linestyle="--")
ax.set_yticks([])
plt.show()
"""
Explanation: Clustering in a synthetic dataset (4D)
End of explanation
"""
from pathlib import Path
# imports used below (added here in case they were not loaded in an earlier cell)
import urllib.request
import pandas as pd
import scipy.cluster.hierarchy
import matplotlib.gridspec as gridspec
from sklearn import preprocessing
ICGC_API = 'https://dcc.icgc.org/api/v1/download?fn=/release_18/Projects/BRCA-US/'
expression_fname = 'protein_expression.BRCA-US.tsv.gz'
if not Path(expression_fname).is_file():
urllib.request.urlretrieve(ICGC_API + 'protein_expression.BRCA-US.tsv.gz', expression_fname);
E = pd.read_csv(expression_fname, delimiter='\t')
E.head(1)
donors = set(E['icgc_donor_id'])
genes = set(E['gene_name'])
print(len(donors))
print(len(genes))
donor2id = {donor: i for i, donor in enumerate(donors)}
id2donor = dict(zip(donor2id.values(), donor2id.keys()))
gene2id = {gene: i for i, gene in enumerate(genes)}
id2gene = dict(zip(gene2id.values(), gene2id.keys()))
# Let us create a donor x gene matrix for expression values
data = np.zeros((len(donors), len(genes)))
for i in range(len(E)):
data[donor2id[E.loc[i, 'icgc_donor_id']], gene2id[E.loc[i, 'gene_name']]] = float(E.loc[i, 'normalized_expression_level'])
# data = preprocessing.Normalizer().fit_transform(data)
# Scale data, let us make all values positive from 0 to 1
data = preprocessing.MinMaxScaler().fit_transform(data)
# print(data)
# Visualizing donors (rows) vs genes (columns) matrix
sns.heatmap(data, robust=True, square=False, yticklabels=False, xticklabels=False, cbar=True)
plt.show()
# Clustering of donors (rows) vs genes (columns) matrix
sns.clustermap(data, xticklabels=False, yticklabels=False)
plt.show()
# Now let's make a pairwise similarity matrix and visualize it as a heatmap
def clean_axis(ax):
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
for sp in ax.spines.values():
sp.set_visible(False)
# Figure is a grid with two parts 1 : 4
fig = plt.figure(figsize=(14, 10))
grid = gridspec.GridSpec(1, 2, wspace=.01, hspace=0., width_ratios=[0.25, 1])
Y = scipy.cluster.hierarchy.linkage(data, method='ward', metric='euclidean')
# also look up:
# method = [ average (UPGMA), complete, single, ward ]
# and metric
# Dendrogram
ax = fig.add_subplot(grid[0,0])
denD = scipy.cluster.hierarchy.dendrogram(Y, orientation='left', link_color_func=lambda k: 'black')
clean_axis(ax)
# Heatmap
ax = fig.add_subplot(grid[0,1])
D = pairwise_distances(data, metric = 'euclidean')
D = D[denD['leaves'], :][:, denD['leaves']]
axi = ax.imshow(D, interpolation='nearest', aspect='equal', origin='lower', cmap='RdBu')
clean_axis(ax)
# Legend for heatmap
cb = fig.colorbar(axi, fraction=0.046, pad=0.04, aspect=10)
cb.set_label('Distance', fontsize=20)
"""
Explanation: Example: Protein expression in Breast Cancer (BRCA)
End of explanation
"""
|
daniel-koehn/Theory-of-seismic-waves-II
|
01_Analytical_solutions/5_Greens_function_acoustic_1-3D.ipynb
|
gpl-3.0
|
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../style/custom.css'
HTML(open(css_file, "r").read())
"""
Explanation: Content under Creative Commons Attribution license CC-BY 4.0, code under BSD 3-Clause License © 2018 parts of this notebook are from (this Jupyter notebook) by Kristina Garina, Ashim Rijal and Heiner Igel (@heinerigel) which is a supplementary material to the book Computational Seismology: A Practical Introduction, additional modifications by D. Koehn, notebook style sheet by L.A. Barba, N.C. Clementi
End of explanation
"""
# Import Libraries
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Define parameters
vp0 = 1. # velocity m/s
r = 2. # distance from source
tmax = 5. # length of seismogram (s)
nt = 3000 # number of time samples
dt = tmax/nt # time increment
ts = 0 # source time
# Acquisition geometry
xs=0 # coordinates of source
ys=0
zs=0
xr=r # coordinates of receiver
yr=0
zr=0
# Define time vector
time = np.arange(0,tmax,dt)
# Calculating Green's function in 1D
G1=np.zeros(nt) # initialization G with zeros
for i in range (nt):
if (((time[i]-ts)-abs(xr-xs)/vp0)>=0):
G1[i]=1./(2*vp0)
else:
G1[i]=0
# Plotting Green's function in 1D
plt.plot(time, G1)
plt.title("Green's function for hom. 1D acoustic medium" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.grid()
plt.show()
"""
Explanation: Computation of Green's functions and seismograms for the acoustic wave equation
In the previous lessons we derived the equations of motion to describe wave propagation in 3D, 2D and 1D elastic and acoustic media. Before solving the underlying partial differential equations numerically using finite-differences (FD), we should estimate some analytical solutions.
This is not only useful to check if the FD codes contain any bugs, but also to get an idea of the accuracy of the numerical solution compared to the analytical.
1D Green's function
Let's start with a simple problem, like the equations of motion for a 1D acoustic medium assuming a constant density model, which we derived in Lesson 3 and 4:
\begin{equation}
\frac{\partial^2 P}{\partial t^2} - V_p^2\frac{\partial^2 P}{\partial x^2} = f\nonumber
\end{equation}
If we introduce the Dirac delta function
$$
\delta(x) = \left\{
\begin{array}{ll}
\infty & x=0 \\
0 & x\neq 0
\end{array}
\right.
$$
with the normalization condition
$$
\int_{-\infty}^{\infty} \delta(x)\; dx = 1
$$
as a source term of the acoustic wave equation:
\begin{equation}
f = \delta(t-t_s) \delta(x-x_s) \nonumber
\end{equation}
we call the solution of the 1D wave equation
\begin{equation}
\frac{\partial^2 G_1}{\partial t^2} - V_p^2 \frac{\partial^2 G_1}{\partial x^2} = \delta(t-t_s) \delta(x-x_s)
\end{equation}
Green's function $\mathbf{G_1(x,t;x_s,t_s)}$. This means that we place a source at $x = x_s$. The source time function describes the time-dependent behaviour of the source. In this case the source time function has an amplitude of $1\; \frac{Pa}{s^2}$ only at time $t = t_s$, otherwise the amplitude is zero. The pressure wavefield is recorded at the receiver position $x$ and time $t$.
In the following derivation of the Green's function, we assume the special case of a source located at $x_s = 0\; m$ and a source time $t_s = 0\; s$, which simplifies eq. (1) to
\begin{equation}
\frac{\partial^2 G_1}{\partial t^2} - V_p^2 \frac{\partial^2 G_1}{\partial x^2} = \delta(t) \delta(x)
\end{equation}
Furthermore, the P-wave velocity distribution in the sub-surface is constant:
\begin{equation}
V_p(x) = V_{p0} = const. \nonumber
\end{equation}
The Green's function can be calculated using different approaches. We try to find a solution in the Fourier domain. First, we apply a temporal Fourier transform
\begin{equation}
\hat{f}(\omega) = \frac{1}{2\pi}\int_{-\infty}^{\infty} f(t) e^{-i\omega t} dt\nonumber
\end{equation}
where $\omega$ denotes the angular frequency, to eq. (2):
\begin{equation}
\frac{1}{2 \pi} \int_{-\infty}^{\infty}\biggl\{\frac{\partial^2 G_1(x,t)}{\partial t^2} - V_{p0}^2\frac{\partial^2 G_1(x,t)}{\partial x^2}\biggr\} e^{-i\omega t} dt = \frac{1}{2 \pi} \int_{-\infty}^{\infty}\delta(t) \delta(x) e^{-i\omega t} dt \nonumber
\end{equation}
Using the properties:
\begin{equation}
\frac{1}{2 \pi} \int_{-\infty}^{\infty}\biggl\{\frac{\partial^2 G_1}{\partial t^2}\biggr\} e^{-i\omega t} dt = -\omega^2 \hat{G}_1(x,\omega) \nonumber
\end{equation}
and
\begin{equation}
\frac{1}{2 \pi} \int_{-\infty}^{\infty}\delta(t) e^{-i\omega t} dt = \frac{1}{2 \pi} e^{-i\omega 0} = \frac{1}{2 \pi}\nonumber
\end{equation}
we can get rid of the 2nd time-derivative on the LHS and the time-dependent $\delta$ function on the RHS:
\begin{equation}
-\omega^2 \hat{G}_1(x,\omega) - V_{p0}^2\frac{\partial^2 \hat{G}_1(x,\omega)}{\partial x^2} = \frac{1}{2 \pi} \delta(x)
\end{equation}
Next, we apply a spatial Fourier transform
\begin{equation}
\hat{f}(k) = \frac{1}{2\pi}\int_{-\infty}^{\infty} f(x) e^{-ikx} dx,\nonumber
\end{equation}
where $k$ denotes the wavenumber, to eq. (3):
\begin{equation}
\frac{1}{2 \pi} \int_{-\infty}^{\infty}\biggl\{-\omega^2\hat{G}_1(x,\omega) - V_{p0}^2\frac{\partial^2 \hat{G}_1(x,\omega)}{\partial x^2}\biggr\} e^{-ikx} dx = \frac{1}{4 \pi^2}\int_{-\infty}^{\infty}\delta(x) e^{-ikx} dx\nonumber
\end{equation}
and using the same properties as for the temporal Fourier transform, we get:
\begin{equation}
-\omega^2 \hat{G}_1(k,\omega) + k^2 V_{p0}^2 \hat{G}_1(k,\omega) = \frac{1}{4 \pi^2}\nonumber
\end{equation}
Solving for $\hat{G}_1(k,\omega)$:
\begin{equation}
\hat{G}_1(k,\omega) = \frac{1}{4 \pi^2} \frac{1}{V_{p0}^2 k^2 - \omega^2} = \frac{1}{4 \pi^2 V_{p0}^2} \frac{1}{k^2 - \frac{\omega^2}{V_{p0}^2}}
\end{equation}
we have derived the Green's function solution for the 1D acoustic wave equation in the frequency-wavenumber domain.
To get the time-domain solution, we first apply the inverse spatial Fourier transform to eq. (4)
\begin{equation}
\hat{G}_1(x,\omega) = \frac{1}{4 \pi^2 V_{p0}^2}\int_{-\infty}^{\infty} \frac{e^{ikx}}{k^2 - \frac{\omega^2}{V_{p0}^2}}dk \notag
\end{equation}
This integral has two poles at $k = \pm \frac{\omega}{V_p}$, so we have to integrate along a contour around the poles. For more details I refer to
S.W. Rienstra & A. Hirschberg (2017): An Introduction to Acoustics
The result is the solution in the frequency-space domain:
\begin{equation}
\hat{G}_1(x,\omega) = \frac{e^{-i\omega|x|/V_{p0}}}{4 \pi i V_{p0} \omega}. \nonumber
\end{equation}
This will become useful to test the accuracy of our frequency domain finite-difference codes, which we will develop later in the lecture. For the transformation to time-domain we have to integrate around the pole at $\omega=0$:
\begin{equation}
\hat{G}_1(x,t) = \frac{1}{4 \pi i V_{p0}}\int_{-\infty}^{\infty}\frac{e^{i\omega(t-|x|/V_{p0})}}{\omega} d\omega. \nonumber
\end{equation}
This finally leads to the Green's function for the 1D homogeneous acoustic problem:
\begin{equation}
G_1(x,t)=\dfrac{1}{2V_{p0}}H\biggl(t-\dfrac{|x|}{V_{p0}}\biggr),\nonumber
\end{equation}
where $H$ denotes the Heaviside function:
$$
H(x) = \left\{
\begin{array}{ll}
0 & x<0 \\
1 & x\geq 0
\end{array}
\right.
$$
More generally, we can replace:
\begin{align}
x &\rightarrow x - x_s,\nonumber\\
t &\rightarrow t - t_s,\nonumber
\end{align}
and get:
\begin{equation}
G_1(x,t)=\dfrac{1}{2V_{p0}}H\biggl((t-t_s)-\dfrac{|x-x_s|}{V_{p0}}\biggr),\nonumber
\end{equation}
So the 1D Green's function is a Heaviside function delayed by the traveltime between source and receiver. Note also that the absolute value of the offset $|x-x_s|$ implies that we have a wave propagating to the left and one propagating to the right. Let's plot the 1D Green's function.
End of explanation
"""
# Calculation of 2D Green's function
G2=np.zeros(nt) # initialization G with zeros
r=np.sqrt((xs-xr)**2+(ys-yr)**2)
for i in range (nt):
if (((time[i]-ts)-abs(r)/vp0)>0):
G2[i]=(1./(2*np.pi*vp0**2))*(1./np.sqrt((time[i]-ts)**2-(r**2/vp0**2)))
else:
G2[i]=0
# Plotting Green's function in 2D
plt.plot(time, G2)
plt.title("Green's function for hom. 2D acoustic medium" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.xlim((0, tmax))
plt.grid()
plt.show()
"""
Explanation: 2D Green's function
Using the same approach as for the 1D medium, we can calculate the 2D Green's function $\mathbf{G_2(x,t;x_s,t_s)}$, which is governed by
\begin{equation}
\frac{\partial^2 G_2}{\partial t^2} - V_{p0}^2 \biggl(\frac{\partial^2 G_2}{\partial x^2} + \frac{\partial^2 G_2}{\partial z^2} \biggl)= \delta(t-t_s) \delta(x-x_s) \delta(z-z_s) \nonumber
\end{equation}
as
\begin{equation}
G_2(x,z,t) = \dfrac{1}{2\pi V_{p0}^2}\dfrac{H\biggl((t-t_s)-\dfrac{|r|}{V_{p0}}\biggr)}{\sqrt{(t-t_s)^2-\dfrac{r^2}{V_{p0}^2}}} \nonumber
\end{equation}
with the source-receiver distance (offset)
$r = \sqrt{(x-x_s)^2+(z-z_s)^2}$
Compared to the 1D Green's function, we have a damped Heaviside function due to the radiation characteristic of the infinite line source, introduced by the 2D approximation. Let's also plot the 2D Green's function.
End of explanation
"""
# Calculation of 3D Green's function
G3=np.zeros(nt) # initialization G with zeros
r=np.sqrt((xs-xr)**2+(ys-yr)**2+(zs-zr)**2) # defining offset
amp=1./(4*np.pi*(vp0**2)*r) # defining amplitudes
t_arr=ts+(r/vp0) # time arrival
i_arr=t_arr/dt
b=int(i_arr)
G3[b]= amp/dt
# Plotting Green's function in 3D
plt.plot(time, G3)
plt.title("Green's function for hom. 3D acoustic medium" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.xlim((0, tmax))
plt.grid()
plt.show()
"""
Explanation: 3D Green's function
Finally, we can calculate the 3D Green's function $\mathbf{G_3(x,t;x_s,t_s)}$, which is governed by
\begin{equation}
\frac{\partial^2 G_3}{\partial t^2} - V_{p0}^2 \biggl(\frac{\partial^2 G_3}{\partial x^2} + \frac{\partial^2 G_3}{\partial y^2} +\frac{\partial^2 G_3}{\partial z^2} \biggl)= \delta(t-t_s) \delta(x-x_s) \delta(y-y_s) \delta(z-z_s) \nonumber
\end{equation}
as
\begin{equation}
G_3(x,y,z,t) = \dfrac{1}{4 \pi V_{p0}^2 r}\delta\biggl((t-t_s)-\frac{r}{V_{p0}}\biggr) \nonumber
\end{equation}
with the source-receiver distance (offset)
$r = \sqrt{(x-x_s)^2+(y-y_s)^2+(z-z_s)^2}$
So the 3D Green's function for the homogeneous acoustic medium is a Delta distribution delayed by the traveltime between source and receiver. For the computation of the 3D Green's function, we have to approximate the $\delta-$function. An example is the boxcar function
$$
\delta_{bc}(x) = \left\{
\begin{array}{ll}
1/dx &|x|\leq dx/2 \\
0 &\text{elsewhere}
\end{array}
\right.
$$
fulfilling the properties of the $\delta$ function as $dx \rightarrow\; 0$. This function is used to properly scale the source term to obtain correct absolute amplitudes.
End of explanation
"""
# Defining source time function
f0 = 1 # Frequency (Hz)
p=1./f0 # period
t0 = p/dt # defining t0
sigma=4./p
# Initialization of source-time function
src=np.zeros(nt)
source=np.zeros(nt)
# Initialization of first derivative of gaussian
for it in range(nt):
t=(it-t0)*dt
src[it]=-2*sigma*t*np.exp(-(sigma*t)**2)
source[0:nt]=src
# Plotting of source time function
plt.plot(time, src)
plt.title('Source time function')
plt.xlabel('Time, s')
plt.ylabel('Amplitude')
plt.grid()
plt.show()
"""
Explanation: Exercise
Derive the Green's function solutions of the 2D elastic SH problem:
\begin{align}
\rho\frac{\partial^2 u_y}{\partial t^2} - \frac{\partial}{\partial x} \mu \frac{\partial u_y}{\partial x} - \frac{\partial}{\partial z} \mu \frac{\partial u_y}{\partial z} &= f_y\nonumber
\end{align}
and 1D elastic SH problem:
\begin{align}
\rho\frac{\partial^2 u_y}{\partial t^2} - \frac{\partial}{\partial x} \mu \frac{\partial u_y}{\partial x} &= f_y.\nonumber
\end{align}
Assume a constant distribution of the shear modulus $\mu(x,z) = \mu_0 = const. \neq 0\; Pa$ and density $\rho(x,z) = \rho_0 = const. \neq 0\; \frac{kg}{m^3}$ in the subsurface.
Hint: To solve this problem, you do not have to apply any Fourier transform or integrate along contours around poles
Computation of seismograms
In field-data applications we cannot excite a source time function like a delta distribution, which would have a perfectly white spectrum (all frequencies are excited at once). As we will see later, some numerical problems also arise when explicitly calculating Green's functions with the time-domain finite-difference approach.
Instead we have to rely on band-limited source signals. Seismograms for an arbitrary source wavelet can be computed from the Green's function. In the following example, the source wavelet is the first derivative of a Gaussian.
End of explanation
"""
# Computation of 1D seismogram
# Convolution of Green's function with the 1st derivative of a Gaussian
# COMPUTE YOUR SEISMOGRAM HERE!
#G1_seis=
# Plotting Green's function in 1D
plt.plot(time, G1)
plt.title("Green's function for hom. 1D acoustic medium" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.grid()
plt.show()
# Plotting convolved Green's function in 1D
# PLOT YOUR SEISMOGRAM HERE!
# plt.plot()
plt.title('After convolution')
plt.xlabel('Time, s')
plt.ylabel('Amplitude')
plt.xlim (0, tmax)
plt.grid()
plt.show()
# Convolution of Green's function with the 1st derivative of a Gaussian
# COMPUTE YOUR SEISMOGRAM HERE!
#G2_seis=
# Plotting Green's function in 2D
plt.plot(time, G2)
plt.title("Green's function in 2D" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.xlim((0, tmax))
plt.grid()
plt.show()
# Plotting convolved Green's function in 1D
# PLOT YOUR SEISMOGRAM HERE!
# plt.plot()
plt.title('After convolution')
plt.xlabel('Time, s')
plt.ylabel('Amplitude')
plt.xlim((0, tmax))
plt.grid()
# Convolution of Green's function with the 1st derivative of a Gaussian
# COMPUTE YOUR SEISMOGRAM HERE!
#G3_seis =
# Plotting Green's function in 3D
plt.plot(time, G3)
plt.title("Green's function in 3D" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.xlim((0, tmax))
plt.grid()
plt.show()
# Plotting convolved Green's function in 1D
# PLOT YOUR SEISMOGRAM HERE!
# plt.plot()
plt.title('After convolution')
plt.xlabel('Time, s')
plt.ylabel('Amplitude')
plt.xlim (0, tmax)
plt.grid()
plt.show()
"""
Explanation: Exercise
Compute seismograms $G_{seis}(x,t)$ for the 1D, 2D and 3D acoustic media, by a convolution of the Green's function $G(x,t;x_s,t_s)$ with the source wavelet $s(t)$:
$$G_{seis}(x,t) = G(x,t;x_s,t_s) * s(t)$$
Plot the resulting seismograms together with the Green's function solutions.
Hints:
* Use the NumPy function np.convolve.
* How could you check if your implemented convolution is correct?
End of explanation
"""
|
GoogleCloudPlatform/analytics-componentized-patterns
|
retail/recommendation-system/bqml-scann/01_train_bqml_mf_pmi.ipynb
|
apache-2.0
|
from google.cloud import bigquery
from datetime import datetime
import matplotlib.pyplot as plt, seaborn as sns
"""
Explanation: Part 1: Learn item embeddings based on song co-occurrence
This notebook is the first of five notebooks that guide you through running the Real-time Item-to-item Recommendation with BigQuery ML Matrix Factorization and ScaNN solution.
Use this notebook to complete the following tasks:
Explore the sample playlist data.
Compute pointwise mutual information (PMI) that represents the co-occurrence of songs on playlists.
Train a matrix factorization model using BigQuery ML to learn item embeddings based on the PMI data.
Explore the learned embeddings.
Before starting this notebook, you must run the 00_prep_bq_procedures notebook to complete the solution prerequisites.
After completing this notebook, run the 02_export_bqml_mf_embeddings notebook to process the item embedding data.
Setup
Import the required libraries, configure the environment variables, and authenticate your GCP account.
Import libraries
End of explanation
"""
PROJECT_ID = 'yourProject' # Change to your project.
!gcloud config set project $PROJECT_ID
"""
Explanation: Configure GCP environment settings
Update the PROJECT_ID variable to reflect the ID of the Google Cloud project you are using to implement this solution.
End of explanation
"""
try:
from google.colab import auth
auth.authenticate_user()
print("Colab user is authenticated.")
except: pass
"""
Explanation: Authenticate your GCP account
This is required if you run the notebook in Colab. If you use an AI Platform notebook, you should already be authenticated.
End of explanation
"""
import matplotlib.pyplot as plt, seaborn as sns
"""
Explanation: Explore the sample data
Use visualizations to explore the data in the vw_item_groups view that you created in the 00_prep_bq_and_datastore.ipynb notebook.
Import libraries for data visualization:
End of explanation
"""
%%bigquery --project $PROJECT_ID
CREATE OR REPLACE TABLE recommendations.valid_items
AS
SELECT
item_Id,
COUNT(group_Id) AS item_frequency
FROM recommendations.vw_item_groups
GROUP BY item_Id
HAVING item_frequency >= 15;
SELECT COUNT(*) item_count FROM recommendations.valid_items;
"""
Explanation: Count the number of songs that occur in at least 15 groups:
End of explanation
"""
%%bigquery --project $PROJECT_ID
CREATE OR REPLACE TABLE recommendations.valid_groups
AS
SELECT
group_Id,
COUNT(item_Id) AS group_size
FROM recommendations.vw_item_groups
WHERE item_Id IN (SELECT item_Id FROM recommendations.valid_items)
GROUP BY group_Id
HAVING group_size BETWEEN 2 AND 100;
SELECT COUNT(*) group_count FROM recommendations.valid_groups;
"""
Explanation: Count the number of playlists that have between 2 and 100 items:
End of explanation
"""
%%bigquery --project $PROJECT_ID
SELECT COUNT(*) record_count
FROM `recommendations.vw_item_groups`
WHERE item_Id IN (SELECT item_Id FROM recommendations.valid_items)
AND group_Id IN (SELECT group_Id FROM recommendations.valid_groups);
"""
Explanation: Count the number of records with valid songs and playlists:
End of explanation
"""
%%bigquery size_distribution --project $PROJECT_ID
WITH group_sizes
AS
(
SELECT
group_Id,
ML.BUCKETIZE(
COUNT(item_Id), [10, 20, 30, 40, 50, 101])
AS group_size
FROM `recommendations.vw_item_groups`
WHERE item_Id IN (SELECT item_Id FROM recommendations.valid_items)
AND group_Id IN (SELECT group_Id FROM recommendations.valid_groups)
GROUP BY group_Id
)
SELECT
CASE
WHEN group_size = 'bin_1' THEN '[1 - 10]'
WHEN group_size = 'bin_2' THEN '[10 - 20]'
WHEN group_size = 'bin_3' THEN '[20 - 30]'
WHEN group_size = 'bin_4' THEN '[30 - 40]'
WHEN group_size = 'bin_5' THEN '[40 - 50]'
ELSE '[50 - 100]'
END AS group_size,
CASE
WHEN group_size = 'bin_1' THEN 1
WHEN group_size = 'bin_2' THEN 2
WHEN group_size = 'bin_3' THEN 3
WHEN group_size = 'bin_4' THEN 4
WHEN group_size = 'bin_5' THEN 5
ELSE 6
END AS bucket_Id,
COUNT(group_Id) group_count
FROM group_sizes
GROUP BY group_size, bucket_Id
ORDER BY bucket_Id
plt.figure(figsize=(20,5))
q = sns.barplot(x='group_size', y='group_count', data=size_distribution)
"""
Explanation: Show the playlist size distribution:
End of explanation
"""
%%bigquery occurrence_distribution --project $PROJECT_ID
WITH item_frequency
AS
(
SELECT
Item_Id,
ML.BUCKETIZE(
COUNT(group_Id)
, [15, 30, 50, 100, 200, 300, 400]) AS group_count
FROM `recommendations.vw_item_groups`
WHERE item_Id IN (SELECT item_Id FROM recommendations.valid_items)
AND group_Id IN (SELECT group_Id FROM recommendations.valid_groups)
GROUP BY Item_Id
)
SELECT
CASE
WHEN group_count = 'bin_1' THEN '[15 - 30]'
WHEN group_count = 'bin_2' THEN '[30 - 50]'
WHEN group_count = 'bin_3' THEN '[50 - 100]'
WHEN group_count = 'bin_4' THEN '[100 - 200]'
WHEN group_count = 'bin_5' THEN '[200 - 300]'
WHEN group_count = 'bin_6' THEN '[300 - 400]'
ELSE '[400+]'
END AS group_count,
CASE
WHEN group_count = 'bin_1' THEN 1
WHEN group_count = 'bin_2' THEN 2
WHEN group_count = 'bin_3' THEN 3
WHEN group_count = 'bin_4' THEN 4
WHEN group_count = 'bin_5' THEN 5
WHEN group_count = 'bin_6' THEN 6
ELSE 7
END AS bucket_Id,
COUNT(Item_Id) item_count
FROM item_frequency
GROUP BY group_count, bucket_Id
ORDER BY bucket_Id
plt.figure(figsize=(20, 5))
q = sns.barplot(x='group_count', y='item_count', data=occurrence_distribution)
%%bigquery --project $PROJECT_ID
DROP TABLE IF EXISTS recommendations.valid_items;
%%bigquery --project $PROJECT_ID
DROP TABLE IF EXISTS recommendations.valid_groups;
"""
Explanation: Show the song occurrence distribution:
End of explanation
"""
%%bigquery --project $PROJECT_ID
DECLARE min_item_frequency INT64;
DECLARE max_group_size INT64;
SET min_item_frequency = 15;
SET max_group_size = 100;
CALL recommendations.sp_ComputePMI(min_item_frequency, max_group_size);
"""
Explanation: Compute song PMI data
You run the sp_ComputePMI stored procedure to compute song PMI data. This PMI data is what you'll use to train the matrix factorization model in the next section.
This stored procedure accepts the following parameters:
min_item_frequency — Sets the minimum number of times that a song must appear on playlists.
max_group_size — Sets the maximum number of songs that a playlist can contain.
These parameters are used together to select records where the song occurs on a number of playlists equal to or greater than the min_item_frequency value and the playlist contains a number of songs between 2 and the max_group_size value. These are the records that get processed to make the training dataset.
The stored procedure works as follows:
Creates a valid_item_groups table and populates it with records from the vw_item_groups view that meet the following criteria:
The song occurs on a number of playlists equal to or greater than the
min_item_frequency value
The playlist contains a number of songs between 2 and the max_group_size
value.
Creates the item_cooc table and populates it with co-occurrence data that
identifies pairs of songs that occur on the same playlist. It does this by:
Self-joining the valid_item_groups table on the group_id column.
Setting the cooc column to 1.
Summing the cooc column for the item1_Id and item2_Id columns.
Creates an item_frequency table and populates it with data that identifies
how many playlists each song occurs in.
Recreates the item_cooc table to include the following record sets:
The item1_Id, item2_Id, and cooc data from the original item_cooc
table. The PMI values calculated from these song pairs lets the solution
calculate the embeddings for the rows in the feedback matrix.
<img src="figures/feedback-matrix-rows.png" alt="Embedding matrix that shows the matrix rows calculated by this step." style="width: 400px;"/>
The same data as in the previous bullet, but with the item1_Id data
written to the item2_Id column and the item2_Id data written to the
item1_Id column. This data provides the mirror values of the initial
entities in the feedback matrix. The PMI values calculated from these
song pairs lets the solution calculate the embeddings for the columns in
the feedback matrix.
<img src="figures/feedback-matrix-columns.png" alt="Embedding matrix that shows the matrix columns calculated by this step." style="width: 400px;"/>
The data from the item_frequency table. The item_Id data is written
to both the item1_Id and item2_Id columns and the frequency data is
written to the cooc column. This data provides the diagonal entries of
the feedback matrix. The PMI values calculated from these song pairs lets
the solution calculate the embeddings for the diagonals in the feedback
matrix.
<img src="figures/feedback-matrix-diagonals.png" alt="Embedding matrix that shows the matrix diagonals calculated by this step." style="width: 400px;"/>
Computes the PMI for item pairs in the item_cooc table, then recreates the
item_cooc table to include this data in the pmi column.
Run the sp_ComputePMI stored procedure
End of explanation
"""
%%bigquery --project $PROJECT_ID
SELECT
a.item1_Id,
a.item2_Id,
b.frequency AS freq1,
c.frequency AS freq2,
a.cooc,
a.pmi,
a.cooc * a.pmi AS score
FROM recommendations.item_cooc a
JOIN recommendations.item_frequency b
ON a.item1_Id = b.item_Id
JOIN recommendations.item_frequency c
ON a.item2_Id = c.item_Id
WHERE a.item1_Id != a.item2_Id
ORDER BY score DESC
LIMIT 10;
%%bigquery --project $PROJECT_ID
SELECT COUNT(*) records_count
FROM recommendations.item_cooc
"""
Explanation: View the song PMI data
End of explanation
"""
%%bigquery --project $PROJECT_ID
DECLARE dimensions INT64 DEFAULT 50;
CALL recommendations.sp_TrainItemMatchingModel(dimensions)
"""
Explanation: Train the BigQuery ML matrix factorization model
You run the sp_TrainItemMatchingModel stored procedure to train the item_matching_model matrix factorization model on the song PMI data. The model builds a feedback matrix, which in turn is used to calculate item embeddings for the songs. For more information about how this process works, see Understanding item embeddings.
This stored procedure accepts the dimensions parameter, which provides the value for the NUM_FACTORS parameter of the CREATE MODEL statement. The NUM_FACTORS parameter lets you set the number of latent factors to use in the model. Higher values for this parameter can increase model performance, but will also increase the time needed to train the model. Using the default dimensions value of 50, the model takes around 120 minutes to train.
Run the sp_TrainItemMatchingModel stored procedure
After the item_matching_model model is created successfully, you can use the BigQuery console to investigate the loss across the training iterations, and also see the final evaluation metrics.
End of explanation
"""
%%bigquery song_embeddings --project $PROJECT_ID
SELECT
feature,
processed_input,
factor_weights,
intercept
FROM
ML.WEIGHTS(MODEL recommendations.item_matching_model)
WHERE
feature IN ('2114406',
'2114402',
'2120788',
'2120786',
'1086322',
'3129954',
'53448',
'887688',
'562487',
'833391',
'1098069',
'910683',
'1579481',
'2675403',
'2954929',
'625169')
songs = {
'2114406': 'Metallica: Nothing Else Matters',
'2114402': 'Metallica: The Unforgiven',
'2120788': 'Limp Bizkit: My Way',
'2120786': 'Limp Bizkit: My Generation',
'1086322': 'Jacques Brel: Ne Me Quitte Pas',
'3129954': 'Édith Piaf: Non, Je Ne Regrette Rien',
'53448': 'France Gall: Ella, Elle l\'a',
'887688': 'Enrique Iglesias: Tired Of Being Sorry',
'562487': 'Shakira: Hips Don\'t Lie',
'833391': 'Ricky Martin: Livin\' la Vida Loca',
'1098069': 'Snoop Dogg: Drop It Like It\'s Hot',
'910683': '2Pac: California Love',
'1579481': 'Dr. Dre: The Next Episode',
'2675403': 'Eminem: Lose Yourself',
'2954929': 'Black Sabbath: Iron Man',
'625169': 'Black Sabbath: Paranoid',
}
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
def process_results(results):
items = list(results['feature'].unique())
item_embeddings = dict()
for item in items:
emebedding = [0.0] * 100
embedding_pair = results[results['feature'] == item]
for _, row in embedding_pair.iterrows():
factor_weights = list(row['factor_weights'])
for _, element in enumerate(factor_weights):
emebedding[element['factor'] - 1] += element['weight']
item_embeddings[item] = emebedding
return item_embeddings
item_embeddings = process_results(song_embeddings)
item_ids = list(item_embeddings.keys())
for idx1 in range(0, len(item_ids) - 1):
item1_Id = item_ids[idx1]
title1 = songs[item1_Id]
print(title1)
print("==================")
embedding1 = np.array(item_embeddings[item1_Id])
similar_items = []
for idx2 in range(len(item_ids)):
item2_Id = item_ids[idx2]
title2 = songs[item2_Id]
embedding2 = np.array(item_embeddings[item2_Id])
similarity = round(cosine_similarity([embedding1], [embedding2])[0][0], 5)
similar_items.append((title2, similarity))
similar_items = sorted(similar_items, key=lambda item: item[1], reverse=True)
for element in similar_items[1:]:
print(f"- {element[0]}' = {element[1]}")
print()
"""
Explanation: Explore the trained embeddings
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.20/_downloads/e47923b6fb0438d171cc375f56ae6765/plot_time_frequency_simulated.ipynb
|
bsd-3-clause
|
# Authors: Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Chris Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
from mne import create_info, EpochsArray
from mne.baseline import rescale
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
tfr_array_morlet)
print(__doc__)
"""
Explanation: Time-frequency on simulated data (Multitaper vs. Morlet vs. Stockwell)
This example demonstrates the different time-frequency estimation methods
on simulated data. It shows the time-frequency resolution trade-off
and the problem of estimation variance. In addition it highlights
alternative functions for generating TFRs without averaging across
trials, or by operating on numpy arrays.
End of explanation
"""
sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = 1024 # Just over 1 second epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
data = noise + signal
reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
reject=reject)
epochs.average().plot()
"""
Explanation: Simulate data
We'll simulate data with a known spectro-temporal structure.
End of explanation
"""
freqs = np.arange(5., 100., 3.)
vmin, vmax = -3., 3. # Define our color limits.
"""
Explanation: Calculate a time-frequency representation (TFR)
Below we'll demonstrate the output of several TFR functions in MNE:
:func:mne.time_frequency.tfr_multitaper
:func:mne.time_frequency.tfr_stockwell
:func:mne.time_frequency.tfr_morlet
Multitaper transform
First we'll use the multitaper method for calculating the TFR.
This creates several orthogonal tapering windows in the TFR estimation,
which reduces variance. We'll also show some of the parameters that can be
tweaked (e.g., time_bandwidth) that will result in different multitaper
properties, and thus a different TFR. You can trade time resolution or
frequency resolution or both in order to get a reduction in variance.
End of explanation
"""
n_cycles = freqs / 2.
time_bandwidth = 2.0 # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Least smoothing, most variance')
"""
Explanation: (1) Least smoothing (most variance/background fluctuations).
End of explanation
"""
n_cycles = freqs # Increase time-window length to 1 second.
time_bandwidth = 4.0 # Same frequency-smoothing as (1) 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less frequency smoothing, more time smoothing')
"""
Explanation: (2) Less frequency smoothing, more time smoothing.
End of explanation
"""
n_cycles = freqs / 2.
time_bandwidth = 8.0 # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less time smoothing, more frequency smoothing')
"""
Explanation: (3) Less time smoothing, more frequency smoothing.
End of explanation
"""
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fmin, fmax = freqs[[0, -1]]
for width, ax in zip((0.2, .7, 3.0), axs):
power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False,
colorbar=False)
ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width))
plt.tight_layout()
"""
Explanation: Stockwell (S) transform
Stockwell uses a Gaussian window to balance temporal and spectral resolution.
Importantly, frequency bands are phase-normalized, hence strictly comparable
with regard to timing, and the input signal can be recovered from the
transform in a lossless way if we disregard numerical errors. In this case,
we control the spectral / temporal resolution by specifying different widths
of the gaussian window using the width parameter.
End of explanation
"""
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
all_n_cycles = [1, 3, freqs / 2.]
for n_cycles, ax in zip(all_n_cycles, axs):
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False)
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
axes=ax, show=False, colorbar=False)
n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles
ax.set_title('Sim: Using Morlet wavelet, n_cycles = %s' % n_cycles)
plt.tight_layout()
"""
Explanation: Morlet Wavelets
Finally, show the TFR using morlet wavelets, which are a sinusoidal wave
with a gaussian envelope. We can control the balance between spectral and
temporal resolution with the n_cycles parameter, which defines the
number of cycles to include in the window.
End of explanation
"""
n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Using Morlet wavelets and EpochsTFR', show=False)
"""
Explanation: Calculating a TFR without averaging over epochs
It is also possible to calculate a TFR without averaging across trials.
We can do this by using average=False. In this case, an instance of
:class:mne.time_frequency.EpochsTFR is returned.
End of explanation
"""
power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
freqs=freqs, n_cycles=n_cycles,
output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
mesh = ax.pcolormesh(epochs.times * 1000, freqs, power[0],
cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()
plt.show()
"""
Explanation: Operating on arrays
MNE also has versions of the functions above which operate on numpy arrays
instead of MNE objects. They expect inputs of the shape
(n_epochs, n_channels, n_times). They will also return a numpy array
of shape (n_epochs, n_channels, n_freqs, n_times).
End of explanation
"""
|
skdaccess/skdaccess
|
skdaccess/examples/Demo_UAVSAR.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from skimage.measure import block_reduce
import numpy as np
"""
Explanation: The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
End of explanation
"""
from skdaccess.geo.uavsar.cache import DataFetcher
"""
Explanation: Import data fetcher
End of explanation
"""
slc_url_list = [
'http://downloaduav2.jpl.nasa.gov/Release22/SanAnd_23511_01/SanAnd_23511_14068_001_140529_L090HH_01_BC_s2_1x1.slc',
]
metadata_url_list = [
'http://downloaduav2.jpl.nasa.gov/Release22/SanAnd_23511_01/SanAnd_23511_14068_001_140529_L090HH_01_BC.ann',
]
llh_url = 'http://downloaduav2.jpl.nasa.gov/Release22/SanAnd_23511_01/SanAnd_23511_01_BC_s2_2x8.llh'
df = DataFetcher(slc_url_list = slc_url_list, metadata_url_list = metadata_url_list,
llh_url = llh_url, memmap = False)
dw = df.output()
label, data = next(dw.getIterator())
smoothed_mag = block_reduce(np.abs(data),(100,25),np.median)
plt.imshow(smoothed_mag,cmap='gray',vmax=1);
plt.colorbar();
"""
Explanation: Provide data URLs
End of explanation
"""
|
BrainIntensive/OnlineBrainIntensive
|
resources/nipype/nipype_tutorial/notebooks/basic_iteration.ipynb
|
mit
|
from nipype import Node, Workflow
from nipype.interfaces.fsl import BET, IsotropicSmooth
# Initiate a skull stripping Node with BET
skullstrip = Node(BET(mask=True,
in_file='/data/ds102/sub-01/anat/sub-01_T1w.nii.gz'),
name="skullstrip")
"""
Explanation: <img src="../static/images/iterables.png" width="240">
Iterables
Some steps in a neuroimaging analysis are repetitive, such as running the same preprocessing on multiple subjects or doing statistical inference on multiple files. To prevent the creation of multiple individual scripts, Nipype has an execution plugin called iterables.
The main homepage has a nice section about MapNode and iterables if you want to learn more. Also, if you are interested in more advanced procedures, such as synchronizing multiple iterables or using conditional iterables, check out synchronize and intersource.
For example, let's assume we have a node (A) that does simple skull stripping, followed by a node (B) that does isotropic smoothing. Now, let's say that we are curious about the effect of different smoothing kernels. Therefore, we want to run the smoothing node with FWHM set to 4mm, 8mm and 16mm.
End of explanation
"""
isosmooth = Node(IsotropicSmooth(), name='iso_smooth')
"""
Explanation: Create a smoothing Node with IsotropicSmooth
End of explanation
"""
isosmooth.iterables = ("fwhm", [4, 8, 16])
"""
Explanation: Now, to use iterables and therefore smooth with different fwhm is as simple as that:
End of explanation
"""
# Create the workflow
wf = Workflow(name="smoothflow")
wf.base_dir = "/data"
wf.connect(skullstrip, 'out_file', isosmooth, 'in_file')
# Run it in parallel (one core for each smoothing kernel)
wf.run('MultiProc', plugin_args={'n_procs': 3})
"""
Explanation: And to wrap it up. We need to create a workflow, connect the nodes and finally, can run the workflow in parallel.
End of explanation
"""
# Visualize the detailed graph
from IPython.display import Image
wf.write_graph(graph2use='exec', format='png', simple_form=True)
Image(filename='/data/smoothflow/graph_detailed.dot.png')
"""
Explanation: If we visualize the graph with exec, we can see where the parallelization actually takes place.
End of explanation
"""
!tree /data/smoothflow -I '*txt|*pklz|report*|*.json|*js|*.dot|*.html'
"""
Explanation: If you look at the structure in the workflow directory, you can also see that for each smoothing kernel a specific folder was created, e.g. _fwhm_16.
End of explanation
"""
%pylab inline
from nilearn import plotting
plotting.plot_anat(
'/data/ds102/sub-01/anat/sub-01_T1w.nii.gz', title='original',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
plotting.plot_anat(
'/data/smoothflow/skullstrip/sub-01_T1w_brain.nii.gz', title='skullstripped',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
plotting.plot_anat(
'/data/smoothflow/_fwhm_4/iso_smooth/sub-01_T1w_brain_smooth.nii.gz', title='FWHM=4',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
plotting.plot_anat(
'/data/smoothflow/_fwhm_8/iso_smooth/sub-01_T1w_brain_smooth.nii.gz', title='FWHM=8',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
plotting.plot_anat(
'/data/smoothflow/_fwhm_16/iso_smooth/sub-01_T1w_brain_smooth.nii.gz', title='FWHM=16',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
"""
Explanation: Now, let's visualize the results!
End of explanation
"""
# First, let's specify the list of input variables
subject_list = ['sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05']
session_list = ['run-1', 'run-2']
fwhm_widths = [4, 8]
"""
Explanation: IdentityInterface (special use case of iterables)
A special use case of iterables is the IdentityInterface. The IdentityInterface allows you to create Nodes that do simple identity mapping, i.e. Nodes that only work on parameters/strings.
For example, let's say you want to run a preprocessing workflow over 5 subjects, each having two runs, and applying 2 different smoothing kernels (as is done in the Preprocessing Example). We can do this as follows:
End of explanation
"""
from nipype import IdentityInterface
infosource = Node(IdentityInterface(fields=['subject_id', 'session_id', 'fwhm_id']),
name="infosource")
infosource.iterables = [('subject_id', subject_list),
('session_id', session_list),
('fwhm_id', fwhm_widths)]
"""
Explanation: Now, we can create the IdentityInterface Node
End of explanation
"""
infosource.outputs
"""
Explanation: That's it. Now, we can connect the output fields of this infosource node like any other node to wherever we want.
End of explanation
"""
workflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
('session_id', 'session_id')]),
(infosource, smooth, [('fwhm_id', 'fwhm')])
])
"""
Explanation: For example, like:
End of explanation
"""
|
CELMA-project/CELMA
|
derivations/divOfExBOperator/divOfVectorAdvectionWithN.ipynb
|
lgpl-3.0
|
from IPython.display import display
from sympy import symbols, simplify, sympify, expand
from sympy import init_printing
from sympy import Eq, Function
from clebschVector import ClebschVec
from clebschVector import div, grad, gradPerp, advVec
from common import rho, theta, poisson
from common import displayVec
init_printing()
u_z = symbols('u_z', real = True)
# In reality this is a function, but as it serves only as a dummy it is here defined as a symbol
# This makes it easier to replace
f = symbols('f', real = True)
phi = Function('phi')(rho, theta)
n = Function('n')(rho, theta)
# Symbols for printing
zeta, chi, epsilon = symbols('zeta, chi, epsilon')
"""
Explanation: Calculation of the divergence of the advection of the perpendicular gradient of the potential times density using Clebsch coordinates
We would here like to calculate
$$
\nabla\cdot\left(\mathbf{u}_E\cdot\nabla\left[n \nabla_\perp\phi \right]\right)
$$
using cylindrical Clebsch coordinates, as this tensor identity has not been found in the literature.
NOTE: These are normalized equations. As $B$ is constant, we can choose $B_0$ so that the normalized $\tilde{B}=1$, thus, $B$ is excluded from these equations.
Also, we would like to compare this with
$$
B\{\phi,\Omega^D\}
$$
End of explanation
"""
nGradPerpPhi = gradPerp(phi)*n
displayVec(nGradPerpPhi)
"""
Explanation: Calculation of the $E\times B$ advection
We would now like to calculate
$$
\zeta = \nabla\cdot\left(\mathbf{u}_E \cdot\nabla\left[n\nabla_\perp\phi\right]\right)
$$
We will do this by
Calculate $n\nabla_\perp\phi$
Define $\mathbf{u}_E$
By first calculating $\nabla_\perp\phi$
Calculate $\mathbf{u}_E\cdot\nabla \left(n\nabla_\perp\phi\right)$
To check the different contributions we also
Calculate $\mathbf{u}_E\cdot\nabla f$
Calculate $\mathbf{a}\cdot \left(n\nabla_\perp\phi\right)$
Take the divergence of the resulting vector
Compare this with $B\{\phi,\Omega^D\}$
Calculation of $n\nabla_\perp\phi$
End of explanation
"""
# The basis-vectors are contravariant => components are covariant
eTheta = ClebschVec(rho=0, theta=1, z=0, covariant=True)
eRho = ClebschVec(rho=1, theta=0, z=0, covariant=True)
B = eTheta^eRho
displayVec(B, 'B')
Blen = B.len()
display(Eq(symbols('B'), Blen))
b = B/(B.len())
displayVec(b, 'b')
"""
Explanation: Defining $\mathbf{u}_E$
We have that
$$\mathbf{u}_E = - \frac{\nabla_\perp\phi\times\mathbf{b}}{B}$$
Remember that we are working with normalized equations, so $B$ (which in reality is $\tilde{B}$) is equal to $1$.
NOTE: It might appear that there is a discrepancy between using a coordinate system where $B$ is not constant and equations that were derived assuming $B$ is constant. This is because the cylindrical coordinate system is not a Clebsch system, but the metrics coincide. The Poisson bracket is the only place where $B$ is used explicitly, and care must be taken. The workaround is easy: just multiply the Poisson bracket with $B$ to make it correct in cylindrical coordinates.
End of explanation
"""
gradPerpPhi = gradPerp(phi)
displayVec(gradPerpPhi)
# Normalized B
BTilde = 1
# Defining u_E
ue = - ((gradPerpPhi^b)/BTilde)
displayVec(ue, 'u_E')
"""
Explanation: NOTE: Basis vectors in $B$ are covariant, so components are contravariant
Calculation of $\nabla_\perp\phi$
End of explanation
"""
ueDotGrad_f = ue*grad(f)
display(ueDotGrad_f)
"""
Explanation: Calculation of $\mathbf{u}_E\cdot\nabla \left(n\nabla_\perp\phi\right)$
Calculation of $\mathbf{u}_E\cdot\nabla f$
End of explanation
"""
aRho, aZ, aTheta = symbols('a^rho, a^z, a^theta')
a_Rho, a_Z, a_Theta = symbols('a_rho, a_z, a_theta')
aCov = ClebschVec(rho = a_Rho, z=a_Z, theta = a_Theta, covariant=True)
aCon = ClebschVec(rho = aRho, z=aZ, theta = aTheta, covariant=False)
"""
Explanation: Calculation of $\mathbf{a}\cdot\nabla \left(n\nabla_\perp\phi\right)$
End of explanation
"""
aCovDotNablaGradPhi = advVec(aCov, nGradPerpPhi)
displayVec(aCovDotNablaGradPhi)
"""
Explanation: Using covariant vector
End of explanation
"""
aConDotNablaGradPhi = advVec(aCon, nGradPerpPhi)
displayVec(aConDotNablaGradPhi)
"""
Explanation: Using contravariant vector
End of explanation
"""
ueDotGradnGradPerpPhi = advVec(ue, nGradPerpPhi)
displayVec(ueDotGradnGradPerpPhi.doitVec())
displayVec(ueDotGradnGradPerpPhi.doitVec().simplifyVec())
"""
Explanation: Calculation of full $\mathbf{u}_E\cdot\nabla \left(n\nabla_\perp\phi\right)$
End of explanation
"""
div_ueDotGradnGradPerpPhi = div(ueDotGradnGradPerpPhi)
zetaFunc = div_ueDotGradnGradPerpPhi.doit().expand()
display(Eq(zeta, simplify(zetaFunc)))
"""
Explanation: Calculation of $\nabla\cdot\left(\mathbf{u}_E\cdot\nabla\left[n\nabla_\perp\phi\right]\right)$
End of explanation
"""
vortD = div(gradPerp(phi)*n)
display(Eq(symbols('Omega^D'), vortD.doit().expand()))
"""
Explanation: Comparison with $B\{\phi,\Omega^D\}$
In cylindrical Clebsch coordinates, we have that $\mathbf{u}_E\cdot\nabla = \{\phi,\cdot\}$. However, we have normalized our equations so that $\tilde{B}=1$. As $B$ from the Clebsch system is not constant, we can achieve normalization by multiplying the Poisson bracket with the un-normalized $B$ (from the Clebsch system).
We define the vorticity-like field $\Omega^D$ to be $\Omega^D = \nabla\cdot\left(n\nabla_\perp\phi\right)$. In the Clebsch system this is written as
End of explanation
"""
poissonPhiVortD = Blen*poisson(phi, vortD)
chiFunc = poissonPhiVortD.doit().expand()
display(Eq(chi, chiFunc))
"""
Explanation: We now write $\chi = B\{\phi,\Omega^D\}$
End of explanation
"""
epsilonFunc = (zetaFunc - chiFunc).expand()
display(Eq(epsilon, epsilonFunc))
"""
Explanation: The difference $\epsilon$ between $\zeta = \nabla\cdot\left(\mathbf{u}_E\cdot\nabla\left[n\nabla_\perp\phi\right]\right)$ and $\chi = B\{\phi,\Omega^D\}$ is given by
$$\epsilon = \zeta - \chi$$
End of explanation
"""
epsMinusCorrection = epsilonFunc\
-\
(\
(1/rho)*phi.diff(rho)*poisson(phi.diff(rho), n)\
+(1/(rho)**3)*phi.diff(theta)*poisson(phi.diff(theta),n)\
+(1/(rho)**4)*n.diff(theta)*(phi.diff(theta))**2
)
display(epsMinusCorrection.simplify())
"""
Explanation: In fact we see that
\begin{align}
\epsilon
- \left(
-\frac{1}{\rho}[\partial_\rho\phi]\{n, \partial_\rho\phi\}
-\frac{1}{\rho^3}[\partial_\theta\phi]\{n, \partial_\theta\phi\}
+\frac{1}{\rho^4}[\partial_\theta n][\partial_\theta\phi]^2
\right)
=\\
\epsilon
- \left(
\frac{1}{\rho}[\partial_\rho\phi]\{\partial_\rho\phi,n\}
+\frac{1}{\rho^3}[\partial_\theta\phi]\{\partial_\theta\phi, n\}
+\frac{1}{\rho^4}[\partial_\theta n][\partial_\theta\phi]^2
\right)
=
\end{align}
End of explanation
"""
xi = (Blen/2)*poisson(ue*ue, n).doit()
epsMinusNewCorr = epsilonFunc - (Blen/2)*poisson(ue*ue, n).doit()
display(epsMinusNewCorr.simplify())
"""
Explanation: What is more interesting is in fact that
\begin{align}
\epsilon
- \xi
=
\epsilon
- \frac{B}{2}\{\mathbf{u}_E\cdot\mathbf{u}_E, n\}
\end{align}
End of explanation
"""
display((ue*ue).doit())
"""
Explanation: Where
\begin{align}
\mathbf{u}_E\cdot\mathbf{u}_E
=
\end{align}
End of explanation
"""
display((zetaFunc - (chiFunc + xi)).simplify())
"""
Explanation: Note that the last term $\frac{1}{\rho^4}(\partial_\theta n)(\partial_\theta\phi)^2$ does not appear to come from the Poisson bracket. It does, however: it comes from the part which contains
$\frac{1}{2}\partial_\rho\left(\frac{1}{\rho}\partial_\theta \phi\right)^2 =
\left(\frac{1}{\rho}\partial_\theta \phi\right)\partial_\rho\left(\frac{1}{\rho}\partial_\theta \phi\right)$
as
$\partial_i (fg) = f \partial_i g + g \partial_i f$
To summarize, we have
\begin{align}
\zeta - (\chi + \xi) =
\end{align}
End of explanation
"""
S = expand(zetaFunc)
strS = str(S)
# phi rho derivatives
strS = strS.replace('Derivative(phi(rho, theta), rho)', 'phi_x')
strS = strS.replace('Derivative(phi(rho, theta), rho, rho)', 'phi_xx')
strS = strS.replace('Derivative(phi(rho, theta), rho, rho, rho)', 'phi_xxx')
# phi theta derivatives
strS = strS.replace('Derivative(phi(rho, theta), theta)', 'phi_z')
strS = strS.replace('Derivative(phi(rho, theta), theta, theta)', 'phi_zz')
strS = strS.replace('Derivative(phi(rho, theta), theta, theta, theta)', 'phi_zzz')
# phi mixed derivatives
strS = strS.replace('Derivative(phi(rho, theta), rho, theta)', 'phi_xz')
strS = strS.replace('Derivative(phi(rho, theta), rho, theta, theta)', 'phi_xzz')
strS = strS.replace('Derivative(phi(rho, theta), rho, rho, theta)', 'phi_xxz')
# Non-derivatives
strS = strS.replace('phi(rho, theta)', 'phi')
# n rho derivatives
strS = strS.replace('Derivative(n(rho, theta), rho)', 'n_x')
strS = strS.replace('Derivative(n(rho, theta), rho, rho)', 'n_xx')
# n theta derivatives
strS = strS.replace('Derivative(n(rho, theta), theta)', 'n_z')
strS = strS.replace('Derivative(n(rho, theta), theta, theta)', 'n_zz')
# n mixed derivatives
strS = strS.replace('Derivative(n(rho, theta), rho, theta)', 'n_xz')
# Non-derivatives
strS = strS.replace('n(rho, theta)', 'n')
newS = sympify(strS)
display(Eq(symbols('S_new'), expand(newS)))
"""
Explanation: Printing for comparison
End of explanation
"""
|
DawesLab/LabNotebooks
|
Double Slit Model.ipynb
|
mit
|
import matplotlib.pyplot as plt
from numpy import pi, sin, cos, linspace, exp, real, imag, abs, conj, meshgrid, log, log10, angle
from numpy.fft import fft, fftshift, ifft
from mpl_toolkits.mplot3d import axes3d
import BeamOptics as bopt
%matplotlib inline
b=.08*1e-3 # the slit width
a=.5*1e-3 # the slit spacing
k=2*pi/(795*1e-9) # longitudinal wavenumber
wt=0 # let time be zero
C=1 # unit amplitude
L=1.8 # distance from slits to CCD
d=.016 # distance from signal to LO at upstream end (used to calculate k_perp)
ccdwidth = 1300 # number of pixels
pixwidth = 20e-6 # pixel width (in meters)
y = linspace(-pixwidth*ccdwidth/2,pixwidth*ccdwidth/2,ccdwidth)
def alpha(y,a):
return k*a*y/(2*L)
def beta(y,b):
return k*b*y/(2*L)
def E_ds(y,a,b):
""" Double-slit field """
# From Hecht p 458:
#return b*C*(sin(beta(y)) / beta(y)) * (sin(wt-k*L) + sin(wt-k*L+2*alpha(y)))
# drop the time-dep term as it will average away:
return 2*b*C*(sin(beta(y,b)) / beta(y,b)) * cos(alpha(y,a)) #* sin(wt - k*L + alpha(y))
def E_dg(y,a,b):
""" Double gaussian field """
# The width needs to be small enough to see interference
# otherwise the beam doesn't diffract and shows no interference.
# We're using b for the gaussian width (i.e. equal to the slit width)
w=b
#return C*exp(1j*k*0.1*d*y/L)
return 5e-3*(bopt.gaussian_beam(0,y-a/2,L,E0=1,wavelambda=795e-9,w0=w,k=[0,0,k]) +
bopt.gaussian_beam(0,y+a/2,L,E0=1,wavelambda=795e-9,w0=w,k=[0,0,k]))
def E_lo(y,d):
"""Plane-wave LO beam incident at small angle, transverse wavenumber k*d*y/L"""
return C*exp(-1j*k*d*y/L)
"""
Explanation: Double-slit model
AMCDawes
A model of the interference between a plane-wave LO and the far-field double-slit output. The FFT is computed to model what we expect to measure in our experimental setup. Physically accurate parameters have been chosen.
Comments:
- The three-peak Fourier output is fairly consistent over a wide range of parameters
- LO is a plane-wave at first, then a Gaussian beam later on in the notebook.
- LO is at an angle, signal is normal-incident. This is not the same as
the experiment, but easier to treat numerically.
June 2021: fixed sign error in E_LO for plane wave, verified values for experimental data. Next: simulate multiple frames of data collection with phase variation.
End of explanation
"""
plt.plot(y,abs(E_ds(y,a,b)))
plt.title("Double slit field")
plt.plot(y,abs(E_dg(y,a,b)))
plt.title("Double-Gaussian field")
"""
Explanation: Sanity check: plot the field:
End of explanation
"""
def plotFFT(d,a,b):
"""Single function version of generating the FFT output"""
TotalField = E_dg(y,a,b)+E_lo(y,d)
TotalIntensity=TotalField*TotalField.conj()
plt.plot(abs(fft(TotalIntensity)),".-")
plt.ylim([0,1e-2])
plt.xlim([0,650])
plt.title("FFT output")
plotFFT(d=0.046,a=0.5e-3,b=0.08e-3)
"""
Explanation: Define a single function to explore the FFT:
End of explanation
"""
plotFFT(d=0.035,a=0.5e-3,b=0.08e-3)
"""
Explanation: This agrees well with Matt's code using symbolic calculations. The main difference I see is in the size of the low-frequency peak. It's much smaller here than in his version.
d=0.035
End of explanation
"""
plotFFT(d=0.02,a=0.5e-3,b=0.08e-3)
"""
Explanation: d=0.02
End of explanation
"""
def plotFFTds(d,a,b):
"""Single function version of generating the FFT output"""
TotalField = E_ds(y,a,b)+E_lo(y,d)
TotalIntensity=TotalField*TotalField.conj()
plt.plot(abs(fft(TotalIntensity)),".-")
plt.ylim([0,0.1])
plt.xlim([400,500])
plt.title("FFT output")
plotFFTds(d=0.025,a=0.5e-3,b=0.08e-3)
"""
Explanation: Double slit is still very different:
End of explanation
"""
bopt.gaussian_beam?
# bopt.gaussian_beam(x, y, z, E0, wavelambda, w0, k)
# set to evaluate gaussian at L (full distance to CCD) with waist width of 2 cm
# using d=0.046 for agreement with experiment
d=0.046
E_lo_gauss = bopt.gaussian_beam(0,y,L,E0=1,wavelambda=795e-9,w0=0.02,k=[0,k*d/L,k])
plt.plot(y,abs(E_lo_gauss))
TotalIntensity=(E_dg(y,a,b)+E_lo_gauss) * (E_dg(y,a,b)+E_lo_gauss).conj()
plt.figure(figsize=(14,4))
plt.plot(y,TotalIntensity,".-")
#plt.xlim([-.002,0])
plt.plot(abs(fft(TotalIntensity)),".-")
plt.ylim([0,0.01]) # Had to lower the LO power quite a bit, and then zoom way in.
plt.xlim([0,650])
"""
Explanation: This does not agree with experimental results.
Replace with Gaussian LO: import gaussian beam function, and repeat:
End of explanation
"""
|
paulvangentcom/heartrate_analysis_python
|
examples/2_regular_ECG/Analysing_a_regular_ECG_signal.ipynb
|
mit
|
#import packages
import heartpy as hp
import matplotlib.pyplot as plt
sample_rate = 250
"""
Explanation: Analysing a regular ECG signal
In this notebook I'll show you three examples of using HeartPy to analyse good-to-reasonable quality ECG signals you may encounter.
We'll be looking at three excerpts from the European ST-T Database over at Physionet. They are all recorded at 250 Hz.
End of explanation
"""
data = hp.get_data('e0103.csv')
plt.figure(figsize=(12,4))
plt.plot(data)
plt.show()
"""
Explanation: Let's look at the first file and visualise it:
End of explanation
"""
#run analysis
wd, m = hp.process(data, sample_rate)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
"""
Explanation: That is a very nice and clean signal. We don't need to do any preprocessing and can run analysis right away:
End of explanation
"""
data = hp.get_data('e0110.csv')
plt.figure(figsize=(12,4))
plt.plot(data)
plt.show()
#and zoom in a bit
plt.figure(figsize=(12,4))
plt.plot(data[0:2500])
plt.show()
"""
Explanation: That went well.
Now let's move on to the next one and see if we can analyse that one too:
End of explanation
"""
filtered = hp.filter_signal(data, cutoff = 0.05, sample_rate = sample_rate, filtertype='notch')
#visualize again
plt.figure(figsize=(12,4))
plt.plot(filtered)
plt.show()
#and zoom in a bit
plt.figure(figsize=(12,4))
plt.plot(data[0:2500], label = 'original signal')
plt.plot(filtered[0:2500], alpha=0.5, label = 'filtered signal')
plt.legend()
plt.show()
"""
Explanation: Ah!
We have an issue: the T-wave (the broad wave right after the main QRS complex) is quite prominent, and we're interested in the QRS complex. We can suppress it using a notch filter.
The notch filter applies a frequency filter to a very narrow frequency range, allowing us to attenuate the unwanted low-frequency content without disturbing the QRS complexes.
End of explanation
"""
#run analysis
wd, m = hp.process(hp.scale_data(filtered), sample_rate)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
"""
Explanation: We've now reduced the amplitude of the T-wave and are ready for analysis
End of explanation
"""
from scipy.signal import resample
#resample the data. Usually 2, 4, or 6 times is enough depending on original sampling rate
resampled_data = resample(filtered, len(filtered) * 2)
#And run the analysis again. Don't forget to up the sample rate as well!
wd, m = hp.process(hp.scale_data(resampled_data), sample_rate * 2)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
"""
Explanation: Oh dear
HeartPy is distrusting some peaks. This is because HeartPy's optimizer prefers broader peaks than some ECG recordings provide (especially at lower sampling rates). Filtering usually decreases the peak width as well, potentially causing issues.
The solution is simple. We can upsample the signal using scipy.signal.resample to help with this!
End of explanation
"""
data = hp.get_data('e0124.csv')
plt.figure(figsize=(12,4))
plt.plot(data)
plt.show()
#and zoom in a bit
plt.figure(figsize=(12,4))
plt.plot(data[0:2500])
plt.show()
"""
Explanation: Upsampling the signal has enabled HeartPy to optimize and find the position for all peaks in the signal.
Note the use of hp.scale_data() in the processing function. This is recommended when the amplitude is low (2.4-3.8 in the original data).
Let's look at the last example
End of explanation
"""
#run analysis
wd, m = hp.process(hp.scale_data(data), sample_rate)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
"""
Explanation: Again we have a very strong signal present in the recording. That is always nice to see. Analysis again is then straightforward:
End of explanation
"""
#resample the data. Usually 2, 4, or 6 times is enough depending on original sampling rate
resampled_data = resample(data, len(data) * 2)
#And run the analysis again. Don't forget to up the sample rate as well!
wd, m = hp.process(hp.scale_data(resampled_data), sample_rate * 2)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
"""
Explanation: And again we can fix the 'mistrusted' peaks with modest upsampling
End of explanation
"""
hp.plot_poincare(wd, m)
#print poincare measures
poincare_measures = ['sd1', 'sd2', 's', 'sd1/sd2']
print('\nnonlinear poincare measures:')
for measure in poincare_measures:
print('%s: %f' %(measure, m[measure]))
"""
Explanation: Since 1.2.4 HeartPy includes Poincaré nonlinear methods too
Use them like the plotter function:
End of explanation
"""
|
nproctor/phys202-2015-work
|
assignments/assignment10/ODEsEx01.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
import math
"""
Explanation: Ordinary Differential Equations Exercise 1
Imports
End of explanation
"""
def solve_euler(derivs, y0, x):
"""Solve a 1d ODE using Euler's method.
Parameters
----------
derivs : function
The derivative of the diff-eq with the signature deriv(y,x) where
y and x are floats.
y0 : float
The initial condition y[0] = y(x[0]).
x : np.ndarray, list, tuple
The array of times at which of solve the diff-eq.
Returns
-------
y : np.ndarray
Array of solutions y[i] = y(x[i])
"""
h = x[1]-x[0]
y = [y0]
for i in range(len(x)-1):
y.append(y[i] + h*derivs(y[i],x[i]) )
return np.array(y)
solve_euler(lambda y,x: 1, 0, [0,1,2])
assert np.allclose(solve_euler(lambda y, x: 1, 0, [0,1,2]), [0,1,2])
"""
Explanation: Euler's method
Euler's method is the simplest numerical approach for solving a first order ODE numerically. Given the differential equation
$$ \frac{dy}{dx} = f(y(x), x) $$
with the initial condition:
$$ y(x_0)=y_0 $$
Euler's method performs updates using the equations:
$$ y_{n+1} = y_n + h f(y_n,x_n) $$
$$ h = x_{n+1} - x_n $$
Write a function solve_euler that implements the Euler method for a 1d ODE and follows the specification described in the docstring:
End of explanation
"""
def solve_midpoint(derivs, y0, x):
"""Solve a 1d ODE using the Midpoint method.
Parameters
----------
derivs : function
The derivative of the diff-eq with the signature deriv(y,x) where y
and x are floats.
y0 : float
The initial condition y[0] = y(x[0]).
x : np.ndarray, list, tuple
The array of times at which of solve the diff-eq.
Returns
-------
y : np.ndarray
Array of solutions y[i] = y(x[i])
"""
h = x[1]-x[0]
y = [y0]
for i in range(len(x)-1):
y.append(y[i] + h*derivs( (y[i] + h/2 *derivs(y[i],x[i])) , (x[i] + h/2)))
return np.array(y)
assert np.allclose(solve_midpoint(lambda y, x: 1, 0, [0,1,2]), [0,1,2])
"""
Explanation: The midpoint method is another numerical method for solving the above differential equation. In general it is more accurate than the Euler method. It uses the update equation:
$$ y_{n+1} = y_n + h f\left(y_n+\frac{h}{2}f(y_n,x_n),x_n+\frac{h}{2}\right) $$
Write a function solve_midpoint that implements the midpoint method for a 1d ODE and follows the specification described in the docstring:
End of explanation
"""
def solve_exact(x):
"""compute the exact solution to dy/dx = x + 2y.
Parameters
----------
x : np.ndarray
Array of x values to compute the solution at.
Returns
-------
y : np.ndarray
Array of solutions at y[i] = y(x[i]).
"""
h = x[1]-x[0]
y = []
for i in range(len(x)):
y.append(0.25 * np.exp(2* x[i]) - 0.5 * x[i] - 0.25)
return np.array(y)
solve_exact(np.array([0,1,2]))
assert np.allclose(solve_exact(np.array([0,1,2])),np.array([0., 1.09726402, 12.39953751]))
"""
Explanation: You are now going to solve the following differential equation:
$$
\frac{dy}{dx} = x + 2y
$$
which has the analytical solution:
$$
y(x) = 0.25 e^{2x} - 0.5 x - 0.25
$$
First, write a solve_exact function that compute the exact solution and follows the specification described in the docstring:
End of explanation
"""
x = np.linspace(0,1,11)
h = x[1] - x[0]
derivs = lambda y,x : x + 2*y
plt.plot(x, solve_euler(derivs, 0, x), label="Euler's Method", color="yellow")
plt.plot(x, solve_midpoint(derivs, 0, x), label ="Midpoint Method", color="blue")
plt.plot(x, solve_exact(x), label="Exact", color="red")
plt.plot(x, odeint(derivs, 0, x), label="ODEint", color="green")
plt.xlabel("x")
plt.ylabel("y(x)")
plt.title("Methods of Solving ODEs")
plt.legend()
ax = plt.gca()
ax.set_axis_bgcolor("#fcfcfc")
assert True # leave this for grading the plots
"""
Explanation: In the following cell you are going to solve the above ODE using four different algorithms:
Euler's method
Midpoint method
odeint
Exact
Here are the details:
Generate an array of x values with $N=11$ points over the interval $[0,1]$ ($h=0.1$).
Define the derivs function for the above differential equation.
Use the solve_euler, solve_midpoint, odeint and solve_exact functions to compute the solutions for the 4 approaches.
Visualize the solutions on a single figure with two subplots:
Plot the $y(x)$ versus $x$ for each of the 4 approaches.
Plot $\left|y(x)-y_{exact}(x)\right|$ versus $x$ for each of the 3 numerical approaches.
Your visualization should have legends, labeled axes, titles and be customized for beauty and effectiveness.
While your final plot will use $N=11$ points, first try making $N$ larger and smaller to see how that affects the errors of the different approaches.
End of explanation
"""
|
as595/AllOfYourBases
|
TIARA/RadioImaging/FourierCat.ipynb
|
gpl-3.0
|
%matplotlib inline
"""
Explanation: FourierCats.ipynb
‹ FourierCats.ipynb › Copyright (C) ‹ 2017 › ‹ Anna Scaife - anna.scaife@manchester.ac.uk ›
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/.
[AMS - 170905] Notebook created for TIARA Astrostatistics Summer School, Taipei, September 2017.
This notebook presents some simple properties of Fourier space and accompanies the lecture: "Imaging the Invisible"
It uses a number of Python libraries, which are all installable using pip.
We want plotting inline in the notebook:
End of explanation
"""
import numpy as np # for array manipulation and the fft
import pylab as pl # for plotting
import cv2 # for image file handling
"""
Explanation: We need to import some libraries:
End of explanation
"""
cat = cv2.imread('./FIGURES/cat1.jpg',0)
"""
Explanation: Open up the input image:
End of explanation
"""
pl.imshow(cat,cmap='gray')
pl.show()
"""
Explanation: ...and let's take a look at it:
End of explanation
"""
cat_squiggle = np.fft.fft2(cat)
cat_squiggle_shifted = np.fft.fftshift(cat_squiggle)
cat_spectrum = 20*np.log(np.abs(cat_squiggle_shifted))
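# Sanity check (not in the original notebook): inverting the unshifted transform
# should reproduce the original image up to floating-point noise.
cat_back = np.real(np.fft.ifft2(cat_squiggle))
print(np.allclose(cat_back, cat))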
"""
Explanation: Next we'll dive straight in and Fourier Transform our cat. Note that there are a couple of steps to this, the first is the fft itself and the second is an fft "shift". This is necessary because of the way that the frequency space is ordered in the Fourier Transform.
End of explanation
"""
pl.subplot(121),pl.imshow(cat, cmap = 'gray')
pl.title('Cat'), pl.xticks([]), pl.yticks([])
pl.subplot(122),pl.imshow(cat_spectrum, cmap = 'gray')
pl.title('Fourier Cat'), pl.xticks([]), pl.yticks([])
pl.show()
"""
Explanation: Let's see how that looks:
End of explanation
"""
rows, cols = cat.shape
crow, ccol = rows//2, cols//2  # integer division so these can be used as array indices
"""
Explanation: Now we can set things up for filtering our Fourier Cat. We need to know (1) the dimensions of the image and (2) where the centre is.
End of explanation
"""
filter_fnc = np.zeros(cat_squiggle_shifted.shape)
filter_fnc[crow-20:crow+20, ccol-20:ccol+20] = 1.0
pl.imshow(filter_fnc)
pl.show()
"""
Explanation: To start with, let's make a filter function that separates the inner most 40 x 40 pixels from everything else.
End of explanation
"""
cat_squiggle_hpf = np.copy(cat_squiggle_shifted)
cat_squiggle_hpf[np.where(filter_fnc==1.)] = 0.0+0*1j
"""
Explanation: We can then use this filter to, firstly, mask out the inner most 40 x 40 pixels in Fourier space. This removes our small Fourier frequencies, i.e. the large scale information in our image.
End of explanation
"""
cat_filtered = np.fft.ifftshift(cat_squiggle_hpf)
cat_filtered_hpf = np.fft.ifft2(cat_filtered)
cat_filtered_hpf = np.abs(cat_filtered_hpf)
"""
Explanation: We can then Fourier transform this back into image space.
End of explanation
"""
#pl.subplot(121),pl.imshow(cat, cmap = 'gray')
#pl.title('Image'), pl.xticks([]), pl.yticks([])
pl.subplot(121),pl.imshow(20*np.log(np.abs(cat_squiggle_hpf)), cmap = 'gray')
pl.title('Filtered Fourier Cat'), pl.xticks([]), pl.yticks([])
pl.subplot(122),pl.imshow(cat_filtered_hpf)
pl.title('HPF Cat'), pl.xticks([]), pl.yticks([])
pl.show()
"""
Explanation: ...and, see how it looks.
End of explanation
"""
cat_squiggle_lpf = np.copy(cat_squiggle_shifted)
cat_squiggle_lpf[np.where(filter_fnc==0.)] = 0.+0.*1j
"""
Explanation: Now let's filter out the large Fourier frequencies:
End of explanation
"""
cat_filtered = np.fft.ifftshift(cat_squiggle_lpf)
cat_filtered_lpf = np.fft.ifft2(cat_filtered)
cat_filtered_lpf = np.abs(cat_filtered_lpf)
"""
Explanation: and Fourier transform that back into image space:
End of explanation
"""
#pl.subplot(121),pl.imshow(cat, cmap = 'gray')
#pl.title('Cat'), pl.xticks([]), pl.yticks([])
pl.subplot(121),pl.imshow(20*np.log(np.abs(cat_squiggle_lpf)), cmap = 'gray')
pl.title('Filtered Fourier Cat'), pl.xticks([]), pl.yticks([])
pl.subplot(122),pl.imshow(cat_filtered_lpf)
pl.title('LPF Cat'), pl.xticks([]), pl.yticks([])
pl.show()
"""
Explanation: This looks like:
End of explanation
"""
psf = np.fft.ifft2(filter_fnc)
psf = np.fft.ifftshift(psf)
psf = np.abs(psf)
"""
Explanation: We can also take the Fourier Transform of the filter function to see how the PSF looks:
End of explanation
"""
pl.subplot(231),pl.imshow(20*np.log(np.abs(cat_squiggle_lpf)), cmap = 'gray')
pl.title('Filtered Fourier Cat'), pl.xticks([]), pl.yticks([])
pl.subplot(232),pl.imshow(20*np.log(np.abs(cat_squiggle_shifted)), cmap = 'gray')
pl.title('Fourier Cat'), pl.xticks([]), pl.yticks([])
pl.subplot(233),pl.imshow(filter_fnc, cmap = 'gray')
pl.title('Filter'), pl.xticks([]), pl.yticks([])
pl.subplot(234),pl.imshow(cat_filtered_lpf, cmap = 'gray')
pl.xlabel('LPF Cat'), pl.xticks([]), pl.yticks([])
pl.subplot(235),pl.imshow(cat, cmap = 'gray')
pl.xlabel('Cat'), pl.xticks([]), pl.yticks([])
pl.subplot(236),pl.imshow(psf, cmap = 'gray')
pl.xlabel('PSF'), pl.xticks([]), pl.yticks([])
pl.show()
"""
Explanation: Let's use that to make the plot from the lecture:
End of explanation
"""
filter_mask = np.random.randint(2, size=cat_squiggle_shifted.shape)
pl.imshow(filter_mask)
pl.show()
cat_squiggle_msk = np.copy(cat_squiggle_shifted)
cat_squiggle_msk[np.where(filter_mask==0.)] = 0.+0.*1j
cat_filtered = np.fft.ifftshift(cat_squiggle_msk)
cat_filtered_msk = np.fft.ifft2(cat_filtered)
cat_filtered_msk = np.abs(cat_filtered_msk)
pl.subplot(121),pl.imshow(cat, cmap = 'gray')
pl.title('Cat'), pl.xticks([]), pl.yticks([])
#pl.subplot(132),pl.imshow(20*np.log(np.abs(cat_squiggle_msk)), cmap = 'gray')
#pl.title('Filtered Fourier Cat'), pl.xticks([]), pl.yticks([])
pl.subplot(122),pl.imshow(cat_filtered_msk)
pl.title('Masked Cat'), pl.xticks([]), pl.yticks([])
pl.show()
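# A small extra experiment (not in the original notebook): keep only a fraction of
# randomly chosen Fourier components and watch the reconstruction degrade.
for keep in (0.5, 0.1, 0.01):
    mask = np.random.random(cat_squiggle_shifted.shape) < keep
    recon = np.abs(np.fft.ifft2(np.fft.ifftshift(np.where(mask, cat_squiggle_shifted, 0.))))
    pl.imshow(recon, cmap='gray')
    pl.title('Keeping %.0f%% of Fourier components' % (keep*100))
    pl.show()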
"""
Explanation: What if, instead of filtering out a contiguous range of Fourier frequencies, we randomly select which Fourier components to keep?
End of explanation
"""
|
variani/study
|
02-intro-python/projects/pandas/babynames.ipynb
|
cc0-1.0
|
%qtconsole
%matplotlib inline
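# Tiny self-contained illustration of the idiom discussed below (made-up data): R's
# subset(df, name %in% c("Andrew", "Andre")) becomes df.query('name in @names').
import pandas as pd
toy = pd.DataFrame({'name': ['Andrew', 'Andre', 'Mary'], 'n': [3, 2, 5]})
names = ['Andrew', 'Andre']
print(toy.query('name in @names'))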
"""
Explanation: About
R data-munging idioms and their equivalents in pandas/python:
Subset with multiple-choice %in%:
R: `subset(df, name %in% c("Andrew", "Andre"))`
python: `df.query('name in ["Andrew", "Andre"]')` via link
Set up
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from ggplot import *
"""
Explanation: Imports
End of explanation
"""
df = pd.read_csv("data/babynames.csv")
"""
Explanation: Read data
End of explanation
"""
df.head()
"""
Explanation: Basic statistics
Looking at the first samples:
End of explanation
"""
(df['name'].nunique(), df['name'].size)
df['name'].nunique() / float(df['name'].size)
"""
Explanation: How many unique names are collected?
End of explanation
"""
df['year'].max() - df['year'].min()
"""
Explanation: We might think that approx. 20 entries per name are collected. Should that equal the length of the period of years covered?
End of explanation
"""
df['name'].isin(['Andrew']).value_counts()
df.query('name == "Andrew"').shape
ggplot(df.query('name == "Joe"'), aes(x = 'year', y = 'n')) + geom_point() + ggtitle("name: Joe")
"""
Explanation: Not really. That means there are many zero entries in the n column for many names.
Filter
By a single name
End of explanation
"""
ggplot(df.query('name == "Joe"'), aes(x = 'year', y = 'n', color = 'sex')) +\
geom_point(size = 10) + geom_smooth(span = 0.1) + ggtitle("name: Joe")
"""
Explanation: Don't forget the names are given for two genders.
End of explanation
"""
ggplot(df.query('name == "Mary"'), aes(x = 'year', y = 'n', color = 'sex')) +\
geom_point(size = 10) + geom_smooth(span = 0.1) + ggtitle("name: Mary")
"""
Explanation: Joe as a name for girls seems to be OK. What about Mary?
End of explanation
"""
ggplot(df.query('name == "Mary" & n < 500'), aes(x = 'year', y = 'n', color = 'sex')) +\
geom_point(size = 10) + geom_smooth(span = 0.1) + ggtitle("name: Mary, n < 500")
"""
Explanation: Name Mary for boys? Let's do a bit more subsetting with an n < 500 filter.
End of explanation
"""
ind = np.argmax(df.query('name == "Mary" & sex == "M"')['n'])
ind
df.query('index == @ind')
"""
Explanation: Now let's get the record where the count of boys named Mary reached its maximum.
End of explanation
"""
df.query('name in ["Andrew", "Andrey"]').shape
anames = ['Andrew', 'Andrey', 'Andres', 'Andre', 'And']
df.query('name in @anames').shape
ggplot(df.query('name in @anames'), aes(x = 'year', y = 'n', color = 'name')) +\
geom_point(size = 10) + geom_smooth(span = 0.1) + facet_wrap("sex")
ggplot(df.query('name in @anames & sex == "M"'), aes(x = 'year', y = 'n', color = 'name')) +\
geom_point(size = 10) + geom_smooth(span = 0.1, se = False) + scale_y_log(10) +\
ggtitle('Andre* male names ')
(df.query('name in @anames')
.groupby(['name', 'sex'])
[['n']].sum())
(df.query('name in @anames & sex == "M"')
.groupby(['name'])
[['n']].sum())
sf = (df.query('name in @anames & sex == "M"')
.groupby(['name'])
.agg({'n': {'total': sum, 'max': lambda x: x.max()},
'prop': {'max': max}}))
sf
sf = (df.query('name in @anames & sex == "M"')
.groupby(['name'])
.apply(lambda x: sum(x.n)))
sf
sf = (df.query('name in @anames & sex == "M"')
.groupby(['name'])
.apply(lambda x: pd.DataFrame({
'min': min(x.n),
'total': sum(x.n)}, index = x.index)))
sf
"""
Explanation: By multiple names
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.18/_downloads/5834d0f519577e60275c6ef3c9fb0dbc/plot_read_inverse.ipynb
|
bsd-3-clause
|
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname = data_path
fname += '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inv = read_inverse_operator(fname)
print("Method: %s" % inv['methods'])
print("fMRI prior: %s" % inv['fmri_prior'])
print("Number of sources: %s" % inv['nsource'])
print("Number of channels: %s" % inv['nchan'])
"""
Explanation: Reading an inverse operator
The inverse operator's source space is shown in 3D.
End of explanation
"""
lh_points = inv['src'][0]['rr']
lh_faces = inv['src'][0]['use_tris']
rh_points = inv['src'][1]['rr']
rh_faces = inv['src'][1]['use_tris']
from mayavi import mlab # noqa
mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
mesh = mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
lh_faces, colormap='RdBu')
mesh.module_manager.scalar_lut_manager.reverse_lut = True
mesh = mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
rh_faces, colormap='RdBu')
mesh.module_manager.scalar_lut_manager.reverse_lut = True
"""
Explanation: Show result on 3D source space
End of explanation
"""
|
karthikrangarajan/intro-to-sklearn
|
03.Feature Engineering.ipynb
|
bsd-3-clause
|
# PCA for dimensionality reduction
from sklearn import decomposition
from sklearn import datasets
iris = datasets.load_iris()
X, y = iris.data, iris.target
# perform principal component analysis
pca = decomposition.PCA(.95)
pca.fit(X)
X_t = pca.transform(X)
(X_t[:, 0])
# import numpy and matplotlib for plotting (and set some stuff)
import numpy as np
np.set_printoptions(suppress=True)
import matplotlib.pyplot as plt
%matplotlib inline
# let's separate out data based on first two principle components
x1, x2 = X_t[:, 0], X_t[:, 1]
# please don't worry about details of the plotting below
# (note: you can get the iris names below from iris.target_names, also in docs)
c1 = np.array(list('rbg')) # colors
colors = c1[y] # y coded by color
classes = iris.target_names[y] # y coded by iris name
for (i, cla) in enumerate(set(classes)):
xc = [p for (j, p) in enumerate(x1) if classes[j] == cla]
yc = [p for (j, p) in enumerate(x2) if classes[j] == cla]
cols = [c for (j, c) in enumerate(colors) if classes[j] == cla]
plt.scatter(xc, yc, c = cols, label = cla)
plt.ylabel('Principal Component 2')
plt.xlabel('Principal Component 1')
plt.legend(loc = 4)
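# Quick inspection (not part of the original walkthrough): how many components were
# kept to reach 95% explained variance, and how much each one explains.
print(pca.n_components_)
print(pca.explained_variance_ratio_)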
"""
Explanation: Feature reduction, selection and creation
Make the learning easier or better beforehand - feature reduction/selection/creation
PCA
SelectKBest
One-Hot Encoder
Principal component analysis (aka PCA)
Reduces dimensions (number of features), based on what information explains the most variance (or signal)
Considered unsupervised learning
Useful for very large feature space (e.g. say the botanist in charge of the iris dataset measured 100 more parts of the flower and thus there were 104 columns instead of 4)
More about PCA on wikipedia here
End of explanation
"""
# SelectKBest for selecting top-scoring features
from sklearn import datasets
from sklearn.feature_selection import SelectKBest, chi2
iris = datasets.load_iris()
X, y = iris.data, iris.target
print(X.shape)
# Do feature selection
# input is scoring function (here chi2) to get univariate p-values
# and number of top-scoring features (k) - here we get the top 2
X_t = SelectKBest(chi2, k = 2).fit_transform(X, y)
print(X_t.shape)
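# Optional peek (illustrative) at the per-feature chi2 scores that drove the selection;
# f_classif could be swapped in as the scoring function for comparison.
selector = SelectKBest(chi2, k=2).fit(X, y)
print(selector.scores_)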
"""
Explanation: Selecting k top scoring features (also dimensionality reduction)
Uses the class labels via the scoring function, so it is really a supervised feature selection step
End of explanation
"""
# OneHotEncoder for dummying variables
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import pandas as pd
data = pd.DataFrame({'index': range(1, 7),
'state': ['WA', 'NY', 'CO', 'NY', 'CA', 'WA']})
print(data)
# We encode both our categorical variable and it's labels
enc = OneHotEncoder()
label_enc = LabelEncoder() # remember the labels here
# Encode labels (can use for discrete numerical values as well)
data_label_encoded = label_enc.fit_transform(data['state'])
data['state'] = data_label_encoded
# Encode and "dummy" variables
data_feature_one_hot_encoded = enc.fit_transform(data[['state']])
# Put into dataframe to look nicer and decode state dummy variables to original state values
# TRY: compare the original input data (look at row numbers) to one hot encoding results
# --> do they match??
pd.DataFrame(data_feature_one_hot_encoded.toarray(), columns = label_enc.inverse_transform(range(4)))
# Encoded labels as dummy variables
print(data_label_encoded)
# Decoded
print(label_enc.inverse_transform(data_label_encoded))
"""
Explanation: <b>Note on scoring function selection in SelectKBest transformations:</b>
* For regression - f_regression
* For classification - chi2, f_classif
One Hot Encoding
It's an operation on feature labels - a method of dummying variables
Expands the feature space by nature of transform - later this can be processed further with a dimensionality reduction (the dummied variables are now their own features)
FYI: One hot encoding variables is needed for the python ML module tensorflow
The code cell below should help make this clear
End of explanation
"""
from sklearn import datasets
iris = datasets.load_iris()
X, y = iris.data, iris.target
a = pd.DataFrame(X,
columns = ['Sepal length', 'Sepal width', 'Petal length', 'Petal width'])
col5 = pd.DataFrame(np.random.randint(1, 4, size = len(y)))
X_plus = pd.concat([a, col5], axis = 1)
X_plus.head(20)
# ...now one-hot-encode...
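# One possible sketch for the exercise described below (assumes the extra random
# column is the last one in X_plus); pd.get_dummies is a convenient alternative
# to OneHotEncoder for a quick dummying like this.
extra = X_plus.iloc[:, -1]
dummied = pd.get_dummies(extra, prefix='cat')
X_encoded = pd.concat([X_plus.iloc[:, :-1], dummied], axis=1)
X_encoded.head()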
"""
Explanation: EXERCISE: Use one hot encoding to "recode" the iris data's extra surprise column (we are going to add a categorical variable here to play with...)
End of explanation
"""
|
rsterbentz/phys202-2015-work
|
assignments/assignment03/NumpyEx04.ipynb
|
mit
|
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
"""
Explanation: Numpy Exercise 4
Imports
End of explanation
"""
import networkx as nx
K_5=nx.complete_graph(5)
nx.draw(K_5)
"""
Explanation: Complete graph Laplacian
In discrete mathematics a Graph is a set of vertices or nodes that are connected to each other by edges or lines. If those edges don't have directionality, the graph is said to be undirected. Graphs are used to model social and communications networks (Twitter, Facebook, Internet) as well as natural systems such as molecules.
A Complete Graph, $K_n$ on $n$ nodes has an edge that connects each node to every other node.
Here is $K_5$:
End of explanation
"""
def complete_deg(n):
"""Return the integer valued degree matrix D for the complete graph K_n."""
k = np.zeros((n,n), dtype=int)
i = 0
while i < n:
k[i,i] = n-1
i += 1
return k
complete_deg(5)
D = complete_deg(5)
assert D.shape==(5,5)
assert D.dtype==np.dtype(int)
assert np.all(D.diagonal()==4*np.ones(5))
assert np.all(D-np.diag(D.diagonal())==np.zeros((5,5),dtype=int))
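# Equivalent, more idiomatic construction (illustrative): the degree matrix of K_n
# is just (n-1) times the identity matrix.
print(np.array_equal(complete_deg(5), 4*np.eye(5, dtype=int)))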
"""
Explanation: The Laplacian Matrix is a matrix that is extremely important in graph theory and numerical analysis. It is defined as $L=D-A$, where $D$ is the degree matrix and $A$ is the adjacency matrix. For the purpose of this problem you don't need to understand the details of these matrices, although their definitions are relatively simple.
The degree matrix for $K_n$ is an $n \times n$ diagonal matrix with the value $n-1$ along the diagonal and zeros everywhere else. Write a function to compute the degree matrix for $K_n$ using NumPy.
End of explanation
"""
def complete_adj(n):
"""Return the integer valued adjacency matrix A for the complete graph K_n."""
a = np.ones((n,n), dtype=int)
j = 0
while j < n:
a[j,j] = 0
j += 1
return a
complete_adj(5)
A = complete_adj(5)
assert A.shape==(5,5)
assert A.dtype==np.dtype(int)
assert np.all(A+np.eye(5,dtype=int)==np.ones((5,5),dtype=int))
"""
Explanation: The adjacency matrix for $K_n$ is an $n \times n$ matrix with zeros along the diagonal and ones everywhere else. Write a function to compute the adjacency matrix for $K_n$ using NumPy.
End of explanation
"""
m = 8
L = complete_deg(m) - complete_adj(m)
np.linalg.eigvals(L)
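# Exploring the spectrum for several n (illustrative): the Laplacian of K_n has
# eigenvalue 0 once and eigenvalue n with multiplicity n-1.
for n in (3, 5, 8):
    L_n = complete_deg(n) - complete_adj(n)
    print(n, np.round(np.linalg.eigvals(L_n), 6))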
"""
Explanation: Use NumPy to explore the eigenvalues or spectrum of the Laplacian L of $K_n$. What patterns do you notice as $n$ changes? Create a conjecture about the general Laplace spectrum of $K_n$.
End of explanation
"""
|
newworldnewlife/TensorFlow-Tutorials
|
03C_Keras_API.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import math
"""
Explanation: TensorFlow Tutorial #03-C
Keras API
by Magnus Erik Hvass Pedersen
/ GitHub / Videos on YouTube
Introduction
Tutorial #02 showed how to implement a Convolutional Neural Network in TensorFlow. We made a few helper-functions for creating the layers in the network. It is essential to have a good high-level API because it makes it much easier to implement complex models, and it lowers the risk of errors.
There are several of these builder API's available for TensorFlow: PrettyTensor (Tutorial #03), Layers API (Tutorial #03-B), and several others. But they were never really finished and now they seem to be more or less abandoned by their developers.
This tutorial is about the Keras API which is already highly developed with very good documentation - and the development continues. It seems likely that Keras will be the standard API for TensorFlow in the future so it is recommended that you use it instead of the other APIs.
The author of Keras has written a blog-post on his API design philosophy which you should read.
Flowchart
The following chart shows roughly how the data flows in the Convolutional Neural Network that is implemented below. See Tutorial #02 for a more detailed description of convolution.
There are two convolutional layers, each followed by a down-sampling using max-pooling (not shown in this flowchart). Then there are two fully-connected layers ending in a softmax-classifier.
Imports
End of explanation
"""
# from tf.keras.models import Sequential # This does not work!
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import InputLayer, Input
from tensorflow.python.keras.layers import Reshape, MaxPooling2D
from tensorflow.python.keras.layers import Conv2D, Dense, Flatten
"""
Explanation: We need to import several things from Keras. Note the long import-statements. This might be a bug. Hopefully it will be possible to write shorter and more elegant lines in the future.
End of explanation
"""
tf.__version__
tf.keras.__version__
"""
Explanation: This was developed using Python 3.6 (Anaconda) and TensorFlow version:
End of explanation
"""
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('data/MNIST/', one_hot=True)
"""
Explanation: Load Data
The MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path.
End of explanation
"""
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))
"""
Explanation: The MNIST data-set has now been loaded and consists of 70,000 images and associated labels (i.e. classifications of the images). The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial.
End of explanation
"""
data.test.cls = np.argmax(data.test.labels, axis=1)
"""
Explanation: The class-labels are One-Hot encoded, which means that each label is a vector with 10 elements, all of which are zero except for one element. The index of this one element is the class-number, that is, the digit shown in the associated image. We also need the class-numbers as integers for the test-set, so we calculate it now.
End of explanation
"""
# We know that MNIST images are 28 pixels in each dimension.
img_size = 28
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * img_size
# Tuple with height and width of images used to reshape arrays.
# This is used for plotting the images.
img_shape = (img_size, img_size)
# Tuple with height, width and depth used to reshape arrays.
# This is used for reshaping in Keras.
img_shape_full = (img_size, img_size, 1)
# Number of colour channels for the images: 1 channel for gray-scale.
num_channels = 1
# Number of classes, one class for each of 10 digits.
num_classes = 10
"""
Explanation: Data Dimensions
The data dimensions are used in several places in the source-code below. They are defined once so we can use these variables instead of numbers throughout the source-code below.
End of explanation
"""
def plot_images(images, cls_true, cls_pred=None):
assert len(images) == len(cls_true) == 9
# Create figure with 3x3 sub-plots.
fig, axes = plt.subplots(3, 3)
fig.subplots_adjust(hspace=0.3, wspace=0.3)
for i, ax in enumerate(axes.flat):
# Plot image.
ax.imshow(images[i].reshape(img_shape), cmap='binary')
# Show true and predicted classes.
if cls_pred is None:
xlabel = "True: {0}".format(cls_true[i])
else:
xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
# Show the classes as the label on the x-axis.
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
"""
Explanation: Helper-function for plotting images
Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image.
End of explanation
"""
# Get the first images from the test-set.
images = data.test.images[0:9]
# Get the true classes for those images.
cls_true = data.test.cls[0:9]
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
"""
Explanation: Plot a few images to see if data is correct
End of explanation
"""
def plot_example_errors(cls_pred):
# cls_pred is an array of the predicted class-number for
# all images in the test-set.
# Boolean array whether the predicted class is incorrect.
incorrect = (cls_pred != data.test.cls)
# Get the images from the test-set that have been
# incorrectly classified.
images = data.test.images[incorrect]
# Get the predicted classes for those images.
cls_pred = cls_pred[incorrect]
# Get the true classes for those images.
cls_true = data.test.cls[incorrect]
# Plot the first 9 images.
plot_images(images=images[0:9],
cls_true=cls_true[0:9],
cls_pred=cls_pred[0:9])
"""
Explanation: Helper-function to plot example errors
Function for plotting examples of images from the test-set that have been mis-classified.
End of explanation
"""
if False:
x_pretty = pt.wrap(x_image)
with pt.defaults_scope(activation_fn=tf.nn.relu):
y_pred, loss = x_pretty.\
conv2d(kernel=5, depth=16, name='layer_conv1').\
max_pool(kernel=2, stride=2).\
conv2d(kernel=5, depth=36, name='layer_conv2').\
max_pool(kernel=2, stride=2).\
flatten().\
fully_connected(size=128, name='layer_fc1').\
softmax_classifier(num_classes=num_classes, labels=y_true)
"""
Explanation: PrettyTensor API
This is how the Convolutional Neural Network was implemented in Tutorial #03 using the PrettyTensor API. It is shown here for easy comparison to the Keras implementation below.
End of explanation
"""
# Start construction of the Keras Sequential model.
model = Sequential()
# Add an input layer which is similar to a feed_dict in TensorFlow.
# Note that the input-shape must be a tuple containing the image-size.
model.add(InputLayer(input_shape=(img_size_flat,)))
# The input is a flattened array with 784 elements,
# but the convolutional layers expect images with shape (28, 28, 1)
model.add(Reshape(img_shape_full))
# First convolutional layer with ReLU-activation and max-pooling.
model.add(Conv2D(kernel_size=5, strides=1, filters=16, padding='same',
activation='relu', name='layer_conv1'))
model.add(MaxPooling2D(pool_size=2, strides=2))
# Second convolutional layer with ReLU-activation and max-pooling.
model.add(Conv2D(kernel_size=5, strides=1, filters=36, padding='same',
activation='relu', name='layer_conv2'))
model.add(MaxPooling2D(pool_size=2, strides=2))
# Flatten the 4-rank output of the convolutional layers
# to 2-rank that can be input to a fully-connected / dense layer.
model.add(Flatten())
# First fully-connected / dense layer with ReLU-activation.
model.add(Dense(128, activation='relu'))
# Last fully-connected / dense layer with softmax-activation
# for use in classification.
model.add(Dense(num_classes, activation='softmax'))
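# Optional quick check (not in the original tutorial text): print the layer stack
# and parameter counts to confirm the architecture looks as intended.
model.summary()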
"""
Explanation: Sequential Model
The Keras API has two modes of constructing Neural Networks. The simplest is the Sequential Model which only allows for the layers to be added in sequence.
End of explanation
"""
from tensorflow.python.keras.optimizers import Adam
optimizer = Adam(lr=1e-3)
"""
Explanation: Model Compilation
The Neural Network has now been defined and must be finalized by adding a loss-function, optimizer and performance metrics. This is called model "compilation" in Keras.
We can either define the optimizer using a string, or if we want more control of its parameters then we need to instantiate an object. For example, we can set the learning-rate.
End of explanation
"""
model.compile(optimizer=optimizer,
loss='categorical_crossentropy',
metrics=['accuracy'])
"""
Explanation: For a classification-problem such as MNIST which has 10 possible classes, we need to use the loss-function called categorical_crossentropy. The performance metric we are interested in is the classification accuracy.
End of explanation
"""
model.fit(x=data.train.images,
y=data.train.labels,
epochs=1, batch_size=128)
"""
Explanation: Training
Now that the model has been fully defined with loss-function and optimizer, we can train it. This function takes numpy-arrays and performs the given number of training epochs using the given batch-size. An epoch is one full use of the entire training-set. So for 10 epochs we would iterate randomly over the entire training-set 10 times.
End of explanation
"""
result = model.evaluate(x=data.test.images,
y=data.test.labels)
"""
Explanation: Evaluation
Now that the model has been trained we can test its performance on the test-set. This also uses numpy-arrays as input.
End of explanation
"""
for name, value in zip(model.metrics_names, result):
print(name, value)
"""
Explanation: We can print all the performance metrics for the test-set.
End of explanation
"""
print("{0}: {1:.2%}".format(model.metrics_names[1], result[1]))
"""
Explanation: Or we can just print the classification accuracy.
End of explanation
"""
images = data.test.images[0:9]
"""
Explanation: Prediction
We can also predict the classification for new images. We will just use some images from the test-set but you could load your own images into numpy arrays and use those instead.
End of explanation
"""
cls_true = data.test.cls[0:9]
"""
Explanation: These are the true class-number for those images. This is only used when plotting the images.
End of explanation
"""
y_pred = model.predict(x=images)
"""
Explanation: Get the predicted classes as One-Hot encoded arrays.
End of explanation
"""
cls_pred = np.argmax(y_pred,axis=1)
plot_images(images=images,
cls_true=cls_true,
cls_pred=cls_pred)
"""
Explanation: Get the predicted classes as integers.
End of explanation
"""
y_pred = model.predict(x=data.test.images)
"""
Explanation: Examples of Mis-Classified Images
We can plot some examples of mis-classified images from the test-set.
First we get the predicted classes for all the images in the test-set:
End of explanation
"""
cls_pred = np.argmax(y_pred,axis=1)
"""
Explanation: Then we convert the predicted class-numbers from One-Hot encoded arrays to integers.
End of explanation
"""
plot_example_errors(cls_pred)
"""
Explanation: Plot some of the mis-classified images.
End of explanation
"""
# Create an input layer which is similar to a feed_dict in TensorFlow.
# Note that the input-shape must be a tuple containing the image-size.
inputs = Input(shape=(img_size_flat,))
# Variable used for building the Neural Network.
net = inputs
# The input is an image as a flattened array with 784 elements.
# But the convolutional layers expect images with shape (28, 28, 1)
net = Reshape(img_shape_full)(net)
# First convolutional layer with ReLU-activation and max-pooling.
net = Conv2D(kernel_size=5, strides=1, filters=16, padding='same',
activation='relu', name='layer_conv1')(net)
net = MaxPooling2D(pool_size=2, strides=2)(net)
# Second convolutional layer with ReLU-activation and max-pooling.
net = Conv2D(kernel_size=5, strides=1, filters=36, padding='same',
activation='relu', name='layer_conv2')(net)
net = MaxPooling2D(pool_size=2, strides=2)(net)
# Flatten the output of the conv-layer from 4-dim to 2-dim.
net = Flatten()(net)
# First fully-connected / dense layer with ReLU-activation.
net = Dense(128, activation='relu')(net)
# Last fully-connected / dense layer with softmax-activation
# so it can be used for classification.
net = Dense(num_classes, activation='softmax')(net)
# Output of the Neural Network.
outputs = net
"""
Explanation: Functional Model
The Keras API can also be used to construct more complicated networks using the Functional Model. This may look a little confusing at first, because each call to the Keras API will create and return an instance that is itself callable. It is not clear whether it is a function or an object - but we can call it as if it is a function. This allows us to build computational graphs that are more complex than the Sequential Model allows.
End of explanation
"""
from tensorflow.python.keras.models import Model
"""
Explanation: Model Compilation
We have now defined the architecture of the model with its input and output. We now have to create a Keras model and compile it with a loss-function and optimizer, so it is ready for training.
End of explanation
"""
model2 = Model(inputs=inputs, outputs=outputs)
"""
Explanation: Create a new instance of the Keras Functional Model. We give it the inputs and outputs of the Convolutional Neural Network that we constructed above.
End of explanation
"""
model2.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
"""
Explanation: Compile the Keras model using the rmsprop optimizer and with a loss-function for multiple categories. The only performance metric we are interested in is the classification accuracy, but you could use a list of metrics here.
End of explanation
"""
model2.fit(x=data.train.images,
y=data.train.labels,
epochs=1, batch_size=128)
"""
Explanation: Training
The model has now been defined and compiled so it can be trained using the same fit() function as used in the Sequential Model above. This also takes numpy-arrays as input.
End of explanation
"""
result = model2.evaluate(x=data.test.images,
y=data.test.labels)
"""
Explanation: Evaluation
Once the model has been trained we can evaluate its performance on the test-set. This is the same syntax as for the Sequential Model.
End of explanation
"""
for name, value in zip(model.metrics_names, result):
print(name, value)
"""
Explanation: The result is a list of values, containing the loss-value and all the metrics we defined when we compiled the model. Note that 'accuracy' is now called 'acc' which is a small inconsistency.
End of explanation
"""
print("{0}: {1:.2%}".format(model.metrics_names[1], result[1]))
"""
Explanation: We can also print the classification accuracy as a percentage:
End of explanation
"""
y_pred = model2.predict(x=data.test.images)
"""
Explanation: Examples of Mis-Classified Images
We can plot some examples of mis-classified images from the test-set.
First we get the predicted classes for all the images in the test-set:
End of explanation
"""
cls_pred = np.argmax(y_pred, axis=1)
"""
Explanation: Then we convert the predicted class-numbers from One-Hot encoded arrays to integers.
End of explanation
"""
plot_example_errors(cls_pred)
"""
Explanation: Plot some of the mis-classified images.
End of explanation
"""
path_model = 'model.keras'
"""
Explanation: Save & Load Model
NOTE: You need to install h5py for this to work!
Tutorial #04 was about saving and restoring the weights of a model using native TensorFlow code. It was an absolutely horrible API! Fortunately, Keras makes this very easy.
This is the file-path where we want to save the Keras model.
End of explanation
"""
model2.save(path_model)
"""
Explanation: Saving a Keras model with the trained weights is then just a single function call, as it should be.
End of explanation
"""
del model2
"""
Explanation: Delete the model from memory so we are sure it is no longer used.
End of explanation
"""
from tensorflow.python.keras.models import load_model
"""
Explanation: We need to import this Keras function for loading the model.
End of explanation
"""
model3 = load_model(path_model)
"""
Explanation: Loading the model is then just a single function-call, as it should be.
End of explanation
"""
images = data.test.images[0:9]
cls_true = data.test.cls[0:9]
"""
Explanation: We can then use the model again e.g. to make predictions. We get the first 9 images from the test-set and their true class-numbers.
End of explanation
"""
y_pred = model3.predict(x=images)
"""
Explanation: We then use the restored model to predict the class-numbers for those images.
End of explanation
"""
cls_pred = np.argmax(y_pred, axis=1)
"""
Explanation: Get the class-numbers as integers.
End of explanation
"""
plot_images(images=images,
cls_pred=cls_pred,
cls_true=cls_true)
"""
Explanation: Plot the images with their true and predicted class-numbers.
End of explanation
"""
def plot_conv_weights(weights, input_channel=0):
# Get the lowest and highest values for the weights.
# This is used to correct the colour intensity across
# the images so they can be compared with each other.
w_min = np.min(weights)
w_max = np.max(weights)
# Number of filters used in the conv. layer.
num_filters = weights.shape[3]
# Number of grids to plot.
# Rounded-up, square-root of the number of filters.
num_grids = math.ceil(math.sqrt(num_filters))
# Create figure with a grid of sub-plots.
fig, axes = plt.subplots(num_grids, num_grids)
# Plot all the filter-weights.
for i, ax in enumerate(axes.flat):
# Only plot the valid filter-weights.
if i<num_filters:
# Get the weights for the i'th filter of the input channel.
# See new_conv_layer() for details on the format
# of this 4-dim tensor.
img = weights[:, :, input_channel, i]
# Plot image.
ax.imshow(img, vmin=w_min, vmax=w_max,
interpolation='nearest', cmap='seismic')
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
"""
Explanation: Visualization of Layer Weights and Outputs
Helper-function for plotting convolutional weights
End of explanation
"""
model3.summary()
"""
Explanation: Get Layers
Keras has a simple way of listing the layers in the model.
End of explanation
"""
layer_input = model3.layers[0]
"""
Explanation: We count the indices to get the layers we want.
The input-layer has index 0.
End of explanation
"""
layer_conv1 = model3.layers[2]
layer_conv1
"""
Explanation: The first convolutional layer has index 2.
End of explanation
"""
layer_conv2 = model3.layers[4]
"""
Explanation: The second convolutional layer has index 4.
End of explanation
"""
weights_conv1 = layer_conv1.get_weights()[0]
"""
Explanation: Convolutional Weights
Now that we have the layers we can easily get their weights.
End of explanation
"""
weights_conv1.shape
"""
Explanation: This gives us a 4-rank tensor.
End of explanation
"""
plot_conv_weights(weights=weights_conv1, input_channel=0)
"""
Explanation: Plot the weights using the helper-function from above.
End of explanation
"""
weights_conv2 = layer_conv2.get_weights()[0]
plot_conv_weights(weights=weights_conv2, input_channel=0)
"""
Explanation: We can also get the weights for the second convolutional layer and plot them.
End of explanation
"""
def plot_conv_output(values):
# Number of filters used in the conv. layer.
num_filters = values.shape[3]
# Number of grids to plot.
# Rounded-up, square-root of the number of filters.
num_grids = math.ceil(math.sqrt(num_filters))
# Create figure with a grid of sub-plots.
fig, axes = plt.subplots(num_grids, num_grids)
# Plot the output images of all the filters.
for i, ax in enumerate(axes.flat):
# Only plot the images for valid filters.
if i<num_filters:
# Get the output image of using the i'th filter.
img = values[0, :, :, i]
# Plot image.
ax.imshow(img, interpolation='nearest', cmap='binary')
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
"""
Explanation: Helper-function for plotting the output of a convolutional layer
End of explanation
"""
def plot_image(image):
plt.imshow(image.reshape(img_shape),
interpolation='nearest',
cmap='binary')
plt.show()
"""
Explanation: Input Image
Helper-function for plotting a single image.
End of explanation
"""
image1 = data.test.images[0]
plot_image(image1)
"""
Explanation: Plot an image from the test-set which will be used as an example below.
End of explanation
"""
from tensorflow.python.keras import backend as K
output_conv1 = K.function(inputs=[layer_input.input],
outputs=[layer_conv1.output])
"""
Explanation: Output of Convolutional Layer - Method 1
There are different ways of getting the output of a layer in a Keras model. This method uses a so-called K-function which turns a part of the Keras model into a function.
End of explanation
"""
layer_output1 = output_conv1([[image1]])[0]
layer_output1.shape
"""
Explanation: We can then call this function with the input image. Note that the image is wrapped in two lists because the function expects an array of that dimensionality. Likewise, the function returns an array with one more dimensionality than we want so we just take the first element.
End of explanation
"""
plot_conv_output(values=layer_output1)
"""
Explanation: We can then plot the output of all 16 channels of the convolutional layer.
End of explanation
"""
output_conv2 = Model(inputs=layer_input.input,
outputs=layer_conv2.output)
"""
Explanation: Output of Convolutional Layer - Method 2
Keras also has another method for getting the output of a layer inside the model. This creates another Functional Model using the same input as the original model, but the output is now taken from the convolutional layer that we are interested in.
End of explanation
"""
layer_output2 = output_conv2.predict(np.array([image1]))
layer_output2.shape
"""
Explanation: This creates a new model-object where we can call the typical Keras functions. To get the output of the convolutional layer we call the predict() function with the input image.
End of explanation
"""
plot_conv_output(values=layer_output2)
"""
Explanation: We can then plot the images for all 36 channels.
End of explanation
"""
|
GoogleCloudPlatform/vertex-ai-samples
|
notebooks/community/ml_ops/stage3/get_started_with_automl_pipeline_components.ipynb
|
apache-2.0
|
import os
# The Vertex AI Workbench Notebook product has specific requirements
IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME")
IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists(
"/opt/deeplearning/metadata/env_version"
)
# Vertex AI Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_WORKBENCH_NOTEBOOK:
USER_FLAG = "--user"
! pip3 install tensorflow-io==0.18 $USER_FLAG -q
! pip3 install --upgrade google-cloud-aiplatform \
google-cloud-pipeline-components \
google-cloud-logging \
pyarrow \
kfp $USER_FLAG -q
"""
Explanation: E2E ML on GCP: MLOps stage 3 : formalization: get started with AutoML pipeline components
<table align="left">
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage3/get_started_with_automl_pipeline_components.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage3/get_started_with_automl_pipeline_components.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-samples/main/notebooks/community/ml_ops/stage3/get_started_with_automl_pipeline_components.ipynb">
<img src="https://lh3.googleusercontent.com/UiNooY4LUgW_oTvpsNhPpQzsstV5W8F7rYgxgGBD85cWJoLmrOzhVs_ksK_vgx40SHs7jCqkTkCk=e14-rj-sc0xffffff-h130-w32" alt="Vertex AI logo">
Open in Vertex AI Workbench
</a>
</td>
</table>
<br/><br/><br/>
Overview
This tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 3 : formalization: get started with AutoML pipeline components.
Dataset
The dataset used for this tutorial is the Flowers dataset from TensorFlow Datasets. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower in the given image from the five classes of flowers: daisy, dandelion, rose, sunflower, or tulip.
Objective
In this tutorial, you learn how to use prebuilt Google Cloud Pipeline Components for Vertex AI AutoML.
This tutorial uses the following Google Cloud ML services:
Vertex AI Pipelines
Vertex AI AutoML
Google Cloud Pipeline Components
Vertex AI Dataset, Model and Endpoint resources
Vertex AI Prediction
The steps performed include:
Construct a pipeline for:
Training a Vertex AI AutoML trained model.
Test the serving binary with a batch prediction job.
Deploying a Vertex AI AutoML trained model.
Execute a Vertex AI pipeline.
Costs
This tutorial uses billable components of Google Cloud:
Vertex AI
Cloud Storage
Learn about Vertex AI pricing and Cloud Storage pricing and use the Pricing Calculator to generate a cost estimate based on your projected usage.
Installations
Install the following packages for executing this MLOps notebook.
End of explanation
"""
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
"""
Explanation: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so that it can find the packages.
End of explanation
"""
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
"""
Explanation: Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the Vertex AI API.
If you are running this notebook locally, you will need to install the Cloud SDK.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands.
Set your project ID
If you don't know your project ID, you may be able to get your project ID using gcloud.
End of explanation
"""
REGION = "[your-region]" # @param {type: "string"}
if REGION == "[your-region]":
REGION = "us-central1"
"""
Explanation: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas: us-central1
Europe: europe-west4
Asia Pacific: asia-east1
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about Vertex AI regions.
End of explanation
"""
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
"""
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
End of explanation
"""
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Vertex AI Workbench, then don't execute this code
IS_COLAB = False
if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv(
"DL_ANACONDA_HOME"
):
if "google.colab" in sys.modules:
IS_COLAB = True
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
"""
Explanation: Authenticate your Google Cloud account
If you are using Vertex AI Workbench Notebooks, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps:
In the Cloud Console, go to the Create service account key page.
Click Create service account.
In the Service account name field, enter a name, and click Create.
In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
"""
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
BUCKET_URI = f"gs://{BUCKET_NAME}"
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]":
    BUCKET_URI = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
"""
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
End of explanation
"""
! gsutil mb -l $REGION $BUCKET_URI
"""
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
"""
! gsutil ls -al $BUCKET_URI
"""
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
"""
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"}
if (
SERVICE_ACCOUNT == ""
or SERVICE_ACCOUNT is None
or SERVICE_ACCOUNT == "[your-service-account]"
):
# Get your service account from gcloud
if not IS_COLAB:
shell_output = !gcloud auth list 2>/dev/null
SERVICE_ACCOUNT = shell_output[2].replace("*", "").strip()
if IS_COLAB:
shell_output = ! gcloud projects describe $PROJECT_ID
project_number = shell_output[-1].split(":")[1].strip().replace("'", "")
SERVICE_ACCOUNT = f"{project_number}-compute@developer.gserviceaccount.com"
print("Service Account:", SERVICE_ACCOUNT)
"""
Explanation: Service Account
You use a service account to create Vertex AI Pipeline jobs. If you do not want to use your project's Compute Engine service account, set SERVICE_ACCOUNT to another service account ID.
End of explanation
"""
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_URI
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_URI
"""
Explanation: Set service account access for Vertex AI Pipelines
Run the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step. You only need to run this step once per service account.
End of explanation
"""
import base64
import json
import google.cloud.aiplatform as aiplatform
import tensorflow as tf
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import Artifact, Input, Output, component
"""
Explanation: Import libraries
End of explanation
"""
aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)
"""
Explanation: Initialize Vertex AI SDK for Python
Initialize the Vertex AI SDK for Python for your project and corresponding bucket.
End of explanation
"""
IMPORT_FILE = (
"gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv"
)
"""
Explanation: Location of Cloud Storage training data.
Now set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage.
End of explanation
"""
@component(packages_to_install=["google-cloud-aiplatform"])
def evaluateAutoMLModelOp(
model: Input[Artifact], region: str, model_evaluation: Output[Artifact]
):
import logging
import google.cloud.aiplatform.gapic as gapic
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{region}-aiplatform.googleapis.com"}
model_service_client = gapic.ModelServiceClient(client_options=client_options)
model_id = model.metadata["resourceName"]
model_evaluations = model_service_client.list_model_evaluations(parent=model_id)
model_evaluation = list(model_evaluations)[0]
logging.info(model_evaluation)
"""
Explanation: Create AutoML model evaluation component
The Vertex AI pre-built pipeline components do not currently include a component for retrieving the model evaluations of an AutoML model, so you first write your own component, as follows:
Takes as input the region and the Model artifact returned from an AutoML training component.
Creates a client interface to the Vertex AI Model service.
Constructs the resource ID for the model from the model artifact parameter (metadata["resourceName"]).
Retrieves the model evaluation.
Logs the retrieved model evaluation.
End of explanation
"""
PIPELINE_ROOT = "{}/pipeline_root/automl_icn_training".format(BUCKET_URI)
DEPLOY_COMPUTE = "n1-standard-4"
@dsl.pipeline(
name="automl-icn-training", description="AutoML image classification training"
)
def pipeline(
import_file: str,
batch_files: list,
display_name: str,
bucket: str = PIPELINE_ROOT,
project: str = PROJECT_ID,
region: str = REGION,
):
from google_cloud_pipeline_components import aiplatform as gcc_aip
dataset_op = gcc_aip.ImageDatasetCreateOp(
project=project,
display_name=display_name,
gcs_source=import_file,
import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,
)
training_op = gcc_aip.AutoMLImageTrainingJobRunOp(
project=project,
display_name=display_name,
prediction_type="classification",
model_type="CLOUD",
dataset=dataset_op.outputs["dataset"],
model_display_name=display_name,
training_fraction_split=0.6,
validation_fraction_split=0.2,
test_fraction_split=0.2,
budget_milli_node_hours=8000,
)
eval_op = evaluateAutoMLModelOp(model=training_op.outputs["model"], region=region)
batch_op = gcc_aip.ModelBatchPredictOp(
project=project,
job_display_name="batch_predict_job",
model=training_op.outputs["model"],
gcs_source_uris=batch_files,
gcs_destination_output_uri_prefix=bucket,
instances_format="jsonl",
predictions_format="jsonl",
model_parameters={},
machine_type=DEPLOY_COMPUTE,
starting_replica_count=1,
max_replica_count=1,
).after(eval_op)
endpoint_op = gcc_aip.EndpointCreateOp(
project=project,
location=region,
display_name=display_name,
).after(batch_op)
_ = gcc_aip.ModelDeployOp(
model=training_op.outputs["model"],
endpoint=endpoint_op.outputs["endpoint"],
automatic_resources_min_replica_count=1,
automatic_resources_max_replica_count=1,
traffic_split={"0": 100},
)
"""
Explanation: Construct AutoML training pipeline
In the example below, you construct a pipeline for training an AutoML model using pre-built Google Cloud Pipeline Components for AutoML, as follows:
Use the prebuilt component ImageDatasetCreateOp to create a Vertex AI Dataset resource, where:
The display name for the dataset is passed into the pipeline.
The import file for the dataset is passed into the pipeline.
The component returns the dataset resource as outputs["dataset"].
Use the prebuilt component AutoMLImageTrainingJobRunOp to train a Vertex AI AutoML Model resource, where:
The display name for the dataset is passed into the pipeline.
The dataset is the output from the ImageDatasetCreateOp.
The component returns the model resource as outputs["model"].
Use the prebuilt component ModelBatchPredictOp to do a test batch prediction, where:
The model is the output from the AutoMLImageTrainingJobRunOp.
Use the prebuilt component EndpointCreateOp to create a Vertex AI Endpoint to deploy the trained model to, where:
Since the component has no dependencies on other components, by default it would be executed in parallel with the model training.
The after(batch_op) is added to serialize its execution, so it is only executed after the batch prediction operation completes successfully.
The component returns the endpoint resource as outputs["endpoint"].
Use the prebuilt component ModelDeployOp to deploy the trained AutoML model, where:
The model is the output from the AutoMLImageTrainingJobRunOp.
The endpoint is the output from the EndpointCreateOp.
Note: Since each component is executed as a graph node in its own execution context, you pass the parameter project for each component op, in contrast to calling aiplatform.init(project=project) once, as you would in a Python script that calls the SDK methods directly within the same execution context.
End of explanation
"""
test_items = !gsutil cat $IMPORT_FILE | head -n2
if len(str(test_items[0]).split(",")) == 3:
_, test_item_1, test_label_1 = str(test_items[0]).split(",")
_, test_item_2, test_label_2 = str(test_items[1]).split(",")
else:
test_item_1, test_label_1 = str(test_items[0]).split(",")
test_item_2, test_label_2 = str(test_items[1]).split(",")
print(test_item_1, test_label_1)
print(test_item_2, test_label_2)
"""
Explanation: Get test item(s)
In the pipeline, you do a batch prediction on your Vertex model. You will use arbitrary examples from the dataset as test items. Don't be concerned that the examples were likely used while training the model. This step is just to demonstrate how to make a prediction.
End of explanation
"""
file_1 = test_item_1.split("/")[-1]
file_2 = test_item_2.split("/")[-1]
! gsutil cp $test_item_1 $BUCKET_URI/$file_1
! gsutil cp $test_item_2 $BUCKET_URI/$file_2
test_item_1 = BUCKET_URI + "/" + file_1
test_item_2 = BUCKET_URI + "/" + file_2
"""
Explanation: Copy test item(s)
For the batch prediction, copy the test items over to your Cloud Storage bucket.
End of explanation
"""
gcs_input_uri = BUCKET_URI + "/test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
data = {"content": test_item_1, "mime_type": "image/jpeg"}
f.write(json.dumps(data) + "\n")
data = {"content": test_item_2, "mime_type": "image/jpeg"}
f.write(json.dumps(data) + "\n")
print(gcs_input_uri)
! gsutil cat $gcs_input_uri
"""
Explanation: Make the batch input file
Now make a batch input file, which you will store in your Cloud Storage bucket. The batch input file can only be in JSONL format. For a JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the following key/value pairs:
content: The Cloud Storage path to the image.
mime_type: The content type. In our example, it is a jpeg file.
For example:
{'content': 'gs://[your-bucket]/file1.jpg', 'mime_type': 'image/jpeg'}
End of explanation
"""
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="automl_icn_training.json"
)
pipeline = aiplatform.PipelineJob(
display_name="automl_icn_training",
template_path="automl_icn_training.json",
pipeline_root=PIPELINE_ROOT,
parameter_values={
"import_file": IMPORT_FILE,
"batch_files": [gcs_input_uri],
"display_name": "flowers" + TIMESTAMP,
"project": PROJECT_ID,
"region": REGION,
},
)
pipeline.run()
! rm -f automl_icn_training.json
"""
Explanation: Compile and execute the pipeline
Next, you compile the pipeline and then execute it. The pipeline takes the following parameters, which are passed as the dictionary parameter_values:
import_file: The Cloud Storage path to the dataset index file.
batch_files: A list of Cloud Storage paths to the input batch files.
display_name: The display name for the generated Vertex AI resources.
project: The project ID.
region: The region.
End of explanation
"""
PROJECT_NUMBER = pipeline.gca_resource.name.split("/")[1]
print(PROJECT_NUMBER)
def print_pipeline_output(job, output_task_name):
JOB_ID = job.name
print(JOB_ID)
for _ in range(len(job.gca_resource.job_detail.task_details)):
TASK_ID = job.gca_resource.job_detail.task_details[_].task_id
EXECUTE_OUTPUT = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/executor_output.json"
)
GCP_RESOURCES = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/gcp_resources"
)
EVAL_METRICS = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/evaluation_metrics"
)
if tf.io.gfile.exists(EXECUTE_OUTPUT):
! gsutil cat $EXECUTE_OUTPUT
return EXECUTE_OUTPUT
elif tf.io.gfile.exists(GCP_RESOURCES):
! gsutil cat $GCP_RESOURCES
return GCP_RESOURCES
elif tf.io.gfile.exists(EVAL_METRICS):
! gsutil cat $EVAL_METRICS
return EVAL_METRICS
return None
print("image-dataset-create")
artifacts = print_pipeline_output(pipeline, "image-dataset-create")
print("\n\n")
print("automl-image-training-job")
artifacts = print_pipeline_output(pipeline, "automl-image-training-job")
print("\n\n")
output = !gsutil cat $artifacts
output = json.loads(output[0])
model_id = output["artifacts"]["model"]["artifacts"][0]["metadata"]["resourceName"]
print("\n")
print(model_id)
print("endpoint-create")
artifacts = print_pipeline_output(pipeline, "endpoint-create")
print("\n\n")
output = !gsutil cat $artifacts
output = json.loads(output[0])
endpoint_id = output["artifacts"]["endpoint"]["artifacts"][0]["metadata"][
"resourceName"
]
print("\n")
print(endpoint_id)
print("model-deploy")
artifacts = print_pipeline_output(pipeline, "model-deploy")
print("\n\n")
print("evaluateautomlmodelop")
artifacts = print_pipeline_output(pipeline, "evaluateautomlmodelop")
print("\n\n")
print("model-batch-predict")
artifacts = print_pipeline_output(pipeline, "model-batch-predict")
output = !gsutil cat $artifacts
output = json.loads(output[0])
print("\n\n")
print(
output["artifacts"]["batchpredictionjob"]["artifacts"][0]["metadata"][
"gcsOutputDirectory"
]
)
output = !gsutil cat $artifacts
output = json.loads(output[0])
batch_job_id = output["artifacts"]["batchpredictionjob"]["artifacts"][0]["metadata"][
"resourceName"
]
"""
Explanation: View AutoML training pipeline results
Finally, you will view the artifact outputs of each task in the pipeline.
End of explanation
"""
pipeline.delete()
"""
Explanation: Delete a pipeline job
After a pipeline job is completed, you can delete the pipeline job with the method delete(). Prior to completion, a pipeline job can be canceled with the method cancel().
End of explanation
"""
endpoint = aiplatform.Endpoint(endpoint_id)
"""
Explanation: Load an endpoint
The 'Endpoint' initializer will load an endpoint from an endpoint identifier.
End of explanation
"""
test_item = !gsutil cat $IMPORT_FILE | head -n1
if len(str(test_item[0]).split(",")) == 3:
_, test_item, test_label = str(test_item[0]).split(",")
else:
test_item, test_label = str(test_item[0]).split(",")
print(test_item, test_label)
"""
Explanation: Send an online prediction request
Send an online prediction request to your deployed model.
Get test item
You will use an arbitrary example out of the dataset as a test item. Don't be concerned that the example was likely used while training the model. This step is just to demonstrate how to make a prediction.
End of explanation
"""
with tf.io.gfile.GFile(test_item, "rb") as f:
content = f.read()
# The format of each instance should conform to the deployed model's prediction input schema.
instances = [{"content": base64.b64encode(content).decode("utf-8")}]
prediction = endpoint.predict(instances=instances)
print(prediction)
"""
Explanation: Make the prediction
Now that your Model resource is deployed to an Endpoint resource, you can do online predictions by sending prediction requests to the Endpoint resource.
Request
Since in this example your test item is in a Cloud Storage bucket, you open and read the contents of the image using tf.io.gfile.GFile(). To pass the test data to the prediction service, you encode the bytes into base64, which makes the content safe from modification while transmitting binary data over the network.
The format of each instance is:
{ 'content': base64_encoded_string }
Since the predict() method can take multiple items (instances), send your single test item as a list of one test item.
Response
The response from the predict() call is a Python dictionary with the following entries:
ids: The internal assigned unique identifiers for each prediction request.
displayNames: The class names for each class label.
confidences: The predicted confidence, between 0 and 1, per class label.
deployed_model_id: The Vertex AI identifier for the deployed Model resource which did the predictions.
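For example, a minimal sketch of pulling the highest-confidence label out of the response returned by the predict() call above (it assumes at least one prediction was returned and uses the response keys listed here):
python
# `prediction` is the object returned by endpoint.predict() in the cell above
result = prediction.predictions[0]
best = max(range(len(result["confidences"])), key=lambda i: result["confidences"][i])
print("predicted label:", result["displayNames"][best])
print("confidence:", result["confidences"][best])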
End of explanation
"""
endpoint.undeploy_all()
endpoint.delete()
model = aiplatform.Model(model_id)
model.delete()
batch_job = aiplatform.BatchPredictionJob(batch_job_id)
batch_job.delete()
"""
Explanation: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial.
Delete the Vertex AI Model, Endpoint and BatchPredictionJob resources
Undeploy and delete the Vertex AI Model, Endpoint and BatchPredictionJob resources.
End of explanation
"""
delete_bucket = False
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil rm -r $BUCKET_URI
"""
Explanation: Delete the Cloud Storage bucket
Set delete_bucket to True to delete the Cloud Storage bucket used in this notebook.
End of explanation
"""
|
cathywu/flow
|
tutorials/tutorial01_sumo.ipynb
|
mit
|
from flow.scenarios.loop import LoopScenario
"""
Explanation: Tutorial 01: Running Sumo Simulations
This tutorial walks through the process of running non-RL traffic simulations in Flow. Simulations of this form act as non-autonomous baselines and depict the behavior of human dynamics on a network. Similar simulations may also be used to evaluate the performance of hand-designed controllers on a network. This tutorial focuses primarily on the former use case, while an example of the latter may be found in exercise07_controllers.ipynb.
In this exercise, we simulate an initially perturbed single-lane ring road. We witness in simulation that, as time advances, the initial perturbations do not dissipate, but instead propagate and expand until vehicles are forced to periodically stop and accelerate. For more information on this behavior, we refer the reader to the following article [1].
1. Components of a Simulation
All simulations, both in the presence and absence of RL, require two components: a scenario, and an environment. Scenarios describe the features of the transportation network used in simulation. This includes the positions and properties of nodes and edges constituting the lanes and junctions, as well as properties of the vehicles, traffic lights, inflows, etc. in the network. Environments, on the other hand, initialize, reset, and advance simulations, and act as the primary interface between the reinforcement learning algorithm and the scenario. Moreover, custom environments may be used to modify the dynamical features of a scenario.
2. Setting up a Scenario
Flow contains a plethora of pre-designed scenarios used to replicate highways, intersections, and merges in both closed and open settings. All these scenarios are located in flow/scenarios. In order to recreate a ring road network, we begin by importing the scenario LoopScenario.
End of explanation
"""
name = "ring_example"
"""
Explanation: This scenario, as well as all other scenarios in Flow, is parametrized by the following arguments:
* name
* vehicles
* net_params
* initial_config
* traffic_lights
These parameters allow a single scenario to be recycled for a multitude of different network settings. For example, LoopScenario may be used to create ring roads of variable length with a variable number of lanes and vehicles.
2.1 Name
The name argument is a string variable depicting the name of the scenario. This has no effect on the type of network created.
End of explanation
"""
from flow.core.params import VehicleParams
vehicles = VehicleParams()
"""
Explanation: 2.2 VehicleParams
The VehicleParams class stores state information on all vehicles in the network. This class is used to identify the dynamical behavior of a vehicle and whether it is controlled by a reinforcement learning agent. Moreover, information pertaining to the observations and reward function can be collected from various get methods within this class.
The initial configuration of this class describes the number of vehicles in the network at the start of every simulation, as well as the properties of these vehicles. We begin by creating an empty VehicleParams object.
End of explanation
"""
from flow.controllers.car_following_models import IDMController
"""
Explanation: Once this object is created, vehicles may be introduced using the add method. This method specifies the types and quantities of vehicles at the start of a simulation rollout. For a description of the various arguments associated with the add method, we refer the reader to the following documentation (VehicleParams.add).
When adding vehicles, their dynamical behaviors may be specified either by the simulator (default), or by user-generated models. For longitudinal (acceleration) dynamics, several prominent car-following models are implemented in Flow. For this example, the acceleration behavior of all vehicles will be defined by the Intelligent Driver Model (IDM) [2].
End of explanation
"""
from flow.controllers.routing_controllers import ContinuousRouter
"""
Explanation: Another controller we define is for the vehicle's routing behavior. For closed networks, where the route for any vehicle is repeated, the ContinuousRouter controller is used to perpetually reroute all vehicles to the initial set route.
End of explanation
"""
vehicles.add("human",
acceleration_controller=(IDMController, {}),
routing_controller=(ContinuousRouter, {}),
num_vehicles=22)
"""
Explanation: Finally, we add 22 vehicles of type "human" with the above acceleration and routing behavior into the Vehicles class.
End of explanation
"""
from flow.scenarios.loop import ADDITIONAL_NET_PARAMS
print(ADDITIONAL_NET_PARAMS)
"""
Explanation: 2.3 NetParams
NetParams are network-specific parameters used to define the shape and properties of a network. Unlike most other parameters, NetParams may vary drastically depending on the specific network configuration, and accordingly most of its parameters are stored in additional_params. In order to determine which additional_params variables may be needed for a specific scenario, we refer to the ADDITIONAL_NET_PARAMS variable located in the scenario file.
End of explanation
"""
from flow.core.params import NetParams
net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS)
"""
Explanation: Importing the ADDITIONAL_NET_PARAMS dict from the ring road scenario, we see that the required parameters are:
length: length of the ring road
lanes: number of lanes
speed_limit: speed limit for all edges
resolution: resolution of the curves on the ring. Setting this value to 1 converts the ring to a diamond.
At times, other inputs may be needed from NetParams to recreate proper network features/behavior. These requirements can be found in the scenario's documentation. For the ring road, no attributes are needed aside from the additional_params terms. Furthermore, for this exercise, we use the scenario's default parameters when creating the NetParams object.
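As a quick illustration (not used in the remainder of this tutorial), the defaults can be copied and selectively overridden; the length value below is purely an illustrative choice:
python
from copy import deepcopy

from flow.core.params import NetParams
from flow.scenarios.loop import ADDITIONAL_NET_PARAMS

# start from the scenario defaults and override only the ring length
custom_params = deepcopy(ADDITIONAL_NET_PARAMS)
custom_params["length"] = 300  # illustrative value, in meters
custom_net_params = NetParams(additional_params=custom_params)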
End of explanation
"""
from flow.core.params import InitialConfig
initial_config = InitialConfig(spacing="uniform", perturbation=1)
"""
Explanation: 2.4 InitialConfig
InitialConfig specifies parameters that affect the positioning of vehicles in the network at the start of a simulation. These parameters can be used to limit the edges and number of lanes vehicles originally occupy, and provide a means of adding randomness to the starting positions of vehicles. In order to introduce a small initial disturbance to the system of vehicles in the network, we set the perturbation term in InitialConfig to 1 m.
End of explanation
"""
from flow.core.params import TrafficLightParams
traffic_lights = TrafficLightParams()
"""
Explanation: 2.5 TrafficLightParams
TrafficLightParams are used to describe the positions and types of traffic lights in the network. These inputs are outside the scope of this tutorial, and instead are covered in exercise06_traffic_lights.ipynb. For our example, we create an empty TrafficLightParams object, thereby ensuring that none are placed on any nodes.
End of explanation
"""
from flow.envs.loop.loop_accel import AccelEnv
"""
Explanation: 3. Setting up an Environment
Several environments in Flow exist to train autonomous agents of different forms (e.g. autonomous vehicles, traffic lights) to perform a variety of different tasks. These environments are often scenario or task specific; however, some can be deployed on an ambiguous set of scenarios as well. One such environment, AccelEnv, may be used to train a variable number of vehicles in a fully observable network with a static number of vehicles.
End of explanation
"""
from flow.core.params import SumoParams
sumo_params = SumoParams(sim_step=0.1, render=True, emission_path='data')
"""
Explanation: Although we will not be training any autonomous agents in this exercise, the use of an environment allows us to view the cumulative reward simulation rollouts receive in the absence of autonomy.
Envrionments in Flow are parametrized by three components:
* EnvParams
* SumoParams
* Scenario
3.1 SumoParams
SumoParams specifies simulation-specific variables. These variables include the length of a simulation step (in seconds) and whether to render the GUI when running the experiment. For this example, we consider a simulation step length of 0.1 s and activate the GUI.
Another useful parameter is emission_path, which is used to specify the path where the emission output files will be generated. These files contain a lot of information about the simulation, for instance the position and speed of each car at each time step. If you do not specify any emission path, the emission file will not be generated. More on this in Section 5.
End of explanation
"""
from flow.envs.loop.loop_accel import ADDITIONAL_ENV_PARAMS
print(ADDITIONAL_ENV_PARAMS)
"""
Explanation: 3.2 EnvParams
EnvParams specify environment and experiment-specific parameters that either affect the training process or the dynamics of various components within the scenario. Much like NetParams, the attributes associated with this parameter are mostly environment specific, and can be found in the environment's ADDITIONAL_ENV_PARAMS dictionary.
End of explanation
"""
from flow.core.params import EnvParams
env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)
"""
Explanation: Importing the ADDITIONAL_ENV_PARAMS variable, we see that it consists of only one entry, "target_velocity", which is used when computing the reward function associated with the environment. We use this default value when generating the EnvParams object.
End of explanation
"""
from flow.core.experiment import Experiment
"""
Explanation: 4. Setting up and Running the Experiment
Once the inputs to the scenario and environment classes are ready, we can set up an Experiment object.
End of explanation
"""
# create the scenario object
scenario = LoopScenario(name="ring_example",
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config,
traffic_lights=traffic_lights)
# create the environment object
env = AccelEnv(env_params, sumo_params, scenario)
# create the experiment object
exp = Experiment(env)
# run the experiment for a set number of rollouts / time steps
_ = exp.run(1, 3000, convert_to_csv=True)
"""
Explanation: These objects may be used to simulate rollouts in the absence of reinforcement learning agents, as well as acquire behaviors and rewards that may be used as a baseline with which to compare the performance of the learning agent. In this case, we choose to run our experiment for one rollout consisting of 3000 steps (300 s).
Note: When executing the below code, remember to click on the <img style="display:inline;" src="img/play_button.png"> Play button after the GUI is rendered.
End of explanation
"""
import os
emission_location = os.path.join(exp.env.sim_params.emission_path, exp.env.scenario.name)
print(emission_location + '-emission.xml')
"""
Explanation: As we can see from the above simulation, the initial perturbations in the network propagate and intensify, eventually leading to the formation of stop-and-go waves after approximately 180 s.
5. Visualizing Post-Simulation
Once the simulation is done, a .xml file will be generated in the location of the specified emission_path in SumoParams (assuming this parameter has been specified) under the name of the scenario. In our case, this is:
End of explanation
"""
import pandas as pd
pd.read_csv(emission_location + '-emission.csv')
"""
Explanation: The .xml file contains various vehicle-specific parameters at every time step. This information is transferred to a .csv file if the convert_to_csv parameter in exp.run() is set to True. This file looks as follows:
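As a rough sketch of what can be done with this data, the emission file can be analyzed further with pandas, reusing the emission_location from the previous cell. The column names id, time and speed used below are assumptions; adjust them to match the actual header of your emission file.
python
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv(emission_location + '-emission.csv')

# plot the speed profile of a single (arbitrary) vehicle over time
veh_id = df['id'].unique()[0]  # assumed column name
one_vehicle = df[df['id'] == veh_id]
plt.plot(one_vehicle['time'], one_vehicle['speed'])  # assumed column names
plt.xlabel('time (s)')
plt.ylabel('speed (m/s)')
plt.show()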
End of explanation
"""
|
turbomanage/training-data-analyst
|
courses/machine_learning/deepdive/09_sequence/poetry.ipynb
|
apache-2.0
|
%%bash
pip freeze | grep tensor
%%bash
pip install tensor2tensor==1.13.1 tensorflow==1.13.1 tensorflow-serving-api==1.13 gutenberg
pip install tensorflow_hub
# install from source
#git clone https://github.com/tensorflow/tensor2tensor.git
#cd tensor2tensor
#yes | pip install --user -e .
"""
Explanation: Text generation using tensor2tensor on Cloud ML Engine
This notebook illustrates using the <a href="https://github.com/tensorflow/tensor2tensor">tensor2tensor</a> library to do from-scratch, distributed training of a poetry model. Then, the trained model is used to complete new poems.
<br/>
Install tensor2tensor, and specify Google Cloud Platform project and bucket
Install the necessary packages. tensor2tensor will give us the Transformer model. Project Gutenberg gives us access to historical poems.
<b>p.s.</b> Note that this notebook uses Python2 because Project Gutenberg relies on BSD-DB which was deprecated in Python 3 and removed from the standard library.
tensor2tensor itself can be used on Python 3. It's just Project Gutenberg that has this issue.
End of explanation
"""
%%bash
pip freeze | grep tensor
import os
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# this is what this notebook is demonstrating
PROBLEM= 'poetry_line_problem'
# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['PROBLEM'] = PROBLEM
#os.environ['PATH'] = os.environ['PATH'] + ':' + os.getcwd() + '/tensor2tensor/tensor2tensor/bin/'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
"""
Explanation: If the following cell does not reflect the version of tensorflow and tensor2tensor that you just installed, click "Reset Session" on the notebook so that the Python environment picks up the new packages.
End of explanation
"""
%%bash
rm -rf data/poetry
mkdir -p data/poetry
from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
import re
books = [
# bookid, skip N lines
(26715, 1000, 'Victorian songs'),
(30235, 580, 'Baldwin collection'),
(35402, 710, 'Swinburne collection'),
(574, 15, 'Blake'),
(1304, 172, 'Bulchevys collection'),
(19221, 223, 'Palgrave-Pearse collection'),
(15553, 522, 'Knowles collection')
]
with open('data/poetry/raw.txt', 'w') as ofp:
lineno = 0
for (id_nr, toskip, title) in books:
startline = lineno
text = strip_headers(load_etext(id_nr)).strip()
lines = text.split('\n')[toskip:]
# any line that is all upper case is a title or author name
# also don't want any lines with years (numbers)
for line in lines:
if (len(line) > 0
and line.upper() != line
and not re.match('.*[0-9]+.*', line)
and len(line) < 50
):
cleaned = re.sub('[^a-z\'\-]+', ' ', line.strip().lower())
ofp.write(cleaned)
ofp.write('\n')
lineno = lineno + 1
else:
ofp.write('\n')
print('Wrote lines {} to {} from {}'.format(startline, lineno, title))
!wc -l data/poetry/*.txt
"""
Explanation: Download data
We will get some <a href="https://www.gutenberg.org/wiki/Poetry_(Bookshelf)">poetry anthologies</a> from Project Gutenberg.
End of explanation
"""
with open('data/poetry/raw.txt', 'r') as rawfp,\
open('data/poetry/input.txt', 'w') as infp,\
open('data/poetry/output.txt', 'w') as outfp:
prev_line = ''
for curr_line in rawfp:
curr_line = curr_line.strip()
# poems break at empty lines, so this ensures we train only
# on lines of the same poem
if len(prev_line) > 0 and len(curr_line) > 0:
infp.write(prev_line + '\n')
outfp.write(curr_line + '\n')
prev_line = curr_line
!head -5 data/poetry/*.txt
"""
Explanation: Create training dataset
We are going to train a machine learning model to write poetry given a starting point. We'll give it one line, and it is going to tell us the next line. So, naturally, we will train it on real poetry. Our feature will be a line of a poem and the label will be next line of that poem.
<p>
Our training dataset will consist of two files. The first file will consist of the input lines of poetry and the other file will consist of the corresponding output lines, one output line per input line.
End of explanation
"""
%%bash
rm -rf poetry
mkdir -p poetry/trainer
%%writefile poetry/trainer/problem.py
import os
import tensorflow as tf
from tensor2tensor.utils import registry
from tensor2tensor.models import transformer
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators import generator_utils
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
@registry.register_problem
class PoetryLineProblem(text_problems.Text2TextProblem):
"""Predict next line of poetry from the last line. From Gutenberg texts."""
@property
def approx_vocab_size(self):
return 2**13 # ~8k
@property
def is_generate_per_split(self):
# generate_data will NOT shard the data into TRAIN and EVAL for us.
return False
@property
def dataset_splits(self):
"""Splits of data to produce and number of output shards for each."""
# 10% evaluation data
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": 90,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": 10,
}]
def generate_samples(self, data_dir, tmp_dir, dataset_split):
with open('data/poetry/raw.txt', 'r') as rawfp:
prev_line = ''
for curr_line in rawfp:
curr_line = curr_line.strip()
# poems break at empty lines, so this ensures we train only
# on lines of the same poem
if len(prev_line) > 0 and len(curr_line) > 0:
yield {
"inputs": prev_line,
"targets": curr_line
}
prev_line = curr_line
# Smaller than the typical translate model, and with more regularization
@registry.register_hparams
def transformer_poetry():
hparams = transformer.transformer_base()
hparams.num_hidden_layers = 2
hparams.hidden_size = 128
hparams.filter_size = 512
hparams.num_heads = 4
hparams.attention_dropout = 0.6
hparams.layer_prepostprocess_dropout = 0.6
hparams.learning_rate = 0.05
return hparams
@registry.register_hparams
def transformer_poetry_tpu():
hparams = transformer_poetry()
transformer.update_hparams_for_tpu(hparams)
return hparams
# hyperparameter tuning ranges
@registry.register_ranged_hparams
def transformer_poetry_range(rhp):
rhp.set_float("learning_rate", 0.05, 0.25, scale=rhp.LOG_SCALE)
rhp.set_int("num_hidden_layers", 2, 4)
rhp.set_discrete("hidden_size", [128, 256, 512])
rhp.set_float("attention_dropout", 0.4, 0.7)
%%writefile poetry/trainer/__init__.py
from . import problem
%%writefile poetry/setup.py
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'tensor2tensor'
]
setup(
name='poetry',
version='0.1',
author = 'Google',
author_email = 'training-feedback@cloud.google.com',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='Poetry Line Problem',
requires=[]
)
!touch poetry/__init__.py
!find poetry
"""
Explanation: We do not need to generate the data beforehand -- instead, we can have Tensor2Tensor create the training dataset for us. So, in the code below, I will use only data/poetry/raw.txt -- obviously, this allows us to productionize our model better. Simply keep collecting raw data and generate the training/test data at the time of training.
Set up problem
The Problem in tensor2tensor is where you specify parameters like the size of your vocabulary and where to get the training data from.
End of explanation
"""
%%bash
DATA_DIR=./t2t_data
TMP_DIR=$DATA_DIR/tmp
rm -rf $DATA_DIR $TMP_DIR
mkdir -p $DATA_DIR $TMP_DIR
# Generate data
t2t-datagen \
--t2t_usr_dir=./poetry/trainer \
--problem=$PROBLEM \
--data_dir=$DATA_DIR \
--tmp_dir=$TMP_DIR
"""
Explanation: Generate training data
Our problem (a text-to-text problem, much like translation) requires the creation of text sequences from the training dataset. This is done using t2t-datagen and the Problem defined in the previous section.
(Ignore any runtime warnings about np.float64; they are harmless.)
End of explanation
"""
!ls t2t_data | head
"""
Explanation: Let's check to see the files that were output. If you see a broken pipe error, please ignore.
End of explanation
"""
%%bash
DATA_DIR=./t2t_data
gsutil -m rm -r gs://${BUCKET}/poetry/
gsutil -m cp ${DATA_DIR}/${PROBLEM}* ${DATA_DIR}/vocab* gs://${BUCKET}/poetry/data
%%bash
PROJECT_ID=$PROJECT
AUTH_TOKEN=$(gcloud auth print-access-token)
SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \
-H "Authorization: Bearer $AUTH_TOKEN" \
https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \
| python -c "import json; import sys; response = json.load(sys.stdin); \
print(response['serviceAccount'])")
echo "Authorizing the Cloud ML Service account $SVC_ACCOUNT to access files in $BUCKET"
gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET
gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored
gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET
"""
Explanation: Provide Cloud ML Engine access to data
Copy the data to Google Cloud Storage, and then provide access to the data. gsutil throws an error when removing an empty bucket, so you may see an error the first time this code is run.
End of explanation
"""
%%bash
BASE=gs://${BUCKET}/poetry/data
OUTDIR=gs://${BUCKET}/poetry/subset
gsutil -m rm -r $OUTDIR
gsutil -m cp \
${BASE}/${PROBLEM}-train-0008* \
${BASE}/${PROBLEM}-dev-00000* \
${BASE}/vocab* \
$OUTDIR
"""
Explanation: Train model locally on subset of data
Let's run it locally on a subset of the data to make sure it works.
End of explanation
"""
%%bash
DATA_DIR=gs://${BUCKET}/poetry/subset
OUTDIR=./trained_model
rm -rf $OUTDIR
t2t-trainer \
--data_dir=gs://${BUCKET}/poetry/subset \
--t2t_usr_dir=./poetry/trainer \
--problem=$PROBLEM \
--model=transformer \
--hparams_set=transformer_poetry \
--output_dir=$OUTDIR --job-dir=$OUTDIR --train_steps=10
"""
Explanation: Note: the following will work only if you are running Jupyter on a reasonably powerful machine. Don't be alarmed if your process is killed.
End of explanation
"""
%%bash
LOCALGPU="--train_steps=7500 --worker_gpu=1 --hparams_set=transformer_poetry"
DATA_DIR=gs://${BUCKET}/poetry/data
OUTDIR=gs://${BUCKET}/poetry/model
rm -rf $OUTDIR
t2t-trainer \
--data_dir=gs://${BUCKET}/poetry/subset \
--t2t_usr_dir=./poetry/trainer \
--problem=$PROBLEM \
--model=transformer \
--hparams_set=transformer_poetry \
--output_dir=$OUTDIR ${LOCALGPU}
"""
Explanation: Option 1: Train model locally on full dataset (use if running on Notebook Instance with a GPU)
You can train on the full dataset if you are on a Google Cloud Notebook Instance with a P100 or better GPU
End of explanation
"""
%%bash
GPU="--train_steps=7500 --cloud_mlengine --worker_gpu=1 --hparams_set=transformer_poetry"
DATADIR=gs://${BUCKET}/poetry/data
OUTDIR=gs://${BUCKET}/poetry/model
JOBNAME=poetry_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
echo "'Y'" | t2t-trainer \
--data_dir=gs://${BUCKET}/poetry/subset \
--t2t_usr_dir=./poetry/trainer \
--problem=$PROBLEM \
--model=transformer \
--output_dir=$OUTDIR \
${GPU}
%%bash
## CHANGE the job name (based on output above: You will see a line such as Launched transformer_poetry_line_problem_t2t_20190322_233159)
gcloud ml-engine jobs describe transformer_poetry_line_problem_t2t_20190323_003001
"""
Explanation: Option 2: Train on Cloud ML Engine
tensor2tensor has a convenient --cloud_mlengine option to kick off the training on the managed service.
It uses the Python API mentioned in the Cloud ML Engine docs, rather than requiring you to use gcloud to submit the job.
<p>
Note: your project needs P100 quota in the region.
<p>
The echo is because t2t-trainer asks you to confirm before submitting the job to the cloud. Ignore any error about "broken pipe".
If you see a message similar to this:
<pre>
[... cloud_mlengine.py:392] Launched transformer_poetry_line_problem_t2t_20190323_000631. See console to track: https://console.cloud.google.com/mlengine/jobs/.
</pre>
then, this step has been successful.
End of explanation
"""
%%bash
# use one of these
TPU="--train_steps=7500 --use_tpu=True --cloud_tpu_name=laktpu --hparams_set=transformer_poetry_tpu"
DATADIR=gs://${BUCKET}/poetry/data
OUTDIR=gs://${BUCKET}/poetry/model_tpu
JOBNAME=poetry_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
echo "'Y'" | t2t-trainer \
--data_dir=gs://${BUCKET}/poetry/subset \
--t2t_usr_dir=./poetry/trainer \
--problem=$PROBLEM \
--model=transformer \
--output_dir=$OUTDIR \
${TPU}
%%bash
gsutil ls gs://${BUCKET}/poetry/model_tpu
"""
Explanation: The job took about <b>25 minutes</b> for me and ended with these evaluation metrics:
<pre>
Saving dict for global step 8000: global_step = 8000, loss = 6.03338, metrics-poetry_line_problem/accuracy = 0.138544, metrics-poetry_line_problem/accuracy_per_sequence = 0.0, metrics-poetry_line_problem/accuracy_top5 = 0.232037, metrics-poetry_line_problem/approx_bleu_score = 0.00492648, metrics-poetry_line_problem/neg_log_perplexity = -6.68994, metrics-poetry_line_problem/rouge_2_fscore = 0.00256089, metrics-poetry_line_problem/rouge_L_fscore = 0.128194
</pre>
Notice that accuracy_per_sequence is 0 -- Considering that we are asking the NN to be rather creative, that doesn't surprise me. Why am I looking at accuracy_per_sequence and not the other metrics? This is because it is more appropriate for the problem we are solving; metrics like the Bleu score are better for translation.
Option 3: Train on a directly-connected TPU
If you are running on a VM connected directly to a Cloud TPU, you can run t2t-trainer directly. Unfortunately, you won't see any output from Jupyter while the program is running.
Compare this command line to the one using GPU in the previous section.
End of explanation
"""
%%bash
XXX This takes 3 hours on 4 GPUs. Remove this line if you are sure you want to do this.
DATADIR=gs://${BUCKET}/poetry/data
OUTDIR=gs://${BUCKET}/poetry/model_full2
JOBNAME=poetry_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
echo "'Y'" | t2t-trainer \
--data_dir=gs://${BUCKET}/poetry/subset \
--t2t_usr_dir=./poetry/trainer \
--problem=$PROBLEM \
--model=transformer \
--hparams_set=transformer_poetry \
--output_dir=$OUTDIR \
--train_steps=75000 --cloud_mlengine --worker_gpu=4
"""
Explanation: The job took about <b>10 minutes</b> for me and ended with these evaluation metrics:
<pre>
Saving dict for global step 8000: global_step = 8000, loss = 6.03338, metrics-poetry_line_problem/accuracy = 0.138544, metrics-poetry_line_problem/accuracy_per_sequence = 0.0, metrics-poetry_line_problem/accuracy_top5 = 0.232037, metrics-poetry_line_problem/approx_bleu_score = 0.00492648, metrics-poetry_line_problem/neg_log_perplexity = -6.68994, metrics-poetry_line_problem/rouge_2_fscore = 0.00256089, metrics-poetry_line_problem/rouge_L_fscore = 0.128194
</pre>
Notice that accuracy_per_sequence is 0 -- Considering that we are asking the NN to be rather creative, that doesn't surprise me. Why am I looking at accuracy_per_sequence and not the other metrics? This is because it is more appropriate for the problem we are solving; metrics like the Bleu score are better for translation.
Option 4: Training longer
Let's train on 4 GPUs for 75,000 steps. Note the change in the last line of the job.
End of explanation
"""
%%bash
gsutil ls gs://${BUCKET}/poetry/model #_modeltpu
"""
Explanation: This job took <b>12 hours</b> for me and ended with these metrics:
<pre>
global_step = 76000, loss = 4.99763, metrics-poetry_line_problem/accuracy = 0.219792, metrics-poetry_line_problem/accuracy_per_sequence = 0.0192308, metrics-poetry_line_problem/accuracy_top5 = 0.37618, metrics-poetry_line_problem/approx_bleu_score = 0.017955, metrics-poetry_line_problem/neg_log_perplexity = -5.38725, metrics-poetry_line_problem/rouge_2_fscore = 0.0325563, metrics-poetry_line_problem/rouge_L_fscore = 0.210618
</pre>
At least the accuracy per sequence is no longer zero. It is now 0.0192308 ... note that we are using a relatively small dataset (12K lines) and this is tiny in the world of natural language problems.
<p>
In order that you have your expectations set correctly: a high-performing translation model needs 400-million lines of input and takes 1 whole day on a TPU pod!
## Check trained model
End of explanation
"""
%%writefile data/poetry/rumi.txt
Where did the handsome beloved go?
I wonder, where did that tall, shapely cypress tree go?
He spread his light among us like a candle.
Where did he go? So strange, where did he go without me?
All day long my heart trembles like a leaf.
All alone at midnight, where did that beloved go?
Go to the road, and ask any passing traveler —
That soul-stirring companion, where did he go?
Go to the garden, and ask the gardener —
That tall, shapely rose stem, where did he go?
Go to the rooftop, and ask the watchman —
That unique sultan, where did he go?
Like a madman, I search in the meadows!
That deer in the meadows, where did he go?
My tearful eyes overflow like a river —
That pearl in the vast sea, where did he go?
All night long, I implore both moon and Venus —
That lovely face, like a moon, where did he go?
If he is mine, why is he with others?
Since he’s not here, to what “there” did he go?
If his heart and soul are joined with God,
And he left this realm of earth and water, where did he go?
Tell me clearly, Shams of Tabriz,
Of whom it is said, “The sun never dies” — where did he go?
"""
Explanation: Batch-predict
How will our poetry model do when faced with Rumi's spiritual couplets?
End of explanation
"""
%%bash
awk 'NR % 2 == 1' data/poetry/rumi.txt | tr '[:upper:]' '[:lower:]' | sed "s/[^a-z\'-\ ]//g" > data/poetry/rumi_leads.txt
head -3 data/poetry/rumi_leads.txt
%%bash
# same as the above training job ...
TOPDIR=gs://${BUCKET}
OUTDIR=${TOPDIR}/poetry/model #_tpu # or ${TOPDIR}/poetry/model_full
DATADIR=${TOPDIR}/poetry/data
MODEL=transformer
HPARAMS=transformer_poetry #_tpu
# the file with the input lines
DECODE_FILE=data/poetry/rumi_leads.txt
BEAM_SIZE=4
ALPHA=0.6
t2t-decoder \
--data_dir=$DATADIR \
--problem=$PROBLEM \
--model=$MODEL \
--hparams_set=$HPARAMS \
--output_dir=$OUTDIR \
--t2t_usr_dir=./poetry/trainer \
--decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \
--decode_from_file=$DECODE_FILE
"""
Explanation: Let's write out the odd-numbered lines. We'll compare how close our model can get to the beauty of Rumi's second lines given his first.
End of explanation
"""
%%bash
DECODE_FILE=data/poetry/rumi_leads.txt
cat ${DECODE_FILE}.*.decodes
"""
Explanation: <b> Note </b> if you get an error about "AttributeError: 'HParams' object has no attribute 'problems'" please <b>Reset Session</b>, run the cell that defines the PROBLEM and run the above cell again.
End of explanation
"""
%%bash
XXX This takes about 15 hours and consumes about 420 ML units. Uncomment if you wish to proceed anyway
DATADIR=gs://${BUCKET}/poetry/data
OUTDIR=gs://${BUCKET}/poetry/model_hparam
JOBNAME=poetry_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
echo "'Y'" | t2t-trainer \
--data_dir=gs://${BUCKET}/poetry/subset \
--t2t_usr_dir=./poetry/trainer \
--problem=$PROBLEM \
--model=transformer \
--hparams_set=transformer_poetry \
--output_dir=$OUTDIR \
--hparams_range=transformer_poetry_range \
--autotune_objective='metrics-poetry_line_problem/accuracy_per_sequence' \
--autotune_maximize \
--autotune_max_trials=4 \
--autotune_parallel_trials=4 \
--train_steps=7500 --cloud_mlengine --worker_gpu=4
"""
Explanation: Some of these are still phrases and not complete sentences. This indicates that we might need to train longer or better somehow. We need to diagnose the model ...
<p>
### Diagnosing training run
<p>
Let's diagnose the training run to see what we'd improve the next time around.
(Note that this package may not be present on Jupyter -- `pip install pydatalab` if necessary)
#### Monitor training with TensorBoard
To activate TensorBoard within the JupyterLab UI navigate to "<b>File</b>" - "<b>New Launcher</b>". Then double-click the 'Tensorboard' icon on the bottom row.
TensorBoard 1 will appear in the new tab. Navigate through the three tabs to see the active TensorBoard. The 'Graphs' and 'Projector' tabs offer very interesting information including the ability to replay the tests.
You may close the TensorBoard tab when you are finished exploring.
<table>
<tr>
<td><img src="diagrams/poetry_loss.png"/></td>
<td><img src="diagrams/poetry_acc.png"/></td>
</tr>
</table>
Looking at the loss curve, it is clear that we are overfitting (note that the orange training curve is well below the blue eval curve). Both the loss curves and the accuracy-per-sequence curve, which is our key evaluation measure, plateau after about 40k steps. (The red curve is a faster way of computing the evaluation metric, and can be ignored.) So, how do we improve the model? We need to reduce overfitting and make sure the evaluation metrics keep improving for as long as the training loss keeps going down.
<p>
What we really need to do is to get more data, but if that's not an option, we could try to reduce the NN and increase the dropout regularization. We could also do hyperparameter tuning on the dropout and network sizes.
## Hyperparameter tuning
tensor2tensor also supports hyperparameter tuning on Cloud ML Engine. Note the addition of the autotune flags.
<p>
The `transformer_poetry_range` was registered in problem.py above.
End of explanation
"""
%%bash
# same as the above training job ...
BEST_TRIAL=28 # CHANGE as needed.
TOPDIR=gs://${BUCKET}
OUTDIR=${TOPDIR}/poetry/model_hparam/$BEST_TRIAL
DATADIR=${TOPDIR}/poetry/data
MODEL=transformer
HPARAMS=transformer_poetry
# the file with the input lines
DECODE_FILE=data/poetry/rumi_leads.txt
BEAM_SIZE=4
ALPHA=0.6
t2t-decoder \
--data_dir=$DATADIR \
--problem=$PROBLEM \
--model=$MODEL \
--hparams_set=$HPARAMS \
--output_dir=$OUTDIR \
--t2t_usr_dir=./poetry/trainer \
--decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \
--decode_from_file=$DECODE_FILE \
--hparams="num_hidden_layers=4,hidden_size=512"
%%bash
DECODE_FILE=data/poetry/rumi_leads.txt
cat ${DECODE_FILE}.*.decodes
"""
Explanation: When I ran the above job, it took about 15 hours and finished with these as the best parameters:
<pre>
{
"trialId": "37",
"hyperparameters": {
"hp_num_hidden_layers": "4",
"hp_learning_rate": "0.026711152525921437",
"hp_hidden_size": "512",
"hp_attention_dropout": "0.60589466163419292"
},
"finalMetric": {
"trainingStep": "8000",
"objectiveValue": 0.0276162791997
}
</pre>
In other words, the accuracy per sequence achieved was 0.027 (as compared to 0.019 before hyperparameter tuning, so a <b>40% improvement!</b>) using 4 hidden layers, a learning rate of 0.0267, a hidden size of 512 and a dropout probability of 0.606. This is in spite of training for only 7500 steps instead of 75,000 steps ... we could train for 75k steps with these parameters, but I'll leave that as an exercise for you.
<p>
Instead, let's try predicting with this optimized model. Note the addition of the hparams override in order to replace the values hardcoded in the source code (there is no need to specify the learning rate and dropout, because they are not used during inference). Set BEST_TRIAL to the trial that performed best for you; in my run that was trialId=37.
End of explanation
"""
%%bash
TOPDIR=gs://${BUCKET}
OUTDIR=${TOPDIR}/poetry/model_full2
DATADIR=${TOPDIR}/poetry/data
MODEL=transformer
HPARAMS=transformer_poetry
BEAM_SIZE=4
ALPHA=0.6
t2t-exporter \
--model=$MODEL \
--hparams_set=$HPARAMS \
--problem=$PROBLEM \
--t2t_usr_dir=./poetry/trainer \
--decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \
--data_dir=$DATADIR \
--output_dir=$OUTDIR
%%bash
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/poetry/model_full2/export | tail -1)
echo $MODEL_LOCATION
saved_model_cli show --dir $MODEL_LOCATION --tag_set serve --signature_def serving_default
"""
Explanation: Take the first three lines. I'm showing the first line of the couplet provided to the model, how the AI model that we trained completes it, and how Rumi completes it:
<p>
INPUT: where did the handsome beloved go <br/>
AI: where art thou worse to me than dead <br/>
RUMI: I wonder, where did that tall, shapely cypress tree go?
<p>
INPUT: he spread his light among us like a candle <br/>
AI: like the hurricane eclipse <br/>
RUMI: Where did he go? So strange, where did he go without me? <br/>
<p>
INPUT: all day long my heart trembles like a leaf <br/>
AI: and through their hollow aisles it plays <br/>
RUMI: All alone at midnight, where did that beloved go?
<p>
Oh wow. The couplets as completed are quite decent considering that:
* We trained the model on American poetry, so feeding it Rumi is a bit out of left field.
* Rumi, of course, has a context and thread running through his lines while the AI (since it was fed only that one line) doesn't.
<p>
"Spreading light like a hurricane eclipse" is a metaphor I won't soon forget. And it was created by a machine learning model!
## Serving poetry
How would you serve these predictions? There are two ways:
<ol>
<li> Use [Cloud ML Engine](https://cloud.google.com/ml-engine/docs/deploying-models) -- this is serverless and you don't have to manage any infrastructure.
<li> Use [Kubeflow](https://github.com/kubeflow/kubeflow/blob/master/user_guide.md) on Google Kubernetes Engine -- this uses clusters but will also work on-prem on your own Kubernetes cluster.
</ol>
<p>
In either case, you need to export the model first and have TensorFlow serving serve the model. The model, however, expects to see *encoded* (i.e. preprocessed) data. So, we'll do that in the Python Flask application (in AppEngine Flex) that serves the user interface.
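As a minimal sketch of that preprocessing step, tensor2tensor's subword encoder can turn a raw line into the token ids the exported model expects. The vocabulary file name below is an assumption based on the ~8k vocabulary size chosen earlier; use whatever file t2t-datagen actually produced in the data directory.
python
from tensor2tensor.data_generators import text_encoder

# load the subword vocabulary produced by t2t-datagen (file name is an assumption)
encoder = text_encoder.SubwordTextEncoder('vocab.poetry_line_problem.8192.subwords')

# encode an input line into token ids; these form the 'inputs' of a prediction request
token_ids = encoder.encode('where did the handsome beloved go') + [text_encoder.EOS_ID]
print(token_ids)

# the ids in the model's 'outputs' field would be turned back into text with
# encoder.decode(output_ids)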
End of explanation
"""
%%writefile mlengine.json
description: Poetry service on ML Engine
autoScaling:
minNodes: 1 # We don't want this model to autoscale down to zero
%%bash
MODEL_NAME="poetry"
MODEL_VERSION="v1"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/poetry/model_full2/export | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
#gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} \
--model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version=1.13 --config=mlengine.json
"""
Explanation: Cloud ML Engine
End of explanation
"""
!cat application/app.yaml
%%bash
cd application
#gcloud app create # if this is your first app
#gcloud app deploy --quiet --stop-previous-version app.yaml
"""
Explanation: Kubeflow
Follow these instructions:
* On the GCP console, launch a Google Kubernetes Engine (GKE) cluster named 'poetry' with 2 nodes, each of which is a n1-standard-2 (2 vCPUs, 7.5 GB memory) VM
* On the GCP console, click on the Connect button for your cluster, and choose the CloudShell option
* In CloudShell, run:
git clone https://github.com/GoogleCloudPlatform/training-data-analyst
cd training-data-analyst/courses/machine_learning/deepdive/09_sequence
* Look at ./setup_kubeflow.sh and modify as appropriate.
AppEngine
What's deployed in Cloud ML Engine or Kubeflow is only the TensorFlow model. We still need a preprocessing service. That is done using AppEngine. Edit application/app.yaml appropriately.
End of explanation
"""
|
fonnesbeck/PyMC3_Oslo
|
notebooks/1. Introduction to PyMC3.ipynb
|
cc0-1.0
|
%load ../data/melanoma_data.py
%matplotlib inline
import seaborn as sns; sns.set_context('notebook')
from pymc3 import Normal, Model, DensityDist, sample, log, exp
with Model() as melanoma_survival:
# Convert censoring indicators to indicators for failure event
failure = (censored==0).astype(int)
# Parameters (intercept and treatment effect) for survival rate
beta = Normal('beta', mu=0.0, sd=1e5, shape=2)
# Survival rates, as a function of treatment
lam = exp(beta[0] + beta[1]*treat)
# Survival likelihood, accounting for censoring
def logp(failure, value):
return (failure * log(lam) - lam * value).sum()
x = DensityDist('x', logp, observed={'failure':failure, 'value':t})
"""
Explanation: Introduction to PyMC3
Probabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.
PyMC3 Features
Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.
PyMC3's feature set helps to make Bayesian analysis as painless as possible. Here is a short list of some of its features:
Fits Bayesian statistical models with Markov chain Monte Carlo, variational inference and
other algorithms.
Includes a large suite of well-documented statistical distributions.
Creates summaries including tables and plots.
Traces can be saved to the disk as plain text, SQLite or pandas dataframes.
Several convergence diagnostics and model checking methods are available.
Extensible: easily incorporates custom step methods and unusual probability distributions.
MCMC loops can be embedded in larger programs, and results can be analyzed with the full power of Python.
Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends.
End of explanation
"""
with melanoma_survival:
trace = sample(1000)
from pymc3 import traceplot
traceplot(trace[500:], varnames=['beta']);
"""
Explanation: This example will generate 1000 posterior samples.
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
disasters_data = np.array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
n_years = len(disasters_data)
plt.figure(figsize=(12.5, 3.5))
plt.bar(np.arange(1851, 1962), disasters_data, color="#348ABD")
plt.xlabel("Year")
plt.ylabel("Disasters")
plt.title("UK coal mining disasters, 1851-1962")
plt.xlim(1851, 1962);
"""
Explanation: Motivating Example: Coal mining disasters
Consider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period.
Let's build a model for this series and attempt to estimate when the change occurred.
End of explanation
"""
from pymc3 import DiscreteUniform
with Model() as disaster_model:
switchpoint = DiscreteUniform('switchpoint', lower=0, upper=n_years)
"""
Explanation: We represent our conceptual model formally as a statistical model:
$$\begin{array}{ccc}
(y_t | \tau, \lambda_1, \lambda_2) \sim \text{Poisson}\left(r_t\right), & r_t=\left\{
\begin{array}{lll}
\lambda_1 &\text{if}& t< \tau\\
\lambda_2 &\text{if}& t\ge \tau
\end{array}\right., & t\in[t_l,t_h]\\
\tau \sim \text{DiscreteUniform}(t_l, t_h)\\
\lambda_1 \sim \text{Exponential}(a)\\
\lambda_2 \sim \text{Exponential}(b)
\end{array}$$
Because we have defined $y$ by its dependence on $\tau$, $\lambda_1$ and $\lambda_2$, the latter three are known as the parents of $y$, and $y$ is called their child. Similarly, the parents of $\tau$ are $t_l$ and $t_h$, and $\tau$ is the child of $t_l$ and $t_h$.
Implementing a PyMC Model
At the model-specification stage (before the data are observed), $y$, $\tau$, $\lambda_1$, and $\lambda_2$ are all random variables. Bayesian "random" variables have not necessarily arisen from a physical random process. The Bayesian interpretation of probability is epistemic, meaning random variable $x$'s probability distribution $p(x)$ represents our knowledge and uncertainty about $x$'s value. Candidate values of $x$ for which $p(x)$ is high are relatively more probable, given what we know.
We can generally divide the variables in a Bayesian model into two types: stochastic and deterministic. The only deterministic variable in this model is $r$. If we knew the values of $r$'s parents, we could compute the value of $r$ exactly. A deterministic like $r$ is defined by a mathematical function that returns its value given values for its parents. Deterministic variables are sometimes called the systemic part of the model. The nomenclature is a bit confusing, because these objects usually represent random variables; since the parents of $r$ are random, $r$ is random also.
On the other hand, even if the values of the parents of variables switchpoint, disasters (before observing the data), early_mean or late_mean were known, we would still be uncertain of their values. These variables are stochastic, characterized by probability distributions that express how plausible their candidate values are, given values for their parents.
Let's begin by defining the unknown switchpoint as a discrete uniform random variable:
End of explanation
"""
foo = DiscreteUniform('foo', lower=0, upper=10)
"""
Explanation: We have done two things here. First, we have created a Model object; a Model is a Python object that encapsulates all of the variables that comprise our theoretical model, keeping them in a single container so that they may be used as a unit. After a Model is created, we can populate it with all of the model components that we specified when we wrote the model down.
Notice that the Model above was declared using a with statement. This expression is used to define a Python idiom known as a context manager. Context managers, in general, are used to manage resources of some kind within a program. In this case, our resource is a Model, and we would like to add variables to it so that we can fit our statistical model. The key characteristic of the context manager is that the resources it manages are only defined within the indented block corresponding to the with statement. PyMC uses this idiom to automatically add defined variables to a model. Thus, any variable we define is automatically added to the Model, without having to explicitly add it. This avoids the repetitive syntax of add methods/functions that you see in some machine learning packages:
python
model.add(a_variable)
model.add(another_variable)
model.add(yet_another_variable)
model.add(and_again)
model.add(please_kill_me_now)
...
In fact, PyMC variables cannot be defined without a corresponding Model:
End of explanation
"""
x = DiscreteUniform.dist(lower=0, upper=100)
x
"""
Explanation: However, variables can be explicitly added to models without the use of a context manager, via the variable's optional model argument.
python
disaster_model = Model()
switchpoint = DiscreteUniform('switchpoint', lower=0, upper=110, model=disaster_model)
Or, if we just want a discrete uniform distribution, and do not need to use it in a PyMC3 model necessarily, we can create one using the dist classmethod.
End of explanation
"""
from pymc3 import discrete
discrete.__all__
"""
Explanation: DiscreteUniform is an object that represents uniformly-distributed discrete variables. Use of this distribution
suggests that we have no preference a priori regarding the location of the switchpoint; all values are equally likely.
PyMC3 includes most of the common random variable distributions used for statistical modeling. For example, the following discrete random variables are available.
End of explanation
"""
from pymc3 import Exponential
with disaster_model:
early_mean = Exponential('early_mean', 1)
late_mean = Exponential('late_mean', 1)
"""
Explanation: By having a library of variables that represent statistical distributions, users are relieved of having to code distributions themselves.
Similarly, we can create the exponentially-distributed variables early_mean and late_mean for the early and late Poisson rates, respectively (also in the context of the model disaster_model):
End of explanation
"""
from pymc3 import switch
with disaster_model:
rate = switch(switchpoint >= np.arange(n_years), early_mean, late_mean)
"""
Explanation: In this instance, we are told that the variables are being transformed. In PyMC3, variables with purely positive priors like Exponential are transformed with a log function, which makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named <variable name>_log) is added to the model for sampling. Variables with priors that constrain them on two sides, like Beta or Uniform (continuous), are also transformed to be unconstrained, but with a log-odds transform.
Next, we define the variable rate, which selects the early rate early_mean for times before switchpoint and the late rate late_mean for times after switchpoint. We create rate using the switch function, which returns early_mean when the switchpoint is larger than (or equal to) a particular year, and late_mean otherwise.
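The comparison switchpoint >= np.arange(n_years) is vectorized over the years, so switch yields one rate value per year. A plain NumPy analogue of this selection, with made-up numbers purely to illustrate the semantics:
```python
import numpy as np

# hypothetical example: switchpoint 3, six years, rates 1.2 (early) and 0.5 (late)
np.where(3 >= np.arange(6), 1.2, 0.5)   # -> array([1.2, 1.2, 1.2, 1.2, 0.5, 0.5])
```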
End of explanation
"""
from pymc3 import Poisson
with disaster_model:
disasters = Poisson('disasters', mu=rate, observed=disasters_data)
"""
Explanation: The last step is to define the data likelihood, or sampling distribution. In this case, our measured outcome is the number of disasters in each year, disasters. This is a stochastic variable but unlike early_mean and late_mean we have observed its value. To express this, we set the argument observed to the observed sequence of disasters. This tells PyMC that this distribution's value is fixed, and should not be changed:
End of explanation
"""
disaster_model.vars
"""
Explanation: The model that we specified at the top of the page has now been fully implemented in PyMC3. Let's have a look at the model's attributes to see what we have.
The stochastic nodes in the model are identified in the vars (i.e. variables) attribute:
End of explanation
"""
disaster_model.deterministics
"""
Explanation: The last two variables are the log-transformed versions of the early and late rate parameters. The original variables have become deterministic nodes in the model, since they only represent values that have been back-transformed from the transformed variable, which has been subject to fitting or sampling.
End of explanation
"""
from pymc3 import Deterministic
with disaster_model:
rate = Deterministic('rate', switch(switchpoint >= np.arange(n_years), early_mean, late_mean))
"""
Explanation: You might wonder why rate, which is a deterministic component of the model, is not in this list. This is because, unlike the other components of the model, rate has not been given a name or wrapped in a formal PyMC data structure. It is essentially an intermediate calculation in the model, implying that we are not interested in its value when it comes to summarizing the output from the model. Most PyMC objects have a name assigned; these names are used for storage and post-processing:
as keys in on-disk databases,
as axis labels in plots of traces,
as table labels in summary statistics.
If we wish to include rate in our output, we need to make it a Deterministic object, and give it a name:
End of explanation
"""
disaster_model.deterministics
"""
Explanation: Now, rate is included in the Model's deterministics list, and the model will retain its samples during MCMC sampling, for example.
End of explanation
"""
disasters.dtype
early_mean.init_value
"""
Explanation: Why are data and unknown variables represented by the same object?
Since it is represented by a PyMC random variable object, disasters is defined by its dependence on its parent rate, even though its value is fixed. This isn't just a quirk of PyMC's syntax; Bayesian hierarchical notation itself makes no distinction between random variables and data. The reason is simple: to use Bayes' theorem to compute the posterior, we require the likelihood. Even though disasters's value is known and fixed, we need to formally assign it a probability distribution as if it were a random variable. Remember, the likelihood and the probability function are essentially the same, except that the former is regarded as a function of the parameters and the latter as a function of the data. This point can be counterintuitive at first, as many people's instinct is to regard data as fixed a priori and unknown variables as dependent on the data.
One way to understand this is to think of statistical models as predictive models for data, or as models of the processes that gave rise to data. Before observing the value of disasters, we could have sampled from its prior predictive distribution $p(y)$ (i.e. the marginal distribution of the data) as follows:
Sample early_mean, switchpoint and late_mean from their
priors.
Sample disasters conditional on these values.
Even after we observe the value of disasters, we need to use this process model to make inferences about early_mean, switchpoint and late_mean, because it is the only information we have about how the variables are related.
We will see later that we can sample from this fixed stochastic random variable, to obtain predictions after having observed our data.
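As an illustration of that two-step generative process, here is a rough sketch using plain NumPy rather than PyMC; the value of n_years below is just a placeholder for whatever was defined earlier in the notebook:
```python
import numpy as np

n_years = 111                                    # placeholder; use the n_years defined earlier
tau = np.random.randint(0, n_years + 1)          # switchpoint ~ DiscreteUniform(0, n_years)
lambda_1, lambda_2 = np.random.exponential(1.0, size=2)
rate = np.where(np.arange(n_years) < tau, lambda_1, lambda_2)
simulated_disasters = np.random.poisson(rate)    # one draw from the prior predictive p(y)
```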
PyMC3 Variables
Each of the built-in statistical variables are subclasses of the generic Distribution class in PyMC3. The Distribution carries relevant attributes about the probability distribution, such as the data type (called dtype), any relevant transformations (transform, see below), and initial values (init_value).
End of explanation
"""
plt.hist(switchpoint.random(size=1000))
"""
Explanation: PyMC's built-in distribution variables can also be used to generate random values from that variable. For example, the switchpoint, which is a discrete uniform random variable, can generate random draws:
End of explanation
"""
early_mean.transformed
"""
Explanation: As we noted earlier, some variables have undergone transformations prior to sampling. Such variables have a transformed attribute that points to the variable they have been transformed into.
End of explanation
"""
switchpoint.distribution
"""
Explanation: Variables will usually have an associated distribution, as determined by the constructor used to create it. For example, the switchpoint variable was created by calling DiscreteUniform(). Hence, its distribution is DiscreteUniform:
End of explanation
"""
type(switchpoint)
type(disasters)
"""
Explanation: As with all Python objects, the underlying type of a variable can be exposed with the type() function:
End of explanation
"""
switchpoint.logp({'switchpoint':55, 'early_mean_log':1, 'late_mean_log':1})
"""
Explanation: We will learn more about these types in an upcoming section.
Variable log-probabilities
All PyMC3 stochastic variables can evaluate their probability mass or density functions at a particular value, given the values of their parents. The logarithm of a stochastic object's probability mass or density can be
accessed via the logp method.
End of explanation
"""
disasters.logp({'switchpoint':55, 'early_mean_log':1, 'late_mean_log':1})
"""
Explanation: For vector-valued variables like disasters, the logp attribute returns the sum of the element-wise log-probabilities, that is, the logarithm of the joint probability or density of all elements of the value.
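As an informal check (a sketch with SciPy, not part of the original notebook): with both transformed means set to 1 in the point above, both back-transformed rates equal exp(1), so the value should match a plain sum of Poisson log-pmfs.
```python
import numpy as np
from scipy import stats

# assumes disasters_data is the observed array defined earlier in the notebook;
# should agree with disasters.logp(...) up to floating-point error
stats.poisson.logpmf(disasters_data, np.exp(1)).sum()
```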
End of explanation
"""
with disaster_model:
trace = sample(2000)
"""
Explanation: Custom variables
Though we created the variables in disaster_model using well-known probability distributions that are available in PyMC3, it's possible to create custom distributions by wrapping functions that compute an arbitrary log-probability using the DensityDist function. For example, our initial example showed an exponential survival function, which accounts for censored data. If we pass this function as the logp argument for DensityDist, we can use it as the data likelihood in a survival model:
```python
def logp(failure, value):
return (failure * log(lam) - lam * value).sum()
x = DensityDist('x', logp, observed={'failure':failure, 'value':t})
```
Users are thus not limited to the set of statistical distributions provided by PyMC.
Fitting the model with MCMC
PyMC3's sample function will fit probability models (linked collections of variables) like ours using Markov chain Monte Carlo (MCMC) sampling. Unless we manually assign particular algorithms to variables in our model, PyMC will assign algorithms that it deems appropriate (it usually does a decent job of this):
End of explanation
"""
trace
"""
Explanation: This returns the Markov chain of draws from the model in a data structure called a trace.
End of explanation
"""
help(sample)
"""
Explanation: The sample() function always takes at least one argument, draws, which specifies how many samples to draw. However, there are a number of additional optional arguments that are worth knowing about:
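For example, a common pattern is to set the number of tuning steps and a random seed (a sketch; argument names can vary slightly between PyMC3 releases):
```python
with disaster_model:
    # 1000 tuning (burn-in) steps, reproducible run
    trace = sample(2000, tune=1000, random_seed=42)
```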
End of explanation
"""
from pymc3 import Slice
with disaster_model:
trace = sample(1000, step=Slice(vars=[early_mean, late_mean]))
"""
Explanation: The step argument is what allows users to manually override the sampling algorithms used to fit the model. For example, if we wanted to use a slice sampler to sample the early_mean and late_mean variables, we could specify it:
End of explanation
"""
trace['late_mean']
"""
Explanation: Accessing the samples
The output of the sample function is a MultiTrace object, which stores the sequence of samples for each variable in the model. These traces can be accessed using dict-style indexing:
End of explanation
"""
trace['late_mean', -5:]
"""
Explanation: The trace can also be sliced using the NumPy array slice [start:stop:step].
End of explanation
"""
plt.hist(trace['late_mean']);
"""
Explanation: Sampling output
You can examine the marginal posterior of any variable by plotting a
histogram of its trace:
End of explanation
"""
from pymc3 import traceplot
traceplot(trace[500:], varnames=['early_mean', 'late_mean', 'switchpoint']);
"""
Explanation: PyMC has its own plotting functionality dedicated to plotting MCMC output. For example, we can obtain a time series plot of the trace and a histogram using traceplot:
End of explanation
"""
from pymc3 import summary
summary(trace[500:], varnames=['early_mean', 'late_mean'])
"""
Explanation: The upper left-hand pane of each figure shows the temporal series of the
samples from each parameter, while below is an autocorrelation plot of
the samples. The right-hand pane shows a histogram of the trace. The
trace is useful for evaluating and diagnosing the algorithm's
performance, while the histogram is useful for
visualizing the posterior.
For a non-graphical summary of the posterior, simply call the summary function.
End of explanation
"""
|
texib/deeplearning_homework
|
tensorflow-lite/export_model.ipynb
|
mit
|
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras import backend as K
import tensorflow as tf
from tensorflow.python.tools import freeze_graph, optimize_for_inference_lib
import numpy as np
"""
Explanation: Below is example code for exporting the model as a frozen graph.
End of explanation
"""
def export_model_for_mobile(model_name, input_node_name, output_node_name):
    # First dump the graph definition and checkpoint to temporary files
tf.train.write_graph(K.get_session().graph_def, 'out', \
model_name + '_graph.pbtxt')
tf.train.Saver().save(K.get_session(), 'out/' + model_name + '.chkp')
freeze_graph.freeze_graph('out/' + model_name + '_graph.pbtxt', None, \
False, 'out/' + model_name + '.chkp', output_node_name, \
"save/restore_all", "save/Const:0", \
'out/frozen_' + model_name + '.pb', True, "")
input_graph_def = tf.GraphDef()
with tf.gfile.Open('out/frozen_' + model_name + '.pb', "rb") as f:
input_graph_def.ParseFromString(f.read())
output_graph_def = optimize_for_inference_lib.optimize_for_inference(
input_graph_def, [input_node_name], [output_node_name],
tf.float32.as_datatype_enum)
with tf.gfile.FastGFile('out/tensorflow_lite_' + model_name + '.pb', "wb") as f:
f.write(output_graph_def.SerializeToString())
"""
Explanation: Exporting a frozen model, the standard approach
Since the target is TensorFlow Lite, the graph and its weights must first be loaded through a Session before they can be exported.
tf.train.write_graph: when using Keras, obtain the graph via K.get_session().graph_def and write it out to a .pbtxt file.
tf.train.Saver(): obtain the Session via K.get_session(), then save the checkpoint with tf.train.Saver().save().
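If the end goal is an actual .tflite file, the optimized frozen graph written out by export_model_for_mobile can then be passed to the TensorFlow Lite converter. A minimal sketch, assuming a TensorFlow 1.x release that provides tf.lite.TFLiteConverter.from_frozen_graph; the node names below are placeholders and should come from model.input.name / model.output.name as in the call further down:
```python
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    'out/tensorflow_lite_classmate_new.pb',    # graph written by export_model_for_mobile
    input_arrays=['conv2d_1_input'],           # placeholder input node name
    output_arrays=['activation_2/Softmax'])    # placeholder output node name
tflite_model = converter.convert()
with open('out/classmate_new.tflite', 'wb') as f:
    f.write(tflite_model)
```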
End of explanation
"""
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
model = Sequential([
Conv2D(8, (3, 3), activation='relu', input_shape=[128,128,3]),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(8, (3, 3), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(128),
Activation('relu'),
Dense(7),
Activation('softmax')
])
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.load_weights("/home/kent/git/DeepLearning_ClassmatesImageClassification_jdwang2018_5_25/CNN_Classfier_32X32_jdwang_Weight_1.h5")
"""
Explanation: Build the graph
End of explanation
"""
export_model_for_mobile('classmate_new', model.input.name.split(":")[0], model.output.name.split(":")[0])
"""
Explanation: Call the export function defined above
End of explanation
"""
|
OSGeoLabBp/tutorials
|
english/data_processing/lessons/img_def.ipynb
|
cc0-1.0
|
import glob # to extend file name pattern to list
import cv2 # OpenCV for image processing
from cv2 import aruco # to find ArUco markers
import numpy as np # for matrices
import matplotlib.pyplot as plt # to show images
"""
Explanation: <a href="https://colab.research.google.com/github/OSGeoLabBp/tutorials/blob/master/english/data_processing/lessons/img_def.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Movement and deformation analysis from images
Principles
Images/videos are taken by a stable camera; in other words, the camera does not move during the observations
A calibrated camera/system is necessary
Image resolution is enhanced by a geodetic telescope
Methods
Template matching
Pattern recognition
Template matching characteristics
Pros
There is always a match
Simple algorithm
A special marker is not necessary
Cons
The chance of false match is higher
No or minimal rotation
No or minimal scale change
Pattern recognition characteristics
Pros
Marker can rotate
Marker scale can change
Normal of the marker can be estimated
Cons
A special marker has to be fitted to the target
More sensitive to lighting conditions
First off, let's import the necessary Python packages.
End of explanation
"""
!wget -q -O sample_data/monalisa.jpg https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/monalisa.jpg
!wget -q -O sample_data/mona_temp4.png https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/mona_temp4.png
"""
Explanation: Template matching
Let's first download an image and a template to search for. The template is a smaller part of the original image.
End of explanation
"""
img = cv2.imread('sample_data/monalisa.jpg') # load image
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert image to grayscale
templ = cv2.imread('sample_data/mona_temp4.png') # load template
templ_gray = cv2.cvtColor(templ, cv2.COLOR_BGR2GRAY) # convert template to grayscale
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5)) # show image and template
ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax1.set_title('image to scan')
ax2.imshow(cv2.cvtColor(templ, cv2.COLOR_BGR2RGB)) # BGR vs. RGB
ax2.set_title('template to find')
ax2.set_xlim(ax1.get_xlim()) # set same scale
ax2.set_ylim(ax1.get_ylim())
print(f'image sizes: {img_gray.shape} template sizes: {templ_gray.shape}')
"""
Explanation: Both the image used for processing and the template are converted to grayscale images to boost efficiency.
End of explanation
"""
result = cv2.matchTemplate(img_gray, templ_gray, cv2.TM_CCOEFF_NORMED)
val, _, max = cv2.minMaxLoc(result)[1:4] # get position of best match
fr = np.array([max,
(max[0]+templ.shape[1], max[1]),
(max[0]+templ.shape[1], max[1]+templ.shape[0]),
(max[0], max[1]+templ.shape[0]),
max])
result_uint = ((result - np.min(result)) / (np.max(result) - np.min(result)) * 256).astype('uint8')
result_img = cv2.cvtColor(result_uint, cv2.COLOR_GRAY2BGR)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax1.set_title('Match on original image')
ax1.plot(fr[:,0], fr[:,1], 'r')
ax1.plot([max[0]],[max[1]], 'r*')
ax2.imshow(result_img)
ax2.plot(fr[:,0], fr[:,1], 'r')
ax2.plot([max[0]],[max[1]], 'r*')
ax2.set_title('Normalized coefficients')
ax2.set_xlim(ax1.get_xlim()) # set same scale
ax2.set_ylim(ax1.get_ylim())
print(f'best match at {max} value {val:.6f}')
"""
Explanation: Change the code above to plot grayscale images.
The OpenCV package has a function for template matching, so let's call it and display the result. The matchTemplate function can calculate six different formulas to find the best match. With TM_CCOEFF_NORMED it calculates a normalized coefficient in the range (0, 1), where a perfect match gives the value 1.
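The other methods can be compared the same way; a short sketch that reuses the img_gray and templ_gray arrays prepared earlier (note that for TM_SQDIFF_NORMED the minimum, not the maximum, marks the best match):
```python
for name in ['TM_CCOEFF_NORMED', 'TM_CCORR_NORMED', 'TM_SQDIFF_NORMED']:
    res = cv2.matchTemplate(img_gray, templ_gray, getattr(cv2, name))
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    best = min_loc if name == 'TM_SQDIFF_NORMED' else max_loc
    print(name, 'best match at', best)
```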
End of explanation
"""
!wget -q -O sample_data/monalisa_tilt.jpg https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/monalisa_tilt.jpg
"""
Explanation: Change the code above and try other methods, TM_CCORR_NORMED, TM_SQDIFF_NORMED, for instance.
Image transformation
If the pattern is rotated or scaled, the template might not match the image. This issue can be fixed by using a homography matrix. For more details see: source
Let's download another image with a rotated Mona Lisa.
End of explanation
"""
img = cv2.imread('sample_data/monalisa_tilt.jpg', cv2.IMREAD_GRAYSCALE)
result = cv2.matchTemplate(img, templ_gray, cv2.TM_CCOEFF_NORMED)
val, _, max = cv2.minMaxLoc(result)[1:4]
fr = np.array([max,
(max[0]+templ.shape[1], max[1]),
(max[0]+templ.shape[1], max[1]+templ.shape[0]),
(max[0], max[1]+templ.shape[0]),
max])
plt.imshow(img, cmap="gray")
plt.plot(fr[:,0], fr[:,1], 'r')
plt.plot([max[0]],[max[1]], 'r*')
print(f'best match at {max} value {val:.6f} BUT FALSE!')
"""
Explanation: Let's try to find the template on the rotated image.
End of explanation
"""
def project_img(image, a_src, a_dst):
""" calculate transformation matrix """
new_image = image.copy() # make a copy of input image
# get parameters of transformation
projective_matrix = cv2.getPerspectiveTransform(a_src, a_dst)
# transform image
transformed = cv2.warpPerspective(img, projective_matrix, image.shape)
# cut destination area
transformed = transformed[0:int(np.max(a_dst[:,1])),0:int(np.max(a_dst[:,0]))]
return transformed
# frame on warped image
src = [(240, 44), (700, 116), (703, 815), (243, 903)]
# frame on original
s = img_gray.shape
dst = [(0, 0), (s[1], 0), (s[1], s[0]), (0,s[0])]
a_src = np.float32(src)
a_dst = np.float32(dst)
# image transformation
img_dst = project_img(img, a_src, a_dst)
# template match
result = cv2.matchTemplate(img_dst, templ_gray, cv2.TM_CCOEFF_NORMED)
val, _, max = cv2.minMaxLoc(result)[1:4]
# frame around template on transformed image
fr = np.array([max,
(max[0]+templ.shape[1], max[1]),
(max[0]+templ.shape[1], max[1]+templ.shape[0]),
(max[0], max[1]+templ.shape[0]),
max])
fig, ax = plt.subplots(1,2, figsize=(13,8))
ax[0].imshow(img, cmap="gray");
ax[0].plot(a_src[:,0], a_src[:,1], 'r--')
ax[0].set_title('Original Image')
ax[1].imshow(img_dst, cmap="gray")
ax[1].plot(a_dst[:,0], a_dst[:,1], 'r--')
ax[1].set_title('Warped Image')
ax[1].plot(fr[:,0], fr[:,1], 'r')
ax[1].plot([max[0]],[max[1]], 'r*')
print(f'best match at {max} value {val:.2f}')
"""
Explanation: Let's transform the image back to the perpendicular plane.
End of explanation
"""
!wget -q -O sample_data/markers.png https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/markers.png
img = cv2.imread('sample_data/markers.png')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(cv2.aruco.DICT_4X4_100)
params = aruco.DetectorParameters_create()
corners, ids, _ = aruco.detectMarkers(img_gray, aruco_dict, parameters=params)
x = np.zeros(ids.size)
y = np.zeros(ids.size)
img1 = img.copy()
for j in range(ids.size):
x[j] = int(round(np.average(corners[j][0][:, 0])))
y[j] = int(round(np.average(corners[j][0][:, 1])))
cv2.putText(img1, str(ids[j][0]), (int(x[j]+2), int(y[j])), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 255), 3)
fig, ax = plt.subplots(1,2, figsize=(10,5))
ax[0].imshow(img)
ax[1].imshow(img1)
ax[1].plot(x, y, "ro")
print(list(zip(list(x), list(y))))
"""
Explanation: Recognition of ArUco markers
"An ArUco marker is a synthetic square marker composed by a wide black border and an inner binary matrix which determines its identifier (id). The black border facilitates its fast detection in the image and the binary codification allows its identification and the application of error detection and correction techniques. The marker size determines the size of the internal matrix. For instance a marker size of 4x4 is composed by 16 bits." (from OpenCV documentation)
There is a contrib package in OpenCV to detect ArUco markers called aruco.
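As a side note, the same module can also generate marker images for printing or testing; a minimal sketch (the marker id and pixel size below are arbitrary choices):
```python
marker_dict = aruco.Dictionary_get(aruco.DICT_4X4_100)
marker_img = aruco.drawMarker(marker_dict, 0, 200)   # marker id 0, 200x200 px
plt.imshow(marker_img, cmap='gray')
```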
Let's find six ArUco markers on a simple image.
End of explanation
"""
!wget -q -O sample_data/cal.zip https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/cal.zip
!unzip -q -o sample_data/cal.zip -d sample_data
width = 5 # Charuco board size
height = 7
board = cv2.aruco.CharucoBoard_create(width, height, .025, .0125, aruco_dict) # generate board in memory
img = board.draw((500, 700))
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
_ = plt.title('Charuco board')
"""
Explanation: Calibration
Low-cost cameras might have significant distortions (either radial or tangential). Therefore, we have to calibrate cameras before using in deformation and movement analysis.
Radial distortion
$$ x' = x (1 + k_1 r^2 + k_2 r^4 + k_3 r^6) $$
$$ y' = y (1 + k_1 r^2 + k_2 r^4 + k_3 r^6) $$
Tangential distortion
$$ x' = x + (2 p_1 x y + p_2 (r^2 + 2 x^2)) $$
$$ y' = y + (p_1 (r^2+2 y^2) + 2 p_2 x y) $$
Camera matrix
<table>
<tr><td>f<sub>x</sub></td><td>0</td><td>c<sub>x</sub></td></tr>
<tr><td>0</td><td>f<sub>y</sub></td><td>c<sub>y</sub></td></tr>
<tr><td>0</td><td>0</td><td>1</td></tr></table>
Distortion parameters are ($ k_1, k_2, k_3, p_1, p_2 $). Camera matrix contains focal length ($ f_x, f_y $) and optical centers ($ c_x, c_y $).
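As a quick numerical illustration of these formulas (the coefficients below are made up for the example; they are not the calibration results computed later):
```python
k1, k2, k3 = -0.2, 0.03, 0.0        # hypothetical radial coefficients
p1, p2 = 0.001, -0.0005             # hypothetical tangential coefficients
x, y = 0.3, -0.2                    # a normalized, distortion-free point
r2 = x**2 + y**2
radial = 1 + k1*r2 + k2*r2**2 + k3*r2**3
x_d = x*radial + 2*p1*x*y + p2*(r2 + 2*x**2)
y_d = y*radial + p1*(r2 + 2*y**2) + 2*p2*x*y
print(x_d, y_d)                     # where the point lands on the distorted image
```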
For the calibration we need a chessboard like figure and more than ten photos from different directions.
Let's download the images for calibration.
End of explanation
"""
fig, ax = plt.subplots(1, 6, figsize=(15, 2))
for i in range(6):
im = cv2.imread('sample_data/cal{:d}.jpg'.format(i+1))
ax[i].imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
ax[i].set_title('cal{:d}.jpg'.format(i+1))
"""
Explanation: The first 6 images for calibration:
End of explanation
"""
allCorners = []
allIds = []
decimator = 0
for name in glob.glob("sample_data/cal*.jpg"):
frame = cv2.imread(name)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, _ = cv2.aruco.detectMarkers(gray, aruco_dict)
ret, corners1, ids1 = cv2.aruco.interpolateCornersCharuco(corners, ids, gray, board)
allCorners.append(corners1)
allIds.append(ids1)
decimator += 1
ret, mtx, dist, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(allCorners, allIds, board, gray.shape, None, None)
print("Camera matrix [pixels]")
for i in range(mtx.shape[0]):
print(f'{mtx[i][0]:8.1f} {mtx[i][1]:8.1f} {mtx[i][2]:8.1f}')
print('Radial components')
print(30 * '-')
print(f'{dist[0][0]:10.5f} {dist[0][1]:10.5f} {dist[0][2]:10.5f}')
print(30 * '-')
print('Tangential components')
print(f'{dist[0][3]:10.5f} {dist[0][4]:10.5f}')
"""
Explanation: Using the ArUco calibration, let's find the camera matrix and the associated radial and tangential distortion parameters.
End of explanation
"""
gray = cv2.imread('sample_data/cal1.jpg', cv2.IMREAD_GRAYSCALE)
fig, ax = plt.subplots(1, 2, figsize=(10,5))
ax[0].imshow(gray, cmap='gray')
ax[0].set_title('distorted image')
ax[1].imshow(cv2.undistort(gray, mtx, dist, None), cmap='gray')
_ = ax[1].set_title('undistorted image')
"""
Explanation: Plot the original (distorted) image next to the one corrected with the calibration parameters.
End of explanation
"""
!wget -q -O sample_data/demo.mp4 https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/demo.mp4
cap = cv2.VideoCapture('sample_data/demo.mp4')
frame = 0 # frame counter
xc = [] # for pixel coordinates of marker
yc = []
frames = []
while True:
ret, img = cap.read() # get next frame from video
if ret:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert image to grayscale
img_gray = cv2.undistort(gray, mtx, dist, None) # remove camera distortion using calibration
corners, ids, _ = aruco.detectMarkers(img_gray, aruco_dict, parameters=params) # find ArUco markers
if ids: # marker found?
yc.append(img_gray.shape[1] - int(round(np.average(corners[0][0][:, 1])))) # change y direction
frames.append(frame)
frame += 1 # frame count
else:
break # no more images
plt.plot(frames, yc)
plt.title('Vertical positions of ArUco marker from video frames')
plt.xlabel('frame count')
plt.grid()
_ = plt.ylabel('vertical position [pixel]')
"""
Explanation: Complex example
We have a video of a moving object with an ArUco marker. Let's process the video frame by frame and make a plot of movements. During the process images are corrected by the calibration data.
Click here to watch video.
End of explanation
"""
|
relopezbriega/mi-python-blog
|
content/notebooks/MachineLearningPractica2.ipynb
|
gpl-2.0
|
# <!-- collapse=True -->
# Import the libraries we will use
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cross_validation import train_test_split
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFE
from sklearn.ensemble import ExtraTreesClassifier
# inline plots
%matplotlib inline
# seaborn aesthetic parameters
sns.set_palette("deep", desat=.6)
sns.set_context(rc={"figure.figsize": (8, 4)})
# import the preprocessed dataset.
ONG_data = pd.read_csv('LEARNING_procesado.csv', header=0)
# Add the AGE2 column
AGE2 = pd.cut(ONG_data['AGE'], range(0, 100, 10))
ONG_data['AGE2'] = AGE2
# Remove rows with donations above 60 (outliers)
ONG_data = ONG_data[ONG_data.DONOR_AMOUNT < 60]
# Convert categorical data to numeric
tipos = ONG_data.columns.to_series().groupby(ONG_data.dtypes).groups
ctext = tipos[np.dtype('object')]
for c in ctext:
ONG_data[c], _ = pd.factorize(ONG_data[c])
ONG_data['AGE2'], _ = pd.factorize(ONG_data['AGE2'])
"""
Explanation: Machine Learning example with Python - Feature selection
This notebook was originally created as a blog post by Raúl E. López Briega at Matemáticas, Analisis de datos y Python. The content is under the BSD license.
<img alt="Machine Learning" title="Machine Learning" src="https://relopezbriega.github.io/images/machine-learning.jpg">
Introduction
Picking up where we left off in the previous article, Ejemplo de Machine Learning - preprocesamiento y exploración, it is now time to move into the territory of feature selection.
What is feature selection?
Feature selection is the process by which we select the subset of attributes (represented by each of the columns in a tabular dataset) that are most relevant for building the predictive model we are working on.
This process should not be confused with dimensionality reduction; although both aim to reduce the number of attributes in our dataset, the latter does so by creating new attributes that are combinations of the existing ones, whereas in feature selection we try to include or exclude attributes practically without modifying them.
Feature selection is as much an art as a science, where knowledge of the problem and intuition are extremely important. The goal of feature selection is threefold: to improve the predictive power of our model, to provide faster and more efficient predictive models, and to provide a better understanding of the underlying process that generated the data. Feature selection methods can be used to identify and remove unnecessary, irrelevant and redundant attributes that do not contribute to the accuracy of the predictive model, or may even reduce it.
Benefits of feature selection
One of the main benefits of feature selection is captured by the famous phrase "Less is more" from the architect Ludwig Mies van der Rohe, a precursor of minimalism. Fewer attributes are desirable because they reduce the complexity of the model, and a simpler model is easier to understand and explain.
Other additional benefits that a good feature selection gives us before we start building the model are:
It reduces overfitting: less redundant data means fewer opportunities to make decisions based on noise.
It improves accuracy: less misleading data translates into better model accuracy.
It reduces training time: less data means the algorithms learn faster.
Univariate or multivariate feature selection
One thing we must not overlook in the feature selection process is the relationship that may exist between attributes. That is, we must consider whether to select or remove attributes individually (univariate) or as a group (multivariate). This also depends on the problem we are dealing with and on the model we choose. For example, if we choose a naive Bayes classifier as our model, it assumes that each attribute is independent of the rest, so we could use a univariate approach without problems; on the other hand, if we choose a neural network, it does not assume independence of the attributes but uses all of those available, so here we should follow a multivariate approach to select the attributes.
Algorithms for feature selection
We can find two general classes of feature selection algorithms: filter methods and wrapper methods.
Filter methods
These methods apply a statistical measure to assign a score to each attribute. The attributes are then ranked by their score and either selected to be kept or removed from the dataset. Filter methods are often univariate and consider each attribute independently, or with respect to the dependent variable.
Examples of these methods are: the chi-squared test, Fisher's F-test, the information gain ratio and correlation coefficients.
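For instance, once the ONG_data frame and the DONOR_FLAG target have been prepared (they are built in the cells below), a correlation-coefficient filter can be sketched in a single pandas expression:
```python
# rank attributes by absolute correlation with the target (sketch);
# the target itself appears first with correlation 1
ONG_data.corr()['DONOR_FLAG'].abs().sort_values(ascending=False).head(15)
```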
Wrapper methods
These methods treat the selection of a set of attributes as a search problem, where different combinations are evaluated and compared. A predictive model is used to carry out these evaluations, and each combination is assigned a score based on the model's accuracy.
An example of this kind of method is the recursive feature elimination algorithm.
Example
Let's now see how we can apply all of this to the case we were working on in the previous article. But first, let's finish a few additional preprocessing tasks.
End of explanation
"""
# Separate the target columns
donor_flag = ONG_data['DONOR_FLAG']
donor_amount = ONG_data['DONOR_AMOUNT']
indice = ONG_data['IDX']
# Apply the univariate F-test algorithm.
k = 15 # number of attributes to select
entrenar = ONG_data.drop(['DONOR_FLAG', 'DONOR_AMOUNT', 'IDX'], axis=1)
columnas = list(entrenar.columns.values)
seleccionadas = SelectKBest(f_classif, k=k).fit(entrenar, donor_flag)
atrib = seleccionadas.get_support()
atributos = [columnas[i] for i in list(atrib.nonzero()[0])]
atributos
"""
Explanation: With these manipulations we loaded into memory the dataset we preprocessed earlier, added the new AGE2 column (it is better to have age grouped in ranges rather than as individual values), then removed the outliers we had detected; and finally, we replaced all categorical variables with their numeric equivalent, since Scikit-learn algorithms work much more efficiently with numeric variables.
Now we are ready to start applying some of the feature selection algorithms; let's begin with a simple univariate algorithm that applies the filter method. For this we will use the SelectKBest and f_classif objects from the sklearn.feature_selection package.
End of explanation
"""
# Recursive Feature Elimination algorithm with ExtraTrees
modelo = ExtraTreesClassifier()
era = RFE(modelo, 15) # number of attributes to select
era = era.fit(entrenar, donor_flag)
# print the results
atrib = era.support_
atributos = [columnas[i] for i in list(atrib.nonzero()[0])]
atributos
"""
Explanation: As we can see, the algorithm selected the number of attributes we asked for; in this example we decided to select only 15; obviously, when we build our final model we will take a larger number of attributes.
How does it work?
This algorithm selects the best attributes based on a univariate statistical test. We pass the SelectKBest object the statistical test to apply, in this case an F-test defined by the f_classif object, together with the number of attributes to select. The algorithm applies the test to all attributes and selects those with the best results.
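To see what drove the selection, the univariate F-scores can also be inspected directly (a sketch; scores_ is standard scikit-learn API on the fitted selector seleccionadas from the cell above):
```python
# rank all attributes by their F-score
scores = pd.Series(seleccionadas.scores_, index=columnas)
scores.sort_values(ascending=False).head(15)
```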
Now let's see how the Recursive Feature Elimination algorithm works. In this case, we are going to use the ExtraTreesClassifier algorithm as our predictive model.
End of explanation
"""
# Feature importances.
modelo.fit(entrenar, donor_flag)
modelo.feature_importances_[:15]
# 15 highest coefficients
np.sort(modelo.feature_importances_)[::-1][:15]
"""
Explanation: How does it work?
In this algorithm, given a predictive model that assigns an importance coefficient to each attribute (such as ExtraTreesClassifier), the goal of Recursive Feature Elimination is to recursively select a smaller and smaller number of attributes. It starts with all the attributes of the dataset and then, on each pass, removes those with the lowest importance coefficient until the desired number of attributes is reached.
If we look at the 15 attributes selected by this other algorithm, there are many differences from those selected by the previous method; in general, Recursive Feature Elimination tends to be much more precise, but it also consumes much more time and resources, since it requires training a predictive model to obtain its results.
Finally, we could also use the importance coefficient provided by the model as an additional guide to refine our feature selection.
End of explanation
"""
# Probability of being a donor over the whole dataset.
prob_gral = (ONG_data[ONG_data.DONOR_AMOUNT > 0]['DONOR_AMOUNT'].count() \
/ ONG_data['DONOR_AMOUNT'].count()) * 100.0
prob_gral
# Probability of donating when LASTGIFT <= 10
lastgift10 = (ONG_data[(ONG_data.DONOR_AMOUNT > 0) &
(ONG_data.LASTGIFT <= 10)]['DONOR_AMOUNT'].count() \
/ ONG_data[ONG_data.LASTGIFT <= 10]['DONOR_AMOUNT'].count()) * 100.0
lastgift10
# plot the results
lastgift = pd.Series({'promedio gral': prob_gral, 'lastgift<=10': lastgift10})
plot=lastgift.plot(kind='barh',
color=['blue', 'green']).set_title('Pobabilidad de donar')
"""
Explanation: Let's analyze some of these attributes individually to get an idea of how much they might contribute to the model's accuracy. We could compare, for example, the average donation we could obtain with this attribute against the average over the whole dataset. Take for example the LASTGIFT attribute, which represents the amount of the last donation made by each person included in the dataset. In principle it seems logical that this attribute is significant for the model, since if someone donated in the past, there is a fair chance they will donate again this time.
End of explanation
"""
# overall average donation amount
donacion_prom = ONG_data[ONG_data.DONOR_AMOUNT > 0]['DONOR_AMOUNT'].mean()
donacion_prom
# average donation amount when LASTGIFT <= 10
lastgift10_imp = ONG_data[(ONG_data.DONOR_AMOUNT > 0) &
(ONG_data.LASTGIFT <= 10)]['DONOR_AMOUNT'].mean()
lastgift10_imp
# plot the results
lastgift = pd.Series({'imp promedio gral': donacion_prom,
'lastgift<=10': lastgift10_imp})
plot=lastgift.plot(kind='barh',
color=['blue', 'green']
).set_title('importe promedio de donación')
"""
Explanation: This last plot clearly shows that with a LASTGIFT value less than or equal to 10 the probability that the person makes a donation improves, but let's see what happens with the donation amount.
End of explanation
"""
|
trangel/Insight-Data-Science
|
analysis-data/Length-forum-posts.ipynb
|
gpl-3.0
|
# Set up paths/ os
import os
import sys
this_path=os.getcwd()
os.chdir("../data")
sys.path.insert(0, this_path)
# Load datasets
import pandas as pd
df = pd.read_csv("MedHelp-posts.csv",index_col=0)
df.head(2)
df_users = pd.read_csv("MedHelp-users.csv",index_col=0)
df_users.head(2)
# 1. Classify users as professionals or general public:
df_users['is expert']=0
for user_id in df_users.index:
user_description=df_users.loc[user_id,['user description']].values
if ( "," in user_description[0]):
print(user_description[0])
df_users.loc[user_id,['is expert']]=1
# Save database:
df_users.to_csv("MedHelp-users-class.csv")
is_expert=df_users['is expert'] == 1
is_expert.value_counts()
"""
Explanation: Insights from medical posts
In this notebook, I try to find characteristics of medical posts.
What is the ratio of posts from professionals vs. those from the general public?
What are the characteristics that well-separate professional-level posts?
Length of text
Usage of offending vocabulary
Writing level
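For the third characteristic, a very rough "writing level" proxy could be built from average sentence length and average word length; the sketch below is illustrative only (proper readability metrics such as Flesch-Kincaid are more involved):
```python
import re

def writing_level_proxy(text):
    # split on sentence-ending punctuation and count word tokens
    sentences = [s for s in re.split(r'[.!?]+', text) if s.strip()]
    words = re.findall(r'\w+', text)
    avg_sentence_len = len(words) / max(len(sentences), 1)
    avg_word_len = sum(len(w) for w in words) / max(len(words), 1)
    return avg_sentence_len, avg_word_len

writing_level_proxy("Take the medication with food. See a doctor if the pain persists.")
```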
End of explanation
"""
# Select user_id from DB where 'is expert' = 1
experts_ids = df_users[df_users['is expert'] == 1 ].index.values
experts_ids
non_experts_ids = df_users[df_users['is expert'] == 0 ].index.values
# Select * where user_id in experts_ids
#df_users.loc[df_users.index.isin(experts_ids)]
df_experts=df.loc[df['user id'].isin(experts_ids)]
print('Total of posts from expert users {}'.format(len(df_experts)))
print('Total of posts {}'.format(len(df)))
print('Ratio {}'.format(len(df_experts)/len(df)))
del df_experts
"""
Explanation: Only 10 out of 505 users are experts!
This corresponds to 2 % of users.
End of explanation
"""
# Tokenize data
import nltk
tokenizer = nltk.RegexpTokenizer(r'\w+')
# Store the token count of each post in a new column
df_text = df['text'].str.lower()
df_token = df_text.apply(tokenizer.tokenize)
df['token length'] = df_token.apply(len)
# Get list of tokens from text in first article:
#for text in df_text.values:
# ttext = tokenizer.tokenize(text.lower())
# lenght_text=len(ttext)
# break
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.mlab as mlab
from matplotlib import gridspec
from scipy.stats import norm
import numpy as np
from scipy.optimize import curve_fit
from lognormal import lognormal, lognormal_stats,truncated_normal
from scipy.stats import truncnorm
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.unicode'] = True
plt.rcParams.update({'font.size': 24})
nbins=100
fig = plt.figure()
#fig=plt.figure(figsize=(2,1))
#fig.set_size_inches(6.6,3.3)
gs = gridspec.GridSpec(2, 1)
#plt.subplots_adjust(left=0.1,right=1.0,bottom=0.17,top=0.9)
#plt.suptitle('Text length (words count)')
fig.text(0.04,0.5,'Distribution',va='center',rotation='vertical')
#X ticks
xmax=200
x=np.arange(0,xmax,10) #xtics
xx=np.arange(1,xmax,1)
# Panel 1
ax1=plt.subplot(gs[0])
ax1.set_xlim([0, xmax])
ax1.set_xticks(x)
ax1.tick_params(labelbottom='off')
#plt.ylabel('')
#Class 0
X=df.loc[df['user id'].isin(non_experts_ids)]['token length'].values
n,bins,patches=plt.hist(X,nbins,normed=1,facecolor='cyan',align='mid')
popt,pcov = curve_fit(truncated_normal,bins[:nbins],n)
c0,=plt.plot(xx,truncated_normal(xx,*popt),color='blue',label='non expert')
plt.legend(handles=[c0],bbox_to_anchor=(0.45, 0.95), loc=2, borderaxespad=0.)
print(popt)
mu=X.mean()
var=X.var()
print("Class 0: Mean,variance: ({},{})".format(mu,var))
# Panel 2
ax2=plt.subplot(gs[1])
ax2.set_xlim([0, xmax])
ax2.set_xticks(x)
#ax2.set_yticks(np.arange(0,8,2))
#plt.ylabel('Normal distribution')
#Class 1
X=df.loc[df['user id'].isin(experts_ids)]['token length'].values
#(mu,sigma) = norm.fit(X)
n,bins,patches=plt.hist(X,nbins,normed=1,facecolor='orange',align='mid')
popt,pcov = curve_fit(lognormal,bins[:nbins],n)
#c1,=plt.plot(xx,mlab.normpdf(xx, mu, sigma),color='darkorange',label='layered')
c1,=plt.plot(xx,lognormal(xx,*popt),color='red',label='expert')
plt.legend(handles=[c1],bbox_to_anchor=(0.45, 0.95), loc=2, borderaxespad=0.)
print("Class 1: Mean,variance:",lognormal_stats(*popt))
#plt.xlabel('Volume ratio (theor./expt.)')
plt.show()
# Which token-length cutoff corresponds to roughly the 5th percentile of the experts' distribution?
X=df.loc[df['user id'].isin(experts_ids)]['token length'].values
total=len(X)
for ix in range(10,500,10):
this_sum=0
for xx in X:
if xx < ix:
this_sum = this_sum + 1
percentile = this_sum/total * 100
print("Value {} percentile {}".format(ix,percentile))
"""
Explanation: Length of text
End of explanation
"""
|
NervanaSystems/neon_course
|
02 VGG Fine-tuning.ipynb
|
apache-2.0
|
from neon.backends import gen_backend
be = gen_backend(batch_size=64, backend='cpu')
"""
Explanation: Tutorial: Fine-tuning VGG on CIFAR-10
One of the most common questions we get is how to use neon to load a pre-trained model and fine-tune on a new dataset. In this tutorial, we show how to load a pre-trained convolutional neural network (VGG), which was trained on ImageNet, a large corpus of natural images with 1000 categories. We will then use this model to train on the CIFAR-10 dataset, a much smaller set of images with 10 categories.
We begin by first generating a computational backend with the gen_backend function from neon. If there is a GPU available (recommended), this function will generate a GPU backend. Otherwise, a CPU backend will be used.
Note: VGG will not fit on a Kepler GPU, so here we use CPU backend for instructional purposes. If you are running on a Maxwell+ GPU, switch the backend to gpu below.
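For example, on a Maxwell or newer GPU the only change needed is the backend argument (a sketch):
```python
from neon.backends import gen_backend
be = gen_backend(batch_size=64, backend='gpu')
```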
End of explanation
"""
print be
"""
Explanation: We can inspect the backend via:
End of explanation
"""
from neon.transforms import Rectlin
from neon.initializers import Constant, Xavier
relu = Rectlin()
conv_params = {'strides': 1,
'padding': 1,
'init': Xavier(local=True),
'bias': Constant(0),
'activation': relu}
"""
Explanation: Defining the VGG Model
We begin by first generating our VGG network. VGG is a popular convolutional neural network with ~19 layers. Not only does this network perform well with fine-tuning, but is also easy to define since the convolutional layers all have 3x3 filter sizes, and only differ in the number of feature maps.
We first define some common parameters used by all the convolution layers:
End of explanation
"""
from neon.layers import Conv, Dropout, Pooling, GeneralizedCost, Affine
from neon.initializers import GlorotUniform
# Set up the model layers
vgg_layers = []
# set up 3x3 conv stacks with different number of filters
vgg_layers.append(Conv((3, 3, 64), **conv_params))
vgg_layers.append(Conv((3, 3, 64), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 128), **conv_params))
vgg_layers.append(Conv((3, 3, 128), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Affine(nout=4096, init=GlorotUniform(), bias=Constant(0), activation=relu))
vgg_layers.append(Dropout(keep=0.5))
vgg_layers.append(Affine(nout=4096, init=GlorotUniform(), bias=Constant(0), activation=relu))
vgg_layers.append(Dropout(keep=0.5))
"""
Explanation: Then, we can define our network as a list of layers:
End of explanation
"""
from neon.transforms import Softmax
vgg_layers.append(Affine(nout=10, init=GlorotUniform(), bias=Constant(0), activation=Softmax(),
name="class_layer"))
"""
Explanation: The last layer of VGG is an Affine layer with 1000 units, for the 1000 categories in the ImageNet dataset. However, since our dataset only has 10 classes, we will instead use 10 output units. We also give this layer a special name (class_layer) so we know not to load pre-trained weights for this layer.
End of explanation
"""
from neon.models import Model
model = Model(layers=vgg_layers)
"""
Explanation: Now we are ready to load the pre-trained weights into this model. First we generate a Model object to hold the VGG layers:
End of explanation
"""
from neon.data.datasets import Dataset
from neon.util.persist import load_obj
import os
# location and size of the VGG weights file
url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/'
filename = 'VGG_D.p'
size = 554227541
# edit filepath below if you have the file elsewhere
_, filepath = Dataset._valid_path_append('data', '', filename)
if not os.path.exists(filepath):
Dataset.fetch_dataset(url, filename, filepath, size)
# load the weights param file
print("Loading VGG weights from {}...".format(filepath))
trained_vgg = load_obj(filepath)
print("Done!")
"""
Explanation: Loading pre-trained weights
Next, we download the pre-trained VGG weights from our Model Zoo. Note: this file is quite large (~550MB). By default, the weights file is saved in your home directory. To change this, or if you have already downloaded the file somewhere else, please edit the filepath variable below.
End of explanation
"""
print("The dictionary has the following keys: {}".format(trained_vgg.keys()))
layer0 = trained_vgg['model']['config']['layers'][0]
print("The first layer is of type: {}".format(layer0['type']))
# filter weights of the first layer
W = layer0['params']['W']
print("The first layer weights have average magnitude of {:.2}".format(abs(W).mean()))
"""
Explanation: In neon, models are saved as python dictionaries. Below are some example calls to explore the model. You can examine the weights, the layer configuration, and more.
End of explanation
"""
param_layers = [l for l in model.layers.layers]
param_dict_list = trained_vgg['model']['config']['layers']
for layer, params in zip(param_layers, param_dict_list):
if(layer.name == 'class_layer'):
break
# To be sure, we print the name of the layer in our model
# and the name in the vgg model.
print(layer.name + ", " + params['config']['name'])
layer.load_weights(params, load_states=True)
"""
Explanation: We encourage you to use the below blank code cell to explore the model dictionary!
We then iterate over the layers in our model and load the weights from trained_vgg using each layer's load_weights method. The final Affine layer is different between our model and the pre-trained model, since the number of classes has changed. Therefore, we break the loop when we encounter the final Affine layer, which has the name class_layer.
End of explanation
"""
import neon
import os
neon_path = os.path.split(os.path.dirname(neon.__file__))[0]
print "Found path to neon as {}".format(neon_path)
"""
Explanation: As a check, the above code should have printed out pairs of layer names, one from our model and one from the pre-trained VGG model. The exact names may differ, but the layer type and layer number should match between the two.
Fine-tuning VGG on the CIFAR-10 dataset
Now that we've modified the model for our new CIFAR-10 dataset, and loaded the model weights, let's give training a try!
Aeon Dataloader
The CIFAR-10 dataset is small enough to fit into memory, meaning that we would normally use an ArrayIterator to generate our dataset. However, the CIFAR-10 images are 32x32, and VGG was trained on ImageNet, which has images of size 224x224. For this reason, we use our macrobatching dataloader aeon, which performs image scaling and cropping on-the-fly.
To prepare the data, we first invoke an ingestion script that downloads the data and creates the macrobatches. The script is located in your neon folder. Here we use a bit of Python to extract the path to neon from the virtual environment.
End of explanation
"""
%run $neon_path/examples/cifar10_msra/data.py --out_dir data/cifar10/
"""
Explanation: Then we execute the ingestion script below
End of explanation
"""
config = {
'manifest_filename': 'data/cifar10/train-index.csv', # CSV manifest of data
'manifest_root': 'data/cifar10', # root data directory
'image': {'height': 224, 'width': 224, # output image size
'scale': [0.875, 0.875], # random scaling of image before cropping
'flip_enable': True}, # randomly flip image
'type': 'image,label', # type of data
'minibatch_size': be.bsz # batch size
}
"""
Explanation: Aeon configuration
Aeon allows a diverse set of configurations to specify which transformations are applied on-the-fly during training. These configs are specified as Python dictionaries. For more detail, see the aeon documentation.
End of explanation
"""
from neon.data.aeon_shim import AeonDataLoader
from neon.data.dataloader_transformers import OneHot, TypeCast, BGRMeanSubtract
import numpy as np  # needed for np.float32 below
train_set = AeonDataLoader(config, be)
train_set = OneHot(train_set, index=1, nclasses=10) # perform onehot on the labels
train_set = TypeCast(train_set, index=0, dtype=np.float32) # cast the image to float32
train_set = BGRMeanSubtract(train_set, index=0) # subtract image color means (based on default values)
"""
Explanation: We then use this configuration to create our dataloader. The outputs from the dataloader are then sent through a series of transformations.
End of explanation
"""
from neon.optimizers import GradientDescentMomentum, Schedule, MultiOptimizer
from neon.transforms import CrossEntropyMulti
# define different optimizers for the class_layer and the rest of the network
# we use a momentum coefficient of 0.9 and weight decay of 0.0005.
opt_vgg = GradientDescentMomentum(0.001, 0.9, wdecay=0.0005)
opt_class_layer = GradientDescentMomentum(0.01, 0.9, wdecay=0.0005)
# also define optimizers for the bias layers, which have a different learning rate
# and no weight decay.
opt_bias = GradientDescentMomentum(0.002, 0.9)
opt_bias_class = GradientDescentMomentum(0.02, 0.9)
# set up the mapping of layers to optimizers
opt = MultiOptimizer({'default': opt_vgg, 'Bias': opt_bias,
'class_layer': opt_class_layer, 'class_layer_bias': opt_bias_class})
# use cross-entropy cost to train the network
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
"""
Explanation: Optimizer configuration
For fine-tuning, we want the final Affine layer to be updated with a higher learning rate compared to the pre-trained weights throughout the rest of the network.
End of explanation
"""
from neon.callbacks.callbacks import Callbacks
callbacks = Callbacks(model)
model.fit(train_set, optimizer=opt, num_epochs=10, cost=cost, callbacks=callbacks)
"""
Explanation: Finally, we set up callbacks so the model can report progress during training, and then run the model.fit function. Note that if you are on a CPU, this next section will take a long time to finish before you see results.
End of explanation
"""
|
SJSlavin/phys202-2015-work
|
assignments/assignment06/InteractEx05.ipynb
|
mit
|
# YOUR CODE HERE
import matplotlib.pyplot as plt
import numpy as np
import IPython as ipy
from IPython.display import SVG
from IPython.html.widgets import interactive, fixed
from IPython.html import widgets
from IPython.display import display
"""
Explanation: Interact Exercise 5
Imports
Put the standard imports for Matplotlib, Numpy and the IPython widgets in the following cell.
End of explanation
"""
s = """
<svg width="100" height="100">
<circle cx="50" cy="50" r="20" fill="aquamarine" />
</svg>
"""
SVG(s)
"""
Explanation: Interact with SVG display
SVG is a simple way of drawing vector graphics in the browser. Here is a simple example of how SVG can be used to draw a circle in the Notebook:
End of explanation
"""
def draw_circle(width=100, height=100, cx=25, cy=25, r=5, fill='red'):
"""Draw an SVG circle.
Parameters
----------
width : int
The width of the svg drawing area in px.
height : int
The height of the svg drawing area in px.
cx : int
The x position of the center of the circle in px.
cy : int
The y position of the center of the circle in px.
r : int
The radius of the circle in px.
fill : str
The fill color of the circle.
"""
# YOUR CODE HERE
svg = """
<svg width='%s' height='%s'>
<circle cx='%s' cy='%s' r='%s' fill='%s' />
</svg>
""" % (width, height, cx, cy, r, fill)
display(SVG(svg))
draw_circle(cx=10, cy=10, r=10, fill='blue')
assert True # leave this to grade the draw_circle function
"""
Explanation: Write a function named draw_circle that draws a circle using SVG. Your function should take the parameters of the circle as function arguments and have defaults as shown. You will have to write the raw SVG code as a Python string and then use the IPython.display.SVG object and IPython.display.display function.
End of explanation
"""
# YOUR CODE HERE
w = interactive(draw_circle, width=fixed(300), height=fixed(300), cx=(0, 300, 1), cy=(0, 300, 1), r=(0, 50, 1), fill="red");
c = w.children
assert c[0].min==0 and c[0].max==300
assert c[1].min==0 and c[1].max==300
assert c[2].min==0 and c[2].max==50
assert c[3].value=='red'
"""
Explanation: Use interactive to build a user interface for exploring the draw_circle function:
width: a fixed value of 300px
height: a fixed value of 300px
cx/cy: a slider in the range [0,300]
r: a slider in the range [0,50]
fill: a text area in which you can type a color's name
Save the return value of interactive to a variable named w.
End of explanation
"""
# YOUR CODE HERE
display(w)
#the sliders show but not the circle itself?
assert True # leave this to grade the display of the widget
"""
Explanation: Use the display function to show the widgets created by interactive:
End of explanation
"""
|
tensorflow/docs-l10n
|
site/es-419/guide/keras/functional.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
tf.keras.backend.clear_session() # Reseteo sencillo
"""
Explanation: La API funcional "Keras" en TensorFlow
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/keras/functional"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Ver en TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/es-419/guide/keras/functional.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Correr en Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/es-419/guide/keras/functional.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Ver código fuente en GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/es-419/guide/keras/functional.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Descargar notebook</a>
</td>
</table>
Note: Nuestra comunidad de Tensorflow ha traducido estos documentos. Como las traducciones de la comunidad
son basados en el "mejor esfuerzo", no hay ninguna garantia que esta sea un reflejo preciso y actual
de la Documentacion Oficial en Ingles.
Si tienen sugerencias sobre como mejorar esta traduccion, por favor envian un "Pull request"
al siguiente repositorio tensorflow/docs.
Para ofrecerse como voluntario o hacer revision de las traducciones de la Comunidad
por favor contacten al siguiente grupo docs@tensorflow.org list.
Setup
End of explanation
"""
from tensorflow import keras
inputs = keras.Input(shape=(784,))
"""
Explanation: Introduccion
Ya estás familiarizado con el uso del metodo keras.Sequential() para crear modelos.
La API funcional es una forma de crear modelos mas dinamicos que con Sequential: La API funcional puede manejar modelos con topología no lineal, modelos con capas compartidas y modelos con múltiples entradas o salidas.
Se basa en la idea de que un modelo de aprendizaje profundo
suele ser un gráfico acíclico dirigido (DAG) de capas.
La API funcional es un conjunto de herramientas para construir gráficos de capas.
Considera el siguiente modelo:
(input: 784-vectores dimensionales)
↧
[Dense (64 units, activacion relu)]
↧
[Dense (64 units, activacion relu)]
↧
[Dense (10 units, activacion softmax)]
↧
(output: distribución de probabilidad en 10 clases)
Es una simple grafica de tres capas.
Para construir este modelo con la API funcional,
comenzarías creando un nodo de entrada:
End of explanation
"""
img_inputs = keras.Input(shape=(32, 32, 3))
"""
Explanation: Aquí solo especificamos la forma de nuestros datos: vectores de 784 dimensiones.
Nota que el tamaño del batch siempre se omite; solo se incluye la forma de cada muestra.
Para una entrada de tipo imagen de forma (32, 32, 3) hubiese sido:
End of explanation
"""
inputs.shape
inputs.dtype
"""
Explanation: Lo que se devuelve, inputs, contiene información sobre la forma y el tipo de dato que se espera ingresar en tu modelo:
End of explanation
"""
from tensorflow.keras import layers
dense = layers.Dense(64, activation='relu')
x = dense(inputs)
"""
Explanation: Puedes crear un nuevo nodo en el grafico de capas mandando a llamar al objeto input.
End of explanation
"""
x = layers.Dense(64, activation='relu')(x)
outputs = layers.Dense(10, activation='softmax')(x)
"""
Explanation: La acción de "llamar a la capa" ("layer call") es como dibujar una flecha desde "inputs" hacia la capa que creamos.
Estamos "pasando" las entradas a la capa dense, y como salida obtenemos x.
Agreguemos algunas capas más a nuestro gráfico de capas:
End of explanation
"""
model = keras.Model(inputs=inputs, outputs=outputs)
"""
Explanation: Llegados a este punto, podemos crear un Modelo especificando sus entradas y salidas en el gráfico de capas.
End of explanation
"""
inputs = keras.Input(shape=(784,), name='img')
x = layers.Dense(64, activation='relu')(inputs)
x = layers.Dense(64, activation='relu')(x)
outputs = layers.Dense(10, activation='softmax')(x)
model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')
"""
Explanation: Recapitulando, esta es nuestra definición completa del proceso:
End of explanation
"""
model.summary()
"""
Explanation: Veamos como se muestra el model summary:
End of explanation
"""
keras.utils.plot_model(model, 'my_first_model.png')
"""
Explanation: También podemos trazar el modelo como un gráfico:
End of explanation
"""
keras.utils.plot_model(model, 'my_first_model_with_shape_info.png', show_shapes=True)
"""
Explanation: Y opcionalmente mostrar la entrada y la salida de la forma de cada capa en la gráfica ploteada:
End of explanation
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255
model.compile(loss='sparse_categorical_crossentropy',
optimizer=keras.optimizers.RMSprop(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=64,
epochs=5,
validation_split=0.2)
test_scores = model.evaluate(x_test, y_test, verbose=2)
print('Test loss:', test_scores[0])
print('Test accuracy:', test_scores[1])
"""
Explanation: Esta figura y el código que escribimos son prácticamente idénticos. En la versión de código, las flechas de conexión simplemente se reemplazan por la operación de llamada.
Un "gráfico de capas" es una imagen mental muy intuitiva para un modelo de aprendizaje profundo, y la API funcional es una forma de crear modelos que reflejan de cerca esta imagen mental.
Entrenamiento, evaluación e inferencia.
El entrenamiento, la evaluación y la inferencia funcionan exactamente de la misma manera para los modelos construidos
utilizando la API funcional como para los modelos secuenciales.
Aquí hay una demostración rápida.
Aquí cargamos datos de imagen MNIST, los rediseñamos en vectores,
ajustar el modelo en los datos (mientras se monitorea el rendimiento en una división de validación),
y finalmente evaluamos nuestro modelo en los datos de prueba:
End of explanation
"""
model.save('path_to_my_model.h5')
del model
# Recrea el mismo modelo, desde el archivo:
model = keras.models.load_model('path_to_my_model.h5')
"""
Explanation: Para obtener una guía completa sobre el entrenamiento y evaluación de modelos, consulta Guía de entrenamiento y evaluación.
Almacenado y serialización
El almacenado y la serialización funcionan exactamente de la misma manera para los modelos construidos
utilizando la API funcional como para los modelos secuenciales.
Una forma estándar de guardar un modelo funcional es llamar a model.save () para guardar todo el modelo en un solo archivo.
Posteriormente, puede volver a crear el mismo modelo a partir de este archivo, incluso si ya no tiene acceso al código que creó el modelo.
Este archivo incluye:
- La arquitectura del modelo.
- Los valores de peso del modelo (que se aprendieron durante el entrenamiento)
- La configuración de entrenamiento del modelo (lo que pasó a compilar), si corresponde
- El optimizador y su estado, si corresponde (esto le permite reiniciar el entrenamiento donde lo dejó)
End of explanation
"""
encoder_input = keras.Input(shape=(28, 28, 1), name='img')
x = layers.Conv2D(16, 3, activation='relu')(encoder_input)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.Conv2D(16, 3, activation='relu')(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name='encoder')
encoder.summary()
x = layers.Reshape((4, 4, 1))(encoder_output)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
x = layers.Conv2DTranspose(32, 3, activation='relu')(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x)
autoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder')
autoencoder.summary()
"""
Explanation: Para obtener una guía completa sobre el guardado de modelos, consulta Guía para guardar y serializar modelos.
Usando el mismo gráfico de capas para definir múltiples modelos
En la API funcional, los modelos se crean especificando sus entradas
y salidas en un gráfico de capas. Eso significa que un solo gráfico de capas
Se puede utilizar para generar múltiples modelos.
En el siguiente ejemplo, usamos la misma arquitectura de capas para crear instancias de dos modelos:
un modelo de "codificador" que convierte las entradas de imagen en vectores de 16 dimensiones,
y un modelo completo de autoencoder para entrenamiento.
End of explanation
"""
encoder_input = keras.Input(shape=(28, 28, 1), name='original_img')
x = layers.Conv2D(16, 3, activation='relu')(encoder_input)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.Conv2D(16, 3, activation='relu')(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name='encoder')
encoder.summary()
decoder_input = keras.Input(shape=(16,), name='encoded_img')
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
x = layers.Conv2DTranspose(32, 3, activation='relu')(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x)
decoder = keras.Model(decoder_input, decoder_output, name='decoder')
decoder.summary()
autoencoder_input = keras.Input(shape=(28, 28, 1), name='img')
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name='autoencoder')
autoencoder.summary()
"""
Explanation: Tenga en cuenta que hacemos que la arquitectura de decodificación sea estrictamente simétrica a la arquitectura de codificación,
para que obtengamos una forma de salida que sea igual a la forma de entrada (28, 28, 1).
El reverso de una capa Conv2D es una capa Conv2DTranspose, y el reverso de una capa MaxPooling2D es una capa UpSampling2D.
Todos los modelos son invocables, al igual que las capas.
Puede tratar cualquier modelo como si fuera una capa, llamándolo en una Entrada o en la salida de otra capa.
Tenga en cuenta que al llamar a un modelo no solo está reutilizando la arquitectura del modelo, también está reutilizando sus pesos.
Veamos esto en acción. Aquí hay una versión diferente del ejemplo de autoencoder que crea un modelo de codificador, un modelo de decodificador,
y encadenarlos en dos llamadas para obtener el modelo de autoencoder:
End of explanation
"""
def get_model():
inputs = keras.Input(shape=(128,))
outputs = layers.Dense(1, activation='sigmoid')(inputs)
return keras.Model(inputs, outputs)
model1 = get_model()
model2 = get_model()
model3 = get_model()
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
outputs = layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
"""
Explanation: Como puede ver, el modelo puede estar anidado: un modelo puede contener submodelos (ya que un modelo es como una capa).
Un caso de uso común para la anidación de modelos es * ensamblaje *.
Como ejemplo, a continuación se explica cómo agrupar un conjunto de modelos en un solo modelo que promedia sus predicciones:
End of explanation
"""
num_tags = 12 # Número de etiquetas de problemas únicos
num_words = 10000 # Tamaño del vocabulario obtenido al preprocesar datos de texto
num_departments = 4 # Número de departamentos para predicciones.
title_input = keras.Input(shape=(None,), name='title') # Secuencia de longitud variable de entradas
body_input = keras.Input(shape=(None,), name='body') # Secuencia de longitud variable de entradas
tags_input = keras.Input(shape=(num_tags,), name='tags') # Vectores binarios de tamaño `num_tags`
# Ingresa cada palabra en el título en un vector de 64 dimensiones
title_features = layers.Embedding(num_words, 64)(title_input)
# Ingresa cada palabra en el texto en un vector de 64 dimensiones
body_features = layers.Embedding(num_words, 64)(body_input)
# Reduce la secuencia de palabras ingresadas en el título en un solo vector de 128 dimensiones
title_features = layers.LSTM(128)(title_features)
# Reduce la secuencia de palabras ingresadas en el cuerpo en un solo vector de 32 dimensiones
body_features = layers.LSTM(32)(body_features)
# Combina todas las funciones disponibles en un solo vector grande mediante concatenación
x = layers.concatenate([title_features, body_features, tags_input])
# Pega una regresión logística para la predicción de prioridad en la parte superior de las características
priority_pred = layers.Dense(1, activation='sigmoid', name='priority')(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, activation='softmax', name='department')(x)
# Instancia un modelo de extremo a extremo que prediga tanto la prioridad como el departamento
model = keras.Model(inputs=[title_input, body_input, tags_input],
outputs=[priority_pred, department_pred])
"""
Explanation: Manipulación de topologías gráficas complejas
Modelos con múltiples entradas y salidas
La API funcional facilita la manipulación de múltiples entradas y salidas.
Esto no se puede manejar con la API secuencial.
Aquí hay un ejemplo simple.
Supongamos que está creando un sistema para clasificar los tickets de emisión personalizados por prioridad y enrutarlos al departamento correcto.
Tu modelo tendrá 3 entradas:
Título del ticket (entrada de texto)
Cuerpo del texto del ticket (entrada de texto)
Cualquier etiqueta agregada por el usuario (entrada categórica)
Tendrá dos salidas:
Puntuación de prioridad entre 0 y 1 (salida sigmoidea escalar)
El departamento que debe manejar el ticket (salida softmax sobre el conjunto de departamentos)
Construyamos este modelo en pocas líneas con la API funcional.
End of explanation
"""
keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True)
"""
Explanation: Ploteando el modelo:
End of explanation
"""
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss=['binary_crossentropy', 'categorical_crossentropy'],
loss_weights=[1., 0.2])
"""
Explanation: Al compilar este modelo, podemos asignar diferentes pérdidas a cada salida.
Incluso puede asignar diferentes pesos a cada pérdida, para modular su
contribución a la pérdida total de entrenamiento.
End of explanation
"""
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss={'priority': 'binary_crossentropy',
'department': 'categorical_crossentropy'},
loss_weights=[1., 0.2])
"""
Explanation: Como dimos nombres a nuestras capas de salida, también podríamos especificar la pérdida de esta manera:
End of explanation
"""
import numpy as np
# Datos de entrada ficticios
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype('float32')
# Datos objetivo ficticios
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))
model.fit({'title': title_data, 'body': body_data, 'tags': tags_data},
{'priority': priority_targets, 'department': dept_targets},
epochs=2,
batch_size=32)
"""
Explanation: Podemos entrenar el modelo pasando listas de matrices Numpy de entradas y objetivos:
End of explanation
"""
inputs = keras.Input(shape=(32, 32, 3), name='img')
x = layers.Conv2D(32, 3, activation='relu')(inputs)
x = layers.Conv2D(64, 3, activation='relu')(x)
block_1_output = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_1_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
block_2_output = layers.add([x, block_1_output])
x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_2_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
block_3_output = layers.add([x, block_2_output])
x = layers.Conv2D(64, 3, activation='relu')(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10, activation='softmax')(x)
model = keras.Model(inputs, outputs, name='toy_resnet')
model.summary()
"""
Explanation: Al llamar a fit con un objeto Dataset, este debería producir una
tupla de listas como ([title_data, body_data, tags_data], [priority_targets, dept_targets])
o una tupla de diccionarios como
({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets}).
Para obtener una explicación más detallada, consulta la guía completa Guía de entrenamiento y evaluación.
Un modelo de Red neuronal residual de juguete
Además de los modelos con múltiples entradas y salidas,
La API funcional facilita la manipulación de topologías de conectividad no lineal,
es decir, modelos donde las capas no están conectadas secuencialmente.
Esto tampoco se puede manejar con la API secuencial (como su nombre lo indica).
Un caso de uso común para esto son las conexiones residuales.
Construyamos un modelo de ResNet de juguete para CIFAR10 para demostrar esto.
End of explanation
"""
keras.utils.plot_model(model, 'mini_resnet.png', show_shapes=True)
"""
Explanation: Ploteando el modelo:
End of explanation
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss='categorical_crossentropy',
metrics=['acc'])
model.fit(x_train, y_train,
batch_size=64,
epochs=1,
validation_split=0.2)
"""
Explanation: Vamos a entrenarlo:
End of explanation
"""
# Ingreso de 1000 palabras únicas asignadas a vectores de 128 dimensiones
shared_embedding = layers.Embedding(1000, 128)
# Secuencia de longitud variable de enteros
text_input_a = keras.Input(shape=(None,), dtype='int32')
# Secuencia de longitud variable de enteros
text_input_b = keras.Input(shape=(None,), dtype='int32')
# Reutilizamos la misma capa para codificar ambas entradas
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
"""
Explanation: Compartir capas
Otro buen uso de la API funcional son los modelos que usan capas compartidas. Las capas compartidas son instancias de capa que se reutilizan varias veces en un mismo modelo: aprenden características que corresponden a múltiples rutas en el gráfico de capas.
Las capas compartidas a menudo se usan para codificar entradas que provienen de espacios similares (por ejemplo, dos piezas de texto diferentes que presentan un vocabulario similar), ya que permiten compartir información entre estas diferentes entradas y hacen posible entrenar un modelo de este tipo en menos datos. Si se ve una palabra determinada en una de las entradas, eso beneficiará el procesamiento de todas las entradas que pasan por la capa compartida.
Para compartir una capa en la API funcional, simplemente llame a la misma instancia de capa varias veces. Por ejemplo, aquí hay una capa Embedding compartida entre dos entradas de texto diferentes:
End of explanation
"""
from tensorflow.keras.applications import VGG19
vgg19 = VGG19()
"""
Explanation: Extracción y reutilización de nodos en el gráfico de capas
Debido a que el gráfico de capas que está manipulando en la API funcional es una estructura de datos estática, se puede acceder e inspeccionarlo. Así es como podemos trazar modelos funcionales como imágenes, por ejemplo.
Esto también significa que podemos acceder a las activaciones de capas intermedias ("nodos" en el gráfico) y reutilizarlas en otros lugares. ¡Esto es extremadamente útil para la extracción de características, por ejemplo!
Veamos un ejemplo. Este es un modelo VGG19 con pesos pre-entrenados en ImageNet:
End of explanation
"""
features_list = [layer.output for layer in vgg19.layers]
"""
Explanation: Y estas son las activaciones intermedias del modelo, obtenidas al consultar la estructura de datos del gráfico:
End of explanation
"""
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype('float32')
extracted_features = feat_extraction_model(img)
"""
Explanation: Podemos usar estas características para crear un nuevo modelo de extracción de características, que devuelve los valores de las activaciones de la capa intermedia, y podemos hacer todo esto en 3 líneas.
End of explanation
"""
class CustomDense(layers.Layer):
def __init__(self, units=32):
super(CustomDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(self.units,),
initializer='random_normal',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
"""
Explanation: Esto es útil cuando [implementa la transferencia de estilo neural](https://medium.com/tensorflow/neural-style-transfer-creating-art-with-deep-learning-using-tf-keras-and-eager-execution-7d541ac31398), entre otras cosas.
Extendiendo la API escribiendo capas personalizadas
tf.keras tiene una amplia gama de capas incorporadas. Aquí están algunos ejemplos:
Capas convolucionales: Conv1D,Conv2D, Conv3D,Conv2DTranspose, etc.
Capas de agrupación: MaxPooling1D,MaxPooling2D, MaxPooling3D,AveragePooling1D, etc.
Capas RNN: GRU,LSTM, ConvLSTM2D, etc.
BatchNormalization, Dropout, Embedding, etc.
Si no encuentras lo que necesitas, es fácil extender la API creando tus propias capas.
Todas las capas subclasifican la clase Layer e implementan:
- Un método call, que especifica el cálculo realizado por la capa.
- Un método build, que crea los pesos de la capa (tenga en cuenta que esto es solo una convención de estilo; también puede crear pesos en __init__).
Para obtener más información sobre cómo crear capas desde cero, consulta la guía Guía para escribir capas y modelos desde cero.
Aquí hay una implementación simple de una capa Densa:
End of explanation
"""
class CustomDense(layers.Layer):
def __init__(self, units=32):
super(CustomDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(self.units,),
initializer='random_normal',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
return {'units': self.units}
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(
config, custom_objects={'CustomDense': CustomDense})
"""
Explanation: Si deseas que tu capa personalizada admita la serialización, también debes definir un método get_config,
que devuelve los argumentos del constructor de la instancia de capa:
End of explanation
"""
units = 32
timesteps = 10
input_dim = 5
# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
class CustomRNN(layers.Layer):
def __init__(self):
super(CustomRNN, self).__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation='tanh')
self.projection_2 = layers.Dense(units=units, activation='tanh')
# Our previously-defined Functional model
self.classifier = model
def call(self, inputs):
outputs = []
state = tf.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = tf.stack(outputs, axis=1)
print(features.shape)
return self.classifier(features)
rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, timesteps, input_dim)))
"""
Explanation: Opcionalmente, también podría implementar el método de clase from_config(cls, config), que se encarga de recrear una instancia de capa dado su diccionario de configuración. La implementación predeterminada de from_config es:
```python
def from_config(cls, config):
    return cls(**config)
```
Cuándo usar la API funcional
¿Cómo decidir si usar la API funcional para crear un nuevo modelo o simplemente subclasificar la clase Modelo directamente?
En general, la API funcional es de nivel superior, más fácil y segura de usar, y tiene una serie de características que los modelos de subclases no admiten.
Sin embargo, la subclasificación de modelos le brinda una mayor flexibilidad al crear modelos que no se pueden expresar fácilmente como gráficos acíclicos dirigidos de capas (por ejemplo, no podría implementar un Tree-RNN con la API funcional, tendría que subclasificar Model directamente).
Estas son las fortalezas de la API funcional:
Las propiedades enumeradas a continuación también son ciertas para los modelos secuenciales (que también son estructuras de datos), pero no son ciertas para los modelos subclasificados (que son bytecode de Python, no estructuras de datos).
Es menos detallado.
No hay `super(MyClass, self).__init__(...)`, ni `def call(self, ...):`, etc.
Comparar:
```python
inputs = keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)
outputs = layers.Dense(10)(x)
mlp = keras.Model(inputs, outputs)
```
Con la versión subclaseada:
```python
class MLP(keras.Model):
    def __init__(self, **kwargs):
        super(MLP, self).__init__(**kwargs)
        self.dense_1 = layers.Dense(64, activation='relu')
        self.dense_2 = layers.Dense(10)
    def call(self, inputs):
        x = self.dense_1(inputs)
        return self.dense_2(x)

# Instanciar el modelo.
mlp = MLP()
# Necesario para crear el estado del modelo.
# El modelo no tiene un estado hasta que se llama al menos una vez.
_ = mlp(tf.zeros((1, 32)))
```
Valida su modelo mientras lo está definiendo.
En la API funcional, su especificación de entrada (forma y dtype) se crea de antemano (a través de Input), y cada vez que llama a una capa, la capa comprueba que la especificación que se le pasa coincide con sus supuestos, y generará un mensaje de error útil si no.
Esto garantiza que se ejecutará cualquier modelo que pueda construir con la API funcional. Toda la depuración (que no sea la depuración relacionada con la convergencia) ocurrirá estáticamente durante la construcción del modelo, y no en el momento de la ejecución. Esto es similar a la comprobación de tipo en un compilador.
Tu modelo funcional es trazable e inspeccionable.
Puedes trazar el modelo como un gráfico, y puedes acceder fácilmente a los nodos intermedios en este gráfico, por ejemplo, para extraer y reutilizar las activaciones de las capas intermedias, como vimos en un ejemplo anterior:
```python
features_list = [layer.output for layer in vgg19.layers]
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
```
Su modelo funcional puede ser serializado o clonado.
Debido a que un modelo funcional es una estructura de datos en lugar de un fragmento de código, es serializable de forma segura y se puede guardar como un único archivo que le permite recrear exactamente el mismo modelo sin tener acceso a ninguno de los códigos originales. Consulta nuestra [guía de guardado y serialización](./save_and_serialize.ipynb) para obtener más detalles.
Estas son las debilidades de la API funcional:
No admite arquitecturas dinámicas.
La API funcional trata los modelos como DAG de capas. Esto es cierto para la mayoría de las arquitecturas de aprendizaje profundo, pero no para todas: por ejemplo, las redes recursivas o los RNN de árbol no siguen este supuesto y no se pueden implementar en la API funcional.
A veces, solo necesitas escribir todo desde cero.
Al escribir actividades avanzadas, es posible que desee hacer cosas que están fuera del alcance de "definir un DAG de capas": por ejemplo, es posible que desee exponer múltiples métodos personalizados de entrenamiento e inferencia en su instancia de modelo. Esto requiere subclases.
Para profundizar más en las diferencias entre la API funcional y la subclasificación de modelos, puede leer [¿Qué son las API simbólicas e imperativas en TensorFlow 2.0?](https://medium.com/tensorflow/what-are-symbolic-and-imperative-apis-in-tensorflow-2-0-dfccecb01021).
Mezcla y combina diferentes estilos de API
Es importante destacar que elegir entre la subclasificación de API funcional o modelo no es una decisión binaria que lo restringe a una categoría de modelos. Todos los modelos en la API tf.keras pueden interactuar con cada uno, ya sean modelos secuenciales, modelos funcionales o modelos / capas subclasificados escritos desde cero.
Siempre puede usar un modelo funcional o modelo secuencial como parte de un modelo / capa subclasificado:
End of explanation
"""
units = 32
timesteps = 10
input_dim = 5
batch_size = 16
class CustomRNN(layers.Layer):
def __init__(self):
super(CustomRNN, self).__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation='tanh')
self.projection_2 = layers.Dense(units=units, activation='tanh')
self.classifier = layers.Dense(1, activation='sigmoid')
def call(self, inputs):
outputs = []
state = tf.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = tf.stack(outputs, axis=1)
return self.classifier(features)
# Tenga en cuenta que especificamos un tamaño de lote estático para las entradas con `batch_shape`
# arg, porque el cálculo interno de `CustomRNN` requiere un tamaño de lote estático
# (cuando creamos el tensor de ceros `estado`).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
model = keras.Model(inputs, outputs)
rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, 10, 5)))
"""
Explanation: Inversamente, puede usar cualquier Capa o Modelo subclasificado en la API Funcional siempre que implemente un método call que siga uno de los siguientes patrones:
`call(self, inputs, **kwargs)`, donde inputs es un tensor o una estructura anidada de tensores (por ejemplo, una lista de tensores), y donde `**kwargs` son argumentos no tensoriales (no entradas).
`call(self, inputs, training=None, **kwargs)`, donde training es un valor booleano que indica si la capa debe comportarse en modo de entrenamiento o en modo de inferencia.
`call(self, inputs, mask=None, **kwargs)`, donde mask es un tensor de máscara booleano (útil para RNN, por ejemplo).
`call(self, inputs, training=None, mask=None, **kwargs)`: por supuesto, puede tener tanto un comportamiento específico de enmascaramiento como de entrenamiento al mismo tiempo.
Además, si implementa el método get_config en su Capa o Modelo personalizado, los modelos funcionales que cree con él seguirán siendo serializables y clonables.
Aquí hay un ejemplo rápido en el que usamos un RNN personalizado escrito desde cero en un modelo funcional:
End of explanation
"""
|
rasbt/biopandas
|
docs/tutorials/Working_with_MOL2_Structures_in_DataFrames.ipynb
|
bsd-3-clause
|
%load_ext watermark
%watermark -d -u -p pandas,biopandas
from biopandas.mol2 import PandasMol2
import pandas as pd
pd.set_option('display.width', 600)
pd.set_option('display.max_columns', 8)
"""
Explanation: BioPandas
Author: Sebastian Raschka mail@sebastianraschka.com
License: BSD 3 clause
Project Website: http://rasbt.github.io/biopandas/
Code Repository: https://github.com/rasbt/biopandas
End of explanation
"""
from biopandas.mol2 import PandasMol2
pmol = PandasMol2().read_mol2('./data/1b5e_1.mol2')
"""
Explanation: Working with MOL2 Structures in DataFrames
The Tripos MOL2 format is a common format for working with small molecules. In this tutorial, we will go over some examples that illustrate how we can use Biopandas' MOL2 DataFrames to analyze molecules conveniently.
Loading MOL2 Files
Using the read_mol2 method, we can read MOL2 files from standard .mol2 text files:
End of explanation
"""
pmol = PandasMol2().read_mol2('./data/40_mol2_files.mol2.gz')
"""
Explanation: [File link: 1b5e_1.mol2]
The read_mol2 method can also load structures from .mol2.gz files, but if you have a multi-mol2 file, keep in mind that it will only fetch the first molecule in this file. In the section "Parsing Multi-MOL2 files," we will see how we can parse files that contain multiple structures.
End of explanation
"""
print('Molecule ID: %s' % pmol.code)
print('\nRaw MOL2 file contents:\n\n%s\n...' % pmol.mol2_text[:500])
"""
Explanation: [File link: 40_mol2_files.mol2.gz]
After the file was successfully loaded, we have access to the following basic PandasMol2 attributes:
End of explanation
"""
pmol.df.head(3)
"""
Explanation: The most interesting and useful attribute, however, is the PandasMol2.df DataFrame, which contains the ATOM section of the MOL2 structure. Let's print the first 3 lines from the ATOM coordinate section to see what it looks like:
End of explanation
"""
from biopandas.mol2 import PandasMol2
pmol = PandasMol2()
pmol.read_mol2('./data/1b5e_1.mol2')
pmol.df.tail(10)
"""
Explanation: The MOL2 Data Format
PandasMol2 expects the MOL2 file to be in the standard Tripos MOL2 format, and most importantly, that the "@<TRIPOS>ATOM" section is consistent with the following format convention:
Format:
atom_id atom_name x y z atom_type [subst_id
[subst_name [charge [status_bit]]]]
atom_id (integer) = the ID number of the atom at the time the file was created. This is provided for reference only and is not used when the .mol2 file is read into SYBYL.
atom_name (string) = the name of the atom.
x (real) = the x coordinate of the atom.
y (real) = the y coordinate of the atom.
z (real) = the z coordinate of the atom.
atom_type (string) = the SYBYL atom type for the atom.
subst_id (integer) = the ID number of the substructure containing the atom.
subst_name (string) = the name of the substructure containing the atom.
charge (real) = the charge associated with the atom.
status_bit (string) = the internal SYBYL status bits associated with the atom. These should never be set by the user. Valid status bits are DSPMOD, TYPECOL, CAP, BACKBONE, DICT, ESSENTIAL, WATER and DIRECT.
For example, the contents of a typical Tripos MOL2 file may look like this:
@<TRIPOS>MOLECULE
DCM Pose 1
32 33 0 0 0
SMALL
USER_CHARGES
@<TRIPOS>ATOM
1 C1 18.8934 5.5819 24.1747 C.2 1 <0> -0.1356
2 C2 18.1301 4.7642 24.8969 C.2 1 <0> -0.0410
3 C3 18.2645 6.8544 23.7342 C.2 1 <0> 0.4856
...
31 H11 18.5977 8.5756 22.6932 H 1 <0> 0.4000
32 H12 14.2530 1.0535 27.4278 H 1 <0> 0.4000
@<TRIPOS>BOND
1 1 2 2
2 1 3 1
3 2 11 1
4 3 10 2
5 3 12 1
...
28 8 27 1
29 9 28 1
30 9 29 1
31 12 30 1
32 12 31 1
33 18 32 1
Working with MOL2 DataFrames
In the previous sections, we've seen how to load MOL2 structures into DataFrames and how to access them. Once we have the ATOM section of a MOL2 file in a DataFrame format, we can readily slice and dice the molecular structure and analyze it.
To demonstrate some typical use cases, let us load the structure of deoxycytidylate hydroxymethylase (DCM):
End of explanation
"""
pmol.df[pmol.df['atom_type'] != 'H'].tail(10)
"""
Explanation: [File link: 1b5e_1.mol2]
For example, we can exclude all hydrogen atoms (keeping only the heavy atoms) by filtering on the atom type column:
End of explanation
"""
keto = pmol.df[pmol.df['atom_type'] == 'O.2']
print('number of keto groups: %d' % keto.shape[0])
keto
"""
Explanation: Or, if we like to count the number of keto-groups in this molecule, we can do the following:
End of explanation
"""
from biopandas.mol2 import PandasMol2
pmol = PandasMol2().read_mol2('./data/1b5e_1.mol2')
"""
Explanation: A list of all the allowed atom types that can be found in Tripos MOL2 files is provided below:
Code Definition
C.3 carbon sp3
C.2 carbon sp2
C.1 carbon sp
C.ar carbon aromatic
C.cat carbocation (C+) used only in a guanidinium group
N.3 nitrogen sp3
N.2 nitrogen sp2
N.1 nitrogen sp
N.ar nitrogen aromatic
N.am nitrogen amide
N.pl3 nitrogen trigonal planar
N.4 nitrogen sp3 positively charged
O.3 oxygen sp3
O.2 oxygen sp2
O.co2 oxygen in carboxylate and phosphate groups
O.spc oxygen in Single Point Charge (SPC) water model
O.t3p oxygen in Transferable Intermolecular Potential (TIP3P) water model
S.3 sulfur sp3
S.2 sulfur sp2
S.O sulfoxide sulfur
S.O2/S.o2 sulfone sulfur
P.3 phosphorous sp3
F fluorine
H hydrogen
H.spc hydrogen in Single Point Charge (SPC) water model
H.t3p hydrogen in Transferable Intermolecular Potential (TIP3P) water model
LP lone pair
Du dummy atom
Du.C dummy carbon
Any any atom
Hal halogen
Het heteroatom = N, O, S, P
Hev heavy atom (non hydrogen)
Li lithium
Na sodium
Mg magnesium
Al aluminum
Si silicon
K potassium
Ca calcium
Cr.thm chromium (tetrahedral)
Cr.oh chromium (octahedral)
Mn manganese
Fe iron
Co.oh cobalt (octahedral)
Cu copper
Plotting
Since we are using pandas under the hood, which in turn uses matplotlib, we can produce quick summary plots of our MOL2 structures conveniently. Below are a few examples of how to visualize molecular properties.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
"""
Explanation: [File link: 1b5e_1.mol2]
End of explanation
"""
pmol.df['atom_type'].value_counts().plot(kind='bar')
plt.xlabel('atom type')
plt.ylabel('count')
plt.show()
"""
Explanation: For instance, let's say we are interested in the counts of the different atom types that can be found in the MOL2 file; we could do the following:
End of explanation
"""
pmol.df['element_type'] = pmol.df['atom_type'].apply(lambda x: x.split('.')[0])
pmol.df['element_type'].value_counts().plot(kind='bar')
plt.xlabel('element type')
plt.ylabel('count')
plt.show()
"""
Explanation: If this is too fine-grained for our needs, we could summarize the different atom types by atomic elements:
End of explanation
"""
groupby_charge = pmol.df.groupby(['atom_type'])['charge']
groupby_charge.mean().plot(kind='bar', yerr=groupby_charge.std())
plt.ylabel('charge')
plt.show()
"""
Explanation: One of the coolest features in pandas is the groupby method. Below is an example plotting the average charge of the different atom types with the standard deviation as error bars:
End of explanation
"""
from biopandas.mol2 import PandasMol2
l_1 = PandasMol2().read_mol2('./data/1b5e_1.mol2')
l_2 = PandasMol2().read_mol2('./data/1b5e_2.mol2')
r_heavy = PandasMol2.rmsd(l_1.df, l_2.df)
r_all = PandasMol2.rmsd(l_1.df, l_2.df, heavy_only=False)
print('Heavy-atom RMSD: %.4f Angstrom' % r_heavy)
print('All-atom RMSD: %.4f Angstrom' % r_all)
"""
Explanation: Computing the Root Mean Square Deviation
The Root-mean-square deviation (RMSD) is simply a measure of the average distance between atoms of 2 structures. This calculation of the Cartesian error follows the equation:
$$RMSD(a, b) = \sqrt{\frac{1}{n} \sum^{n}_{i=1} \big((a_{ix} - b_{ix})^2 + (a_{iy} - b_{iy})^2 + (a_{iz} - b_{iz})^2 \big)}
= \sqrt{\frac{1}{n} \sum^{n}_{i=1} || a_i - b_i||_2^2}$$
So, assuming that we have the following 2 conformations of a ligand molecule,
we can compute the RMSD as follows:
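For intuition, here is a minimal NumPy sketch of the same calculation (it assumes both DataFrames list the corresponding atoms in the same order, which PandasMol2.rmsd also expects):
```python
import numpy as np

def naive_rmsd(df1, df2):
    # (n_atoms, 3) coordinate arrays from the MOL2 DataFrames
    a = df1[['x', 'y', 'z']].values
    b = df2[['x', 'y', 'z']].values
    # mean squared Euclidean distance over all atom pairs, then the square root
    return np.sqrt(((a - b) ** 2).sum(axis=1).mean())

# naive_rmsd(l_1.df, l_2.df) should be close to the all-atom RMSD from PandasMol2.rmsd
```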
End of explanation
"""
from biopandas.mol2 import PandasMol2
pmol = PandasMol2().read_mol2('./data/1b5e_1.mol2')
keto_coord = pmol.df[pmol.df['atom_type'] == 'O.2'][['x', 'y', 'z']]
keto_coord
"""
Explanation: [File links: 1b5e_1.mol2, 1b5e_2.mol2]
<br>
Filtering Atoms by Distance
We can use the distance method to compute the distance between each atom (or a subset of atoms) in our data frame and a three-dimensional reference point. For example, let's assume we are interested in computing the distance between a keto group in the DCM molecule, which we've seen earlier, and other atoms in the same molecule.
First, let's get the coordinates of all keto-groups in this molecule:
End of explanation
"""
print('x, y, z coords:', keto_coord.values[0])
distances = pmol.distance(keto_coord.values[0])
"""
Explanation: In the following example, we use PandasMol2's distance method. The distance method returns a pandas Series object containing the Euclidean distance between an atom and all other atoms in the structure. In the following example, keto_coord.values[0] refers to the x, y, z coordinates of the first row (i.e., first keto group) in the array above:
End of explanation
"""
pmol.df['distances'] = distances
pmol.df.head()
"""
Explanation: For our convenience, we can add these distances to our MOL2 DataFrame:
End of explanation
"""
pmol.df[pmol.df['atom_type'] == 'O.2']
"""
Explanation: Now, say we are interested in the Euclidean distance between the two keto groups in the molecule:
End of explanation
"""
all_within_3A = pmol.df[pmol.df['distances'] <= 3.0]
all_within_3A.tail()
"""
Explanation: In the example above, the distance between the two keto groups is about 8 angstroms.
Another common task that we can perform using these atomic distances is to select only the neighboring atoms of the keto group (here: atoms within 3 angstrom). The code is as follows:
End of explanation
"""
from biopandas.mol2 import split_multimol2
mol2_id, mol2_cont = next(split_multimol2('./data/40_mol2_files.mol2'))
print('Molecule ID:\n', mol2_id)
print('First 10 lines:\n', mol2_cont[:10])
"""
Explanation: Parsing Multi-MOL2 files
Basic Multi-MOL2 File Parsing
As mentioned earlier, PandasMol2.read_mol2 method only reads in the first molecule if it is given a multi-MOL2 file. However, if we want to create DataFrames from multiple structures in a MOL2 file, we can use the handy split_multimol2 generator.
The split_multimol2 generator yields tuples containing the molecule IDs and the MOL2 content as strings in a list -- each line in the MOL2 file is stored as a string in the list.
End of explanation
"""
pdmol = PandasMol2()
with open('./data/filtered.mol2', 'w') as f:
for mol2 in split_multimol2('./data/40_mol2_files.mol2'):
pdmol.read_mol2_from_list(mol2_lines=mol2[1], mol2_code=mol2[0])
# do some analysis
keep_molecule = False
# save molecule if it passes our filter criterion
if keep_molecule:
# note that the mol2_text contains the original mol2 content
f.write(pdmol.mol2_text)
"""
Explanation: [File link: 40_mol2_files.mol2]
We can now use this generator to loop over all molecules in a multi-MOL2 file and create PandasMol2 DataFrames. A typical use case would be the filtering of mol2 files by certain properties:
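For instance, a concrete criterion (chosen purely for illustration; the output file name is hypothetical) could keep only molecules that contain at least one sp2-hybridized oxygen atom, i.e., a filled-in version of that template:
```python
from biopandas.mol2 import PandasMol2, split_multimol2

pdmol = PandasMol2()

with open('./data/keto_only.mol2', 'w') as f:  # hypothetical output file
    for mol2 in split_multimol2('./data/40_mol2_files.mol2'):
        pdmol.read_mol2_from_list(mol2_lines=mol2[1], mol2_code=mol2[0])
        # keep the molecule if it contains at least one keto-type oxygen (O.2)
        keep_molecule = (pdmol.df['atom_type'] == 'O.2').any()
        if keep_molecule:
            # mol2_text still holds the original MOL2 block for this molecule
            f.write(pdmol.mol2_text)
```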
End of explanation
"""
import pandas as pd
from mputil import lazy_imap
from biopandas.mol2 import PandasMol2
from biopandas.mol2 import split_multimol2
# Selection strings to capture
# all molecules that contain at least one sp2 hybridized
# oxygen atom and at least one Fluorine atom
SELECTIONS = ["(pdmol.df.atom_type == 'O.2')",
"(pdmol.df.atom_type == 'F')"]
# Path to the multi-mol2 input file
MOL2_FILE = "./data/40_mol2_files.mol2"
# Data processing function to be run in parallel
def data_processor(mol2):
"""Return molecule ID if there's a match and '' otherwise"""
pdmol = PandasMol2().read_mol2_from_list(mol2_lines=mol2[1],
mol2_code=mol2[0])
match = mol2[0]
for sub_sele in SELECTIONS:
if not pd.eval(sub_sele).any():
match = ''
break
return match
# Process molecules and save IDs of hits to disk
with open('./data/selected_ids.txt', 'w') as f:
searched, found = 0, 0
for chunk in lazy_imap(data_processor=data_processor,
data_generator=split_multimol2(MOL2_FILE),
n_cpus=0): # means all available cpus
for mol2_id in chunk:
if mol2_id:
# write IDs of matching molecules to disk
f.write('%s\n' % mol2_id)
found += 1
searched += len(chunk)
print('Searched %d molecules. Got %d hits.' % (searched, found))
"""
Explanation: Using Multiprocessing for Multi-MOL2 File Analysis
To improve the computational efficiency and throughput for multi-mol2 analyses, it is recommended to use the mputil package, which evaluates Python generators lazily. The lazy_imap function from mputil is based on the imap function from Python's standard-library multiprocessing module, but it doesn't consume the generator upfront. This lazy evaluation is important, for example, if we are parsing large (possibly Gigabyte- or Terabyte-large) multi-mol2 files for multiprocessing.
The following example provides a template for atom-type based molecule queries, but the data_processor function can be extended to do any kind of functional group queries (for example, involving the 'charge' column and/or PandasMol2.distance method).
End of explanation
"""
|
IS-ENES-Data/submission_forms
|
test/forms/CORDEX/CORDEX_ki_1234.ipynb
|
apache-2.0
|
from dkrz_forms import form_widgets
form_widgets.show_status('form-submission')
"""
Explanation: CORDEX ESGF submission form
General Information
Data to be submitted for ESGF data publication must follow the rules outlined in the Cordex Archive Design Document <br /> (https://verc.enes.org/data/projects/documents/cordex-archive-design)
Thus file names have to follow the pattern:<br />
VariableName_Domain_GCMModelName_CMIP5ExperimentName_CMIP5EnsembleMember_RCMModelName_RCMVersionID_Frequency[_StartTime-EndTime].nc <br />
Example: tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc
The directory structure in which these files are stored follow the pattern:<br />
activity/product/Domain/Institution/
GCMModelName/CMIP5ExperimentName/CMIP5EnsembleMember/
RCMModelName/RCMVersionID/Frequency/VariableName <br />
Example: CORDEX/output/AFR-44/MPI-CSC/MPI-M-MPI-ESM-LR/rcp26/r1i1p1/MPI-CSC-REMO2009/v1/mon/tas/tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc
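Purely as an illustration of the naming pattern (not part of the official submission procedure, and with hypothetical field values), the file name can be composed like this:
```python
# Hypothetical example of composing a CORDEX file name from its components
parts = dict(
    variable="tas", domain="AFR-44", gcm="MPI-M-MPI-ESM-LR",
    experiment="rcp26", ensemble="r1i1p1", rcm="MPI-CSC-REMO2009",
    version="v1", frequency="mon", time_range="200601-201012")
file_name = ("{variable}_{domain}_{gcm}_{experiment}_{ensemble}_"
             "{rcm}_{version}_{frequency}_{time_range}.nc").format(**parts)
print(file_name)
# tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_200601-201012.nc
```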
Notice: If your model is not yet registered, please contact cordex-registration@cordex.org
specifying: Full institution name, Short institution name (acronym), Contact person and
e-mail, RCM Name (acronym), Terms of Use (unrestricted or non-commercial only) and the CORDEX domains in which you are interested.
At some CORDEX ESGF data centers a 'data submission form' is in use in order to improve initial information exchange between data providers and the data center. The form has to be filled in before the publication process can be started. In case you have questions please contact the individual data centers:
o at DKRZ: cordex@dkrz.de
o at SMHI: rossby.cordex@smhi.se
End of explanation
"""
MY_LAST_NAME = "ki" # e.gl MY_LAST_NAME = "schulz"
#-------------------------------------------------
from dkrz_forms import form_handler, form_widgets, checks
form_info = form_widgets.check_pwd(MY_LAST_NAME)
sfg = form_handler.init_form(form_info)
sf = sfg.sub.entity_out.form_info
"""
Explanation: Start submission procedure
The submission is based on this interactive document consisting of "cells" you can modify and then evaluate
evaluation of cells is done by selecting the cell and then press the keys "Shift" + "Enter"
<br /> please evaluate the following cell to initialize your form
End of explanation
"""
sf.submission_type = "..." # example: sf.submission_type = "initial_version"
"""
Explanation: please provide information on the contact person for this CORDEX data submission request
Type of submission
please specify the type of this data submission:
- "initial_version" for first submission of data
- "new _version" for a re-submission of previousliy submitted data
- "retract" for the request to retract previously submitted data
End of explanation
"""
sf.institution = "..." # example: sf.institution = "Alfred Wegener Institute"
"""
Explanation: Requested general information
Please provide model and institution info as well as an example of a file name
institution
The value of this field has to equal the value of the optional NetCDF attribute 'institution'
(long version) in the data files if the latter is used.
End of explanation
"""
sf.institute_id = "..." # example: sf.institute_id = "AWI"
"""
Explanation: institute_id
The value of this field has to equal the value of the global NetCDF attribute 'institute_id'
in the data files and must equal the 4th directory level. It is needed before the publication
process is started in order that the value can be added to the relevant CORDEX list of CV1
if not yet there. Note that 'institute_id' has to be the first part of 'model_id'
End of explanation
"""
sf.model_id = "..." # example: sf.model_id = "AWI-HIRHAM5"
"""
Explanation: model_id
The value of this field has to be the value of the global NetCDF attribute 'model_id'
in the data files. It is needed before the publication process is started in order that
the value can be added to the relevant CORDEX list of CV1 if not yet there.
Note that it must be composed of the 'institute_id' followed by the RCM CORDEX model name,
separated by a dash. It is part of the file name and the directory structure.
End of explanation
"""
sf.experiment_id = "..." # example: sf.experiment_id = "evaluation"
# ["value_a","value_b"] in case of multiple experiments
sf.time_period = "..." # example: sf.time_period = "197901-201412"
# ["time_period_a","time_period_b"] in case of multiple values
"""
Explanation: experiment_id and time_period
Experiment has to equal the value of the global NetCDF attribute 'experiment_id'
in the data files. Time_period gives the period of data for which the publication
request is submitted. If you intend to submit data from multiple experiments you may
add one line for each additional experiment or send in additional publication request sheets.
End of explanation
"""
sf.example_file_name = "..." # example: sf.example_file_name = "tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc"
# Please run this cell as it is to check your example file name structure
# to_do: implement submission_form_check_file function - output result (attributes + check_result)
form_handler.cordex_file_info(sf,sf.example_file_name)
"""
Explanation: Example file name
Please provide an example file name of a file in your data collection,
this name will be used to derive the other
End of explanation
"""
sf.grid_mapping_name = "..." # example: sf.grid_mapping_name = "rotated_latitude_longitude"
"""
Explanation: information on the grid_mapping
the NetCDF/CF name of the data grid ('rotated_latitude_longitude', 'lambert_conformal_conic', etc.),
i.e. either that of the native model grid, or 'latitude_longitude' for the regular -XXi grids
End of explanation
"""
sf.grid_as_specified_if_rotated_pole = "..." # example: sf.grid_as_specified_if_rotated_pole = "yes"
"""
Explanation: Does the grid configuration exactly follow the specifications in ADD2 (Table 1)
in case the native grid is 'rotated_pole'? If not, comment on the differences; otherwise write 'yes' or 'N/A'. If the data is not delivered on the computational grid it has to be noted here as well.
End of explanation
"""
sf.data_qc_status = "..." # example: sf.data_qc_status = "QC2-CORDEX"
sf.data_qc_comment = "..." # any comment of quality status of the files
"""
Explanation: Please provide information on quality check performed on the data you plan to submit
Please answer 'no', 'QC1', 'QC2-all', 'QC2-CORDEX', or 'other'.
'QC1' refers to the compliancy checker that can be downloaded at http://cordex.dmi.dk.
'QC2' refers to the quality checker developed at DKRZ.
If your answer is 'other', please give some information.
End of explanation
"""
sf.terms_of_use = "..." # example: sf.terms_of_use = "unrestricted"
"""
Explanation: Terms of use
Please give the terms of use that shall be asigned to the data.
The options are 'unrestricted' and 'non-commercial only'.
For the full text 'Terms of Use' of CORDEX data refer to
http://cordex.dmi.dk/joomla/images/CORDEX/cordex_terms_of_use.pdf
End of explanation
"""
sf.directory_structure = "..." # example: sf.directory_structure = "compliant"
"""
Explanation: Information on directory structure and data access path
(and other information needed for data transport and data publication)
If there is any directory structure deviation from the CORDEX standard please specify here.
Otherwise enter 'compliant'. Please note that deviations MAY imply that data can not be accepted.
End of explanation
"""
sf.data_path = "..." # example: sf.data_path = "mistral.dkrz.de:/mnt/lustre01/work/bm0021/k204016/CORDEX/archive/"
sf.data_information = "..." # ...any info where data can be accessed and transfered to the data center ... "
"""
Explanation: Give the path where the data reside, for example:
blizzard.dkrz.de:/scratch/b/b364034/. If not applicable write N/A and give data access information in the data_information string
End of explanation
"""
sf.exclude_variables_list = "..." # example: sf.exclude_variables_list=["bnds", "vertices"]
"""
Explanation: Exclude variable list
In each CORDEX file there may be only one variable which shall be published and searchable at the ESGF portal (target variable). In order to facilitate publication, all non-target variables are included in a list used by the publisher to avoid publication. A list of known non-target variables is [time, time_bnds, lon, lat, rlon ,rlat ,x ,y ,z ,height, plev, Lambert_Conformal, rotated_pole]. Please enter other variables into the left field if applicable (e.g. grid description variables), otherwise write 'N/A'.
End of explanation
"""
sf.uniqueness_of_tracking_id = "..." # example: sf.uniqueness_of_tracking_id = "yes"
"""
Explanation: Uniqueness of tracking_id and creation_date
In case any of your files is replacing a file already published, it must not have the same tracking_id nor
the same creation_date as the file it replaces.
Did you make sure that this is not the case?
Reply 'yes'; otherwise adapt the new file versions.
End of explanation
"""
sf.variable_list_day = [
"clh","clivi","cll","clm","clt","clwvi",
"evspsbl","evspsblpot",
"hfls","hfss","hurs","huss","hus850",
"mrfso","mrro","mrros","mrso",
"pr","prc","prhmax","prsn","prw","ps","psl",
"rlds","rlus","rlut","rsds","rsdt","rsus","rsut",
"sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund",
"tas","tasmax","tasmin","tauu","tauv","ta200","ta500","ta850","ts",
"uas","ua200","ua500","ua850",
"vas","va200","va500","va850","wsgsmax",
"zg200","zg500","zmla"
]
sf.variable_list_mon = [
"clt",
"evspsbl",
"hfls","hfss","hurs","huss","hus850",
"mrfso","mrro","mrros","mrso",
"pr","psl",
"rlds","rlus","rlut","rsds","rsdt","rsus","rsut",
"sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund",
"tas","tasmax","tasmin","ta200",
"ta500","ta850",
"uas","ua200","ua500","ua850",
"vas","va200","va500","va850",
"zg200","zg500"
]
sf.variable_list_sem = [
"clt",
"evspsbl",
"hfls","hfss","hurs","huss","hus850",
"mrfso","mrro","mrros","mrso",
"pr","psl",
"rlds","rlus","rlut","rsds","rsdt","rsus","rsut",
"sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund",
"tas","tasmax","tasmin","ta200","ta500","ta850",
"uas","ua200","ua500","ua850",
"vas","va200","va500","va850",
"zg200","zg500"
]
sf.variable_list_fx = [
"areacella",
"mrsofc",
"orog",
"rootd",
"sftgif","sftlf"
]
"""
Explanation: Variable list
list of variables submitted -- please remove the ones you do not provide:
End of explanation
"""
# simple consistency check report for your submission form
res = form_handler.check_submission(sf)
sf.sub.valid_submission = res['valid_submission']
form_handler.DictTable(res)
"""
Explanation: Check your submission form
Please evaluate the following cell to check your submission form.
In case of errors, please go up to the corresponding information cells and update your information accordingly.
End of explanation
"""
form_handler.save_form(sf,"..my comment..") # edit my comment info
#evaluate this cell if you want a reference to the saved form emailed to you
# (only available if you access this form via the DKRZ form hosting service)
form_handler.email_form_info()
# evaluate this cell if you want a reference (provided by email)
# (only available if you access this form via the DKRZ hosting service)
form_handler.email_form_info(sf)
"""
Explanation: Save your form
Your form will be stored (the form name consists of your last name plus your keyword).
End of explanation
"""
form_handler.email_form_info(sf)
form_handler.form_submission(sf)
"""
Explanation: Officially submit your form
The form will be submitted to the DKRZ team for processing.
You will also receive a confirmation email with a reference to your online form for future modifications.
End of explanation
"""
|
ampl/amplpy
|
notebooks/efficient_frontier.ipynb
|
bsd-3-clause
|
from google.colab import auth
auth.authenticate_user()
!pip install -q amplpy ampltools gspread --upgrade
"""
Explanation: Install the needed modules and authenticate the user to use Google Sheets
End of explanation
"""
MODULES=['ampl', 'cplex']
from ampltools import cloud_platform_name, ampl_notebook
from amplpy import AMPL, register_magics
if cloud_platform_name() is None:
ampl = AMPL() # Use local installation of AMPL
else:
ampl = ampl_notebook(modules=MODULES) # Install AMPL and use it
register_magics(ampl_object=ampl) # Evaluate %%ampl_eval cells with ampl.eval()
"""
Explanation: Google Colab & Kaggle integration
End of explanation
"""
import gspread
from google.auth import default
creds, _ = default()
gclient = gspread.authorize(creds)
def open_spreedsheet(name):
if name.startswith('https://'):
return gclient.open_by_url(name)
return gclient.open(name)
import pandas as pd
def get_worksheet_values(name):
return spreedsheet.worksheet(name).get_values(value_render_option='UNFORMATTED_VALUE')
def table_to_dataframe(rows):
return pd.DataFrame(rows[1:], columns=rows[0]).set_index(rows[0][0])
def matrix_to_dataframe(rows, tr=False, nameOverride = None):
col_labels = rows[0][1:]
row_labels = [row[0] for row in rows[1:]]
def label(pair):
return pair if not tr else (pair[1], pair[0])
data = {
label((rlabel, clabel)): rows[i+1][j+1]
for i, rlabel in enumerate(row_labels)
for j, clabel in enumerate(col_labels)}
df = pd.Series(data).reset_index()
name = nameOverride if nameOverride else rows[0][0]
df.columns = ['index1', 'index2', name]
return df.set_index(['index1', 'index2'])
"""
Explanation: Auxiliary functions
End of explanation
"""
spreedsheet = open_spreedsheet("https://docs.google.com/spreadsheets/d/1d9wRk2PJgYsjiNVoKi1FcawGSVUB5mxSeQUBdEleUv4/edit?usp=sharing")
rows = get_worksheet_values('covariance')
# To be able to use ampl.set_data, we override the data column
# of the dataframe to the name of the parameter we will be setting (S)
covar = matrix_to_dataframe(rows, nameOverride = "S")
rows = get_worksheet_values('expectedReturns')
expected = table_to_dataframe(rows)
"""
Explanation: Efficient Frontier Example
Open a connection to the spreadsheet and get the data, using the auxiliary functions defined above to obtain pandas dataframes.
End of explanation
"""
%%ampl_eval
set A ordered; # assets
param S{A, A}; # cov matrix
param mu{A} default 0; # expected returns
param lb default 0;
param ub default Infinity;
param targetReturn default -Infinity;
var w{A} >= lb <= ub; # weights
var portfolioReturn >= targetReturn;
minimize portfolio_variance: sum {i in A, j in A} S[i, j] * w[i] * w[j];
maximize portfolio_return: sum{a in A} mu[a] * w[a];
s.t. def_total_weight: sum {i in A} w[i] = 1;
s.t. def_target_return: sum{a in A} mu[a] * w[a] >= targetReturn;
"""
Explanation: The following is a version of the Markowitz mean-variance model that can be used to calculate the efficient frontier
End of explanation
"""
# Set expected returns
ampl.set_data(expected, set_name="A")
# Set covariance data
ampl.set_data(covar)
"""
Explanation: Using amplpy we set the data of the ampl entities using the dataframes we got from google sheets
End of explanation
"""
%%ampl_eval
# Set options
option solver cplex;
option solver_msg 0;
# The following declarations are needed for the efficient frontier script
param nPoints := 20;
param minReturn;
param maxReturn;
param variances{1..nPoints+1};
param returns{1..nPoints+1};
param delta = (maxReturn-minReturn)/nPoints;
"""
Explanation: Set a few options in AMPL to streamline the execution
End of explanation
"""
%%ampl_eval
# Solve the min variance problem to get the minimum return
objective portfolio_variance;
solve > NUL;
printf "Min portfolio variance: %f, return: %f\n", portfolio_variance, portfolio_return;
let minReturn := portfolio_return;
# Store the first data point in the efficient frontier
let variances[1] := portfolio_variance;
let returns[1] := minReturn;
# Solve the max return problem
objective portfolio_return;
solve > NUL;
printf "Max portfolio variance: %f, return: %f", portfolio_variance, portfolio_return;
let maxReturn := portfolio_return;
"""
Explanation: Calculate the extreme points for the efficient frontier procedure: get the minimum return by solving the min variance problem, then get the maximum return by solving the max return problem.
End of explanation
"""
%%ampl_eval
# Switch objective to portfolio variance
objective portfolio_variance;
# Set starting point
let targetReturn := minReturn;
# Calculate the efficient frontier
for{j in 1..nPoints}
{
let targetReturn := targetReturn+delta;
solve > NUL;
printf "Return %f, variance %f\n", portfolio_return, portfolio_variance;
let returns[j+1] := portfolio_return;
let variances[j+1]:=portfolio_variance;
};
"""
Explanation: Now that we have the upper and lower values for the expected returns, iterate nPoints times, setting the desired return at regularly increasing levels and solving the min variance problem, thus getting the (return, variance) points needed to plot the efficient frontier.
End of explanation
"""
import matplotlib.pyplot as plt
df = ampl.get_data('returns', 'variances').toPandas()
plt.plot(df.variances*1000, df.returns)
plt.xlabel("Variance*1000")
plt.ylabel("Expected Return")
plt.title("Efficient frontier")
plt.show()
"""
Explanation: Finally, we plot the efficient frontier
End of explanation
"""
|
hhain/sdap17
|
notebooks/robin_ue2/mustererkennung_in_funkmessdaten.ipynb
|
mit
|
# imports
import re
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pprint as pp
"""
Explanation: Pattern Recognition in Radio Measurement Data
Task 1: Loading the database into a Jupyter notebook
End of explanation
"""
hdf = pd.HDFStore('../../data/raw/TestMessungen_NEU.hdf')
print(hdf.keys())
"""
Explanation: We open the database and print the keys of the individual tables.
End of explanation
"""
df_x1_t1_trx_1_4 = hdf.get('/x1/t1/trx_1_4')
print("Rows:", df_x1_t1_trx_1_4.shape[0])
print("Columns:", df_x1_t1_trx_1_4.shape[1])
"""
Explanation: Task 2: Inspecting a single dataframe
We load the frame x1_t1_trx_1_4 and look at its dimensions.
End of explanation
"""
# first inspection of columns from df_x1_t1_trx_1_4
df_x1_t1_trx_1_4.head(5)
"""
Explanation: Next, we examine the attribute composition for two receiver-sender groups as examples.
End of explanation
"""
# Little function to retrieve sender-receiver tuples from df columns
def extract_snd_rcv(df):
regex = r"trx_[1-4]_[1-4]"
# creates a set containing the different pairs
snd_rcv = {x[4:7] for x in df.columns if re.search(regex, x)}
return [(x[0],x[-1]) for x in snd_rcv]
# Sums the number of columns for each sender-receiver tuple
def get_column_counts(snd_rcv, df):
col_counts = {}
for snd,rcv in snd_rcv:
col_counts['Columns for pair {} {}:'.format(snd, rcv)] = len([i for i, word in enumerate(list(df.columns)) if word.startswith('trx_{}_{}'.format(snd, rcv))])
return col_counts
# Analyze the column composition of a given measurement.
def analyse_columns(df):
df_snd_rcv = extract_snd_rcv(df)
cc = get_column_counts(df_snd_rcv, df)
for x in cc:
print(x, cc[x])
print("Sum of pair related columns: %i" % sum(cc.values()))
print()
print("Other columns are:")
for att in [col for col in df.columns if 'ifft' not in col and 'ts' not in col]:
print(att)
# Analyze the values of the target column.
def analyze_target(df):
print(df['target'].unique())
print("# Unique values in target: %i" % len(df['target'].unique()))
"""
Explanation: For the analysis of the frames we define a few helper functions.
End of explanation
"""
analyse_columns(df_x1_t1_trx_1_4)
"""
Explanation: Now determine the column composition of df_x1_t1_trx_1_4.
End of explanation
"""
analyze_target(df_x1_t1_trx_1_4)
"""
Explanation: Look at the contents of the "target" column of df_x1_t1_trx_1_4.
End of explanation
"""
df_x3_t2_trx_3_1 = hdf.get('/x3/t2/trx_3_1')
print("Rows:", df_x3_t2_trx_3_1.shape[0])
print("Columns:", df_x3_t2_trx_3_1.shape[1])
"""
Explanation: Next, we load the frame x3_t2_trx_3_1 and look at its dimensions.
End of explanation
"""
analyse_columns(df_x3_t2_trx_3_1)
analyze_target(df_x3_t2_trx_3_1)
"""
Explanation: This is followed by an analysis of its column composition and its "target" values.
End of explanation
"""
vals = df_x1_t1_trx_1_4.loc[:,'trx_2_4_ifft_0':'trx_2_4_ifft_1999'].values
# one big heatmap
plt.figure(figsize=(14, 12))
plt.title('trx_2_4_ifft')
plt.xlabel("ifft of frequency")
plt.ylabel("measurement")
ax = sns.heatmap(vals, xticklabels=200, yticklabels=20, vmin=0, vmax=1, cmap='nipy_spectral_r')
plt.show()
"""
Explanation: Question: What do you observe regarding the "receiver-number_sender-number" combinations? Are they identical? Which values do you find in the "target" column?
Answer: We see that while one pair is transmitting, the other two nodes listen and measure their links to the currently transmitting nodes (i.e. 6 pairs in each dataframe). If, for example, pair 3 1 transmits, node 1 measures link 1-3, node 3 measures link 3-1, and nodes 2 and 4 measure links 2-1 and 2-3, respectively 4-1 and 4-3. The 10 different values of the "target" column are shown above.
Task 3: Visualizing the measurement series of the dataset
We visualize the raw data with various heatmaps to visually validate the integrity of the data and to develop ideas for possible features. Here we show the data of frame df_x1_t1_trx_1_4 as an example.
End of explanation
"""
# compare different heatmaps
plt.figure(1, figsize=(12,10))
# nipy_spectral_r scheme
plt.subplot(221)
plt.title('trx_2_4_ifft')
plt.xlabel("ifft of frequency")
plt.ylabel("measurement")
ax = sns.heatmap(vals, xticklabels=200, yticklabels=20, vmin=0, vmax=1, cmap='nipy_spectral_r')
# terrain scheme
plt.subplot(222)
plt.title('trx_2_4_ifft')
plt.xlabel("ifft of frequency")
plt.ylabel("measurement")
ax = sns.heatmap(vals, xticklabels=200, yticklabels=20, vmin=0, vmax=1, cmap='terrain')
# Vega10 scheme
plt.subplot(223)
plt.title('trx_2_4_ifft')
plt.xlabel("ifft of frequency")
plt.ylabel("measurement")
ax = sns.heatmap(vals, xticklabels=200, yticklabels=20, vmin=0, vmax=1, cmap='Vega10')
# Wistia scheme
plt.subplot(224)
plt.title('trx_2_4_ifft')
plt.xlabel("ifft of frequency")
plt.ylabel("measurement")
ax = sns.heatmap(vals, xticklabels=200, yticklabels=20, vmin=0, vmax=1, cmap='Wistia')
# Adjust the subplot layout, because the logit one may take more space
# than usual, due to y-tick labels like "1 - 10^{-3}"
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,
wspace=0.2)
plt.show()
"""
Explanation: We look at how different color schemes highlight different characteristics of our raw data.
End of explanation
"""
# Iterate over the hdf data and create an interim data representation stored in data/interim/01_testmessungen.hdf
# The interim representation contains an additional binary class (binary_target - encoding 0=empty and 1=not empty)
# and a multi-class target (multi_target - encoding 0-9 for each possible class)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
interim_path = '../../data/interim/01_testmessungen.hdf'
def binary_mapper(df):
def map_binary(target):
if target.startswith('Empty'):
return 0
else:
return 1
df['binary_target'] = pd.Series(map(map_binary, df['target']))
def multiclass_mapper(df):
le.fit(df['target'])
df['multi_target'] = le.transform(df['target'])
for key in hdf.keys():
df = hdf.get(key)
binary_mapper(df)
multiclass_mapper(df)
df.to_hdf(interim_path, key)
hdf.close()
"""
Explanation: Task 3: Adjusting the ground-truth labels
End of explanation
"""
hdf = pd.HDFStore('../../data/interim/01_testmessungen.hdf')
df_x1_t1_trx_3_1 = hdf.get('/x1/t1/trx_3_1')
print("binary_target for measurement 5:", df_x1_t1_trx_3_1['binary_target'][5])
print("binary_target for measurement 120:", df_x1_t1_trx_3_1['binary_target'][120])
hdf.close()
"""
Explanation: Check the newly labelled dataframe "/x1/t1/trx_3_1". For measurement 5 at the beginning of the experiment we expect "Empty" (i.e. 0), and for measurement 120 in the middle of the experiment "Not Empty" (i.e. 1).
End of explanation
"""
from evaluation import *
from filters import *
from utility import *
from features import *
"""
Explanation: Task 4: A simple recognizer with hold-out validation
We follow the steps of Task 4 and test a simple recognizer (a generic sketch of the hold-out idea is given below).
End of explanation
"""
# raw data used to retrieve the target values
hdf = pd.HDFStore('../../data/raw/TestMessungen_NEU.hdf')
"""
Explanation: Opening the HDF store with pandas
End of explanation
"""
# generate datasets
tst = ['1','2','3']
tst_ds = []
for t in tst:
df_tst = hdf.get('/x1/t'+t+'/trx_3_1')
lst = df_tst.columns[df_tst.columns.str.contains('_ifft_')]
#df_tst_cl,_ = distortion_filter(df_tst_cl)
groups = get_trx_groups(df_tst)
df_std = rf_grouped(df_tst, groups=groups, fn=rf_std_single, label='target')
df_mean = rf_grouped(df_tst, groups=groups, fn=rf_mean_single)
df_p2p = rf_grouped(df_tst, groups=groups, fn=rf_ptp_single) # added p2p feature
df_all = pd.concat( [df_std, df_mean, df_p2p], axis=1 ) # added p2p feature
df_all = cf_std_window(df_all, window=4, label='target')
df_tst_sum = generate_class_label_presence(df_all, state_variable='target')
# remove index column
df_tst_sum = df_tst_sum[df_tst_sum.columns.values[~df_tst_sum.columns.str.contains('index')].tolist()]
print('Columns in Dataset:',t)
print(df_tst_sum.columns)
tst_ds.append(df_tst_sum.copy())
# holdout validation
print(hold_out_val(tst_ds, target='target', include_self=False, cl='rf', verbose=False, random_state=1))
"""
Explanation: Example recognizer
Preparing the datasets
End of explanation
"""
hdf.close()
"""
Explanation: Closing the HDF store
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/cams/cmip6/models/sandbox-2/atmoschem.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cams', 'sandbox-2', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: CAMS
Source ID: SANDBOX-2
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:43
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is atmospheric chemistry transport scheme turbulence coupled with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmospheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmospheric chemistry grid match the atmosphere grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
jArumugam/python-notes
|
P10Decorators.ipynb
|
mit
|
def func():
return 1
func()
"""
Explanation: Decorators
Decorators can be thought of as functions which modify the functionality of another function. They help to make your code shorter and more "Pythonic".
To properly explain decorators we will slowly build up from functions. Make sure to restart the Python kernel and the notebook so that this lecture looks the same on your own computer. So let's break down the steps:
Functions Review
End of explanation
"""
s = 'Global Variable'
def func():
print locals()
"""
Explanation: Scope Review
Remember from the nested statements lecture that Python uses Scope to know what a label is referring to. For example:
End of explanation
"""
print globals()
"""
Explanation: Remember that Python functions create a new scope, meaning the function has its own namespace to find variable names when they are mentioned within the function. We can check for local variables and global variables with the locals() and globals() functions. For example:
End of explanation
"""
print globals().keys()
"""
Explanation: Here we get back a dictionary of all the global variables; many of them are predefined in Python. So let's go ahead and look at the keys:
End of explanation
"""
globals()['s']
"""
Explanation: Note how s is there, the Global Variable we defined as a string:
End of explanation
"""
func()
"""
Explanation: Now let's run our function to check for any local variables in the func() (there shouldn't be any)
End of explanation
"""
def hello(name='Jose'):
return 'Hello '+name
hello()
"""
Explanation: Great! Now let's continue with building out the logic of what a decorator is. Remember that in Python everything is an object. That means functions are objects which can be assigned labels and passed into other functions. Let's start with some simple examples:
End of explanation
"""
greet = hello
greet
greet()
"""
Explanation: Assign a label to the function. Note that we are not using parentheses here because we are not calling the function hello; instead we are just putting it into the greet variable.
End of explanation
"""
del hello
hello()
greet()
"""
Explanation: This assignment is not attached to the original function:
End of explanation
"""
def hello(name='Jose'):
print 'The hello() function has been executed'
def greet():
return '\t This is inside the greet() function'
def welcome():
return "\t This is inside the welcome() function"
print greet()
print welcome()
print "Now we are back inside the hello() function"
hello()
welcome()
"""
Explanation: Functions within functions
Great! So we've seen how we can treat functions as objects; now let's see how we can define functions inside of other functions:
End of explanation
"""
def hello(name='Jose'):
def greet():
return '\t This is inside the greet() function'
def welcome():
return "\t This is inside the welcome() function"
if name == 'Jose':
return greet
else:
return welcome
x = hello()
"""
Explanation: Note how, due to scope, the welcome() function is not defined outside of the hello() function. Now let's learn about returning functions from within functions:
Returning Functions
End of explanation
"""
x
"""
Explanation: Now let's see what function is returned when we set x = hello(); note how the empty parentheses mean that name has been defined as Jose by default.
End of explanation
"""
print x()
"""
Explanation: Great! Now we can see how x is pointing to the greet function inside of the hello function.
End of explanation
"""
x = hello(name="Sam")
x()
print x()
"""
Explanation: Let's take a quick look at the code again.
In the if/else clause we are returning greet and welcome, not greet() and welcome().
This is because when you put a pair of parentheses after a function name, the function gets executed; whereas if you don't put parentheses after it, it can be passed around and assigned to other variables without being executed.
When we write x = hello(), hello() gets executed, and because name is Jose by default, the function greet is returned. If we change the statement to x = hello(name="Sam") then the welcome function will be returned. We can also do print hello()(), which prints the string returned by the inner greet() function (see the short check below).
End of explanation
"""
def hello():
return 'Hi Jose!'
def other(func):
print 'Other code would go here'
print func()
other(hello)
"""
Explanation: Functions as Arguments
Now let's see how we can pass functions as arguments into other functions:
End of explanation
"""
def new_decorator(func):
def wrap_func():
print "Code would be here, before executing the func"
func()
print "Code here will execute after the func()"
return wrap_func
def func_needs_decorator():
print "This function is in need of a Decorator"
func_needs_decorator()
# Reassign func_needs_decorator
func_needs_decorator = new_decorator(func_needs_decorator)
func_needs_decorator()
"""
Explanation: Great! Note how we can pass the functions as objects and then use them within other functions. Now we can get started with writing our first decorator:
Creating a Decorator
In the previous example we actually manually created a Decorator. Here we will modify it to make its use case clear:
End of explanation
"""
@new_decorator
def func_needs_decorator():
print "This function is in need of a Decorator"
func_needs_decorator()
"""
Explanation: So what just happened here? A decorator simply wrapped the function and modified its behavior. Now let's understand how we can rewrite this code using the @ symbol, which is what Python uses for Decorators:
End of explanation
"""
|
eshlykov/mipt-day-after-day
|
statistics/python/python_2.ipynb
|
unlicense
|
(1, 2, 3)
()
(1,)
"""
Explanation: Department of Discrete Mathematics, MIPT
Course on Mathematical Statistics
Nikita Volkov
Based on http://www.inp.nsk.su/~grozin/python/
Tuples
Tuples are very similar to lists, but they are immutable. As we have seen, using mutable objects can lead to unpleasant surprises.
Tuples are written in parentheses. If the number of elements is $>1$ or 0, this causes no problems. But how do you write a tuple with a single element? The construction (x) is perfectly legal anywhere in any expression and simply means x. To avoid ambiguity, a tuple with a single element x is written as (x,).
End of explanation
"""
t = 1, 2, 3
t
"""
Explanation: Parentheses are optional when the tuple is the only thing on the right-hand side of an assignment.
End of explanation
"""
len(t)
t[1]
u = 4, 5
t + u
2 * u
"""
Explanation: You can work with tuples the same way as with lists; you just cannot modify them.
End of explanation
"""
x, y = 1, 2
x
y
"""
Explanation: On the left-hand side of an assignment you can write several variables separated by commas, with a tuple on the right-hand side. This assigns values to several variables simultaneously.
End of explanation
"""
x, y = y, x
x
y
"""
Explanation: First the tuple on the right-hand side is evaluated using the old values of the variables (before this assignment). Then all the variables are simultaneously assigned the new values from this tuple. This is why it can be used to swap the values of two variables.
End of explanation
"""
s = {0, 1, 0, 5, 5, 1, 0}
s
"""
Explanation: This is simpler than in other languages, where you have to use a third variable.
Sets
Following mathematical notation, sets are written in curly braces. An element can occur in a set only once. The order of elements in a set does not matter, so Python may display them in a different order than they were entered. Set elements can be of any (hashable) type.
End of explanation
"""
1 in s, 2 in s, 1 not in s
"""
Explanation: Does an element belong to the set?
End of explanation
"""
l = [0, 1, 0, 5, 5, 1, 0]
set(l)
set('абба')
"""
Explanation: A set can be built from a list, a string, or any object that can be used in a for loop (an iterable).
End of explanation
"""
set()
"""
Explanation: How do you write an empty set? Only like this.
End of explanation
"""
{}
"""
Explanation: The point is that curly braces in Python are also used for dictionaries (we will discuss them in the next section). When there is at least one element, a dictionary can be distinguished from a set. But empty curly braces mean an empty dictionary.
End of explanation
"""
len(s)
for x in s:
print(x)
"""
Explanation: You can work with sets much like with lists.
End of explanation
"""
{i for i in range(5)}
"""
Explanation: This is a set comprehension.
End of explanation
"""
s2 = s | {2, 5}
s2
"""
Explanation: Union of sets.
End of explanation
"""
s < s2, s > s2, s <= s2, s >= s2
"""
Explanation: Checking whether one set is a subset of another.
End of explanation
"""
s2 & {1, 2, 3}
"""
Explanation: Intersection.
End of explanation
"""
s2 - {1,3,5}
s2 ^ {1,3,5}
"""
Explanation: Difference and symmetric difference.
End of explanation
"""
s2.add(4)
s2
s2.remove(1)
s2
"""
Explanation: Sets (like lists) are mutable objects. Adding an element to a set and removing one from it.
End of explanation
"""
s2 |= {1, 2}
s2
"""
Explanation: As with +=, a set-theoretic operation can be combined with assignment.
End of explanation
"""
x = set([1, 4, 2, 4, 2, 1, 3, 4])
print(x)
x.add(5) # add an element
print(x)
x.pop() # remove and return an arbitrary element
print(x)
print(x.intersection(set([2, 4, 6, 8]))) # Intersection
print(x.difference(set([2, 4, 6, 8]))) # Difference
print(x.union(set([2, 4, 6, 8]))) # Union
print(x.symmetric_difference(set([2, 4, 6, 8]))) # Symmetric difference
print(x.issubset(set([2, 4, 6, 8]))) # Is it a subset?
print(x.issubset(set(list(range(10)))))
print(x.issuperset(set([2, 4, 6, 8]))) # Is it a superset?
print(x.issuperset(set([2, 4])))
"""
Explanation: The operations shown above can also be written in a different style.
End of explanation
"""
d = {'one': 1, 'two': 2, 'three': 3}
d
"""
Explanation: There are also immutable sets. This data type is called frozenset. Operations on such sets are the same as on ordinary ones, except that they cannot be modified (no adding or removing of elements); see the short sketch just below.
Dictionaries
A dictionary contains key-value pairs (their order is not significant). It is one of the most useful and frequently used data types in Python.
End of explanation
"""
d['two']
d['four']
"""
Explanation: You can look up the value corresponding to a key. Dictionaries are implemented as hash tables, so lookups are very efficient even in large dictionaries. In low-level languages (e.g. C), building hash tables requires external libraries and a noticeable amount of code. In scripting languages (Perl, Python, PHP) they are built into the language and are very easy to use.
End of explanation
"""
'one' in d, 'four' in d
"""
Explanation: You can check whether a given key is in the dictionary.
End of explanation
"""
d['one'] = -1
d
d['four'] = 4
d
"""
Explanation: You can assign values both to existing keys and to missing ones (the latter are added to the dictionary).
End of explanation
"""
len(d)
"""
Explanation: The length is the number of keys in the dictionary.
End of explanation
"""
del d['two']
d
"""
Explanation: A key can be deleted from the dictionary.
End of explanation
"""
d.get('one'), d.get('five')
d.get('one', 0), d.get('five', 0)
"""
Explanation: The get method, when called with a missing key, does not raise an error but returns the special object None. It is used whenever you need to indicate that an object is absent (to some extent it is analogous to null in C). If you pass get a second argument, a default value, that value is returned instead of None.
End of explanation
"""
d = {}
d
d['zero'] = 0
d
d['one'] = 1
d
"""
Explanation: Dictionaries are usually built incrementally: start with an empty dictionary and then add keys with their values.
End of explanation
"""
d = {i: i ** 2 for i in range(5)}
d
"""
Explanation: And this is a dictionary comprehension.
End of explanation
"""
d = {}
d[0, 0] = 1
d[0, 1] = 0
d[1, 0] = 0
d[1, 1] = -1
d
d[0, 0] + d[1, 1]
"""
Explanation: Keys can be any immutable objects, for example integers, strings, or tuples.
End of explanation
"""
d = {'one': 1, 'two': 2, 'three': 3}
for x in d:
print(x, ' ', d[x])
"""
Explanation: Dictionaries, like lists, can be used in for loops. The loop iterates over the keys present in the dictionary (in some unpredictable order).
End of explanation
"""
d.keys(), d.values(), d.items()
for x in sorted(d.keys()):
print(x, ' ', d[x])
for x, y in d.items():
print(x, ' ', y)
del x, y
"""
Explanation: The keys method returns the list of keys, the values method returns the list of corresponding values (in the same order), and the items method returns the list of (key, value) pairs. Strictly speaking, these are not lists but special view objects that can be used in for loops or turned into lists with the list function. If you want to loop over the keys in sorted order, you can use sorted(d.keys()).
End of explanation
"""
bool(False), bool(True)
bool(None)
bool(0), bool(123)
bool(''), bool(' ')
bool([]), bool([0])
bool(set()), bool({0})
bool({}), bool({0: 0})
"""
Explanation: What is truth? And what is falsehood? Let us approach this philosophical question experimentally.
End of explanation
"""
def f():
pass
f
pass
type(f)
r = f()
print(r)
"""
Explanation: Expressions in boolean positions (after if, elif and while) implicitly have the bool function applied to them. Some objects are interpreted as False: the number 0, an empty string, an empty list, an empty set, an empty dictionary, None, and a few others. All other objects are interpreted as True. In if or while statements a list, a dictionary, or something similar is very often used directly, meaning do something if this list (dictionary, etc.) is not empty.
Note that the floating-point number 0.0 is also interpreted as False. Relying on this is strongly discouraged: floating-point computations are always approximate, and you never know whether you will get 0.0 or 1.234E-12.
Better write if abs(x) < epsilon: instead (a short illustration follows below).
Functions
This is the simplest function in the world. It has no parameters, does nothing, and returns nothing. The pass statement means "do nothing"; it is used where a statement is syntactically required but there is nothing to do (after if or elif, after def, etc.).
End of explanation
"""
def f(x):
return x + 1
f(1), f(1.0)
f('abc')
"""
Explanation: This function is more useful: it has a parameter and returns something.
End of explanation
"""
def f(x, a=0, b='b'):
print(x, ' ', a, ' ', b)
f(1.0)
f(1.0, 1)
f(1.0, b='a')
f(1.0, b='a', a=2)
f(a=2, x=2.0)
"""
Explanation: If a function has many parameters, it is convenient to be able to call it more simply in the most common cases. To do this, default values can be given to some parameters in the def statement (they must come at the end of the parameter list). When calling, all required parameters (those without default values) must be provided, while the optional ones may be omitted. If you pass parameters in the form name=value, you can do so in any order. This is much more convenient than remembering whether a given parameter is the eighth or the ninth when calling some complicated function. Note that in the name=value construction no spaces are put around the = sign.
End of explanation
"""
a = 1
def f():
a = 2
return a
f()
a
"""
Explanation: Variables used inside a function are local. Assigning to them does not change the values of global variables with the same names.
End of explanation
"""
def f():
global a
a = 2
return a
f()
a
"""
Explanation: If a function needs to modify (assign to) some global variables, they must be declared global.
End of explanation
"""
def f(x, l):
l.append(x)
return l
l = [1, 2, 3]
f(0, l)
l
"""
Explanation: A namespace establishes a correspondence between variable names and the objects that are their values. There is the namespace of a function's local variables, the namespace of the program's global variables, and the namespace of Python's built-in functions. Namespaces are implemented using dictionaries.
If a mutable object is passed to a function as an argument and the function modifies it, the change is visible outside after the call. We have already discussed this situation, where two variables (here a global variable and a function parameter) point to the same mutable object.
End of explanation
"""
def f(x, l=[]):
l.append(x)
return l
f(0)
f(1)
f(2)
"""
Explanation: If a mutable object is used as a default value for a parameter, this can lead to unexpected consequences. Here, executing the function definition creates two objects: the function itself and a list object, initially empty, which is used to initialize the parameter when the function is called. The function modifies this object. On the next call it is used to initialize the parameter again, but its value has already changed.
End of explanation
"""
def f(x, l=None):
if l is None:
l = []
l.append(x)
return l
f(0)
f(1)
f(2, [0, 1])
"""
Explanation: To avoid such surprises, it is better to use only immutable objects as default values.
End of explanation
"""
def f(x, *l):
print(x, ' ', l)
f(0)
f(0, 1)
f(0, 1, 2)
f(0, 1, 2, 3)
"""
Explanation: This function has one required parameter plus an arbitrary number of optional ones. When called, all such extra arguments are collected into a tuple, which the function may use as it sees fit.
End of explanation
"""
l=[1, 2]
c=('a', 'b')
f(*l, 0, *c)
"""
Explanation: The star can also be used when calling a function. You can build a list (or tuple) of arguments in advance and then call the function with those arguments.
End of explanation
"""
(*l, 0, *c)
[*l, 0, *c]
[*l, 3]
"""
Explanation: Such unpacking of lists and tuples can be used not only when calling a function, but also when constructing a list or a tuple.
End of explanation
"""
def f(x, y, **d):
print(x, ' ', y, ' ', d)
f(0, 1, foo=2, bar=3)
"""
Explanation: This function has two required parameters plus an arbitrary number of optional keyword parameters. When calling, they must be given in the form name=value. They are collected into a dictionary, which the function may use as it sees fit.
End of explanation
"""
d={'foo': 2, 'bar': 3}
f(0, 1, **d)
d['x'] = 0
d['y'] = 1
f(**d)
"""
Explanation: The double star can also be used when calling a function. You can build a dictionary of arguments in advance, mapping parameter names to values, and then call the function with these keyword arguments.
End of explanation
"""
def f(**d):
return d
f(x=0, y=1, z=2)
"""
Explanation: Here is a curious way to build a dictionary with string keys.
End of explanation
"""
d={0: 'a', 1: 'b'}
{**d, 2: 'c'}
"""
Explanation: The double star can be used not only when calling a function, but also when constructing a dictionary.
End of explanation
"""
d1 = {0: 'a', 1: 'b'}
d2 = {2: 'c', 3: 'd'}
{**d1, **d2}
"""
Explanation: Here is a simple way to merge two dictionaries.
End of explanation
"""
d2 = {1: 'B', 2: 'C'}
{**d1, 3: 'D', **d2, 3: 'd'}
"""
Explanation: If the same key occurs several times, each later value overwrites the previous one.
End of explanation
"""
def f(x, y, *l, **d):
print(x, ' ', y, ' ', l, ' ', d)
f(0, 1, 2, 3, foo=4, bar=5)
"""
Explanation: This is the most general form of a function's parameter list. First come the required parameters (two in this case), then an arbitrary number of optional ones (collected into a tuple at call time), and then an arbitrary number of keyword parameters (collected into a dictionary at call time).
End of explanation
"""
def f0(x):
return x + 2
def f1(x):
return 2 * x
l = [f0, f1]
l
x = 2.0
n = 1
l[n](x)
"""
Explanation: In Python, functions are first-class citizens. They can appear anywhere objects of other types are allowed: as list elements, as dictionary values, and so on.
End of explanation
"""
def fib(n):
    '''computes the n-th Fibonacci number'''
assert type(n) is int and n>0
if n <= 2:
return 1
x, y = 1, 1
for i in range(n - 2):
x, y = y, x + y
return y
fib.__doc__
help(fib)
"""
Explanation: If you are writing a function that will not just be called once and forgotten forever, it needs documentation explaining what it does. For this, a string is written immediately after the def line. It is called a docstring, and it is preserved when the Python source is compiled to bytecode (unlike comments, which are discarded). Usually this string is enclosed in triple quotes and spans several lines. The docstring is available as the function's __doc__ attribute and is used by the help function. Here is an example of a well-written function that computes the $n$-th Fibonacci number.
The assert statement is convenient for checking the types of arguments passed to a function. If its condition is true, everything is fine and it does nothing; if it is false, an error message is produced.
End of explanation
"""
fib?
[fib(n) for n in range(1, 10)]
fib(-1)
fib(2.0)
"""
Explanation: In a Jupyter notebook the documentation can be accessed in a more convenient way
End of explanation
"""
x = zip(range(5), range(0, 10, 2))
print(list(x))
"""
Explanation: Some useful functions
zip pairs up the elements of two sequences of the same length
End of explanation
"""
x = map(lambda tmp: tmp ** 2, range(5))
print(list(x))
"""
Explanation: map applies a function to each element of a sequence
End of explanation
"""
x = list(zip([7, 3, 4, 4, 5, 3, 9], ['a', 'n', 'n', 'a', 'k', 'n', 'a']))
# first sort by letter alphabetically, then sort by number in descending order
x = sorted(x, key=lambda element: (element[1], -element[0]))
print(list(x))
"""
Explanation: sorted --- sorting
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/cnrm-cerfacs/cmip6/models/cnrm-cm6-1-hr/seaice.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cnrm-cerfacs', 'cnrm-cm6-1-hr', 'seaice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Seaice
MIP Era: CMIP6
Institute: CNRM-CERFACS
Source ID: CNRM-CM6-1-HR
Topic: Seaice
Sub-Topics: Dynamics, Thermodynamics, Radiative Processes.
Properties: 80 (63 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:52
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of sea ice model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Variables
List of prognostic variables in the sea ice model.
2.1. Prognostic
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the sea ice component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Ocean Freezing Point Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant seawater freezing point, specify this value.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Target
Is Required: TRUE Type: STRING Cardinality: 1.1
What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Simulations
Is Required: TRUE Type: STRING Cardinality: 1.1
*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Metrics Used
Is Required: TRUE Type: STRING Cardinality: 1.1
List any observed metrics used in tuning model/parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.5. Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Which variables were changed during the tuning process?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required: FALSE Type: ENUM Cardinality: 0.N
What values were specified for the following parameters, if used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Additional Parameters
Is Required: FALSE Type: STRING Cardinality: 0.N
If you have any additional parameterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.N
General overview description of any key assumptions made in this model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. On Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Missing Processes
Is Required: TRUE Type: STRING Cardinality: 1.N
List any key processes missing in this model configuration. Provide full details where this affects the CMIP6 diagnostic sea ice variables.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Provide a general description of conservation methodology.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Properties
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in sea ice by the numerical schemes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
For each conserved property, specify the output variables which close the related budgets, as a comma separated list. For example: Conserved property, variable1, variable2, variable3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.4. Was Flux Correction Used
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does conservation involve flux correction?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Corrected Conserved Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List any variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required: TRUE Type: ENUM Cardinality: 1.1
Grid on which sea ice is horizontally discretised?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the type of sea ice grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the advection scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Thermodynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model thermodynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.5. Dynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model dynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional horizontal discretisation details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required: TRUE Type: ENUM Cardinality: 1.N
What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.2. Number Of Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using multi-layers specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional vertical grid details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories ?
11.1. Has Mulitple Categories
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Set to true if the sea ice model has multiple sea ice categories.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Number Of Categories
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using sea ice categories specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Category Limits
Is Required: TRUE Type: STRING Cardinality: 1.1
If using sea ice categories specify each of the category limits.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Ice Thickness Distribution Scheme
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the sea ice thickness distribution scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Other
Is Required: FALSE Type: STRING Cardinality: 0.1
If the sea ice model does not use sea ice categories, specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD but an assumed distribution, and fluxes are computed accordingly.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow on ice represented in this model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12.2. Number Of Snow Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels of snow on ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Snow Fraction
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how the snow fraction on sea ice is determined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.4. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional details related to snow on ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of horizontal advection of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Transport In Thickness Space
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice transport in thickness space (i.e. in thickness categories)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Ice Strength Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Which method of sea ice strength formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Redistribution
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which processes can redistribute sea ice (including thickness)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Rheology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Rheology, what is the ice deformation formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the energy formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Thermal Conductivity
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of thermal conductivity is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.3. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of heat diffusion?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.4. Basal Heat Flux
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method by which basal ocean heat flux is handled?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.5. Fixed Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.6. Heat Content Of Precipitation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which the heat content of precipitation is handled.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.7. Precipitation Effects On Salinity
Is Required: FALSE Type: STRING Cardinality: 0.1
If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which new sea ice is formed in open water.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Ice Vertical Growth And Melt
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs the vertical growth and melt of sea ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Ice Lateral Melting
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice lateral melting?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.4. Ice Surface Sublimation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs sea ice surface sublimation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.5. Frazil Ice
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of frazil ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16.2. Sea Ice Salinity Thermal Impacts
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does sea ice salinity impact the thermal properties of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the mass transport of salt calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value specify this value in PSU?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the thermodynamic calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value specify this value in PSU?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice thickness distribution represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice floe-size represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Please provide further details on any parameterisation of floe-size.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are melt ponds included in the sea ice model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.2. Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What method of melt pond formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.3. Impacts
Is Required: TRUE Type: ENUM Cardinality: 1.N
What do melt ponds have an impact on?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has a snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Snow Aging Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.3. Has Snow Ice Formation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has snow ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.4. Snow Ice Formation Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow ice formation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.5. Redistribution
Is Required: TRUE Type: STRING Cardinality: 1.1
What is the impact of ridging on snow cover?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.6. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the heat diffusion through snow methodology in sea ice thermodynamics?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used to handle surface albedo.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Ice Radiation Transmission
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method by which solar radiation through sea ice is handled.
End of explanation
"""
|
tensorflow/docs-l10n
|
site/en-snapshot/probability/examples/Gaussian_Process_Latent_Variable_Model.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Probability Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
import numpy as np
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
tfd = tfp.distributions
tfk = tfp.math.psd_kernels
%pylab inline
"""
Explanation: Gaussian Process Latent Variable Models
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/probability/examples/Gaussian_Process_Latent_Variable_Model"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Latent_Variable_Model.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Latent_Variable_Model.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Latent_Variable_Model.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Latent variable models attempt to capture hidden structure in high dimensional
data. Examples include principal component analysis (PCA) and factor analysis.
Gaussian processes are "non-parametric" models which can flexibly capture local
correlation structure and uncertainty. The Gaussian process latent variable
model (Lawrence, 2004) combines these concepts.
Background: Gaussian Processes
A Gaussian process is any collection of random variables such that the marginal
distribution over any finite subset is a multivariate normal distribution. For
a detailed look at GPs in the context of regression, check out
Gaussian Process Regression in TensorFlow Probability.
We use a so-called index set to label each of the random variables in the
collection that the GP comprises. In the case of a finite index set, we just
get a multivariate normal. GP's are most interesting, though, when we consider
infinite collections. In the case of index sets like $\mathbb{R}^D$, where we
have a random variable for every point in $D$-dimensional space, the GP can be
thought of as a distribution over random functions. A single draw from such a
GP, if it could be realized, would assign a (jointly normally-distributed) value
to every point in $\mathbb{R}^D$. In this colab, we'll focus on GP's over some
$\mathbb{R}^D$.
Normal distributions are completely determined by their first and second order
statistics -- indeed, one way to define the normal distribution is as one whose
higher-order cumulants are all zero. This is the case for GP's, too: we completely
specify a GP by describing the mean and covariance. Recall that for
finite-dimensional multivariate normals, the mean is a vector and the covariance is a square,
symmetric positive-definite matrix. In the infinite-dimensional GP, these
structures generalize to a mean function $m : \mathbb{R}^D \to \mathbb{R}$,
defined at each point of the index set, and a covariance "kernel*" function,
$k : \mathbb{R}^D \times \mathbb{R}^D \to \mathbb{R}$. The kernel
function is required to be positive-definite, which
essentially says that, restricted to a finite set of points, it yields a
positive-definite matrix.
Most of the structure of a GP derives from its covariance kernel function --
this function describes how the values of sampled functions vary across nearby
(or not-so-nearby) points. Different covariance functions encourage different
degrees of smoothness. One commonly used kernel function is the "exponentiated
quadratic" (a.k.a., "gaussian", "squared exponential" or "radial basis
function"), $k(x, x') = \sigma^2 e^{(x - x^2) / \lambda^2}$. Other examples
are outlined on David Duvenaud's kernel cookbook page, as well
as in the canonical text Gaussian Processes for Machine Learning.
<sub>* With an infinite index set, we also require a consistency condition. Since
the definition of the GP is in terms of finite marginals, we must require that
these marginals are consistent irrespective of the order in which the
marginals are taken. This is a somewhat advanced topic in the theory of
stochastic processes, out of scope for this tutorial; suffice it to say things
work out ok in the end!</sub>
Applying GPs: Regression and Latent Variable Models
One way we can use GPs is for regression: given a bunch of observed data in the
form of inputs $\{x_i\}_{i=1}^N$ (elements of the index set) and observations
$\{y_i\}_{i=1}^N$, we can use these to form a posterior predictive distribution
at a new set of points $\{x_j^*\}_{j=1}^M$. Since the distributions are all
Gaussian, this boils down to some straightforward linear algebra (but note: the
requisite computations have runtime cubic* in the number of data points and
require space quadratic in the number of data points -- this is a major limiting
factor in the use of GPs and much current research focuses on computationally
viable alternatives to exact posterior inference). We cover GP regression in more
detail in the GP Regression in TFP colab.
Another way we can use GPs is as a latent variable model: given a collection of
high-dimensional observations (e.g., images), we can posit some low-dimensional
latent structure. We assume that, conditional on the latent structure, the large
number of outputs (pixels in the image) are independent of each other. Training
in this model consists of
1. optimizing model parameters (kernel function parameters as well as, e.g.,
observation noise variance), and
2. finding, for each training observation (image), a corresponding point
location in the index set.
All of the optimization can be done by maximizing the marginal log likelihood of
the data.
Imports
End of explanation
"""
# Load the MNIST data set and isolate a subset of it.
(x_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data()
N = 1000
small_x_train = x_train[:N, ...].astype(np.float64) / 256.
small_y_train = y_train[:N]
"""
Explanation: Load MNIST Data
End of explanation
"""
# Create some trainable model parameters. We will constrain them to be strictly
# positive when constructing the kernel and the GP.
unconstrained_amplitude = tf.Variable(np.float64(1.), name='amplitude')
unconstrained_length_scale = tf.Variable(np.float64(1.), name='length_scale')
unconstrained_observation_noise = tf.Variable(np.float64(1.), name='observation_noise')
# We need to flatten the images and, somewhat unintuitively, transpose from
# shape [1000, 784] to [784, 1000]. This is because the 784 pixels will be
# treated as *independent* conditioned on the latent inputs, meaning we really
# have a batch of 784 GP's with N=1000 index_points.
observations_ = small_x_train.reshape(N, -1).transpose()
# Create a collection of N 2-dimensional index points that will represent our
# latent embeddings of the data. (Lawrence, 2004) prescribes initializing these
# with PCA, but a random initialization actually gives not-too-bad results, so
# we use this for simplicity. For a fun exercise, try doing the
# PCA-initialization yourself!
init_ = np.random.normal(size=(N, 2))
latent_index_points = tf.Variable(init_, name='latent_index_points')
"""
Explanation: Prepare trainable variables
We'll be jointly training 3 model parameters as well as the latent inputs.
End of explanation
"""
# Create our kernel and GP distribution
EPS = np.finfo(np.float64).eps
def create_kernel():
amplitude = tf.math.softplus(EPS + unconstrained_amplitude)
length_scale = tf.math.softplus(EPS + unconstrained_length_scale)
kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)
return kernel
def loss_fn():
observation_noise_variance = tf.math.softplus(
EPS + unconstrained_observation_noise)
gp = tfd.GaussianProcess(
kernel=create_kernel(),
index_points=latent_index_points,
observation_noise_variance=observation_noise_variance)
log_probs = gp.log_prob(observations_, name='log_prob')
return -tf.reduce_mean(log_probs)
trainable_variables = [unconstrained_amplitude,
unconstrained_length_scale,
unconstrained_observation_noise,
latent_index_points]
optimizer = tf.optimizers.Adam(learning_rate=1.0)
@tf.function(autograph=False, jit_compile=True)
def train_model():
with tf.GradientTape() as tape:
loss_value = loss_fn()
grads = tape.gradient(loss_value, trainable_variables)
optimizer.apply_gradients(zip(grads, trainable_variables))
return loss_value
"""
Explanation: Construct model and training ops
End of explanation
"""
# Initialize variables and train!
num_iters = 100
log_interval = 20
lips = np.zeros((num_iters, N, 2), np.float64)
for i in range(num_iters):
loss = train_model()
lips[i] = latent_index_points.numpy()
if i % log_interval == 0 or i + 1 == num_iters:
print("Loss at step %d: %f" % (i, loss))
"""
Explanation: Train and plot the resulting latent embeddings
End of explanation
"""
# Plot the latent locations before and after training
plt.figure(figsize=(7, 7))
plt.title("Before training")
plt.grid(False)
plt.scatter(x=init_[:, 0], y=init_[:, 1],
c=y_train[:N], cmap=plt.get_cmap('Paired'), s=50)
plt.show()
plt.figure(figsize=(7, 7))
plt.title("After training")
plt.grid(False)
plt.scatter(x=lips[-1, :, 0], y=lips[-1, :, 1],
c=y_train[:N], cmap=plt.get_cmap('Paired'), s=50)
plt.show()
"""
Explanation: Plot results
End of explanation
"""
# We'll draw samples at evenly spaced points on a 10x10 grid in the latent
# input space.
sample_grid_points = 10
grid_ = np.linspace(-4, 4, sample_grid_points).astype(np.float64)
# Create a 10x10 grid of 2-vectors, for a total shape [10, 10, 2]
grid_ = np.stack(np.meshgrid(grid_, grid_), axis=-1)
# This part's a bit subtle! What we defined above was a batch of 784 (=28x28)
# independent GP distributions over the input space. Each one corresponds to a
# single pixel of an MNIST image. Now what we'd like to do is draw 100 (=10x10)
# *independent* samples, each one separately conditioned on all the observations
# as well as the learned latent input locations above.
#
# The GP regression model below will define a batch of 784 independent
# posteriors. We'd like to get 100 independent samples each at a different
# latent index point. We could loop over the points in the grid, but that might
# be a bit slow. Instead, we can vectorize the computation by tacking on *even
# more* batch dimensions to our GaussianProcessRegressionModel distribution.
# In the below grid_ shape, we have concatenated
# 1. batch shape: [sample_grid_points, sample_grid_points, 1]
# 2. number of examples: [1]
# 3. number of latent input dimensions: [2]
# The `1` in the batch shape will broadcast with 784. The final result will be
# samples of shape [10, 10, 784, 1]. The `1` comes from the "number of examples"
# and we can just `np.squeeze` it off.
grid_ = grid_.reshape(sample_grid_points, sample_grid_points, 1, 1, 2)
# Create the GPRegressionModel instance which represents the posterior
# predictive at the grid of new points.
gprm = tfd.GaussianProcessRegressionModel(
kernel=create_kernel(),
# Shape [10, 10, 1, 1, 2]
index_points=grid_,
# Shape [1000, 2]. 1000 2 dimensional vectors.
observation_index_points=latent_index_points,
# Shape [784, 1000]. A batch of 784 1000-dimensional observations.
observations=observations_)
"""
Explanation: Construct predictive model and sampling ops
End of explanation
"""
samples = gprm.sample()
# Plot the grid of samples at new points. We do a bit of tweaking of the samples
# first, squeezing off extra 1-shapes and normalizing the values.
samples_ = np.squeeze(samples.numpy())
samples_ = ((samples_ -
samples_.min(-1, keepdims=True)) /
(samples_.max(-1, keepdims=True) -
samples_.min(-1, keepdims=True)))
samples_ = samples_.reshape(sample_grid_points, sample_grid_points, 28, 28)
samples_ = samples_.transpose([0, 2, 1, 3])
samples_ = samples_.reshape(28 * sample_grid_points, 28 * sample_grid_points)
plt.figure(figsize=(7, 7))
ax = plt.subplot()
ax.grid(False)
ax.imshow(-samples_, interpolation='none', cmap='Greys')
plt.show()
"""
Explanation: Draw samples conditioned on the data and latent embeddings
We sample at 100 points on a 2-d grid in the latent space.
End of explanation
"""
|
geektoni/shogun
|
doc/ipython-notebooks/ica/bss_audio.ipynb
|
bsd-3-clause
|
import numpy as np
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from scipy.io import wavfile
from scipy.signal import resample
import shogun as sg
def load_wav(filename,samplerate=44100):
# load file
rate, data = wavfile.read(filename)
# convert stereo to mono
if len(data.shape) > 1:
data = data[:,0]/2 + data[:,1]/2
# re-interpolate samplerate
ratio = float(samplerate) / float(rate)
data = resample(data, int(len(data) * ratio))
return samplerate, data.astype(np.int16)
"""
Explanation: Blind Source Separation with the Shogun Machine Learning Toolbox
By Kevin Hughes
This notebook illustrates <a href="http://en.wikipedia.org/wiki/Blind_signal_separation">Blind Source Separation</a> (BSS) on audio signals using <a href="http://en.wikipedia.org/wiki/Independent_component_analysis">Independent Component Analysis</a> (ICA) in Shogun. We generate a mixed signal and try to separate it out using Shogun's implementation of ICA & BSS called <a href="http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1Jade.html">JADE</a>.
My favorite example of this problem is known as the cocktail party problem, where a number of people are talking simultaneously and we want to separate each person's speech so we can listen to it separately. The caveat with this type of approach is that we need as many mixtures as we have source signals, or, in terms of the cocktail party problem, as many microphones as there are people talking in the room.
Let's get started. This example is going to be in Python, and the first thing we need to do is load some audio files. To make things a bit easier further on in this example, I'm going to wrap the basic scipy wav file reader and add some additional functionality. First, I added a case that converts stereo wav files back into mono wav files; second, the loader takes a desired sample rate and resamples the input to match. This is important because when we mix the audio signals they need to have the same sample rate.
End of explanation
"""
from IPython.display import Audio
from IPython.display import display
def wavPlayer(data, rate):
display(Audio(data, rate=rate))
"""
Explanation: Next we're going to need a way to play the audio files we're working with (otherwise this wouldn't be very exciting at all, would it?). In the next bit of code I've defined a wavPlayer function that takes the signal and the sample rate and then creates a nice HTML5 web player right inline with the notebook.
End of explanation
"""
# change to the shogun-data directory
import os
os.chdir(os.path.join(SHOGUN_DATA_DIR, 'ica'))
%matplotlib inline
import matplotlib.pyplot as plt
# load
fs1,s1 = load_wav('tbawht02.wav') # Terran Battlecruiser - "Good day, commander."
# plot
plt.figure(figsize=(6.75,2))
plt.plot(s1)
plt.title('Signal 1')
plt.show()
# player
wavPlayer(s1, fs1)
"""
Explanation: Now that we can load and play wav files we actually need some wav files! I found the sounds from Starcraft to be a great source of wav files because they're short, interesting and remind me of my childhood. You can download Starcraft wav files here: http://wavs.unclebubby.com/computer/starcraft/ among other places on the web or from your Starcraft install directory (come on, I know it's still there).
Another good source of data (although, let's be honest, less cool) is ICA central and various other more academic data sets: http://perso.telecom-paristech.fr/~cardoso/icacentral/base_multi.html. Note that for lots of these data sets the data will be mixed already so you'll be able to skip the next few steps.
Okay, let's load up an audio file. I chose the Terran Battlecruiser saying "Good Day Commander". In addition to creating a wavPlayer, I also plotted the data using Matplotlib (and tried my best to have the graph length match the HTML player length). Have a listen!
End of explanation
"""
# load
fs2,s2 = load_wav('TMaRdy00.wav') # Terran Marine - "You want a piece of me, boy?"
# plot
plt.figure(figsize=(6.75,2))
plt.plot(s2)
plt.title('Signal 2')
plt.show()
# player
wavPlayer(s2, fs2)
"""
Explanation: Now let's load a second audio clip:
End of explanation
"""
# load
fs3,s3 = load_wav('PZeRdy00.wav') # Protoss Zealot - "My life for Aiur!"
# plot
plt.figure(figsize=(6.75,2))
plt.plot(s3)
plt.title('Signal 3')
plt.show()
# player
wavPlayer(s3, fs3)
"""
Explanation: and a third audio clip:
End of explanation
"""
# Adjust for different clip lengths
fs = fs1
length = max([len(s1), len(s2), len(s3)])
s1 = np.resize(s1, (length,1))
s2 = np.resize(s2, (length,1))
s3 = np.resize(s3, (length,1))
S = (np.c_[s1, s2, s3]).T
# Mixing Matrix
#A = np.random.uniform(size=(3,3))
#A = A / A.sum(axis=0)
A = np.array([[1, 0.5, 0.5],
[0.5, 1, 0.5],
[0.5, 0.5, 1]])
print('Mixing Matrix:')
print(A.round(2))
# Mix Signals
X = np.dot(A,S)
# Mixed Signal i
for i in range(X.shape[0]):
plt.figure(figsize=(6.75,2))
plt.plot((X[i]).astype(np.int16))
plt.title('Mixed Signal %d' % (i+1))
plt.show()
wavPlayer((X[i]).astype(np.int16), fs)
"""
Explanation: Now we've got our audio files loaded up into our example program. The next thing we need to do is mix them together!
First, another nuance - what if the audio clips aren't the same length? The solution I came up with for this was to simply resize them all to the length of the longest signal; note that np.resize pads the shorter clips by repeating them, so every signal ends up the same length.
The signals are mixed by creating a mixing matrix $A$ and taking the dot product of $A$ with the signals $S$.
Afterwards I plot the mixed signals and create the wavPlayers, have a listen!
End of explanation
"""
# Convert to features for shogun
mixed_signals = sg.create_features((X).astype(np.float64))
"""
Explanation: Now before we can work on separating these signals we need to get the data ready for Shogun, thankfully this is pretty easy!
End of explanation
"""
# Separating with JADE
jade = sg.create_transformer('Jade')
jade.fit(mixed_signals)
signals = jade.transform(mixed_signals)
S_ = signals.get('feature_matrix')
A_ = jade.get('mixing_matrix')
A_ = A_ / A_.sum(axis=0)
print('Estimated Mixing Matrix:')
print(A_)
"""
Explanation: Now lets unmix those signals!
In this example I'm going to use an Independent Component Analysis (ICA) algorithm called JADE. JADE is one of the ICA algorithms available in Shogun and it works by performing Approximate Joint Diagonalization (AJD) on a 4th order cumulant tensor. I'm not going to go into a lot of detail on how JADE works behind the scenes but here is the reference for the original paper:
Cardoso, J. F., & Souloumiac, A. (1993). Blind beamforming for non-Gaussian signals. In IEE Proceedings F (Radar and Signal Processing) (Vol. 140, No. 6, pp. 362-370). IET Digital Library.
Shogun also has several other ICA algorithms including the Second Order Blind Identification (SOBI) algorithm, FFSep, JediSep, UWedgeSep and FastICA. All of the algorithms inherit from the ICAConverter base class and share some common methods for setting an initial guess for the mixing matrix, retrieving the final mixing matrix and getting/setting the number of iterations to run and the desired convergence tolerance. Some of the algorithms have additional getters for intermediate calculations, for example Jade has a method for returning the 4th order cumulant tensor while the "Sep" algorithms have a getter for the time lagged covariance matrices. Check out the source code on GitHub (https://github.com/shogun-toolbox/shogun) or the Shogun docs (http://www.shogun-toolbox.org/doc/en/latest/annotated.html) for more details!
End of explanation
"""
# Show separation results
# Separated Signal i
gain = 4000
for i in range(S_.shape[0]):
plt.figure(figsize=(6.75,2))
plt.plot((gain*S_[i]).astype(np.int16))
plt.title('Separated Signal %d' % (i+1))
plt.show()
wavPlayer((gain*S_[i]).astype(np.int16), fs)
"""
Explanation: That's all there is to it! Check out how nicely those signals have been separated and have a listen!
End of explanation
"""
|
johanvdw/niche_vlaanderen
|
docs/flooding.ipynb
|
mit
|
import niche_vlaanderen as nv
%matplotlib inline
import matplotlib.pyplot as plt
"""
Explanation: Flooding module
Niche Vlaanderen also contains a module to model the influence of flooding more precisely. This is done using the Flooding class.
The first step is importing the niche_vlaanderen module. For convenience, we will be importing as nv.
End of explanation
"""
fp = nv.Flooding()
"""
Explanation: Creating a Flooding model
Here the Flooding class is created. Like in Niche Vlaanderen, when creating the class, the model with its codetables is initialized.
End of explanation
"""
fp.calculate(depth_file="../testcase/flooding/ff_bt_t10_h_0.asc",
frequency="T10", period="summer", duration=1)
"""
Explanation: Running the model
The calculate method of the class takes four arguments: the depths (as grid), frequency, period and duration.
End of explanation
"""
fp.plot(1)
fp.plot(25)
plt.show()
"""
Explanation: Inspecting the model
The results can be plotted per vegetation type. Note that not all vegetation types of Niche are supported by the flooding module.
End of explanation
"""
fp.table.head()
"""
Explanation: Like for the niche model, it is also possible to generate a summary table.
End of explanation
"""
fp.write("_output", overwrite_files=True)
"""
Explanation: Saving the model
Comparable to the niche model, the resulting grids can be saved using the write method.
End of explanation
"""
myniche = nv.Niche()
input = "../testcase/dijle/"
myniche.set_input("soil_code", input +"bodemv.asc")
myniche.set_input("msw", input +"gvg_0_cm.asc")
myniche.set_input("mlw", input +"glg_0_cm.asc")
myniche.set_input("mhw", input +"ghg_0_cm.asc")
myniche.set_input("seepage", input +"kwel_mm_dag.asc")
myniche.set_input("management", input +"beheer_int.asc")
myniche.set_input("nitrogen_atmospheric", input +"depositie_def.asc")
myniche.set_input("nitrogen_animal", input +"bemest_dier.asc")
myniche.set_input("nitrogen_fertilizer", input +"bemest_kunst.asc")
myniche.set_input("inundation_vegetation", input +"overstr_veg.asc")
myniche.set_input("inundation_acidity", input +"ovrstr_t10_50.asc")
myniche.set_input("inundation_nutrient", input +"ovrstr_t10_50.asc")
myniche.set_input("minerality", input + "minerality.asc")
myniche.set_input("rainwater", input +"nulgrid.asc")
"""
Explanation: Combining the output with niche
The output of a Flooding model can be combined with a Niche model, by using the combine method.
We will create a new niche model and set the inputs.
End of explanation
"""
myniche.run()
myniche.plot(18)
plt.show()
"""
Explanation: Note that the niche model must be run prior to combining - otherwise this will raise an error.
In this example we also plot the result to allow comparison with the combined map.
End of explanation
"""
combined = fp.combine(myniche)
combined.plot(18)
plt.show()
"""
Explanation: Finally, we run the actual combine method. The resulting object is a Flooding object, so we can use the same method for plotting the results.
End of explanation
"""
|
astroumd/GradMap
|
notebooks/Lectures2018/Lecture3/Lecture3_Gaussians-Answer Key.ipynb
|
gpl-3.0
|
lifemean = np.mean(lifetimes) #get mean
lifestd = np.std(lifetimes) #get standard deviation
"""
Explanation: Gaussians
You just learned a little about what a Gaussian distribution looks like. As a reminder, a Gaussian curve is sometimes called a bell curve because the shape looks like a bell.
To review, the equation for the Gaussian curve is the following:
$f(x) = \frac{1}{\sqrt{2\pi\sigma^2}}e^{\frac{-(x-\mu)^2}{2\sigma^2}}$
where $\mu$ is the mean and $\sigma$ is the standard deviation.
The standard normal distribution, where $\mu=0$ and $\sigma=1$, is what you sample from when you call np.random.randn().
You're probably wondering why Gaussian, a.k.a. normal, distributions are so important. The reason is that the distributions of many things follow a normal distribution -- such as the heights of people, manufactured parts, blood pressure readings, and error measurements -- making it important to understand.
There are specific metrics that describe a normal distribution.
1) The mean, median, and mode of a Gaussian distribution are all the same.
2) There is symmetry about the mean, as in 50% of the values fall to the right of the mean and the other 50% fall to the left.
3) A certain amount of data falls within integer multiples of the standard deviation, as shown below.
Does the lifetimes data we plotted earlier hold up to these three criteria? Let's find out.
Remember, the lifetimes data was imported as the variable lifetimes before.
End of explanation
"""
#import stats module
from scipy import stats
"""
Explanation: Let's examine the first criterion: the mean, median, and mode of a Gaussian distribution are all the same.
To calculate the mode, we need to import another module called the stats module. The median can still be calculated from the numpy module.
End of explanation
"""
#your code here
lifemode = stats.mode(lifetimes) #calculate mode
lifemedian = np.median(lifetimes) #calculate median
print(lifemean)
print(lifemode)
print(lifemedian)
"""
Explanation: Now calculate the median and mode of the variable lifetimes and display them.
End of explanation
"""
#your code here
numsamp = len(lifetimes)
print(numsamp)
"""
Explanation: Does the lifetimes data fulfill the first criterion of a Gaussian distribution?
Now let's check the second criterion. Is there symmetry about the mean?
First, let's find out how many samples are in the variable lifetimes and display it.
End of explanation
"""
#Put your code here
#Note: the commented-out version below fails because round parentheses try to
#*call* the object like a function; indexing uses square brackets, and boolean
#masks only work on NumPy arrays, e.g. np.asarray(lifetimes)[uppermask].
#uppermask = lifetimes>lifemedian
#upperhalf = lifetimes(uppermask) # use lifetimes[uppermask] instead
#lowermask = lifetimes<=lifemedian
#lowerhalf = lifetimes(lowermask) # ditto
#A list comprehension sidesteps both issues:
upperhalf = [ii for ii in lifetimes if ii>lifemedian] #get upper 50%
lowerhalf = [jj for jj in lifetimes if jj<=lifemedian] #get lower 50%
upperperc = len(upperhalf)/numsamp
lowerperc = len(lowerhalf)/numsamp
print(upperperc)
print(lowerperc)
"""
Explanation: Now that you have the number of samples, you will need to use the median value to find out how many samples lie above and below it.
End of explanation
"""
#Put your code here
plus_std = (lifemedian+1*lifestd, lifemedian+2*lifestd, lifemedian+3*lifestd)
minus_std = (lifemedian-1*lifestd, lifemedian-2*lifestd, lifemedian-3*lifestd)
aboveperc = [None]*3
belowperc = [None]*3
ii=0
while ii<len(plus_std):
data_above = [jj for jj in lifetimes if jj>lifemedian and jj<plus_std[ii]]
aboveperc[ii] = len(data_above)/numsamp
data_below = [kk for kk in lifetimes if kk<=lifemedian and kk>minus_std[ii]]
belowperc[ii] = len(data_below)/numsamp
ii+=1
print('% of data within', ii, 'standard deviations of the median:', aboveperc[ii-1]+belowperc[ii-1])
"""
Explanation: Does the lifetimes data fulfill the second criterion of a Gaussian distribution?
Now let's check the last criterion. How much data falls within a standard deviation or two (or three)?
Remember, you already calculated the standard deviation of the lifetimes data as the variable lifestd.
End of explanation
"""
|
PiercingDan/mat245
|
Labs/Lab9/MAT245 Lab 9.ipynb
|
mit
|
from sklearn import datasets
bc = datasets.load_breast_cancer()
samples, targets = bc.data, bc.target
"""
Explanation: MAT245 Lab 9
Classification using Logistic Regression
Background
In a binary classification problem we have samples of data $x \in \mathbb{R}^n$, and we want to predict the value of a target variable $y \in \{0, 1\}$. For instance, a farmer might want to know if a $32 \times 32$ image $X \in \mathbb{R}^{32\times 32}$ contained a picture of a cucumber or not. We model absence or presence of a cucumber with outputs of $0$ or $1$ respectively.
The logistic regression approach to classification uses a hypothesis function $h_\theta$ of the form
$$
h_\theta(x)
=
g(\theta^T x)
=
\frac{1}{1 + e^{-\theta^T x}}.
$$
The parameter $\theta$ is what we're going to want to optimize. Since $h_\theta(x) \in [0, 1]$, we can interpret its value as the probability of $x$ having a certain label:
\begin{align}
\mathbb{P}(y = 1 ~|~ x, \theta) &= h_\theta(x) \\
\mathbb{P}(y = 0 ~|~ x, \theta) &= 1 - h_\theta(x).
\end{align}
So if $h_\theta(x) \geq 0.5$, we predict $y =1$, otherwise we predict $y = 0$. Written differently, this is
$$
\mathbb{P}(y ~|~ x, \theta) = h_\theta(x)^y (1 - h_\theta(x))^{1-y}.
$$
Now, suppose we have $m$ independently generated samples in our dataset. As usual, we arrange these $m$ samples into an $m\times n$ matrix whose rows each represent individual samples. The likelihood of the parameter $\theta$ is given by
\begin{align}
L(\theta)
&=
\mathbb{P}(y ~|~ X, \theta) \\
&=
\prod_{i=1}^m \mathbb{P}(y^{(i)} ~|~ X^{(i)}, \theta) \\
&=
\prod_{i=1}^m h_\theta(x^{(i)})^{y^{(i)}} (1 - h_\theta(x^{(i)}))^{1-y^{(i)}}.
\end{align}
Our goal is then to choose $\theta$ to maximize this likelihood. In practice, it is easier to maximize the log-likelihood function:
\begin{align}
l(\theta)
&=
\log(L(\theta)) \\
&=
\sum_{i=1}^m y^{(i)} \log [ h_\theta(x^{(i)})] + (1 - y^{(i)}) \log[1 - h_\theta(x^{(i)})].
\end{align}
We can maximize the log-likelihood by performing stochastic gradient ascent. In other words, we choose a training pair $(x, y) = (x^{(i)}, y^{(i)})$ at random, and compute the gradient of $l$ at this pair using the formula:
\begin{align}
\frac{\partial }{\partial\theta_j} l (\theta)
&=
\left(y \frac{1}{g(\theta^T x)} - (1 - y) \frac{1}{1 - g(\theta^T x)}\right) \frac{\partial}{\partial\theta_j}g(\theta^T x) \\
&=
\left(y \frac{1}{g(\theta^T x)} - (1 - y) \frac{1}{1 - g(\theta^T x)}\right)
g(\theta^T x)(1 - g(\theta^Tx)) \frac{\partial}{\partial\theta_j}\theta^Tx \\
&=
(y(1 - g(\theta^Tx)) - (1 - y)g(\theta^Tx))x_j \\
&=
(y - h_\theta(x))x_j.
\end{align}
Above we used the derivative identity $g'(z) = g(z)(1-g(z))$. To choose new $\theta$ values, we want to take a small step in the direction of the gradient (since we are maximizing $l(\theta)$). This gives the update rule of
$$
\theta_j = \theta_j + \alpha (y^{(i)} - h_\theta(x^{(i)}))x_j^{(i)}
$$
where $\alpha$ is the learning rate parameter.
Application: breast cancer detection
The sklearn breast cancer dataset consists of $569$ $30$-dimensional data points. The goal is to classify each data point as representing either a malignant or benign tumor. You can load the data with the following code:
End of explanation
"""
import matplotlib.pyplot as plt
import numpy as np
digits = datasets.load_digits()
samples, targets = digits.data, digits.target
%matplotlib inline
plt.imshow(np.reshape(samples[0], (8,8)), cmap='Greys')
"""
Explanation: Goals (1):
Split the breast cancer data into 70% training and 30% validation sets.
Write a python implementation of the logistic regression function $(\theta, x) \mapsto h_\theta(x)$.
Implement the stochastic gradient ascent (SGA) algorithm described above to choose the best parameter $\theta$ for the hypothesis function $h_\theta$. How do different learning rates affect convergence? Typical choices are in the range 0.001 - 0.1.
Validate your model's classification accuracy on the validation set (the sklearn.metrics.accuracy_score function may come in handy here).
How many iterations of SGA do you need to consistently get >85% classification accuracy on the validation set?
Principal component analysis
Background
Principal component analysis (PCA) is a dimensionality reduction technique. The idea is to project the data down to lower dimension by 'dropping' those directions/dimensions that don't contain much variance. For instance, consider the following sample of data points in 2D:
<img src="pca.svg" alt="Gaussian data in 2D" style="width: 300px;"/>
The goal of a PCA in this case would be to project all of the data points onto the axis spanned by the longer arrow; since the short arrow is orthogonal to the large one, it would be ideal if we could project along the short arrow. The new dataset will be 1-dimensional, and since most of the variation in the data was along the direction spanned by the long arrow, hopefully we haven't lost much information.
For more details about the mathematics of PCA, see Andrew Ng's great notes here.
Identifying digits with PCA and k-Nearest Neighbors.
The sklearn digits dataset contains images of handwritten digits, much like the famous MNIST dataset. Here's a sample:
End of explanation
"""
|
xtr33me/deep-learning
|
image-classification/dlnd_image_classification.ipynb
|
mit
|
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
# Use Floyd's cifar-10 dataset if present
floyd_cifar10_location = '/input/cifar-10/python.tar.gz'
if isfile(floyd_cifar10_location):
tar_gz_path = floyd_cifar10_location
else:
tar_gz_path = 'cifar-10-python.tar.gz'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(tar_gz_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
tar_gz_path,
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open(tar_gz_path) as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
"""
Explanation: Image Classification
In this project, you'll classify images from the CIFAR-10 dataset. The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images.
Get the Data
Run the following cell to download the CIFAR-10 dataset for python.
End of explanation
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 5
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
"""
Explanation: Explore the Data
The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named data_batch_1, data_batch_2, etc.. Each batch contains the labels and images that are one of the following:
* airplane
* automobile
* bird
* cat
* deer
* dog
* frog
* horse
* ship
* truck
Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the batch_id and sample_id. The batch_id is the id for a batch (1-5). The sample_id is the id for an image and label pair in the batch.
Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions.
End of explanation
"""
def normalize(x):
"""
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
"""
# TODO: Implement Function
# Using the per-image point-to-point range (x.ptp(0)) would scale each image differently, so divide by the fixed maximum of 255 instead
return (x - x.min(0)) / 255
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_normalize(normalize)
"""
Explanation: Implement Preprocess Functions
Normalize
In the cell below, implement the normalize function to take in image data, x, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as x.
End of explanation
"""
def one_hot_encode(x):
"""
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
"""
# TODO: Implement Function
return np.eye(10)[x]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_one_hot_encode(one_hot_encode)
"""
Explanation: One-hot encode
Just like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the one_hot_encode function. The input, x, is a list of labels. Implement the function to return the list of labels as a one-hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to one_hot_encode. Make sure to save the map of encodings outside the function.
Hint: Don't reinvent the wheel.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
"""
Explanation: Randomize Data
As you saw from exploring the data above, the order of the samples are randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset.
Preprocess all the data and save it
Running the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
import tensorflow as tf
def neural_net_image_input(image_shape):
"""
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
"""
# TODO: Implement Function
return tf.placeholder(tf.float32,[None, image_shape[0],image_shape[1],image_shape[2]],name="x")
def neural_net_label_input(n_classes):
"""
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
"""
# TODO: Implement Function
return tf.placeholder(tf.float32, [None, n_classes], name="y")
def neural_net_keep_prob_input():
"""
Return a Tensor for keep probability
: return: Tensor for keep probability.
"""
# TODO: Implement Function
return tf.placeholder(tf.float32, name="keep_prob")
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
"""
Explanation: Build the network
For the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project.
Note: If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages to build each layer, except the layers you build in the "Convolutional and Max Pooling Layer" section. TF Layers is similar to Keras's and TFLearn's abstraction to layers, so it's easy to pick up.
However, if you would like to get the most out of this course, try to solve all the problems without using anything from the TF Layers packages. You can still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the conv2d class, tf.layers.conv2d, you would want to use the TF Neural Network version of conv2d, tf.nn.conv2d.
Let's begin!
Input
The neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions
* Implement neural_net_image_input
* Return a TF Placeholder
* Set the shape using image_shape with batch size set to None.
* Name the TensorFlow placeholder "x" using the TensorFlow name parameter in the TF Placeholder.
* Implement neural_net_label_input
* Return a TF Placeholder
* Set the shape using n_classes with batch size set to None.
* Name the TensorFlow placeholder "y" using the TensorFlow name parameter in the TF Placeholder.
* Implement neural_net_keep_prob_input
* Return a TF Placeholder for dropout keep probability.
* Name the TensorFlow placeholder "keep_prob" using the TensorFlow name parameter in the TF Placeholder.
These names will be used at the end of the project to load your saved model.
Note: None for shapes in TensorFlow allow for a dynamic size.
End of explanation
"""
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
"""
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernel size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernel size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
"""
# TODO: Implement Function
weight = tf.Variable(tf.truncated_normal([conv_ksize[0], conv_ksize[1], int(x_tensor.shape[3]), conv_num_outputs], mean=0.0, stddev=0.05))
bias = tf.Variable(tf.zeros(conv_num_outputs))
conv = tf.nn.conv2d(x_tensor, weight, strides=[1, conv_strides[0], conv_strides[1], 1], padding='SAME')
conv = tf.nn.bias_add(conv, bias)
conv = tf.nn.relu(conv)
return tf.nn.max_pool(conv, ksize=[1, pool_ksize[0],pool_ksize[1],1], strides=[1, pool_strides[0], pool_strides[1], 1], padding='SAME')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_con_pool(conv2d_maxpool)
"""
Explanation: Convolution and Max Pooling Layer
Convolution layers have a lot of success with images. For this code cell, you should implement the function conv2d_maxpool to apply convolution then max pooling:
* Create the weight and bias using conv_ksize, conv_num_outputs and the shape of x_tensor.
* Apply a convolution to x_tensor using weight and conv_strides.
* We recommend you use same padding, but you're welcome to use any padding.
* Add bias
* Add a nonlinear activation to the convolution.
* Apply Max Pooling using pool_ksize and pool_strides.
* We recommend you use same padding, but you're welcome to use any padding.
Note: You can't use TensorFlow Layers or TensorFlow Layers (contrib) for this layer, but you can still use TensorFlow's Neural Network package. You may still use the shortcut option for all the other layers.
End of explanation
"""
def flatten(x_tensor):
"""
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
"""
# TODO: Implement Function
return tf.contrib.layers.flatten(x_tensor)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_flatten(flatten)
"""
Explanation: Flatten Layer
Implement the flatten function to change the dimension of x_tensor from a 4-D tensor to a 2-D tensor. The output should be the shape (Batch Size, Flattened Image Size). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages.
End of explanation
"""
def fully_conn(x_tensor, num_outputs):
"""
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
"""
# TODO: Implement Function
return tf.contrib.layers.fully_connected(x_tensor,num_outputs, activation_fn=tf.nn.relu)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_fully_conn(fully_conn)
"""
Explanation: Fully-Connected Layer
Implement the fully_conn function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages.
End of explanation
"""
def output(x_tensor, num_outputs):
"""
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
"""
# TODO: Implement Function
shape = x_tensor.get_shape().as_list()
flatImgSize = int(shape[1])
out = tf.add(tf.matmul(x_tensor, tf.Variable(tf.truncated_normal([flatImgSize, num_outputs], stddev=0.05, mean=0.0))), tf.Variable(tf.zeros([num_outputs])))
return out
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_output(output)
"""
Explanation: Output Layer
Implement the output function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages.
Note: Activation, softmax, or cross entropy should not be applied to this.
End of explanation
"""
def conv_net(x, keep_prob):
"""
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that holds dropout keep probability.
: return: Tensor that represents logits
"""
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
n_conv_outputs = [32,16,8,4,2]
conv_ksize = (3,3)
conv_strides = (1,1)
pool_ksize = (3,3)
pool_strides = (1,1)
conv1 = conv2d_maxpool(x, n_conv_outputs[1], conv_ksize, conv_strides, pool_ksize, pool_strides)
conv2_ksize = (2,2)
conv2_strides = (1,1)
pool2_ksize = (2,2)
pool2_strides = (1,1)
conv2 = conv2d_maxpool(conv1, n_conv_outputs[0], conv2_ksize, conv2_strides, pool2_ksize, pool2_strides)
conv2 = conv2d_maxpool(conv2, n_conv_outputs[0], conv2_ksize, conv2_strides, pool2_ksize, pool2_strides)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
flat_layer = flatten(conv2)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
fc1 = fully_conn(flat_layer, n_conv_outputs[0])
#fc1 = tf.nn.dropout(fc1, keep_prob)
fc1 = fully_conn(fc1, n_conv_outputs[1])
fc1 = fully_conn(fc1, n_conv_outputs[2])
#Only performing the droput at the end after fully connected
fc1 = tf.nn.dropout(fc1, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
out = output(fc1, 10)
# TODO: return output
return out
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
"""
Explanation: Create Convolutional Model
Implement the function conv_net to create a convolutional neural network model. The function takes in a batch of images, x, and outputs logits. Use the layers you created above to create this model:
Apply 1, 2, or 3 Convolution and Max Pool layers
Apply a Flatten Layer
Apply 1, 2, or 3 Fully Connected Layers
Apply an Output Layer
Return the output
Apply TensorFlow's Dropout to one or more layers in the model using keep_prob.
End of explanation
"""
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
"""
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
"""
# TODO: Implement Function
session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob:keep_probability})
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_train_nn(train_neural_network)
"""
Explanation: Train the Neural Network
Single Optimization
Implement the function train_neural_network to do a single optimization. The optimization should use optimizer to optimize in session with a feed_dict of the following:
* x for image input
* y for labels
* keep_prob for keep probability for dropout
This function will be called for each batch, so tf.global_variables_initializer() has already been called.
Note: Nothing needs to be returned. This function is only optimizing the neural network.
End of explanation
"""
def print_stats(session, feature_batch, label_batch, cost, accuracy):
"""
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
"""
# TODO: Implement Function
loss = session.run(cost, feed_dict={x:feature_batch, y:label_batch, keep_prob:1.})
print('Loss: {}'.format(loss))
valid_accuracy = session.run(accuracy, feed_dict={x:valid_features, y:valid_labels, keep_prob:1.})
print('Accuracy: {}'.format(valid_accuracy))
"""
Explanation: Show Stats
Implement the function print_stats to print loss and validation accuracy. Use the global variables valid_features and valid_labels to calculate validation accuracy. Use a keep probability of 1.0 to calculate the loss and validation accuracy.
End of explanation
"""
# TODO: Tune Parameters
epochs = 100
batch_size = 512
keep_probability = 0.80
"""
Explanation: Hyperparameters
Tune the following parameters:
* Set epochs to the number of iterations until the network stops learning or start overfitting
* Set batch_size to the highest number that your machine has memory for. Most people set them to common sizes of memory:
* 64
* 128
* 256
* ...
* Set keep_probability to the probability of keeping a node using dropout
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
"""
Explanation: Train on a Single CIFAR-10 Batch
Instead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. Once the final validation accuracy is 50% or greater, run the model on all the data in the next section.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
"""
Explanation: Fully Train the Model
Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
"""
Test the saved model against the test dataset
"""
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
"""
Explanation: Checkpoint
The model has been saved to disk.
Test Model
Test your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters.
End of explanation
"""
|
LaubachLab/Spikes-and-Fields
|
Save your workspace.ipynb
|
gpl-3.0
|
import dill
import numpy as np
from scipy.io import loadmat, savemat
import h5py
import hdf5storage
"""
Explanation: Save your workspace in Python
A major issue for me coming to Python from Matlab was how to save my workspaces. This is especially crucial when finalizing results in support of a manuscript. It is painful to have reviewers ask for other statistics or new analyses and have to run everything over again to address such issues. Also, some analyses take a long time to run. So, how the heck does one save workspace variables into a file in Python? It turns out to be not that difficult. Several established libraries exist for this purpose. One of these libraries, dill, is very good for short-term saves. Others are better for long-term data storage and for sharing with others who might still be dependent on Matlab or who use R.
The code below brings in five options for saving your workspace.
By the way, this code was written on PCs running Linux Mint, an Ubuntu variant, and the Python installation was based on Continuum Analytics Anaconda Python distro.
dill is an extension of Python pickle module that enables saving (serializing) most of the common Python datatypes. It depends on the version of Python and libraries that are installed on the computer that creates the dilled workspace. For me, it is the go-to library for when I am working on analysis on my office PC and need to head out and carry on using my notebook. However, given its limitation (dependence on version of Python and libraries), it does not seem like a good idea for long-term data storage.
numpy has a nice function called savez that saves several arrays into a single file in an uncompressed or compressed format. It is fast to use, but depends on Python. However, recently a library for R called RcppCNPy was written that makes it easy to load and save data in this format.
scipy includes functions for reading and writing Matlab version 4 and 5 files, savemat and [loadmat](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.io.loadmat.html). These are very useful, especially if you are using both Python and Matlab or have collaborators stuck on Matlab.
Perhaps the best long-term storage format is hdf. This format is used for the most recent versions of Matlab and can be directly read into GNU-Octave. Well-established libraries exist for working with hdf files in R and Julia. The HDF Group supplies a viewer for hdf files that makes it easy to check on the contents of a file without reading the file into Python. I have found two Python libraries, h5py and hdf5storage, useful for working with hdf files in Python. h5py is fast and easy to use. hdf5storage is slower but produces compressed saves by default.
This notebook shows how to use these libraries for saving your workspace in Python. The data set is part of the demo data file provided with NeuronExplorer, written by my grad school lab colleague Alex Kirillov. NeuronExplorer is an excellent tool for working with neurophysiological datafiles. My lab depends on it.
The first step is to import the relevant libraries.
End of explanation
"""
%cd ~/Desktop/Spikes-and-Fields/NEx-demo
NEx_demo = loadmat('SpikesAndFields.mat')
"""
Explanation: Switch folders and load the neuronal data, which were parsed out of a nex file using old Matlab code.
End of explanation
"""
Keys = NEx_demo.keys()
print(Keys)
"""
Explanation: loadmat puts the variables from the Matlab/Octave workspace into a dict.
End of explanation
"""
Neuron04a = NEx_demo['Neuron04a']
Neuron05b = NEx_demo['Neuron05b']
Neuron05c = NEx_demo['Neuron05c']
Neuron06b = NEx_demo['Neuron06b']
Neuron06d = NEx_demo['Neuron06d']
Neuron07a = NEx_demo['Neuron07a']
Event04 = NEx_demo['Event04']
Event05 = NEx_demo['Event05']
Event06 = NEx_demo['Event06']
ADmat = NEx_demo['AD01'] # LFP data
adfreq = NEx_demo['adfreq'] # sampling frequency
ts = NEx_demo['ts'] # ts is the temporal offset between spikes/events and fields in the Plexon recording file
"""
Explanation: My work style is to put each neuron, lfp, or behavioral event into its own variable in the workspace.
End of explanation
"""
%xdel NEx_demo
%xdel Keys
"""
Explanation: Clean up a bit.
End of explanation
"""
%whos ndarray
"""
Explanation: Display the arrays in the workspace.
End of explanation
"""
%cd ~/temp
"""
Explanation: Switch to a temporary directory to evaluate saving using the various Python tools.
(I use Dropbox and SpiderOak for backups, but my temp folder is not backed up. I hate wasting bandwidth.)
End of explanation
"""
%time dill.dump_session('test.pkl')
ls -lstr test.pkl
"""
Explanation: dill
dill is REALLY useful for saving the entire workspace, e.g. when shutting down notebook, heading home, and picking up work after dinner
End of explanation
"""
%reset -f
%who
import dill
%time dill.load_session('test.pkl')
%whos
"""
Explanation: dill is an optimal way to save intermediate files or your workspace
e.g. when you are working in a coffee shop and want to save progress while coding, or when you save your end-of-day work and pick it up on your notebook at home after dinner
End of explanation
"""
%%time
with h5py.File('test.h5', 'w') as hf:
hf.create_dataset('ADmat', data=ADmat, compression="gzip", shuffle=True)
hf.create_dataset('adfreq', data=adfreq, compression="gzip", shuffle=True)
hf.create_dataset('ts', data=ts, compression="gzip", shuffle=True)
hf.create_dataset('Event04', data=Event04, compression="gzip", shuffle=True)
hf.create_dataset('Neuron04a', data=Neuron04a, compression="gzip", shuffle=True)
ls -lstr test.h5
"""
Explanation: compare hdf5, hdf5storage, np's save, and scipy's savemat
Unlike numpy's savez and scipy's savemat, hdf5 needs to know the datatypes that are to be saved. For this example, ADmat, Event04, and Neuron04a are ndarrays, while adfreq and ts are scalar values.
<b>HDF5</b>
End of explanation
"""
# dict is used to set up variables for hdf5storage.writes
vars = {'ADmat':ADmat, 'adfreq':adfreq, 'ts':ts, 'Event04':Event04, 'Neuron04a':Neuron04a}
%%time
hdf5storage.writes(vars, filename='test_hdf5storage.h5')
ls -lstr test_hdf5storage.h5
"""
Explanation: all files verified using hdf viewer; file loads directly into Octave and Matlab
<b>hdf5storage</b> -- default options are much slower than direct calls to h5py; however, the file is saved more efficiently, and this effect is more apparent with more LFP channels
End of explanation
"""
%%time
np.savez('test', ADmat=ADmat, adfreq=adfreq, ts=ts, Event04=Event04, Neuron04a=Neuron04a)
ls -lstr test.npz
"""
Explanation: hdf5storage is easy to use, handles compression without effort, and is based on a stable format (H5) that can also be read into Matlab
<b>np.savez</b>
End of explanation
"""
%%time
savemat('test.mat', vars)
ls -lstr test.mat
"""
Explanation: this format seems to be stable and standard; a library exists for reading it into R (RcppCNPy: https://cran.r-project.org/web/packages/RcppCNPy/vignettes/RcppCNPy-intro.pdf); this would be useful for saving intermediate files between R and Python, and could be used as a long-term, Python-only storage option; however, the compression gains are no better than 8% (from my testing)
<b>savemat</b> (v5 matlab, from scipy)
End of explanation
"""
ls -lstr
rm test*.*
ls
"""
Explanation: this is a fast way to save data in a format that is easily read into and out of Matlab/Octave
this format is much faster than H5, but does not offer compression and requires a slow (and complex) library to be read into R (R.matlab)
clean up
End of explanation
"""
|
bismayan/MaterialsMachineLearning
|
notebooks/old_ICSD_Notebooks/Understanding ICSD data.ipynb
|
mit
|
# How many ternaries have been assigned a structure type?
structure_types = [line[3] for line in data if line[3] != '']
unique_structure_types = set(structure_types)
print("There are {} ICSD ternaries entries.".format(len(data)))
print("Structure types are assigned for {} entries.".format(len(structure_types)))
print("There are {} unique structure types.".format(len(unique_structure_types)))
"""
Explanation: Structure Types
Structure types are assigned by hand by ICSD curators.
End of explanation
"""
def is_stoichiometric(composition):
    return np.all(np.mod(list(composition.values()), 1) == 0)
stoichiometric_compositions = [c for c in compositions if is_stoichiometric(c)]
print("Number of stoichiometric compositions: {}".format(len(stoichiometric_compositions)))
ternaries = set(c.formula for c in stoichiometric_compositions)
len(ternaries)
data_stoichiometric = [x for x in data if is_stoichiometric(Composition(x[2]))]
from collections import Counter
struct_type_freq = Counter(x[3] for x in data_stoichiometric if x[3] != '')
plt.loglog(range(1, len(struct_type_freq)+1),
sorted(struct_type_freq.values(), reverse = True), 'o')
sorted(struct_type_freq.items(), key = lambda x: x[1], reverse = True)
len(set([x[2] for x in data if x[3] == 'Perovskite-GdFeO3']))
uniq_phases = set()
for row in data_stoichiometric:
spacegroup, formula, struct_type = row[1:4]
phase = (spacegroup, Composition(formula).formula, struct_type)
uniq_phases.add(phase)
uniq_struct_type_freq = Counter(x[2] for x in uniq_phases if x[2] != '')
uniq_struct_type_freq_sorted = sorted(uniq_struct_type_freq.items(), key = lambda x: x[1], reverse = True)
plt.loglog(range(1, len(uniq_struct_type_freq_sorted)+1),
[x[1] for x in uniq_struct_type_freq_sorted], 'o')
uniq_struct_type_freq_sorted
for struct_type,freq in uniq_struct_type_freq_sorted[:10]:
print("{} : {}".format(struct_type, freq))
fffs = [p[1] for p in uniq_phases if p[2] == struct_type]
fmt = " ".join(["{:14}"]*5)
print(fmt.format(*fffs[0:5]))
print(fmt.format(*fffs[5:10]))
print(fmt.format(*fffs[10:15]))
print(fmt.format(*fffs[15:20]))
"""
Explanation: Filter for stoichiometric compounds only:
End of explanation
"""
# What are the longest formulas?
for formula in sorted(formulas, key = lambda x: len(x), reverse = True)[:20]:
print(formula)
"""
Explanation: Long Formulas
End of explanation
"""
def filter_in_set(compound, universe):
return all((e in universe) for e in Composition(compound))
transition_metals = [e for e in Element if e.is_transition_metal]
tm_ternaries = [c for c in formulas if filter_in_set(c, transition_metals)]
print("Number of intermetallics:", len(tm_ternaries))
unique_tm_ternaries = set([Composition(c).formula for c in tm_ternaries])
print("Number of unique intermetallics:", len(unique_tm_ternaries))
unique_tm_ternaries
"""
Explanation: Two key insights:
1. Just because there are three elements in the formula
doesn't mean the compound is fundamentally a ternary.
There are doped binaries which masquerade as ternaries.
And there are doped ternaries which masquerade as quaternaries,
or even quintenaries. Because I only asked for compositions
with 3 elements, this data is missing.
2. ICSD has strategically placed parentheses in the formulas
which give hints as to logical groupings. For example:
(Ho1.3 Ti0.7) ((Ti0.64 Ho1.36) O6.67)
is in fact in the pyrochlore family, A2B2O7.
Intermetallics
How many intermetallics does the ICSD database contain?
End of explanation
"""
|
dacr26/CompPhys
|
01_01_euler.ipynb
|
mit
|
T0 = 10. # initial temperature
Ts = 83. # temp. of the environment
r = 0.1 # cooling rate
dt = 0.05 # time step
tmax = 60. # maximum time
nsteps = int(tmax/dt) # number of steps
T = T0
for i in range(1,nsteps+1):
new_T = T - r*(T-Ts)*dt
T = new_T
print i,i*dt, T
# we can also do t = t - r*(t-ts)*dt
"""
Explanation: author:
- 'Adrian E. Feiguin'
title: 'Computational Physics'
...
Ordinary differential equations
Let’s consider a simple 1st order equation:
$$\frac{dy}{dx}=f(x,y)$$
To solve this equation with a computer we need to discretize the differences: we
have to convert the differential equation into a “finite differences” equation. The simplest
solution is Euler’s method.
Euler’s method
Suppose that at a point $x_0$, the function $f$ has a value $y_0$. We
want to find the approximate value of $y$ in a point $x_1$ close to
$x_0$, $x_1=x_0+\Delta x$, with $\Delta x$ small. We assume that $f$,
the rate of change of $y$, is constant in this interval $\Delta x$.
Therefore we find: $$\begin{eqnarray}
dx \approx \Delta x &=& x_1-x_0, \\
dy \approx \Delta y &=& y_1-y_0,\end{eqnarray}$$ with
$y_1=y(x_1)=y(x_0+\Delta x)$. Then we re-write the differential equation in terms of discrete differences as:
$$\frac{\Delta y}{\Delta x}=f(x,y)$$ or
$$\Delta y = f(x,y)\Delta x$$
and approximate the value of $y_1$ as
$$y_1=y_0+f(x_0,y_0)(x_1-x_0)$$ We can generalize this formula to find
the value of $y$ at $x_2=x_1+\Delta x$ as
$$y_{2}=y_1+f(x_1,y_1)\Delta x,$$ or in the general case:
$$y_{n+1}=y_n+f(x_n,y_n)\Delta x$$
This is a good approximation as long as $\Delta x$ is “small”. What is
small? Depends on the problem, but it is basically defined by the “rate
of change”, or “smoothness” of $f$. $f(x)$ has to behave smoothly and
without rapid variations in the interval $\Delta x$.
Notice that Euler’s method is equivalent to a 1st order Taylor expansion
about the point $x_0$. The “local error” calculating $x_1$ is then
$O(\Delta x^2)$. If we use the method $N$ times to calculate $N$
consecutive points, the propagated “global” error will be
$N\,O(\Delta x^2)\approx O(\Delta x)$. This error decreases linearly with decreasing step, so we need to
halve the step size to reduce the error in half. The numerical work for
each step consists of a single evaluation of $f$.
Exercise 1.1: Newton’s law of cooling
If the temperature difference between an object and its surroundings is
small, the rate of change of the temperature of the object is
proportional to the temperature difference: $$\frac{dT}{dt}=-r(T-T_s),$$
where $T$ is the temperature of the body, $T_s$ is the temperature of
the environment, and $r$ is a “cooling constant” that depends on the
heat transfer mechanism, the contact area with the environment and the
thermal properties of the body. The minus sign appears because if
$T>T_s$, the temperature must decrease.
Write a program to calculate the temperature of a body at a time $t$,
given the cooling constant $r$ and the temperature of the body at time
$t=0$. Plot the results for $r=0.1\frac{1}{min}$; $T_0=83^{\circ} C$
using different intervals $\Delta t$ and compare with exact (analytical)
results.
End of explanation
"""
%matplotlib inline
import numpy as np
from matplotlib import pyplot
"""
Explanation: Let's try plotting the results. We first need to import the required libraries and methods
End of explanation
"""
my_time = np.zeros(nsteps)
my_temp = np.zeros(nsteps)
"""
Explanation: Next, we create numpy arrays to store the (x,y) values
End of explanation
"""
T = T0
my_temp[0] = T0
for i in range(1,nsteps):
T = T - r*(T-Ts)*dt
my_time[i] = i*dt
my_temp[i] = T
pyplot.plot(my_time, my_temp, color='#003366', ls='-', lw=3)
pyplot.xlabel('time')
pyplot.ylabel('temperature');
"""
Explanation: We have to rewrite the loop to store the values in the arrays. Remember that numpy arrays start from 0.
End of explanation
"""
my_time = np.linspace(0.,tmax,nsteps)
pyplot.plot(my_time, my_temp, color='#003366', ls='-', lw=3)
pyplot.xlabel('time')
pyplot.ylabel('temperature');
"""
Explanation: We could have saved effort by defining
End of explanation
"""
def euler(y, f, dx):
"""Computes y_new = y + f*dx
Parameters
----------
y : float
old value of y_n at x_n
f : float
first derivative f(x,y) evaluated at (x_n,y_n)
dx : float
x step
"""
return y + f*dx
T = T0
for i in range(1,nsteps):
T = euler(T, -r*(T-Ts), dt)
my_temp[i] = T
"""
Explanation: Alternatively, and in order to reuse code in future problems, we could have created a function.
End of explanation
"""
euler = lambda y, f, dx: y + f*dx
"""
Explanation: Actually, for this particularly simple case, calling a function may introduce unnecessary overhead, but it is an example that we will find useful for future applications. For a simple function like this we could have used a "lambda" function (more about lambda functions <a href="http://www.secnetix.de/olli/Python/lambda_functions.hawk">here</a>).
End of explanation
"""
dt = 1.
#my_color = ['#003366','#663300','#660033','#330066']
my_color = ['red', 'green', 'blue', 'black']
for j in range(0,4):
nsteps = int(tmax/dt) #the arrays will have different size for different time steps
    my_time = np.linspace(0.,tmax,nsteps)
    my_temp = np.zeros(nsteps)
    T = T0
    my_temp[0] = T0
for i in range(1,nsteps):
T = euler(T, -r*(T-Ts), dt)
my_temp[i] = T
pyplot.plot(my_time, my_temp, color=my_color[j], ls='-', lw=3)
dt = dt/2.
pyplot.xlabel('time');
pyplot.ylabel('temperature');
pyplot.xlim(8,10);
pyplot.ylim(48,58);
"""
Explanation: Now, let's study the effects of different time steps on the convergence:
End of explanation
"""
|
dwhswenson/openpathsampling
|
examples/misc/alanine_dipeptide_committor/4_analysis_help.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import openpathsampling as paths
import numpy as np
import pandas as pd
pd.options.display.max_rows = 10
storage = paths.Storage("committor_results.nc", "r")
phi = storage.cvs['phi']
psi = storage.cvs['psi']
%%time
C_7eq = storage.volumes['C_7eq']
alpha_R = storage.volumes['alpha_R']
experiments = storage.tag['experiments']
"""
Explanation: Analysis help
This covers stuff that you will need to know in order to use the committor_results.nc file.
End of explanation
"""
%%time
committor_analyzer = paths.ShootingPointAnalysis.from_individual_runs(experiments)
"""
Explanation: The experiments object is a list of tuples (snapshot, final_state). Each snapshot is an OPS snapshot object (a point in phase space), and the final_state is either the C_7eq object or the alpha_R object.
Directly obtaining a committor analysis
As it happens, experiments is in precisely the correct format to be used in one of the approaches to constructing a committor analysis.
This section requires OpenPathSampling 0.9.1 or later.
End of explanation
"""
committor_analyzer.to_pandas()
"""
Explanation: Before going further, let's talk a little bit about the implementation of the ShootingPointAnalysis object. The main thing to understand is that the purpose of that object is to histogram according to configuration. The first snapshot encountered is kept as a representative of that configuration.
So whereas there are 10000 snapshots in experiments (containing the full data, including velocities), there are only 1000 entries in the committor_analyzer (because, in this data set, I ran 1000 snapshots with 10 shots each.)
Per-configuration results
The .to_pandas() function creates a pandas table with configurations as the index, the final states as columns, and the number of times that configuration led to that final state as entries. With no argument, to_pandas() uses an integer index for each configuration.
End of explanation
"""
psi_hash = lambda x : float(psi(x))
committor_analyzer.to_pandas(label_function=psi_hash)
"""
Explanation: You can also pass it a function that takes a snapshot and returns a (hashable) value. That value will be used for the index. These collective variables return numpy arrays, so we need to cast the 1D array to a float.
End of explanation
"""
committor = committor_analyzer.committor(alpha_R)
# show the first 10 values
{k: committor[k] for k in list(committor.keys())[:10]}
"""
Explanation: You can also directly obtain the committor as a dictionary of (representative) snapshot to committor value. The committor here is defined as the probability of ending in a given state, so you must give the state.
End of explanation
"""
hist1D, bins = committor_analyzer.committor_histogram(psi_hash, alpha_R, bins=20)
bin_widths = [bins[i+1]-bins[i] for i in range(len(bins)-1)]
plt.bar(left=bins[:-1], height=hist1D, width=bin_widths, log=True);
"""
Explanation: Committor histogram in 1D
End of explanation
"""
ramachandran_hash = lambda x : (float(phi(x)), float(psi(x)))
hist2D, bins_phi, bins_psi = committor_analyzer.committor_histogram(ramachandran_hash, alpha_R, bins=20)
# not the best, since it doesn't distinguish NaNs, but that's just a matter of plotting
plt.pcolor(bins_phi, bins_psi, hist2D.T, cmap="winter")
plt.clim(0.0, 1.0)
plt.colorbar();
"""
Explanation: Committor histogram in 2D
End of explanation
"""
# let's take the first shooting point snapshot
# experiments[N][0] gives shooting snapshot for experiment N
snapshot = experiments[0][0]
"""
Explanation: Obtaining information from the snapshots
The information in committor_results.nc should be everything you could want, including initial velocities for every system. In principle, you'll mainly access that information using collective variables (see documentation on using MDTraj to create OPS collective variables). However, you may decide to access that information directly, so here's how you do that.
End of explanation
"""
snapshot.coordinates
snapshot.xyz
"""
Explanation: OpenMM-based objects come with units. So snapshot.coordinates is a unitted value. This can be annoying in analysis, so we have a convenience snapshot.xyz to get the version without units.
End of explanation
"""
snapshot.velocities
snapshot.velocities / snapshot.velocities.unit
"""
Explanation: For velocities, we don't have the convenience function, but if you want to remove units from velocities you can do so with velocity / velocity.unit.
End of explanation
"""
snapshot2 = experiments[1][0]
np.all(snapshot.coordinates == snapshot2.coordinates)
np.any(snapshot.velocities == snapshot2.velocities)
"""
Explanation: Note that snapshots include coordinates and velocities. We have several sets of initial velocities for each initial snapshot. Taking the second shooting snapshot and comparing coordinates and velocities:
End of explanation
"""
|
sbenthall/bigbang
|
examples/experimental_notebooks/Walkers and Talkers.ipynb
|
agpl-3.0
|
# Load the raw email and git data
url = "http://mail.python.org/pipermail/scipy-dev/"
arx = Archive(url,archive_dir="../archives")
mailInfo = arx.data
repo = repo_loader.get_repo("bigbang")
gitInfo = repo.commit_data;
"""
Explanation: Introduction
In group efforts, there is sometimes the impression that there are those who work, and those who talk. A naive question to ask is whether or not the people that tend to talk a lot actually get any work done. This is an obviously and purposefully obtuse question with an interesting answer.
We can use BigBang's newest feature, git data collection, to compare all of the contributors to a project, in this case Scipy, based on their email and git commit activity. The hypothesis in this case was that people who commit a lot will also tend to email a lot, and vice versa, since their involvement in a project would usually require them to do both. This hypothesis was proven to be correct. However, the data reveals many more interesting phenomenon.
End of explanation
"""
entityResolve = bigbang.entity_resolution.entityResolve
mailAct = mailInfo.apply(entityResolve, axis=1, args =("From",None))
gitAct = gitInfo.apply(entityResolve, axis=1, args =("Committer Email","Committer Name"))
"""
Explanation: Entity Resolution
Git and Email data comes from two different datatables. To observe a single person's git and email data, we need a way to identify that person across the two different datatables.
To solve this problem, I wrote an entity resolution client that will parse a Pandas dataframe and add a new column to it called "Person-ID" which gives each row an ID that represents one unique contributor. A person may go by many names ("Robert Smith", "Rob B. Smith", "Bob S.", etc.) and use many different emails. However, this client will read through these data tables in one pass and consolidate these identities based on a few strategies.
End of explanation
"""
NUM_SLICES = 1500 # Number of animation frames. More means more loading time
mailAct.sort("Date")
gitAct.sort("Time")
def getSlices(df, numSlices):
sliceSize = len(df)/numSlices
slices = []
for i in range(1, numSlices + 1):
start = 0
next = (i)*sliceSize;
next = min(next, len(df)-1) # make sure we don't go out of bounds
slice = df.iloc[start:next]
slices.append(slice)
return slices
mailSlices = getSlices(mailAct, NUM_SLICES)
gitSlices = getSlices(gitAct, NUM_SLICES)
"""
Explanation: After we've run entity resolution on our dataframes, we split the dataframe into slices based on time. So for the entire life-span of the project, we will have NUM_SLICES different segments to analyze. We will be able to look at the git and email data up until that certain date, which can let us analyze these changes over time.
End of explanation
"""
def processSlices(slices) :
for i in range(len(slices)):
slice = slices[i]
slice = slice.groupby("Person-ID").size()
slice.sort()
slices[i] = slice
def concatSlices(slicesA, slicesB) :
# assumes they have the same number of slices
# First is emails, second is commits
ansSlices = []
for i in range(len(slicesA)):
sliceA = slicesA[i]
sliceB = slicesB[i]
ans = pd.concat({"Emails" : sliceA, "Commits": sliceB}, axis = 1)
ans = ans[pd.notnull(ans["Emails"])]
ans = ans[pd.notnull(ans["Commits"])]
ansSlices.append(ans);
return ansSlices
processSlices(mailSlices)
processSlices(gitSlices)
finalSlices = concatSlices(mailSlices, gitSlices)
"""
Explanation: Merging Data Tables
Now we want to merge these two tables based on their Person-ID values. Basically, we first count how many emails / commits a certain contributor had in a certain slice. We then join all the rows with the same Person-ID to each other, so that we have the number of emails and the number of commits of each person in one row per person in one consolidated dataframe. We then delete all the rows where both of these values aren't defined. These represent people for whom we have git data but not mail data, or vice versa.
End of explanation
"""
def idToFloat(id):
return id*1.0/400.0;
for i in range(len(finalSlices)):
slice = finalSlices[i]
toSet = []
for i in slice.index.values:
i = idToFloat(i)
toSet.append(i)
slice["color"] = toSet
"""
Explanation: Coloring
We now assign a float value [0 --> 1] to each person. This isn't neccesary, but can let us graph these changes in a scatter plot and give each contributor a unique color to differentiate them. This will help us track an individual as their dot travels over time.
End of explanation
"""
data = finalSlices[len(finalSlices)-1] # Will break if there are 0 slices
fig = plt.figure(figsize=(8, 8))
d = data
x = d["Emails"]
y = d["Commits"]
c = d["color"]
ax = plt.axes(xscale='log', yscale = 'log')
plt.scatter(x, y, c=c, s=75)
plt.ylim(0, 10000)
plt.xlim(0, 10000)
ax.set_xlabel("Emails")
ax.set_ylabel("Commits")
plt.plot([0, 1000],[0, 1000], linewidth=5)
plt.show()
"""
Explanation: Here we graph our data. Each dot represents a unique contributor's number of emails and commits. As you'll notice, the graph is on a log-log scale.
End of explanation
"""
from IPython.display import YouTubeVideo
display(YouTubeVideo('GCcYJBq1Bcc', width=500, height=500))
display(YouTubeVideo('uP-z4jJqxmI', width=500, height=500))
fig = plt.figure(figsize=(8, 8))
a = finalSlices[0]
print type(plt)
ax = plt.axes(xscale='log', yscale = 'log')
graph, = ax.plot(x ,y, 'o', c='red', alpha=1, markeredgecolor='none')
ax.set_xlabel("Emails")
ax.set_ylabel("Commits")
plt.ylim(0, 10000)
plt.xlim(0, 10000)
def init():
graph.set_data([],[]);
return graph,
def animate(i):
a = finalSlices[i]
x = a["Emails"]
y = a["Commits"]
graph.set_data(x, y)
return graph,
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=NUM_SLICES, interval=1, blit=True)
anim.save('t1.mp4', fps=15)
def main():
data = finalSlices
first = finalSlices[0]
fig = plt.figure(figsize=(8, 8))
d = data
x = d[0]["Emails"]
y = d[0]["Commits"]
c = d[0]["color"]
ax = plt.axes(xscale='log', yscale='log')
scat = plt.scatter(x, y, c=c, s=100)
plt.ylim(0, 10000)
plt.xlim(0, 10000)
plt.xscale('log')
plt.yscale('log')
ani = animation.FuncAnimation(fig, update_plot, frames=NUM_SLICES,
fargs=(data, scat), blit=True)
ani.save('test.mp4', fps=10)
#plt.show()
def update_plot(i, d, scat):
x = d[i]["Emails"]
y = d[i]["Commits"]
c = d[i]["color"]
plt.cla()
ax = plt.axes()
ax.set_xscale('log')
ax.set_yscale('log')
scat = plt.scatter(x, y, c=c, s=100)
plt.ylim(0, 10000)
plt.xlim(0, 10000)
plt.xlabel("Emails")
plt.ylabel("Commits")
return scat,
main()
"""
Explanation: Animations
Below this point, you'll find the code for generating animations. This can take a long time (~30 mins) for a large number of slices. However, the pre-generated videos are below.
The first video just shows all the contributors over time without unique colors. The second video has a color for each contributor, but also contains a Matplotlib bug where the minimum x and y values for the axes are not respected.
There is a lot to observe. As to our hypothesis, it's clear that people who email more commit more. In our static graph, we could see many contributors on the x-axis -- people who only email -- but this dynamic graph allows us to see the truth. While it may seem that these are people who only email, the video shows that even these contributors eventually start committing. Most committers don't really get past 10 commits without starting to email the rest of the project, for pretty clear reasons. However, the emailers can "get away with" exclusively emailing for longer, but eventually they too start to commit. In general, not only is there a positive correlation, there's a general trend of everyone edging close to having a stable and relatively equal ratio of commits to emails.
End of explanation
"""
|
kjschiroo/mlip
|
Machine_Learning_in_Python.ipynb
|
mit
|
from sklearn.datasets import load_digits
data_set = load_digits()
"""
Explanation: Machine learning in Python
The data set
End of explanation
"""
data_set.keys()
data_set.data
"""
Explanation: Let's poke around and see what is in the data set.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (4.0, 4.0)
def show_image(image_data):
plt.imshow(image_data, cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
show_image(data_set.images[0])
data_set.images[0]
"""
Explanation: Well, that is a bit hard to grok. Let's see if we can get a better view.
End of explanation
"""
data_set.target
"""
Explanation: Now we have an idea of what our data looks like. It looks like they took 8x8 gray scale images, and then just concatenated all of the rows together.
End of explanation
"""
half_length = len(data_set.data) // 2
train_set = {
'data': data_set.data[:half_length],
'target': data_set.target[:half_length],
}
test_set = {
'data': data_set.data[half_length:],
'target': data_set.target[half_length:]
}
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier()
classifier.fit(train_set['data'], train_set['target'])
"""
Explanation: And each one of these data points has a label, 0 through 9.
Machine learning time
End of explanation
"""
import random
plt.rcParams['figure.figsize'] = (4.0, 4.0)
predictions = classifier.predict(test_set['data'])
the_pick = random.randrange(0, len(test_set['data']))
show_image(test_set['data'][the_pick].reshape(8, 8))
print("We predict: {0}".format(predictions[the_pick]))
import pandas as pd
pd.crosstab(test_set['target'], predictions, rownames=['Actual'], colnames=['Predicted'], margins=True)
"""
Explanation: We now have a classifier. Let's try it out.
End of explanation
"""
from sklearn import metrics
print("Classification report")
print(metrics.classification_report(test_set['target'], predictions))
plt.rcParams['figure.figsize'] = (12.0, 12.0)
precision = []
recall = []
probabilities = classifier.predict_proba(test_set['data'])
for i in range(10):
actual = [v == i for v in test_set['target']]
p, r, _ = metrics.precision_recall_curve(actual, probabilities[:, i])
precision.append(p)
recall.append(r)
for i in range(10):
plt.plot(recall[i], precision[i], label=i)
plt.legend(loc='lower left')
plt.xlim([0.0, 1.0])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.show()
"""
Explanation: How are we doing?
End of explanation
"""
import numpy as np
# Totally random data, nothing can be learned here.
rows = 1000
features = 64
data = np.random.random((rows, features))
labels = np.random.randint(0, 2, rows)
from sklearn.ensemble import RandomForestClassifier
# Train it on all the data, such a bad idea!
bad_classifier = RandomForestClassifier()
bad_classifier.fit(data, labels)
probabilities = bad_classifier.predict_proba(data)
p, r, _ = metrics.precision_recall_curve(labels, probabilities[:,1])
plt.plot(r, p)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.show()
"""
Explanation: Bad classifier
End of explanation
"""
# Divide our data in half
half_length = len(data) // 2
train_data = data[:half_length]
train_labels = labels[:half_length]
test_data = data[half_length:]
test_labels = labels[half_length:]
# train on half of it
good_classifier = RandomForestClassifier()
good_classifier.fit(train_data, train_labels)
# evaluate
probabilities = good_classifier.predict_proba(test_data)
p, r, _ = metrics.precision_recall_curve(test_labels, probabilities[:,1])
plt.plot(r, p)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.show()
"""
Explanation: The right way to do it
End of explanation
"""
|
FractalFlows/DAOResearch
|
notebooks/LS-LMSR.ipynb
|
mit
|
result1_task1 = {
'description': 'Attempt to reproduce the result 1 of this article',
'doi': '10.1051/itmconf/20140201004',
'reference': 'result 1, p. 4',
'type': 'scientific task',
'possible_outcomes': [
'the result is reproducible',
'the result is not reproducible'
]
}
"""
Explanation: Creating a scientific task
End of explanation
"""
import numpy as np
import pandas as pd
import pylab as pl
from scipy.optimize import fmin_cobyla

class LS_LMSRMarket(object):
def __init__(self, task, vig=0.1, init=1.0, market='LS_LMSR', b=None):
"""
Parameters
----------
task dict
A dictionary describing the task for which the predictive market is created.
Keys:
-----
type: str
(e.g. 'scientific task')
description: str
description of the task to be performed
reference: str
Internal reference (e.g. 'result 1, p. 4')
doi: str
DOI of the related publication
possible_outcomes: list
List of strings describing the possible outcomes of the task
vig float
parameter of the `alpha` variable used to calculate the `b` variable.
Corresponds to the market "vig" value - typically between 5 and 30 percent in real-world markets
init float
The initial subsidies of the market, spread equally in this algorithm on all the outcomes.
        market str, 'LS_LMSR' | 'LMSR'
The market type. If 'LMSR' is selected, then a b value should be given.
"""
self.market = market
        if self.market == 'LMSR':
            if b is None:
                raise Exception('b value is needed for LMSR markets')
            self._b = b
for k, v in task.items():
setattr(self, k, v)
self.init = init
self.n = len(self.possible_outcomes)
self._x = [np.ones([self.n])*init/self.n]
self._book = []
self.market_value = init
self._history = []
        self.alpha = vig/(self.n*np.log(self.n))
@property
def b(self):
if self.market == 'LMSR':
return self._b
elif self.market == 'LS_LMSR':
return self._b_func(self.x)
else:
raise Exception('market must be set to either "LMSR" or "LS_LMSR"')
def _b_func(self, x):
"""Calculate the `b` equation: b=\alpha \Sigma x"""
return self.alpha * x.sum()
@property
def book(self):
return pd.DataFrame(self._book)
@property
def x(self):
return self._x[-1].copy()
def cost(self, x):
return self.b*np.log(np.exp(x/self.b).sum())
def _new_x(self, shares, outcome):
new_x = self.x
new_x[outcome] += shares
return new_x
def price(self, shares, outcome):
return self._price(self._new_x(shares, outcome))
def _price(self, x):
return self.cost(x)-self.cost(self.x)
def register_x(self, x):
self._x.append(x)
def calculate_shares(self, paid, outcome):
obj_func = lambda s: np.abs(self.price(s, outcome) - paid)
return fmin_cobyla(obj_func, paid/self.p[outcome], [])
def buy_shares(self, name, paid, outcome):
shares = self.calculate_shares(paid, outcome)
self.register_x(self._new_x(shares, outcome))
self._book.append({'name':name,
'shares':shares,
'outcome':outcome,
'paid':paid})
self._history.append(self.p)
self.market_value += paid
print("%s paid %2.2f EUR, for %2.2f shares of outcome %d, which will give him %2.2f EUR if he wins"%(
name, paid, shares, outcome, shares/self.x[outcome]*self.market_value))
return shares
def sell_shares(self, name, shares, outcome):
        price = self.price(-shares, outcome)
self._book.append({'name':name,
'shares':-shares,
'outcome':outcome,
'paid':-price})
self.market_value -= price
self._history.append(self.p)
return price
def outcome_probability(self):
K = np.exp(self.x/self.b)
return K/K.sum()
@property
def p(self):
return self.outcome_probability()
def history(self):
return np.array(self._history)
pm = LS_LMSRMarket(result1_task1, init=10., vig=0.1)
pm.buy_shares('Mark', 1., 0)
pm.buy_shares('Erik', 300., 1)
pm.buy_shares('Soeren', 1., 0)
pm.buy_shares('Albert', 3., 1)
pm.market_value
pm.book
total_shares = pm.book.groupby('outcome').shares.sum()
book = pm.book
book['possible_payout'] = pm.market_value * pm.book.shares / total_shares.values[pm.book.outcome.values]
book['ownership_ratio'] = pm.book.shares / total_shares.values[pm.book.outcome.values]
grouped = book.groupby('name')
df = grouped.paid.sum().to_frame(name='paid')
df['possible_payout'] = grouped.possible_payout.sum()
df
book
pm.x
pm.market_value
pl.plot(pm.history())
pl.ylim([0.,1.])
pl.legend(['outcome 0', 'outcome 1'])
"""
Explanation: The LS-LMSR model from Augur
Heavily inspired by this blog post: Augur’s Automated Market Maker: The LS-LMSR, by Dr. Abe Othman.
The cost function for the LMSR is given by:
$$
C(\textbf{q}) = b \log \left(\sum_{i=1}^n e^{\frac{q_i}{b}} \right)
$$
and the marginal prices on each event are given by the partial derivatives of the cost function:
$$
p_j(\textbf{q}) = \frac{e^{\frac{q_j}{b}}}{\sum_{i=1}^n e^{\frac{q_i}{b}}}
$$
where $b$, which is a constant in the original LMSR model of Hanson, is here a function of $\textbf{q}$:
$$
b(\textbf{q})=\alpha \sum_{i=1}^n q_i
$$
with $\alpha$ defined as
$$
\alpha = \frac{0.1}{n \log{n}}
$$
with $n$ being the number of dimensions of $\textbf{q}$
End of explanation
"""
new_event = {
"type": "CreateEvent",
"vin": [{
"n": 0,
"value": 0.01000000,
"units": "bitcoin",
"scriptSig": """
<Joe’s signature>
<Joe´s public key >"""
}],
"vout": [{
"n": 0,
"value" : 0.01000000,
"units": "bitcoin",
"event": {
"id": "<event hash >",
"description": """Hillary Clinton
wins the 2016 U.S.
Presidential Election.""",
"branch": "politics",
"is_binary": True,
"valid_range": [0, 1],
"expiration": 1478329200,
"creator": "<Joe’s address>"
},
"address": "<base-58 event ID>",
"script": """
OP_DUP
OP_HASH160
<event hash >
OP_EQUALVERIFY
OP_MARKETCHECK"""
}]
}
"""
Explanation: The Augur example
Inspired from Augur white paper
Joe is creating a new event
End of explanation
"""
new_market = {
"type": "CreateMarket",
"loss_limit": 1.2,
"vin": [{
"n": 0,
"value": 27.72588722,
"units": "bitcoin",
"tradingFee": 0.005,
"scriptSig": """<Joe’s signature>
<Joe ’s public key >"""
}],
"vout": [{
"n": 0,
"value": 27.72588722,
"units": "bitcoin",
"script": """
OP_DUP
OP_HASH160
OP_EVENTLOOKUP
OP_ISSHARES
OP_MARKETCHECK"""
},
{
"n": 1,
"value": 10**9,
"units": "shares",
"event": "<event -1 hash >",
"branch": "politics",
"script": """
OP_DUP
OP_HASH160
OP_EVENTLOOKUP
OP_ISBITCOIN
OP_MARKETCHECK"""
},
{
"n": 2,
"value": 10**9,
"units": "shares",
"event": "<event-2 hash>",
"branch": "politics",
"script": """
OP_DUP
OP_HASH160
OP_EVENTLOOKUP
OP_ISBITCOIN
OP_MARKETCHECK"""
}],
"id": "<market hash>",
"creator": "<Joe’s address>"
}
"""
Explanation: Joe is creating a Market of events
End of explanation
"""
n = 100
outcome = 0.001
# The amount is assumed to increase linearly with time, as the market increases its liquidity
amount = np.random.random([n]) * 100. #* (1+np.arange(n))/(1.*n)
outcomes = np.zeros([n])
outcomes[np.random.random([n])<outcome] = 1.0
"""
Explanation: 100 Traders example
End of explanation
"""
pm = LS_LMSRMarket(result1_task1, init=10., vig=0.1)
"""
Explanation: Creating the new task prediction market
End of explanation
"""
pm.buy_shares('EvilMegaCorp', 1000, 1)
"""
Explanation: One company comes along and bets a sh*t ton of money
End of explanation
"""
for i, a, o in zip(range(n),amount, outcomes):
pm.buy_shares('Trader-%d'%(i), a, int(o))
pm.buy_shares('EvilMegaCorp', 1000, 1)
"""
Explanation: Performing the bets
End of explanation
"""
pm.market_value
total_shares = pm.book.groupby('outcome').shares.sum()
book = pm.book
book['possible_payout'] = pm.market_value * pm.book.shares / total_shares.values[pm.book.outcome.values]
grouped = book.groupby('name')
df = grouped.paid.sum().to_frame(name='paid')
df['possible_payout'] = grouped.possible_payout.sum()
df
total = pm.book.groupby('outcome').shares.sum()
total
pm.book.groupby('outcome').paid.sum()
pm.book.groupby('outcome').shares.sum()
pm.p
pm.book.groupby('outcome').sum().values/pm.book.groupby('outcome').sum().values.sum()
"""
Explanation: The total to pay for each outcome
End of explanation
"""
pl.plot(pm.history())
pl.ylim([0.,1.])
pl.legend(['outcome 0', 'outcome 1'])
pl.title('%d Trades, total market value=%2.2f EUR'%(n, pm.market_value))
"""
Explanation: Plot the market prediction history
End of explanation
"""
book = pm.book
total_shares = book.groupby('outcome').shares.sum()
book['possible_payout'] = pm.market_value * book.shares / total_shares.values[book.outcome.values]
book['possible_win'] = book.possible_payout - book.paid
book['p0'] = pm.history()[:,0]
book['p1'] = pm.history()[:,1]
book
pm.market_value
pm.p
"""
Explanation: The book of trades
End of explanation
"""
|
phoebe-project/phoebe2-docs
|
development/tutorials/21_22_pblum_mode.ipynb
|
gpl-3.0
|
import phoebe
b = phoebe.default_binary()
b.add_dataset('lc', dataset='lc01')
print(b.filter(qualifier='pblum*', dataset='lc01'))
print(b.get_parameter('pblum_mode'))
"""
Explanation: 2.1 - 2.2 Migration: pblum_mode and pblum vs pblum_ext
PHOEBE 2.2 introduces new modes for handling the scaling between absolute and relative luminosities/intensities/fluxes via the new pblum_mode parameter, which will exist for each LC dataset attached to the bundle. By default pblum_mode will be set to 'component-coupled', which mimics the default behavior prior to version 2.2.
End of explanation
"""
print(b.get_parameter('pblum_component'))
b.set_value('pblum_component', 'secondary')
print(b.filter(qualifier='pblum*', dataset='lc01'))
"""
Explanation: In the default mode, you can change which of the stars you'd like to provide the luminosity. By default, this is the primary component. To provide the luminosity of the secondary star instead, set pblum_component.
Previously this was achieved by setting pblum_ref@primary = 'secondary' and pblum_ref@secondary = 'self'. Note that the pblum_ref parameter has been removed in 2.2+ in favor of the more flexible and intuitive pblum_mode parameter.
End of explanation
"""
b.set_value('pblum_mode', 'decoupled')
print(b.filter(qualifier='pblum*', dataset='lc01'))
"""
Explanation: Previously, you could 'decouple' the luminosities by setting pblum_ref of both components to 'self'. In PHOEBE 2.2+, you will instead change pblum_mode to 'decoupled', in which case multiple pblum parameters will become visible.
End of explanation
"""
print(b.compute_pblums())
"""
Explanation: For more information on the behavior for all of these supported modes, see the pblum tutorial.
In addition, PHOEBE 2.2 distinguishes between intrinsic passband luminosities (pblum) and extrinsic passband luminosities (pblum_ext). The returned dictionary from b.compute_pblums now includes both intrinsic and extrinsic values, with the keys of the dictionary now including pblum@ or pblum_ext@.
End of explanation
"""
b.add_dataset('mesh', dataset='mesh01')
print(b.get_parameter('columns', dataset='mesh01').choices)
"""
Explanation: This also means that the mesh column to expose luminosities is renamed to pblum_ext (and abs_pblum_ext) as these expose the extrinsic luminosities (including features such as spots, irradiation, etc).
End of explanation
"""
|
ktmud/deep-learning
|
sentiment-network/Sentiment_Classification_Projects.ipynb
|
mit
|
def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
"""
Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network
by Andrew Trask
Twitter: @iamtrask
Blog: http://iamtrask.github.io
What You Should Already Know
neural networks, forward and back-propagation
stochastic gradient descent
mean squared error
and train/test splits
Where to Get Help if You Need it
Re-watch previous Udacity Lectures
Leverage the recommended Course Reading Material - Grokking Deep Learning (Check inside your classroom for a discount code)
Shoot me a tweet @iamtrask
Tutorial Outline:
Intro: The Importance of "Framing a Problem" (this lesson)
Curate a Dataset
Developing a "Predictive Theory"
PROJECT 1: Quick Theory Validation
Transforming Text to Numbers
PROJECT 2: Creating the Input/Output Data
Putting it all together in a Neural Network (video only - nothing in notebook)
PROJECT 3: Building our Neural Network
Understanding Neural Noise
PROJECT 4: Making Learning Faster by Reducing Noise
Analyzing Inefficiencies in our Network
PROJECT 5: Making our Network Train and Run Faster
Further Noise Reduction
PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary
Analysis: What's going on in the weights?
Lesson: Curate a Dataset<a id='lesson_1'></a>
The cells from here until Project 1 include code Andrew shows in the videos leading up to mini project 1. We've included them so you can run the code along with the videos without having to type in everything.
End of explanation
"""
len(reviews)
reviews[0]
labels[0]
"""
Explanation: Note: The data in reviews.txt we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like The, the, and THE, all the same way.
End of explanation
"""
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
"""
Explanation: Lesson: Develop a Predictive Theory<a id='lesson_2'></a>
End of explanation
"""
from collections import Counter
import numpy as np
"""
Explanation: Project 1: Quick Theory Validation<a id='project_1'></a>
There are multiple ways to implement these projects, but in order to get your code closer to what Andrew shows in his solutions, we've provided some hints and starter code throughout this notebook.
You'll find the Counter class to be useful in this exercise, as well as the numpy library.
End of explanation
"""
# Create three Counter objects to store positive, negative and total counts
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
"""
Explanation: We'll create three Counter objects, one for words from positive reviews, one for words from negative reviews, and one for all the words.
End of explanation
"""
for review, label in zip(reviews, labels):
words = review.split(' ')
if label == 'POSITIVE':
positive_counts.update(words)
else:
negative_counts.update(words)
total_counts.update(words)
"""
Explanation: TODO: Examine all the reviews. For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter.
Note: Throughout these projects, you should use split(' ') to divide a piece of text (such as a review) into individual words. If you use split() instead, you'll get slightly different results than what the videos and solutions show.
End of explanation
"""
# Examine the counts of the most common words in positive reviews
positive_counts.most_common()
# Examine the counts of the most common words in negative reviews
negative_counts.most_common()
"""
Explanation: Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used.
End of explanation
"""
# Create Counter object to store positive/negative ratios
pos_neg_ratios = Counter()
pos_neg_ratios.update({
word: positive_counts[word] / float(negative_counts[word]+1)
for word, val in positive_counts.items() if val > 100
})
"""
Explanation: As you can see, common words like "the" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the ratios of word usage between positive and negative reviews.
TODO: Check all the words you've seen and calculate the ratio of postive to negative uses and store that ratio in pos_neg_ratios.
Hint: the positive-to-negative ratio for a given word can be calculated with positive_counts[word] / float(negative_counts[word]+1). Notice the +1 in the denominator – that ensures we don't divide by zero for words that are only seen in positive reviews.
End of explanation
"""
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
"""
Explanation: Examine the ratios you've calculated for a few words:
End of explanation
"""
for x, val in pos_neg_ratios.items():
pos_neg_ratios[x] = np.log(val)
"""
Explanation: Looking closely at the values you just calculated, we see the following:
Words that you would expect to see more often in positive reviews – like "amazing" – have a ratio greater than 1. The more skewed a word is toward positive, the farther from 1 its positive-to-negative ratio will be.
Words that you would expect to see more often in negative reviews – like "terrible" – have positive values that are less than 1. The more skewed a word is toward negative, the closer to zero its positive-to-negative ratio will be.
Neutral words, which don't really convey any sentiment because you would expect to see them in all sorts of reviews – like "the" – have values very close to 1. A perfectly neutral word – one that was used in exactly the same number of positive reviews as negative reviews – would be almost exactly 1. The +1 we suggested you add to the denominator slightly biases words toward negative, but it won't matter because it will be a tiny bias and later we'll be ignoring words that are too close to neutral anyway.
Ok, the ratios tell us which words are used more often in postive or negative reviews, but the specific values we've calculated are a bit difficult to work with. A very positive word like "amazing" has a value above 4, whereas a very negative word like "terrible" has a value around 0.18. Those values aren't easy to compare for a couple of reasons:
Right now, 1 is considered neutral, but the absolute value of the positive-to-negative ratios of very positive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around neutral so the absolute value from neutral of the positive-to-negative ratio for a word would indicate how much sentiment (positive or negative) that word conveys.
When comparing absolute values it's easier to do that around zero than one.
To fix these issues, we'll convert all of our ratios to new values using logarithms.
TODO: Go through all the ratios you calculated and convert them to logarithms. (i.e. use np.log(ratio))
In the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but opposite signs.
End of explanation
"""
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
"""
Explanation: Examine the new ratios you've calculated for the same words from before:
End of explanation
"""
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
# Note: Above is the code Andrew uses in his solution video,
# so we've included it here to avoid confusion.
# If you explore the documentation for the Counter class,
# you will see you could also find the 30 least common
# words like this: pos_neg_ratios.most_common()[:-31:-1]
"""
Explanation: If everything worked, now you should see neutral words with values close to zero. In this case, "the" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at "amazing"'s ratio - it's above 1, showing it is clearly a word with positive sentiment. And "terrible" has a similar score, but in the opposite direction, so it's below -1. It's now clear that both of these words are associated with specific, opposing sentiments.
Now run the following cells to see more ratios.
The first cell displays all the words, ordered by how associated they are with postive reviews. (Your notebook will most likely truncate the output so you won't actually see all the words in the list.)
The second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write reversed(pos_neg_ratios.most_common()).)
You should continue to see values similar to the earlier ones we checked – neutral words will be close to 0, words will get more positive as their ratios approach and go above 1, and words will get more negative as their ratios approach and go below -1. That's why we decided to use the logs instead of the raw ratios.
End of explanation
"""
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
"""
Explanation: End of Project 1.
Watch the next video to see Andrew's solution, then continue on to the next lesson.
Transforming Text into Numbers<a id='lesson_3'></a>
The cells here include code Andrew shows in the next video. We've included it so you can run the code along with the video without having to type in everything.
End of explanation
"""
# TODO: Create set named "vocab" containing all of the words from all of the reviews
vocab = set(total_counts.keys())
"""
Explanation: Project 2: Creating the Input/Output Data<a id='project_2'></a>
TODO: Create a set named vocab that contains every word in the vocabulary.
End of explanation
"""
vocab_size = len(vocab)
print(vocab_size)
"""
Explanation: Run the following cell to check your vocabulary size. If everything worked correctly, it should print 74074
End of explanation
"""
from IPython.display import Image
Image(filename='sentiment_network_2.png')
"""
Explanation: Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. layer_0 is the input layer, layer_1 is a hidden layer, and layer_2 is the output layer.
End of explanation
"""
# TODO: Create layer_0 matrix with dimensions 1 by vocab_size, initially filled with zeros
layer_0 = np.array([np.zeros(vocab_size, dtype=int)])
"""
Explanation: TODO: Create a numpy array called layer_0 and initialize it to all zeros. You will find the zeros function particularly helpful here. Be sure you create layer_0 as a 2-dimensional matrix with 1 row and vocab_size columns.
End of explanation
"""
layer_0.shape
from IPython.display import Image
Image(filename='sentiment_network.png')
"""
Explanation: Run the following cell. It should display (1, 74074)
End of explanation
"""
# Create a dictionary of words in the vocabulary mapped to index positions
# (to be used in layer_0)
word2index = {x: i for i, x in enumerate(vocab)}
word2index
"""
Explanation: layer_0 contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.
End of explanation
"""
def update_input_layer(review):
""" Modify the global layer_0 to represent the vector form of review.
The element at a given index of layer_0 should represent
how many times the given word occurs in the review.
Args:
review(string) - the string of the review
Returns:
None
"""
global layer_0
# clear out previous state by resetting the layer to be all 0s
layer_0 *= 0
for word in review.split(' '):
layer_0[0,word2index[word]] += 1
"""
Explanation: TODO: Complete the implementation of update_input_layer. It should count
how many times each word is used in the given review, and then store
those counts at the appropriate indices inside layer_0.
End of explanation
"""
update_input_layer(reviews[0])
layer_0
"""
Explanation: Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in layer_0.
End of explanation
"""
def get_target_for_label(label):
"""Convert a label to `0` or `1`.
Args:
label(string) - Either "POSITIVE" or "NEGATIVE".
Returns:
`0` or `1`.
"""
return 0 if label == 'NEGATIVE' else 1
"""
Explanation: TODO: Complete the implementation of get_target_for_label. It should return 0 or 1,
depending on whether the given label is NEGATIVE or POSITIVE, respectively.
End of explanation
"""
labels[0]
get_target_for_label(labels[0])
"""
Explanation: Run the following two cells. They should print out 'POSITIVE' and 1, respectively.
End of explanation
"""
labels[1]
get_target_for_label(labels[1])
"""
Explanation: Run the following two cells. They should print out 'NEGATIVE' and 0, respectively.
End of explanation
"""
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):
"""Create a SentimenNetwork with the given settings
Args:
reviews(list) - List of reviews used for training
labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
hidden_nodes(int) - Number of nodes to create in the hidden layer
learning_rate(float) - Learning rate to use while training
"""
# Assign a seed to our random number generator to ensure we get
# reproducible results during development
np.random.seed(1)
# process the reviews and their associated labels so that everything
# is ready for training
self.pre_process_data(reviews, labels)
# Build the network to have the number of hidden nodes and the learning rate that
# were passed into this initializer. Make the same number of input nodes as
# there are vocabulary words and create a single output node.
self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set(' '.join(reviews).split(' '))
# Convert the vocabulary set to a list so we can access words via indices
self.review_vocab = list(review_vocab)
label_vocab = set(labels)
# Convert the label vocabulary set to a list so we can access labels via indices
self.label_vocab = list(label_vocab)
# Store the sizes of the review and label vocabularies.
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
# Create a dictionary of words in the vocabulary mapped to index positions
self.word2index = {x: i for i, x in enumerate(self.review_vocab)}
# Create a dictionary of labels mapped to index positions
self.label2index = {x: i for i, x in enumerate(self.label_vocab)}
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Store the number of nodes in input, hidden, and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Store the learning rate
self.learning_rate = learning_rate
# Initialize weights
# TODO: initialize self.weights_0_1 as a matrix of zeros. These are the weights between
# the input layer and the hidden layer.
self.weights_0_1 = np.zeros((self.input_nodes, self.hidden_nodes))
# TODO: initialize self.weights_1_2 as a matrix of random values.
# These are the weights between the hidden layer and the output layer.
self.weights_1_2 = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
# TODO: Create the input layer, a two-dimensional matrix with shape
# 1 x input_nodes, with all values initialized to zero
self.layer_0 = np.zeros((1, input_nodes))
def update_input_layer(self, review):
""" Update input layer with word counts """
self.layer_0 *= 0
for word in review.split(' '):
if word in self.word2index:
self.layer_0[0, self.word2index[word]] += 1
def get_target_for_label(self,label):
return self.label2index[label]
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self, output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
# make sure we have a matching number of reviews and labels
assert(len(training_reviews) == len(training_labels))
# Keep track of correct predictions to display accuracy during training
correct_so_far = 0
# Remember when we started for printing time statistics
start = time.time()
n_records = len(training_reviews)
# loop through all the given reviews and run a forward and backward pass,
# updating weights for every item
for i, (review, label) in enumerate(zip(training_reviews, training_labels)):
# TODO: Implement the forward pass through the network.
# That means use the given review to update the input layer,
# then calculate values for the hidden layer,
# and finally calculate the output layer.
#
# Do not use an activation function for the hidden layer,
# but use the sigmoid activation function for the output layer.
self.update_input_layer(review)
label = self.label2index[label]
layer_0 = self.layer_0 # (1, n_i)
layer_1 = self.layer_0 @ self.weights_0_1 # (1, n_i)@(n_i, n_h) = (1, n_h)
# the hidden layer doesn't change the input
hidden_output = layer_1
output = self.sigmoid(hidden_output @ self.weights_1_2)
# TODO: Implement the back propagation pass here.
# That means calculate the error for the forward pass's prediction
# and update the weights in the network according to their
# contributions toward the error, as calculated via the
# gradient descent and back propagation algorithms you
# learned in class.
error = label - output # output error
output_error_term = error * self.sigmoid_output_2_derivative(output)
hidden_error_term = self.weights_1_2 @ output_error_term # * 1
self.weights_0_1 += self.learning_rate * (layer_0.T @ hidden_error_term.T)
self.weights_1_2 += self.learning_rate * (layer_1.T @ output_error_term.T)
# TODO: Keep track of correct predictions. To determine if the prediction was
# correct, check that the absolute value of the output error
# is less than 0.5. If so, add one to the correct_so_far count.
if abs(error[0,0]) < 0.5:
correct_so_far += 1
# For debug purposes, print out our prediction accuracy and speed
# throughout the training process.
elapsed_time = float(time.time() - start)
reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
print("Progress: %.4f%% Speed(reviews/sec): %.1f "
"Correct: %s #Trained: %s "
"Training Accuracy: %s" % (
100 * i/n_records, reviews_per_second,
correct_so_far, i+1,
100 * correct_so_far/(i+1)
), end="\r")
if(i and i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
"""
Attempts to predict the labels for the given testing_reviews,
and uses the test_labels to calculate the accuracy of those predictions.
"""
# keep track of how many correct predictions we make
correct = 0
# we'll time how many predictions per second we make
start = time.time()
# Loop through each of the given reviews and call run to predict
# its label.
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
# For debug purposes, print out our prediction accuracy and speed
# throughout the prediction process.
elapsed_time = float(time.time() - start)
reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
+ " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
"""
Returns a POSITIVE or NEGATIVE prediction for the given review.
"""
# TODO: Run a forward pass through the network, like you did in the
# "train" function. That means use the given review to
# update the input layer, then calculate values for the hidden layer,
# and finally calculate the output layer.
#
# Note: The review passed into this function for prediction
# might come from anywhere, so you should convert it
# to lower case prior to using it.
self.update_input_layer(review.lower())
# input to (and output from) the hidden layer
hidden_output = self.layer_0 @ self.weights_0_1
# final output
output = self.sigmoid(hidden_output @ self.weights_1_2)
return 'POSITIVE' if output[0] > 0.5 else 'NEGATIVE'
"""
Explanation: End of Project 2.
Watch the next video to see Andrew's solution, then continue on to the next lesson.
Project 3: Building a Neural Network<a id='project_3'></a>
TODO: We've included the framework of a class called SentimentNetwork. Implement all of the items marked TODO in the code. These include doing the following:
- Create a basic neural network much like the networks you've seen in earlier lessons and in Project 1, with an input layer, a hidden layer, and an output layer.
- Do not add a non-linearity in the hidden layer. That is, do not use an activation function when calculating the hidden layer outputs.
- Re-use the code from earlier in this notebook to create the training data (see TODOs in the code)
- Implement the pre_process_data function to create the vocabulary for our training data generating functions
- Ensure train trains over the entire corpus
Where to Get Help if You Need it
Re-watch earlier Udacity lectures
Chapters 3-5 - Grokking Deep Learning - (Check inside your classroom for a discount code)
End of explanation
"""
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
"""
Explanation: Run the following cell to create a SentimentNetwork that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of 0.1.
End of explanation
"""
mlp.test(reviews[-1000:],labels[-1000:])
"""
Explanation: Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set).
We have not trained the model yet, so the results should be about 50% as it will just be guessing and there are only two possible values to choose from.
End of explanation
"""
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.
End of explanation
"""
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: That most likely didn't train very well. Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, 0.01, and then train the new network.
End of explanation
"""
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, 0.001, and then train the new network.
End of explanation
"""
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
review_counter = Counter()
for word in reviews[0].split(" "):
review_counter[word] += 1
review_counter.most_common()
"""
Explanation: With a learning rate of 0.001, the network should finally have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson.
End of Project 3.
Watch the next video to see Andrew's solution, then continue on to the next lesson.
Understanding Neural Noise<a id='lesson_4'></a>
The following cells include the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.
End of explanation
"""
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):
"""Create a SentimenNetwork with the given settings
Args:
reviews(list) - List of reviews used for training
labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
hidden_nodes(int) - Number of nodes to create in the hidden layer
learning_rate(float) - Learning rate to use while training
"""
# Assign a seed to our random number generator to ensure we get
# reproducible results during development
np.random.seed(1)
# process the reviews and their associated labels so that everything
# is ready for training
self.pre_process_data(reviews, labels)
# Build the network to have the number of hidden nodes and the learning rate that
# were passed into this initializer. Make the same number of input nodes as
# there are vocabulary words and create a single output node.
self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set(' '.join(reviews).split(' '))
# Convert the vocabulary set to a list so we can access words via indices
self.review_vocab = list(review_vocab)
label_vocab = set(labels)
# Convert the label vocabulary set to a list so we can access labels via indices
self.label_vocab = list(label_vocab)
# Store the sizes of the review and label vocabularies.
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
# Create a dictionary of words in the vocabulary mapped to index positions
self.word2index = {x: i for i, x in enumerate(self.review_vocab)}
# Create a dictionary of labels mapped to index positions
self.label2index = {x: i for i, x in enumerate(self.label_vocab)}
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Store the number of nodes in input, hidden, and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Store the learning rate
self.learning_rate = learning_rate
# Initialize weights
# TODO: initialize self.weights_0_1 as a matrix of zeros. These are the weights between
# the input layer and the hidden layer.
self.weights_0_1 = np.zeros((self.input_nodes, self.hidden_nodes))
# TODO: initialize self.weights_1_2 as a matrix of random values.
# These are the weights between the hidden layer and the output layer.
self.weights_1_2 = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
# TODO: Create the input layer, a two-dimensional matrix with shape
# 1 x input_nodes, with all values initialized to zero
self.layer_0 = np.zeros((1, input_nodes))
def update_input_layer(self, review):
""" Update input layer with word counts """
self.layer_0 *= 0
for word in set(review.split(' ')):
if word in self.word2index:
self.layer_0[0, self.word2index[word]] = 1
def get_target_for_label(self,label):
return self.label2index[label]
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self, output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
# make sure we have a matching number of reviews and labels
assert(len(training_reviews) == len(training_labels))
# Keep track of correct predictions to display accuracy during training
correct_so_far = 0
# Remember when we started for printing time statistics
start = time.time()
n_records = len(training_reviews)
# loop through all the given reviews and run a forward and backward pass,
# updating weights for every item
for i, (review, label) in enumerate(zip(training_reviews, training_labels)):
# TODO: Implement the forward pass through the network.
# That means use the given review to update the input layer,
# then calculate values for the hidden layer,
# and finally calculate the output layer.
#
# Do not use an activation function for the hidden layer,
# but use the sigmoid activation function for the output layer.
self.update_input_layer(review)
label = self.label2index[label]
layer_0 = self.layer_0 # (1, n_i)
layer_1 = self.layer_0 @ self.weights_0_1 # (1, n_i)@(n_i, n_h) = (1, n_h)
# the hidden layer doesn't change the input
hidden_output = layer_1
output = self.sigmoid(hidden_output @ self.weights_1_2)
# TODO: Implement the back propagation pass here.
# That means calculate the error for the forward pass's prediction
# and update the weights in the network according to their
# contributions toward the error, as calculated via the
# gradient descent and back propagation algorithms you
# learned in class.
error = label - output # output error
output_error_term = error * self.sigmoid_output_2_derivative(output)
hidden_error_term = self.weights_1_2 @ output_error_term # * 1
self.weights_0_1 += self.learning_rate * (layer_0.T @ hidden_error_term.T)
self.weights_1_2 += self.learning_rate * (layer_1.T @ output_error_term.T)
# TODO: Keep track of correct predictions. To determine if the prediction was
# correct, check that the absolute value of the output error
# is less than 0.5. If so, add one to the correct_so_far count.
if abs(error[0,0]) < 0.5:
correct_so_far += 1
# For debug purposes, print out our prediction accuracy and speed
# throughout the training process.
elapsed_time = float(time.time() - start)
reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
print("Progress: %.4f%% Speed(reviews/sec): %.1f "
"Correct: %s #Trained: %s "
"Training Accuracy: %.5f" % (
100 * i/n_records, reviews_per_second,
correct_so_far, i+1,
100 * correct_so_far/(i+1)
), end="\r")
if(i and i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
"""
Attempts to predict the labels for the given testing_reviews,
and uses the test_labels to calculate the accuracy of those predictions.
"""
# keep track of how many correct predictions we make
correct = 0
# we'll time how many predictions per second we make
start = time.time()
# Loop through each of the given reviews and call run to predict
# its label.
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
# For debug purposes, print out our prediction accuracy and speed
# throughout the prediction process.
elapsed_time = float(time.time() - start)
reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
+ " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
"""
Returns a POSITIVE or NEGATIVE prediction for the given review.
"""
# TODO: Run a forward pass through the network, like you did in the
# "train" function. That means use the given review to
# update the input layer, then calculate values for the hidden layer,
# and finally calculate the output layer.
#
# Note: The review passed into this function for prediction
# might come from anywhere, so you should convert it
# to lower case prior to using it.
self.update_input_layer(review.lower())
# input to (and output from) the hidden layer
hidden_output = self.layer_0 @ self.weights_0_1
# final output
output = self.sigmoid(hidden_output @ self.weights_1_2)
return 'POSITIVE' if output[0] > 0.5 else 'NEGATIVE'
"""
Explanation: Project 4: Reducing Noise in Our Input Data<a id='project_4'></a>
TODO: Attempt to reduce the noise in the input data like Andrew did in the previous video. Specifically, do the following:
* Copy the SentimentNetwork class you created earlier into the following cell.
* Modify update_input_layer so it does not count how many times each word is used, but rather just stores whether or not a word was used.
End of explanation
"""
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of 0.1.
End of explanation
"""
mlp.test(reviews[-1000:],labels[-1000:])
"""
Explanation: That should have trained much better than the earlier attempts. It's still not wonderful, but it should have improved dramatically. Run the following cell to test your model with 1000 predictions.
End of explanation
"""
Image(filename='sentiment_network_sparse.png')
layer_0 = np.zeros(10)
layer_0
layer_0[4] = 1
layer_0[9] = 1
layer_0
weights_0_1 = np.random.randn(10,5)
layer_0.dot(weights_0_1)
indices = [4,9]
layer_1 = np.zeros(5)
for index in indices:
layer_1 += (1 * weights_0_1[index])
layer_1
Image(filename='sentiment_network_sparse_2.png')
layer_1 = np.zeros(5)
for index in indices:
layer_1 += (weights_0_1[index])
layer_1
"""
Explanation: End of Project 4.
Andrew's solution was actually in the previous video, so rewatch that video if you had any problems with that project. Then continue on to the next lesson.
Analyzing Inefficiencies in our Network<a id='lesson_5'></a>
The following cells include the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.
End of explanation
"""
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):
"""Create a SentimenNetwork with the given settings
Args:
reviews(list) - List of reviews used for training
labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
hidden_nodes(int) - Number of nodes to create in the hidden layer
learning_rate(float) - Learning rate to use while training
"""
# Assign a seed to our random number generator to ensure we get
# reproducible results during development
np.random.seed(1)
# process the reviews and their associated labels so that everything
# is ready for training
self.pre_process_data(reviews, labels)
# Build the network to have the number of hidden nodes and the learning rate that
# were passed into this initializer. Make the same number of input nodes as
# there are vocabulary words and create a single output node.
self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set(' '.join(reviews).split(' '))
# Convert the vocabulary set to a list so we can access words via indices
self.review_vocab = list(review_vocab)
label_vocab = set(labels)
# Convert the label vocabulary set to a list so we can access labels via indices
self.label_vocab = list(label_vocab)
# Store the sizes of the review and label vocabularies.
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
# Create a dictionary of words in the vocabulary mapped to index positions
self.word2index = {x: i for i, x in enumerate(self.review_vocab)}
# Create a dictionary of labels mapped to index positions
self.label2index = {x: i for i, x in enumerate(self.label_vocab)}
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Store the number of nodes in input, hidden, and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Store the learning rate
self.learning_rate = learning_rate
# Generate weights
self.weights_0_1 = np.zeros((self.input_nodes, self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.layer_1 = np.zeros((1, self.hidden_nodes))
def get_target_for_label(self,label):
return self.label2index[label]
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self, output):
return output * (1 - output)
def train(self, training_reviews_raw, training_labels):
training_reviews = [[self.word2index[x] for x in set(review.split(' '))]
for review in training_reviews_raw]
# make sure we have a matching number of reviews and labels
assert(len(training_reviews) == len(training_labels))
# Keep track of correct predictions to display accuracy during training
correct = 0
n_records = len(training_reviews)
# Remember when we started for printing time statistics
start = time.time()
layer_1 = self.layer_1
# loop through all the given reviews and run a forward and backward pass,
# updating weights for every item
for i, (words, label) in enumerate(zip(training_reviews, training_labels)):
label = self.label2index[label]
layer_1 *= 0
for index in words:
# 1 is the input value
# shape of weights_0_1: (vocab_size, n_hidden_nodes)
# .[index] get the weights for this specific word
layer_1 += self.weights_0_1[index]
# the hidden layer doesn't change the input
hidden_output = layer_1
output = self.sigmoid(hidden_output @ self.weights_1_2)
error = label - output # output error
output_error_term = error * self.sigmoid_output_2_derivative(output)
hidden_error_term = (self.weights_1_2 @ output_error_term).T # * 1
for index in words:
self.weights_0_1[index] += self.learning_rate * hidden_error_term[0]
self.weights_1_2 += self.learning_rate * (layer_1.T @ output_error_term)
# TODO: Keep track of correct predictions. To determine if the prediction was
# correct, check that the absolute value of the output error
# is less than 0.5. If so, add one to the correct_so_far count.
if abs(error[0,0]) < 0.5:
correct += 1
self.print_progress(n_records, start, correct, i)
def print_progress(self, n_records, start, correct, i):
# For debug purposes, print out our prediction accuracy and speed
# throughout the process.
elapsed_time = float(time.time() - start)
reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
print("\rProgress:%2.2f%% Speed:%4.2f/s Correct:%5d #Trained:%5s Accuracy:%2.4f" %
(100 * i/n_records, reviews_per_second, correct, i+1, 100 * correct/(i+1)),
end='')
if i != 0 and i % 2500 == 0:
print('\n', end='')
def test(self, testing_reviews, testing_labels):
"""
Attempts to predict the labels for the given testing_reviews,
and uses the test_labels to calculate the accuracy of those predictions.
"""
# keep track of how many correct predictions we make
correct = 0
# we'll time how many predictions per second we make
start = time.time()
n_records = len(testing_reviews)
# Loop through each of the given reviews and call run to predict
# its label.
for i in range(n_records):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
self.print_progress(n_records, start, correct, i)
def run(self, review):
"""
Returns a POSITIVE or NEGATIVE prediction for the given review.
"""
layer_1 = self.layer_1
layer_1 *= 0
for word in set(review.lower().split(' ')):
if word in self.word2index:
layer_1 += self.weights_0_1[self.word2index[word]]
# final output
output = self.sigmoid(layer_1 @ self.weights_1_2)
# round to the nearest label
label_index = 0 if output[0,0] < 0.5 else 1
return self.label_vocab[label_index]
"""
Explanation: Project 5: Making our Network More Efficient<a id='project_5'></a>
TODO: Make the SentimentNetwork class more efficient by eliminating unnecessary multiplications and additions that occur during forward and backward propagation. To do that, you can do the following:
* Copy the SentimentNetwork class from the previous project into the following cell.
* Remove the update_input_layer function - you will not need it in this version.
* Modify init_network:
You no longer need a separate input layer, so remove any mention of self.layer_0
You will be dealing with the old hidden layer more directly, so create self.layer_1, a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero
Modify train:
Change the name of the input parameter training_reviews to training_reviews_raw. This will help with the next step.
At the beginning of the function, you'll want to preprocess your reviews to convert them to a list of indices (from word2index) that are actually used in the review. This is equivalent to what you saw in the video when Andrew set specific indices to 1. Your code should create a local list variable named training_reviews that should contain a list for each review in training_reviews_raw. Those lists should contain the indices for words found in the review.
Remove call to update_input_layer
Use self's layer_1 instead of a local layer_1 object.
In the forward pass, replace the code that updates layer_1 with new logic that only adds the weights for the indices used in the review.
When updating weights_0_1, only update the individual weights that were used in the forward pass.
Modify run:
Remove call to update_input_layer
Use self's layer_1 instead of a local layer_1 object.
Much like you did in train, you will need to pre-process the review so you can work with word indices, then update layer_1 by adding weights for the indices used in the review.
End of explanation
"""
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
print('')
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: Run the following cell to recreate the network and train it once again.
End of explanation
"""
mlp.test(reviews[-1000:], labels[-1000:])
"""
Explanation: That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.
End of explanation
"""
Image(filename='sentiment_network_sparse_2.png')
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()
hist, edges = np.histogram(list(map(lambda x:x[1],pos_neg_ratios.most_common())), density=True, bins=100)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
frequency_frequency = Counter()
for word, cnt in total_counts.most_common():
frequency_frequency[cnt] += 1
hist, edges = np.histogram(list(map(lambda x:x[1],frequency_frequency.most_common())), density=True, bins=100)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
"""
Explanation: End of Project 5.
Watch the next video to see Andrew's solution, then continue on to the next lesson.
Further Noise Reduction<a id='lesson_6'></a>
End of explanation
"""
# TODO: -Copy the SentimentNetwork class from Project 5 lesson
# -Modify it according to the above instructions
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
def __init__(self, reviews, labels,
hidden_nodes=10, learning_rate=0.1,
min_count=10, polarity_cutoff=1):
"""Create a SentimenNetwork with the given settings
Args:
reviews(list) - List of reviews used for training
labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
hidden_nodes(int) - Number of nodes to create in the hidden layer
learning_rate(float) - Learning rate to use while training
"""
# Assign a seed to our random number generator to ensure we get
# reproducible results during development
np.random.seed(1)
# process the reviews and their associated labels so that everything
# is ready for training
self.pre_process_data(reviews, labels, min_count, polarity_cutoff)
# Build the network to have the number of hidden nodes and the learning rate that
# were passed into this initializer. Make the same number of input nodes as
# there are vocabulary words and create a single output node.
self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)
# def pre_process_data(self, reviews, labels, min_count, polarity_cutoff):
# ## ----------------------------------------
# ## New for Project 6: Calculate positive-to-negative ratios for words before
# # building vocabulary
# #
# positive_counts = Counter()
# negative_counts = Counter()
# total_counts = Counter()
# for i in range(len(reviews)):
# if(labels[i] == 'POSITIVE'):
# for word in reviews[i].split(" "):
# positive_counts[word] += 1
# total_counts[word] += 1
# else:
# for word in reviews[i].split(" "):
# negative_counts[word] += 1
# total_counts[word] += 1
# pos_neg_ratios = Counter()
# for term,cnt in list(total_counts.most_common()):
# if(cnt >= 50):
# pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
# pos_neg_ratios[term] = pos_neg_ratio
# for word,ratio in pos_neg_ratios.most_common():
# if(ratio > 1):
# pos_neg_ratios[word] = np.log(ratio)
# else:
# pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))
# #
# ## end New for Project 6
# ## ----------------------------------------
# # populate review_vocab with all of the words in the given reviews
# review_vocab = set()
# for review in reviews:
# for word in review.split(" "):
# ## New for Project 6: only add words that occur at least min_count times
# # and for words with pos/neg ratios, only add words
# # that meet the polarity_cutoff
# if(total_counts[word] > min_count):
# if(word in pos_neg_ratios.keys()):
# if((pos_neg_ratios[word] >= polarity_cutoff) or (pos_neg_ratios[word] <= -polarity_cutoff)):
# review_vocab.add(word)
# else:
# review_vocab.add(word)
# # Convert the vocabulary set to a list so we can access words via indices
# self.review_vocab = list(review_vocab)
# # populate label_vocab with all of the words in the given labels.
# label_vocab = set()
# for label in labels:
# label_vocab.add(label)
# # Convert the label vocabulary set to a list so we can access labels via indices
# self.label_vocab = list(label_vocab)
# # Store the sizes of the review and label vocabularies.
# self.review_vocab_size = len(self.review_vocab)
# self.label_vocab_size = len(self.label_vocab)
# # Create a dictionary of words in the vocabulary mapped to index positions
# self.word2index = {}
# for i, word in enumerate(self.review_vocab):
# self.word2index[word] = i
# # Create a dictionary of labels mapped to index positions
# self.label2index = {}
# for i, label in enumerate(self.label_vocab):
# self.label2index[label] = i
def pre_process_data(self, reviews, labels, min_count, polarity_cutoff):
# Convert the label vocabulary set to a list so we can access labels via indices
label_vocab = set(labels)
self.label_vocab = list(label_vocab)
self.label_vocab_size = len(self.label_vocab)
pos_count, neg_count, total_count = Counter(), Counter(), Counter()
n_pos, n_neg = 0, 0
for review, label in zip(reviews, labels):
if label == 'POSITIVE':
c = pos_count
n_pos += 1
else:
c = neg_count
n_neg += 1
for word in review.split(' '):
c[word] += 1
total_count[word] += 1
# Convert the vocabulary set to a list so we can access words via indices
self.review_vocab = [
x for x, count in total_count.items()
if (count > min_count and
abs(np.log(
((pos_count[x] + 0.5) / n_pos) /
((neg_count[x] + 0.5) / n_neg)
)) > polarity_cutoff)
]
# Store the sizes of the review and label vocabularies.
self.review_vocab_size = len(self.review_vocab)
# Create a dictionary of words in the vocabulary mapped to index positions
self.word2index = {x: i for i, x in enumerate(self.review_vocab)}
# Create a dictionary of labels mapped to index positions
self.label2index = {x: i for i, x in enumerate(self.label_vocab)}
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Store the number of nodes in input, hidden, and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Store the learning rate
self.learning_rate = learning_rate
# Generate weights
self.weights_0_1 = np.zeros((self.input_nodes, self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.layer_1 = np.zeros((1, self.hidden_nodes))
def get_target_for_label(self,label):
return self.label2index[label]
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self, output):
return output * (1 - output)
def train(self, training_reviews_raw, training_labels):
# make sure we have a matching number of reviews and labels
assert(len(training_reviews_raw) == len(training_labels))
training_reviews = [
[self.word2index[x]
for x in set(review.split(' '))
if x in self.word2index]
for review in training_reviews_raw
]
# Keep track of correct predictions to display accuracy during training
correct = 0
n_records = len(training_reviews)
# Remember when we started for printing time statistics
start = time.time()
layer_1 = self.layer_1
# loop through all the given reviews and run a forward and backward pass,
# updating weights for every item
for i, (words, label) in enumerate(zip(training_reviews, training_labels)):
label = self.label2index[label]
layer_1 *= 0
for index in words:
# 1 is the input value
# shape of weights_0_1: (vocab_size, n_hidden_nodes)
# .[index] get the weights for this specific word
layer_1 += self.weights_0_1[index]
# the hidden layer doesn't change the input
hidden_output = layer_1
output = self.sigmoid(hidden_output @ self.weights_1_2)
error = label - output # output error
output_error_term = error * self.sigmoid_output_2_derivative(output)
hidden_error_term = (self.weights_1_2 @ output_error_term).T # * 1
for index in words:
self.weights_0_1[index] += self.learning_rate * hidden_error_term[0]
self.weights_1_2 += self.learning_rate * (layer_1.T @ output_error_term)
# TODO: Keep track of correct predictions. To determine if the prediction was
# correct, check that the absolute value of the output error
# is less than 0.5. If so, add one to the correct_so_far count.
if abs(error[0,0]) < 0.5:
correct += 1
self.print_progress(n_records, start, correct, i)
def print_progress(self, n_records, start, correct, i):
# For debug purposes, print out our prediction accuracy and speed
# throughout the process.
elapsed_time = float(time.time() - start)
reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
if i % 100 == 0 or i + 1 == n_records:
print("Progress:%2.2f%% Speed:%4.2f/s Correct:%5d #Trained:%5s Accuracy:%2.4f" %
(100 * i/n_records, reviews_per_second, correct, i+1, 100 * correct/(i+1)),
end='\r')
def test(self, testing_reviews, testing_labels):
"""
Attempts to predict the labels for the given testing_reviews,
and uses the test_labels to calculate the accuracy of those predictions.
"""
# keep track of how many correct predictions we make
correct = 0
# we'll time how many predictions per second we make
start = time.time()
n_records = len(testing_reviews)
# Loop through each of the given reviews and call run to predict
# its label.
for i in range(n_records):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
self.print_progress(n_records, start, correct, i)
def run(self, review):
"""
Returns a POSITIVE or NEGATIVE prediction for the given review.
"""
layer_1 = self.layer_1
layer_1 *= 0
for word in set(review.lower().split(' ')):
if word in self.word2index:
layer_1 += self.weights_0_1[self.word2index[word]]
# final output
output = self.sigmoid(layer_1 @ self.weights_1_2)
# round to the nearest label
label_index = 0 if output[0,0] < 0.5 else 1
return self.label_vocab[label_index]
"""
Explanation: Project 6: Reducing Noise by Strategically Reducing the Vocabulary<a id='project_6'></a>
TODO: Improve SentimentNetwork's performance by reducing more noise in the vocabulary. Specifically, do the following:
* Copy the SentimentNetwork class from the previous project into the following cell.
* Modify pre_process_data:
Add two additional parameters: min_count and polarity_cutoff
Calculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)
Andrew's solution only calculates a positive-to-negative ratio for words that occur at least 50 times. This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like.
Change so words are only added to the vocabulary if they occur in the vocabulary more than min_count times.
Change so words are only added to the vocabulary if the absolute value of their positive-to-negative ratio is at least polarity_cutoff
Modify __init__:
Add the same two parameters (min_count and polarity_cutoff) and use them when you call pre_process_data
End of explanation
"""
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: Run the following cell to train your network with a small polarity cutoff.
End of explanation
"""
mlp.test(reviews[-1000:],labels[-1000:])
"""
Explanation: And run the following cell to test its performance.
End of explanation
"""
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: Run the following cell to train your network with a much larger polarity cutoff.
End of explanation
"""
mlp.test(reviews[-1000:],labels[-1000:])
"""
Explanation: And run the following cell to test its performance.
End of explanation
"""
mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)
mlp_full.train(reviews[:-1000],labels[:-1000])
Image(filename='sentiment_network_sparse.png')
def get_most_similar_words(focus = "horrible"):
most_similar = Counter()
for word in mlp_full.word2index.keys():
most_similar[word] = np.dot(mlp_full.weights_0_1[mlp_full.word2index[word]],
mlp_full.weights_0_1[mlp_full.word2index[focus]])
return most_similar.most_common()
get_most_similar_words("excellent")
get_most_similar_words("terrible")
import matplotlib.colors as colors
words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
pos = 0
neg = 0
colors_list = list()
vectors_list = list()
for word in words_to_visualize:
if word in pos_neg_ratios.keys():
vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])
if(pos_neg_ratios[word] > 0):
pos+=1
colors_list.append("#00ff00")
else:
neg+=1
colors_list.append("#000000")
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="vector T-SNE for most polarized words")
source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],
x2=words_top_ted_tsne[:,1],
names=words_to_visualize,
color=colors_list))
p.scatter(x="x1", y="x2", size=8, source=source, fill_color="color")
word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6,
text_font_size="8pt", text_color="#555555",
source=source, text_align='center')
p.add_layout(word_labels)
show(p)
# green indicates positive words, black indicates negative words
"""
Explanation: End of Project 6.
Watch the next video to see Andrew's solution, then continue on to the next lesson.
Analysis: What's Going on in the Weights?<a id='lesson_7'></a>
End of explanation
"""
|
yaoxx151/UCSB_Boot_Camp_copy
|
Day01_ComputerBasics/notebooks/03 - Version Control.ipynb
|
cc0-1.0
|
from IPython.display import Image
Image(url='http://www.phdcomics.com/comics/archive/phd101212s.gif')
"""
Explanation: Why Version Control?
Here's why.
End of explanation
"""
%%bash
git status
"""
Explanation: If that hasn't convinced you, here are some other benefits:
http://stackoverflow.com/questions/1408450/why-should-i-use-version-control
Replace 'code' in the first answer with 'essay', 'thesis', 'homework' -- all stuff that a version control system such as git and GitHub can help you with!
Git for Scientists: A Tutorial (by John McDonnell)
http://nyuccl.org/pages/GitTutorial/
Go through the tutorial. You can either follow along from the terminal in the command line, or from within this very notebook using the %%bash magic:
End of explanation
"""
|
dataworkshop/webinar-jupyter
|
pandas.ipynb
|
mit
|
# imports used throughout this notebook
import pandas as pd
import numpy as np
sq = pd.Series({'row1': 'row1 col a', 'row 2': 'row2 col a'})
sq
sq.index
df = pd.DataFrame(
{
'column_a': {'row1': 'row1 col a', 'row 2': 'row2 col a'},
'column_b': {'row1': 'row1 col b', 'row 2': 'row2 col b'},
})
df
df.index
df.columns
df.columns = ['new_column_a', 'new_column_b']
df
print(type(df.new_column_a))
df.new_column_a
type(df.new_column_a)
print(type(df.new_column_a.values))
df.new_column_a.values
"""
Explanation: Pandas
DataFrame
Series
End of explanation
"""
df = pd.read_csv('train.csv')
"""
Explanation: Read data
Let's read the data from the train.csv file into memory (assigning it to the df variable in our case)
End of explanation
"""
df.info()
"""
Explanation: Questions:
How many objects (rows) and features (columns) are there?
What are the names and types of the features (columns)?
Are there missing values?
How much memory is used to keep this data in RAM?
Let's use .info() to find the answer
End of explanation
"""
print("count samples & features: ", df.shape)
print("Are there missing values: ", df.isnull().any().any())
"""
Explanation: RangeIndex: 10886 entries, 0 to 10885 => there are 10886 rows (objects)
Data columns (total 12 columns): => there are 12 columns (features)
dtypes: float64(3), int64(8), object(1) => three types (float, int, object)
memory usage: 1020.6+ KB => it uses about 1 MB
There is no missing data (because each column has the same number of non-missing values)
End of explanation
"""
df.head(10)
df.season.unique()
#df[''].unique()
df.season.nunique()
df.columns
for column in df.columns:
print(column, df[column].nunique())
"""
Explanation: Questions
What does the data look like?
Are there categorical variables?
Do the categorical variables have high or low cardinality (how many unique values do they have)?
Can we optimize memory usage?
End of explanation
"""
df.holiday.unique()
df[ ['holiday'] ].info()
df['holiday'] = df['holiday'].astype('int8')
df[ ['holiday'] ].info()
def optimize_memory(df):
for cat_var in ['holiday', 'weather', 'season', 'workingday']:
df[cat_var] = df[cat_var].astype('int8')
for float_var in ['temp', 'atemp', 'windspeed']:
df[float_var] = df[float_var].astype('float16')
for int_var in ['casual', 'registered', 'count']:
df[int_var] = df[int_var].astype('int16')
return df
df = optimize_memory(df)
df.info()
"""
Explanation: Categorical variables:
season: 4 unique values
holiday: 2 (binary)
workingday: 2 (binary)
weather: 4
End of explanation
"""
df['datetime'] = pd.to_datetime(df['datetime'])
df.info()
df = pd.read_csv('train.csv', parse_dates=['datetime'])
df = optimize_memory(df)
df.info()
"""
Explanation: Working with date
End of explanation
"""
df.head()
"""
Explanation: Understand Data
https://www.kaggle.com/c/bike-sharing-demand/data
datetime - hourly date + timestamp
season - 1 = spring, 2 = summer, 3 = fall, 4 = winter
holiday - whether the day is considered a holiday
workingday - whether the day is neither a weekend nor holiday
weather -
1: Clear, Few clouds, Partly cloudy, Partly cloudy
2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist
3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds
4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog
temp - temperature in Celsius
atemp - "feels like" temperature in Celsius
humidity - relative humidity
windspeed - wind speed
casual - number of non-registered user rentals initiated
registered - number of registered user rentals initiated
count - number of total rentals
End of explanation
"""
df['count'].plot(figsize=(20, 10));
df['casual'].plot()
df['registered'].plot()
(df['count'] == df['casual'] + df['registered']).all()
"""
Explanation: Questions:
What is the target variable (the one to be predicted)?
What is the difference between count, registered and casual?
End of explanation
"""
df.datetime.map(lambda x: x.day)
df.datetime.dt.hour
def plot_by_hour(data, year=None, agg='sum'):
data['hour'] = data.datetime.dt.hour
dd = data[ data.datetime.dt.year == year ] if year else data
by_hour = dd.groupby(['hour', 'workingday'])['count'].agg(agg).unstack()
return by_hour.plot(kind='bar', ylim=(0, 80000), figsize=(15,5), width=0.9, title="Year = {0}".format(year))
plot_by_hour(df, year=2011)
plot_by_hour(df, year=2012);
def plot_by_year(data, agg_attr, title):
data['year'] = data.datetime.dt.year
data['month'] = data.datetime.dt.month
data['hour'] = data.datetime.dt.hour
by_year = data.groupby([agg_attr, 'year'])['count'].agg('sum').unstack()
return by_year.plot(kind='bar', figsize=(15,5), width=0.9, title=title)
plot_by_year(df, 'month', "Rent bikes per month in 2011 and 2012")
plot_by_year(df, 'hour', "Rent bikes per hour in 2011 and 2012");
df[ ['count', 'year'] ].boxplot(by="year", figsize=(15, 6));
for year in [2011, 2012]:
for workingday in [0, 1]:
dd = df[ (df.datetime.dt.year == year) & (df.workingday == workingday) ]  # filter by both year and working-day flag
dd[ ['count', 'month'] ].boxplot(by="month", figsize=(15, 6));
"""
Explanation: Extract day, month, year... from datetime
End of explanation
"""
weather = {1: 'Clear', 2: 'Mist', 3: 'Light Snow', 4: 'Heavy Rain'}
df['weather_label'] = df.weather.map(lambda x: weather[x])
df['weather_label'].unique()
"""
Explanation: Mapping
Weather
Clear, Few clouds, Partly cloudy, Partly cloudy
Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist
Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds
Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog
End of explanation
"""
df[ ['weather', 'season'] ].apply(lambda x: 'weather-{0}, season-{1}'.format(x['weather'], x['season']), axis=1).head()
"""
Explanation: Apply
End of explanation
"""
df['year'] = df.datetime.dt.year
df['year'].value_counts()
df['month'].value_counts()
"""
Explanation: Value counts
End of explanation
"""
df.groupby('year')['month'].value_counts()
"""
Explanation: Group
End of explanation
"""
df.groupby('year')['count'].min()
df.groupby('year')['count'].max()
df.groupby('year')['count'].agg(np.max)
for agg_func in [np.mean, np.median, np.min, np.max]:
print(agg_func.__name__, df.groupby(['year', 'month'])['count'].agg(agg_func))
"""
Explanation: Aggregation
End of explanation
"""
df.sort_values(by=['year', 'month'], ascending=False).head()
"""
Explanation: Sort
End of explanation
"""
df.to_csv('df.csv', index=False)
!head df.csv
df.to_hdf('df.h5', 'df')
"""
Explanation: Save
End of explanation
"""
|
joseerlang/PySpark_docker
|
notebook/Trabajando con Spark SQL y dataframes.ipynb
|
apache-2.0
|
from pyspark.sql import SparkSession
spark= SparkSession.builder.appName("Trabajando con Spark SQL").getOrCreate()
"""
Explanation: Entry point
We are going to create an entry point to the DataFrame and Dataset API.
End of explanation
"""
import json
with open('sql/PeriodicTableJSON.json') as data_file:
data = json.load(data_file)
with open('sql/PeriodicTableJSON.jsonl', 'w') as outfile:
for entry in data:
json.dump(entry, outfile)
outfile.write('\n')
df = spark.read.json("sql/PeriodicTableJSON.jsonl")
df.show()
df.printSchema()
df.select("name").show()
"""
Explanation: The first thing we are going to read is a JSON file that represents the periodic table; we will store it in a DataFrame on which we will perform different actions, as if it were an RDD.
Note: the JSON format in Spark SQL is a line-delimited format, like a CSV, so the list of objects has to be transformed into one row (one line) per object.
End of explanation
"""
df.select(df['name'],df['atomic_mass']).filter(df['atomic_mass']<200).show(10)
df.groupBy('phase').count().show()
"""
Explanation: We select the chemical elements whose atomic mass is less than 200 and show the first 10.
End of explanation
"""
df.createGlobalTempView("chemistryTable")
spark.sql("select name from global_temp.chemistryTable").show(5)
"""
Explanation: Now we will see how to create a temporary table from a DataFrame and run SQL statements against it.
End of explanation
"""
from pyspark.sql import Row
sc = spark.sparkContext
lines=sc.textFile("sql/Periodictable.txt")
parts= lines.map(lambda p: p.split(","))
elements= parts.map(lambda e: Row(name=e[0],atomic_mass=float(e[1])))
schemeElements=spark.createDataFrame(elements)
schemeElements.createOrReplaceTempView("elements")
lightElements=spark.sql("select name from elements where atomic_mass>0 and atomic_mass<21")
lightElemName=lightElements.rdd.map(lambda elem: "Name: "+elem.name).collect()
for name in lightElemName:
print(name)
"""
Explanation: As we already mentioned in the post, Python does not allow you to build Dataset structures. To give you an idea if you come from the Java or Scala world: creating a Dataset is based on defining a class and adding objects of that class, and the result is a table-like structure such as the DataFrame shown in our case.
Inferring the schema
In Spark SQL there are two ways to infer the schema of a DataFrame: one is through reflection and the other is explicit, programmatic definition. Next we will look at both approaches using a txt file that contains each chemical element and its atomic mass.
End of explanation
"""
from pyspark.sql.types import *
sc=spark.sparkContext
lines=sc.textFile("sql/Periodictable.txt")
parts=lines.map(lambda line: line.split(","))
elements= parts.map(lambda p: (p[0],p[1]))
schemeString="name atomicMass"
fields= [StructField(field_name,StringType(),True) for field_name in schemeString.split()]
scheme =StructType(fields)
schemeElements= spark.createDataFrame(elements,scheme)
schemeElements.createOrReplaceTempView("elements")
spark.sql("select name,atomicMass from elements").show()
schemeElements.printSchema()
"""
Explanation: Now let's see how to do it programmatically.
End of explanation
"""
schemeElements.select("name","atomicMass").write.save("sql/namesAndAtomicMass.parquet")
"""
Explanation: Data Source
Many formats are available in Spark SQL (json, parquet, jdbc, orc, libsvm, csv, text, ...), although the default format is parquet.
In this section we will look at handling different data formats and at talking to Hive, Parquet and JDBC to save/retrieve information.
The first thing we will do is write the previous query to a parquet file so that we can then run queries directly against the file.
End of explanation
"""
newDf= spark.sql("select atomicMass from parquet.`sql/namesAndAtomicMass.parquet`")
newDf.show()
"""
Explanation: And now we can run a query directly against the file.
End of explanation
"""
jdbcDF = spark.read \
.format("jdbc") \
.option("url", "jdbc:postgresql://db:5432/postgres") \
.option("dbtable", "books") \
.option("user", "postgres") \
.option("driver", "org.postgresql.Driver") \
.option("password", "root") \
.load()
jdbcDF.show();
jdbcDF.printSchema();
"""
Explanation: When you start the Docker cluster, the database runs a books.sql script (located in notebook/initdb) that loads a books table with title, author and year into the postgres schema.
End of explanation
"""
|
xdnian/pyml
|
code/bonus/scikit-model-to-json.ipynb
|
mit
|
%load_ext watermark
%watermark -a '' -v -d -p scikit-learn,numpy,scipy
# to install watermark just uncomment the following line:
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
"""
Explanation: Sebastian Raschka, 2016
https://github.com/1iyiwei/pyml
Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
End of explanation
"""
%matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from mlxtend.evaluate import plot_decision_regions
iris = load_iris()
y, X = iris.target, iris.data[:, [0, 2]] # only use 2 features
lr = LogisticRegression(C=100.0,
class_weight=None,
dual=False,
fit_intercept=True,
intercept_scaling=1,
max_iter=100,
multi_class='multinomial',
n_jobs=1,
penalty='l2',
random_state=1,
solver='newton-cg',
tol=0.0001,
verbose=0,
warm_start=False)
lr.fit(X, y)
plot_decision_regions(X=X, y=y, clf=lr, legend=2)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.show()
"""
Explanation: Bonus Material - Scikit-learn Model Persistence using JSON
In many situations, it is desirable to store away a trained model for future use. These situations where we want to persist a model could be the deployment of a model in a web application, for example, or scientific reproducibility of our experiments.
I wrote a little bit about serializing scikit-learn models using pickle in the context of the web applications that we developed in chapter 8. Also, you can find an excellent tutorial section on scikit-learn's website. Honestly, I would say that pickling Python objects via the pickle, dill or joblib modules is probably the most convenient approach to model persistence. However, pickling Python objects can sometimes be a little bit problematic, for example, when deserializing a model in Python 3.x that was originally pickled in Python 2.7.x and vice versa. Also, pickle offers different protocols (currently the protocols 0-4), which are not necessarily backwards compatible.
Thus, to prepare for the worst case scenario -- corrupted pickle files or version incompatibilities -- there's at least one other (a little bit more tedious) way to model persistence using JSON.
JSON (JavaScript Object Notation) is a lightweight data-interchange format. It is easy for humans to read and write. It is easy for machines to parse and generate. It is based on a subset of the JavaScript Programming Language, Standard ECMA-262 3rd Edition - December 1999. JSON is a text format that is completely language independent but uses conventions that are familiar to programmers of the C-family of languages, including C, C++, C#, Java, JavaScript, Perl, Python, and many others. These properties make JSON an ideal data-interchange language. [Source: http://www.json.org]
One of the advantages of JSON is that it is a human-readable format. So, if push comes to shove, we should still be able to read the parameter files and model coefficients "manually" and assign these values to the respective scikit-learn estimator or build our own model to reproduce scientific results.
Let's see how that works ... First, let us train a simple logistic regression classifier on Iris:
End of explanation
"""
lr.get_params()
"""
Explanation: Luckily, we don't have to retype or copy & paste all the estimator parameters manually if we want to store them away. To get a dictionary of these parameters, we can simply use the handy "get_params" method:
End of explanation
"""
import json
with open('./sckit-model-to-json/params.json', 'w', encoding='utf-8') as outfile:
json.dump(lr.get_params(), outfile)
"""
Explanation: Storing them in JSON format is easy, we simply import the json module from Python's standard library and dump the dictionary to a file:
End of explanation
"""
with open('./sckit-model-to-json/params.json', 'r', encoding='utf-8') as infile:
print(infile.read())
"""
Explanation: When we read the file, we can see that the JSON file is just a 1-to-1 copy of our Python dictionary in text format:
End of explanation
"""
attrs = [i for i in dir(lr) if i.endswith('_') and not i.endswith('__')]
print(attrs)
attr_dict = {i: getattr(lr, i) for i in attrs}
"""
Explanation: Now, the trickier part is to identify the "fit" parameters of the estimator, i.e., the parameters of our logistic regression model. However, in practice it's actually pretty straightforward to figure them out by heading over to the respective documentation page: just look out for the attributes in the "Attributes" section that have a trailing underscore (thanks, scikit-learn team, for the beautifully thought-out API!). In the case of logistic regression, we are interested in the weights .coef_, the bias unit .intercept_, and the classes_ and n_iter_ attributes.
End of explanation
"""
import numpy as np
for k in attr_dict:
if isinstance(attr_dict[k], np.ndarray):
attr_dict[k] = attr_dict[k].tolist()
"""
Explanation: In order to serialize NumPy arrays to JSON objects, we need to cast the arrays to (nested) Python lists first; however, that's not much of a hassle thanks to the tolist method. (Also, consider saving the attributes to separate JSON files, e.g., intercept.json and coef.json, for clarity.)
End of explanation
"""
with open('./sckit-model-to-json/attributes.json', 'w', encoding='utf-8') as outfile:
json.dump(attr_dict,
outfile,
separators=(',', ':'),
sort_keys=True,
indent=4)
"""
Explanation: Now, we are ready to dump our "attribute dictionary" to a JSON file:
End of explanation
"""
with open('./sckit-model-to-json/attributes.json', 'r', encoding='utf-8') as infile:
print(infile.read())
"""
Explanation: If everything went fine, our JSON file should look like this -- in plaintext format:
End of explanation
"""
import codecs
import json
obj_text = codecs.open('./sckit-model-to-json/params.json', 'r', encoding='utf-8').read()
params = json.loads(obj_text)
obj_text = codecs.open('./sckit-model-to-json/attributes.json', 'r', encoding='utf-8').read()
attributes = json.loads(obj_text)
"""
Explanation: With similar ease, we can now use json's loads method to read the data back from the ".json" files and re-assign them to Python objects. (Imagine the following happens in a new Python session.)
End of explanation
"""
%matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from mlxtend.evaluate import plot_decision_regions
import numpy as np
iris = load_iris()
y, X = iris.target, iris.data[:, [0, 2]] # only use 2 features
lr = LogisticRegression()
lr.set_params(**params)
for k in attributes:
if isinstance(attributes[k], list):
setattr(lr, k, np.array(attributes[k]))
else:
setattr(lr, k, attributes[k])
plot_decision_regions(X=X, y=y, clf=lr, legend=2)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.show()
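# A quick sanity check (a sketch): the restored estimator should classify samples
# just like the estimator that was originally serialized.
print(lr.predict(X[:5]))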
"""
Explanation: Finally, we just need to initialize a default LogisticRegression estimator, feed it the desired parameters via the set_params method, and reassign the other attributes using Python's built-in setattr (don't forget to recast the Python lists to NumPy arrays, though!):
End of explanation
"""
|
Luke035/dlnd-lessons
|
into-to-tflearn/TFLearn_Sentiment_Analysis.ipynb
|
mit
|
import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
"""
Explanation: Sentiment analysis with TFLearn
In this notebook, we'll continue Andrew Trask's work by building a network for sentiment analysis on the movie review data. Instead of a network written with Numpy, we'll be using TFLearn, a high-level library built on top of TensorFlow. TFLearn makes it simpler to build networks just by defining the layers. It takes care of most of the details for you.
We'll start off by importing all the modules we'll need, then load and prepare the data.
End of explanation
"""
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
reviews.head()
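# If the raw text were not already lower-cased, a pandas one-liner like the following would
# normalize it (a sketch; it is effectively a no-op on this preprocessed data).
reviews[0] = reviews[0].str.lower()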
"""
Explanation: Preparing the data
Following along with Andrew, our goal here is to convert our reviews into word vectors. The word vectors will have elements representing words in the total vocabulary. If the second position represents the word 'the', for each review we'll count up the number of times 'the' appears in the text and set the second position to that count. I'll show you examples as we build the input data from the reviews data. Check out Andrew's notebook and video for more about this.
Read the data
Use the pandas library to read the reviews and positive/negative labels from comma-separated files. The data we're using has already been preprocessed a bit and we know it uses only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like The, the, and THE, all the same way.
End of explanation
"""
from collections import Counter
#Create the counter
total_counts = Counter()
#iter every row
for idx, row in reviews.iterrows():
#Review is contained in 0 position of the row (first column)
for word in row[0].split(' '):
total_counts[word] += 1
print("Total words in data set: ", len(total_counts))
total_counts.most_common()
"""
Explanation: Counting word frequency
To start off we'll need to count how often each word appears in the data. We'll use this count to create a vocabulary we'll use to encode the review data. This resulting count is known as a bag of words. We'll use it to select our vocabulary and build the word vectors. You should have seen how to do this in Andrew's lesson. Try to implement it here using the Counter class.
Exercise: Create the bag of words from the reviews data and assign it to total_counts. The reviews are stored in the reviews Pandas DataFrame. If you want the reviews as a Numpy array, use reviews.values. You can iterate through the rows in the DataFrame with for idx, row in reviews.iterrows(): (documentation). When you break up the reviews into words, use .split(' ') instead of .split() so your results match ours.
End of explanation
"""
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
"""
Explanation: Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words.
End of explanation
"""
print(vocab[-1], ': ', total_counts[vocab[-1]])
"""
Explanation: What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
End of explanation
"""
## create the word-to-index dictionary: the first word in vocab gets index 0, the second index 1, and so on
word2idx = {word: i for i, word in enumerate(vocab)}
word2idx['the']
"""
Explanation: The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
Note: When you run, you may see a different word from the one shown above, but it will also have the value 30. That's because there are many words tied for that number of counts, and the Counter class does not guarantee which one will be returned in the case of a tie.
Now for each review in the data, we'll make a word vector. First we need to make a mapping of word to index, pretty easy to do with a dictionary comprehension.
Exercise: Create a dictionary called word2idx that maps each word in the vocabulary to an index. The first word in vocab has index 0, the second word has index 1, and so on.
End of explanation
"""
def text_to_vector(text):
    # Start from a zero vector the length of the vocabulary
    word_vector = np.zeros(len(word2idx))
    for word in text.split(' '):
        index = word2idx.get(word, None)
        if index is not None:
            # Count every occurrence of a known word
            word_vector[index] += 1
    return word_vector
"""
Explanation: Text to vector function
Now we can write a function that converts some text to a word vector. The function will take a string of words as input and return a vector with the words counted up. Here's the general algorithm to do this:
Initialize the word vector with np.zeros, it should be the length of the vocabulary.
Split the input string of text into a list of words with .split(' '). Again, if you call .split() instead, you'll get slightly different results than what we show here.
For each word in that list, increment the element in the index associated with that word, which you get from word2idx.
Note: Since not all words are in the vocab dictionary, you'll get a key error if you run into one of those words. You can use the .get method of the word2idx dictionary to specify a default return value for missing keys. For example, word2idx.get(word, None) returns None if word doesn't exist in the dictionary.
End of explanation
"""
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
"""
Explanation: If you do this right, the following code should return
```
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
array([0, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])
```
End of explanation
"""
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
"""
Explanation: Now, run through our entire review data set and convert each review to a word vector.
End of explanation
"""
Y = (labels=='positive').astype(np.int_)
records = len(labels)
shuffle = np.arange(records)
np.random.shuffle(shuffle)
test_fraction = 0.9
train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):]
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split].reshape((len(Y.values[train_split]),)), 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split].reshape((len(Y.values[test_split]),)), 2)
trainY
"""
Explanation: Train, Validation, Test sets
Now that we have the word_vectors, we're ready to split our data into train, validation, and test sets. Remember that we train on the train data, use the validation data to set the hyperparameters, and at the very end measure the network performance on the test data. Here we're using the function to_categorical from TFLearn to reshape the target data so that we'll have two output units and can classify with a softmax activation function. We actually won't be creating the validation set here, TFLearn will do that for us later.
End of explanation
"""
# Network building
def build_model():
# This resets all parameters and variables, leave this here
tf.reset_default_graph()
#### Your code ####
#input layer
net = tflearn.input_data([None, len(word2idx)])
#hidden layer
net = tflearn.fully_connected(net, n_units=200, activation='ReLU')
net = tflearn.fully_connected(net, n_units=25, activation='ReLU')
#output layer
net = tflearn.fully_connected(net, n_units=2, activation='softmax')
#training
net = tflearn.regression(net,
optimizer='sgd',
learning_rate=0.1,
loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=3, tensorboard_dir='model_dir')
return model
"""
Explanation: Building the network
TFLearn lets you build the network by defining the layers.
Input layer
For the input layer, you just need to tell it how many units you have. For example,
net = tflearn.input_data([None, 100])
would create a network with 100 input units. The first element in the list, None in this case, sets the batch size. Setting it to None here leaves it at the default batch size.
The number of inputs to your network needs to match the size of your data. For this example, we're using 10000 element long vectors to encode our input data, so we need 10000 input units.
Adding layers
To add new hidden layers, you use
net = tflearn.fully_connected(net, n_units, activation='ReLU')
This adds a fully connected layer where every unit in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call. It's telling the network to use the output of the previous layer as the input to this layer. You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeatedly calling net = tflearn.fully_connected(net, n_units).
Output layer
The last layer you add is used as the output layer. Therefore, you need to set the number of units to match the target data. In this case we are predicting two classes, positive or negative sentiment. You also need to set the activation function so it's appropriate for your model. Again, we're trying to predict if some input data belongs to one of two classes, so we should use softmax.
net = tflearn.fully_connected(net, 2, activation='softmax')
Training
To set how you train the network, use
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
Again, this is passing in the network you've been building. The keywords:
optimizer sets the training method, here stochastic gradient descent
learning_rate is the learning rate
loss determines how the network error is calculated. In this example, with the categorical cross-entropy.
Finally you put all this together to create the model with tflearn.DNN(net). So it ends up looking something like
net = tflearn.input_data([None, 10]) # Input
net = tflearn.fully_connected(net, 5, activation='ReLU') # Hidden
net = tflearn.fully_connected(net, 2, activation='softmax') # Output
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
model = tflearn.DNN(net)
Exercise: Below in the build_model() function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc.
End of explanation
"""
model = build_model()
"""
Explanation: Initializing the model
Next we need to call the build_model() function to actually build the model. In my solution I haven't included any arguments to the function, but you can add arguments so you can change parameters in the model if you want.
Note: You might get a bunch of warnings here. TFLearn uses a lot of deprecated code in TensorFlow. Hopefully it gets updated to the new TensorFlow version soon.
End of explanation
"""
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=20)
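# Optionally persist the trained weights so training can be resumed or the model reused later
# (a sketch; 'sentiment_model.tfl' is an arbitrary filename, and model.load would restore it).
model.save('sentiment_model.tfl')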
"""
Explanation: Training the network
Now that we've constructed the network, saved as the variable model, we can fit it to the data. Here we use the model.fit method. You pass in the training features trainX and the training targets trainY. Below I set validation_set=0.1 which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the batch_size and n_epoch keywords, respectively. Below is the code to fit our the network to our word vectors.
You can rerun model.fit to train the network further if you think you can increase the validation accuracy. Remember, all hyperparameter adjustments must be done using the validation set. Only use the test set after you're completely done training the network.
End of explanation
"""
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
"""
Explanation: Testing
After you're satisfied with your hyperparameters, you can run the network on the test set to measure its performance. Remember, only do this after finalizing the hyperparameters.
End of explanation
"""
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1]
print('Sentence: {}'.format(sentence))
print('P(positive) = {:.3f} :'.format(positive_prob),
'Positive' if positive_prob > 0.5 else 'Negative')
sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)
sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
sentence = "Ace Ventura is the best movie ever! I wonder why Jim Carrey didn't won the Oscar"
test_sentence(sentence=sentence)
"""
Explanation: Try out your own text!
End of explanation
"""
|
taiducvu/NudityDetection
|
VNG_MODEL_EXPERIMENT.ipynb
|
apache-2.0
|
%matplotlib inline
import glob
import os
import numpy as np
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
import tensorflow as tf
raw_image = imread('model/datasets/nudity_dataset/3.jpg')
# Define a tensor placeholder to store an image
image = tf.placeholder("uint8", [None, None, 3])
image1 = tf.image.convert_image_dtype(image, dtype=tf.float32)
image2 = tf.image.central_crop(image1, central_fraction=0.875) # Crop the central region of raw image
model = tf.initialize_all_variables() # Important: initialize all variables
print raw_image.shape
with tf.Session() as session:
session.run(model)
result = session.run(image2, feed_dict={image: raw_image})
print result.dtype
print("The shape of result: ",result.shape)
print result.shape
## Draw image
fig = plt.figure()
a = fig.add_subplot(1,2,1)
plt.imshow(raw_image)
a = fig.add_subplot(1,2,2)
plt.imshow(result)
plt.show()
"""
Explanation: Stage 1: Preprocess VNG's data
In this stage, we read raw data from a given dataset. The dataset consists of variable-resolution images, while our system requires a constant input dimensionality. Therefore, we need to down-sample the images to a fixed resolution (270 x 270).
Examples of processing
In the code below, we crop the central region of the raw image.
End of explanation
"""
import numpy as np
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
import tensorflow as tf
raw_image = imread('model/datasets/nudity_dataset/3.jpg')
image = tf.placeholder("uint8", [None, None, 3])
image1 = tf.image.convert_image_dtype(image, dtype = tf.float32)
image1_t = tf.expand_dims(image1, 0)
image2 = tf.image.resize_bilinear(image1_t, [270, 270], align_corners=False)
image2 = tf.squeeze(image2, [0])
image3 = tf.sub(image2, 0.5)
image3 = tf.mul(image3, 2.0)
model = tf.initialize_all_variables()
with tf.Session() as session:
session.run(model)
result = session.run(image3, feed_dict={image:raw_image})
## Draw image
fig = plt.figure()
a = fig.add_subplot(1,2,1)
plt.imshow(raw_image)
a = fig.add_subplot(1,2,2)
plt.imshow(result)
plt.show()
"""
Explanation: In the code below, we resize the image to a fixed resolution and rescale its pixel values.
End of explanation
"""
%matplotlib inline
%load_ext autoreload
%autoreload 2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
from model.datasets.data import generate_standard_dataset
# Load Normal and Nude images into the train dataset
image_normal_ls, file_name_normal = generate_standard_dataset('/home/cpu11757/workspace/Nudity_Detection/src/model/datasets/train/normal')
nudity_ls, file_name_nudity = generate_standard_dataset('/home/cpu11757/workspace/Nudity_Detection/src/model/datasets/train/nude')
init_op = tf.initialize_all_variables()
labels = np.zeros(3000, dtype = np.uint)
database = []
with tf.Session() as session:
session.run(init_op)
# Start populating the filename queue
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
for i in range(3000):
#print i
if i % 2 == 0:
image = image_normal_ls.eval()
else:
image = nudity_ls.eval()
labels[i] = 1
database.append(image)
coord.request_stop()
database = np.array(database)
from Dataset.data import generate_standard_dataset
import numpy as np
import tensorflow as tf
img_nudity, _ = generate_standard_dataset('/media/taivu/Data/Project/Nudity_Detection/src/model/datasets/AdditionalDataset/vng/sex')
labels = np.ones(100, dtype = np.uint)
dataset = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
for i in range(100):
image = img_nudity.eval()
dataset.append(image)
coord.request_stop()
database = np.array(dataset)
print file_name_normal[1123]
"""
Explanation: 1.1 Create a standard training dataset
End of explanation
"""
import os
import tensorflow as tf
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to(data_dir, dataset, labels, name):
"""Converts a dataset to tfrecords."""
images = dataset
labels = labels
num_examples = dataset.shape[0]
rows, cols, depth = dataset[0].shape
filename = os.path.join(data_dir, name + '.tfrecords')
writer = tf.python_io.TFRecordWriter(filename)
for idx in range(num_examples):
image_raw = images[idx].tostring()
example = tf.train.Example(features = tf.train.Features(feature={
'height': _int64_feature(rows),
'width': _int64_feature(cols),
'depth': _int64_feature(depth),
'label': _int64_feature(int(labels[idx])),
'image_raw': _bytes_feature(image_raw)
}))
writer.write(example.SerializeToString())
writer.close()
convert_to('/home/taivu/workspace/NudityDetection/Dataset',
database, labels, 'nudity_test_set')
"""
Explanation: Generate tfrecords
End of explanation
"""
import tensorflow as tf
import matplotlib.pyplot as plt
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64)
})
image = tf.decode_raw(features['image_raw'], tf.float32)
image = tf.reshape(image,[34,34,3])
label = tf.cast(features['label'], tf.int32)
height = tf.cast(features['height'], tf.int32)
width = tf.cast(features['width'], tf.int32)
depth = tf.cast(features['depth'], tf.int32)
return image, label, height, width, depth
def data_input(data_dir, batch_size):
filename_queue = tf.train.string_input_producer([data_dir], num_epochs = None)
image, label, height, width, depth = read_and_decode(filename_queue)
images_batch, labels_batch = tf.train.shuffle_batch(
[image, label],
batch_size = batch_size,
capacity = 2000,
min_after_dequeue = 80
)
return images_batch, labels_batch
#filename_queue = tf.train.string_input_producer(['/home/cpu11757/workspace/Nudity_Detection/src/model/datasets/vng_dataset.tfrecords'], num_epochs = None)
#image, label, height,_,depth = read_and_decode(filename_queue)
img_batch, lb_batch = data_input('/home/cpu11757/workspace/Nudity_Detection/src/model/datasets/vng_dataset.tfrecords',500)
init_op = tf.initialize_all_variables()
fig = plt.figure()
with tf.Session() as session:
session.run(init_op)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
images, labels = session.run([img_batch, lb_batch])
coord.request_stop()
import matplotlib.pyplot as plt
fig = plt.figure()
plt.imshow(images[1])
print labels[0]
plt.show()
"""
Explanation: Read a batch of images
End of explanation
"""
import tensorflow as tf
f = ["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8"]
l = ["l1", "l2", "l3", "l4", "l5", "l6", "l7", "l8"]
fv = tf.constant(f)
lv = tf.constant(l)
rsq = tf.RandomShuffleQueue(10, 0, [tf.string, tf.string], shapes=[[],[]])
do_enqueues = rsq.enqueue_many([fv, lv])
gotf, gotl = rsq.dequeue()
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess,coord = coord)
sess.run(do_enqueues)
for i in xrange(2):
one_f, one_l = sess.run([gotf, gotl])
print "F: ", one_f, "L: ", one_l
coord.request_stop()
"""
Explanation: Example shuffle dataset
End of explanation
"""
import cPickle as pickle
dict1 = {'name':[],'id':[]}
dict2 = {'local':[], 'paza':[]}
#with open('test.p', 'wb') as fp:
# pickle.dump(dict1,fp)
# pickle.dump(dict2,fp)
with open('test.p', 'rb') as fp:
d1 = pickle.load(fp)
d2 = pickle.load(fp)
print len(d1)
print len(d2)
"""
Explanation: Example cPickle
End of explanation
"""
import tensorflow as tf
import numpy as np
a = tf.constant(np.array([[.1]]))
init = tf.initialize_all_variables()
with tf.Session() as session:
session.run(init)
b = session.run(tf.nn.softmax(a))
c = session.run(tf.nn.softmax_cross_entropy_with_logits([0.6, 0.4],[0,1]))
#print b
#print c
label = np.array([[0], [1], [1]])
idx = np.arange(3) * 2
print ('IDX')
print idx
labels_one_hot = np.zeros((3,2))
print ('labels_one_hot')
print labels_one_hot
labels_one_hot.flat[idx + label.ravel()] = 1
print ('IDX + label.ravel()')
print idx + label.ravel()
import tensorflow as tf
import matplotlib.pyplot as plt
from Dataset.data import preprocess_image
import numpy as np
filename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(
'/home/taivu/workspace/NudityDetection/Dataset/train/normal/*.jpg'))
img_reader = tf.WholeFileReader()
_, img_file = img_reader.read(filename_queue)
image = tf.image.decode_jpeg(img_file, 3)
image = preprocess_image(image, 34, 34)
images = tf.train.batch([image],
batch_size = 10,
capacity = 50,
name = 'input')
coord = tf.train.Coordinator()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
threads = tf.train.start_queue_runners(coord=coord)
result_img = sess.run([images])
result_img = np.array(result_img)
coord.request_stop()
coord.join(threads)
fig = plt.figure()
plt.imshow(result_img[0][1])
plt.show()
import tensorflow as tf
import numpy as np
from execute_model import evaluate
from Dataset.data import data_input
import matplotlib.pyplot as plt
dt, _ = data_input('/home/taivu/workspace/NudityDetection/Dataset/vng_dataset_validation.tfrecords', 10, False)
coord = tf.train.Coordinator()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
threads = tf.train.start_queue_runners(coord=coord)
result_img = sess.run([dt])
coord.request_stop()
coord.join(threads)
#fig = plt.figure()
result_img = np.array(result_img)
print result_img.shape
print result_img.dtype
#plt.show()
import tensorflow as tf
import numpy as np
from execute_model import evaluate
from Dataset.data import data_input
import matplotlib.pyplot as plt
dt = data_input('/home/taivu/workspace/NudityDetection/Dataset/vng_dataset_validation.tfrecords', 10, False, False)
coord = tf.train.Coordinator()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
threads = tf.train.start_queue_runners(coord=coord)
result_img = sess.run([dt])
coord.request_stop()
coord.join(threads)
#fig = plt.figure()
result_img = np.array(result_img)
print result_img.shape
print result_img.dtype
#plt.show()
import tensorflow as tf
import os
import glob
from Dataset.data import preprocess_image
import matplotlib.pyplot as plt
data_dir = '/home/taivu/workspace/AddPic'
filenames = []
for pathAndFilename in glob.iglob(os.path.join(data_dir, '*.jpg')):
filenames.append(pathAndFilename)
filename_queue = tf.train.string_input_producer(filenames, shuffle = None)
filename = filename_queue.dequeue()
# img_reader = tf.WholeFileReader()
img_file = tf.read_file(filename)
#_, img_file = img_reader.read(filename)
img = tf.image.decode_jpeg(img_file, 3)
img = preprocess_image(img, 34, 34)
filename_batch, img_batch = tf.train.batch([filename, img], batch_size = 3, capacity=200, name = 'input')
init = tf.global_variables_initializer()
coord =tf.train.Coordinator()
with tf.Session() as sess:
sess.run(init)
tf.train.start_queue_runners(sess, coord)
ls_img, ls_nf = sess.run([img_batch, filename_batch])
    fig = plt.figure()
    print ls_nf
    for i in range(3):
        a = fig.add_subplot(1, 3, i + 1)
        a.set_title('%d' % i)
        plt.imshow(ls_img[i])
plt.show()
coord.request_stop()
print ls_nf[0]
import tensorflow as tf
import numpy as np
a = [[1,2,3]]
b = [[4,5,6]]
np.column_stack((a,b))
import math
print int(math.ceil(float(5)/3))
"""
Explanation: Example reshape
End of explanation
"""
|