repo_name (stringlengths 6–77) | path (stringlengths 8–215) | license (stringclasses 15 values) | content (stringlengths 335–154k)
---|---|---|---|
jbwhit/coal-exploration
|
deliver/Coal prediction of production.ipynb
|
mit
|
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import explained_variance_score, r2_score, mean_squared_error
sns.set();
"""
Explanation: Coal production in mines 2013
by: Jonathan Whitmore
Abstract: We explore the cleaned 2013 US coal production data and fit a random forest regressor to predict (log) mine production from employee counts, labor hours, and categorical mine characteristics.
End of explanation
"""
df = pd.read_csv("../data/cleaned_coalpublic2013.csv", index_col='MSHA ID')
df[['Year', 'Mine_Name']].head()
"""
Explanation: Cleaned Data
We cleaned this data in the notebook stored in: deliver/Data_cleaning.ipynb
End of explanation
"""
features = ['Average_Employees',
'Labor_Hours',
]
categoricals = ['Mine_State',
'Mine_County',
'Mine_Status',
'Mine_Type',
'Company_Type',
'Operation_Type',
'Union_Code',
'Coal_Supply_Region',
]
target = 'log_production'
sns.set_context('poster')
fig, ax = plt.subplots(figsize=(14,8))
sns.violinplot(y="Company_Type", x="log_production", data=df,
split=True, inner="stick");
plt.tight_layout()
plt.savefig("../figures/Coal_prediction_company_type_vs_log_production.png")
dummy_categoricals = []
for categorical in categoricals:
# Avoid the dummy variable trap!
drop_var = sorted(df[categorical].unique())[-1]
temp_df = pd.get_dummies(df[categorical], prefix=categorical)
df = pd.concat([df, temp_df], axis=1)
temp_df.drop('_'.join([categorical, str(drop_var)]), axis=1, inplace=True)
dummy_categoricals += temp_df.columns.tolist()
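# For comparison, pandas can build a similar design matrix in one call (a sketch, not a
# drop-in replacement for the loop above: drop_first=True drops the *first* level of each
# categorical rather than the last one dropped above, and only object/category dtype
# columns are encoded):
dummies_alt = pd.get_dummies(df[categoricals], drop_first=True)
dummies_alt.shape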
"""
Explanation: Predict the Production of coal mines
End of explanation
"""
train, test = train_test_split(df, test_size=0.3)
rf = RandomForestRegressor(n_estimators=100, oob_score=True)
rf.fit(train[features + dummy_categoricals], train[target])
fig, ax = plt.subplots(figsize=(8,8))
sns.regplot(test[target], rf.predict(test[features + dummy_categoricals]), color='green')
plt.ylabel("Predicted production")
plt.xlim(0, 22)
plt.ylim(0, 22)
plt.tight_layout()
plt.savefig("../figures/Coal-production-RF-prediction.png")
predicted = rf.predict(test[features + dummy_categoricals])
print "R^2 score:", r2_score(test[target], predicted)
print "MSE:", mean_squared_error(test[target], predicted)
rf_importances = pd.DataFrame({'name':train[features + dummy_categoricals].columns,
'importance':rf.feature_importances_
}).sort_values(by='importance',
ascending=False).reset_index(drop=True)
rf_importances.head(5)
"""
Explanation: Random Forest Regressor
End of explanation
"""
|
snurk/meta-strains
|
scripts/others/clomial_genotypes.ipynb
|
mit
|
# Imports assumed by the cells below (not present in this dump); adjust as needed.
from collections import Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import hdbscan
from sklearn.decomposition import PCA
def draw_legend(class_colours, classes, right=False):
recs = []
for i in range(0, len(classes)):
recs.append(mpatches.Rectangle((0,0), 1, 1, fc=class_colours[i]))
if right:
plt.legend(recs, classes, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
plt.legend(recs, classes)
"""
Explanation: (A helper routine that draws a legend labeling the colors.)
End of explanation
"""
def plot_shared_snps(f_pca, f_0_pca, mask, names, draw_all=False):
combs = []
combs_nums = []
combinations = []
for m in mask:
if not draw_all:
if not (np.sum(m) > 1):
combinations.append(-1)
continue
cur = ""
for i in range(len(m)):
if m[i] == 1:
if cur != "":
cur += " + "
cur += names[i]
if cur == "":
cur = "none"
if cur not in combs:
combs.append(cur)
combs_nums.append(0)
combs_nums[combs.index(cur)] += 1
combinations.append(combs.index(cur))
df = pd.DataFrame({'pc1':f_pca[:, 0], 'pc2':f_pca[:, 1], 'combination':combinations})
df_valid = df.loc[df['combination'] != -1]
# reoder combinations by sizes of groups
order = sorted(zip(combs_nums, combs, range(len(combs))), reverse=True)
new_comb_order = [0] * (2 ** len(mask[0]))
new_comb_names = []
for i in range(len(order)):
old_order = order[i][2]
new_comb_order[old_order] = i
new_comb_names.append('{:5d}'.format(order[i][0]) + ' ' + order[i][1])
#new_comb_names.append(order[i][1])
for i in df_valid.index:
df_valid.loc[i, "combination"] = new_comb_order[df_valid.loc[i, "combination"]]
# Kelly’s 20 (except the first 2) Colours of Maximum Contrast
colors = ['yellow', 'purple', 'orange', '#96cde6', 'red', '#c0bd7f', '#5fa641', '#d485b2',
'#4277b6', '#df8461', '#463397', '#e1a11a', '#91218c', '#e8e948', '#7e1510',
'#92ae31', '#6f340d', '#d32b1e', '#2b3514']
color_palette = sns.color_palette(colors)
cluster_colors = [color_palette[x] for x in df_valid["combination"]]
plt.figure(figsize=(15, 8))
ax = plt.gca()
ax.set_aspect('equal')
plt.xlabel("PC 1")
plt.ylabel("PC 2")
plt.scatter(f_0_pca[:, 0], f_0_pca[:, 1], s=40, linewidth=0, c="grey", alpha=0.2);
plt.scatter(df_valid["pc1"], df_valid["pc2"], s=40, linewidth=0, c=cluster_colors);
#plt.title("[Sharon et al, 2013]")
draw_legend(color_palette, new_comb_names, right=True)
def clusterization(f, pca=True, num_of_comp=2):
if pca:
f_pca = PCA(n_components = num_of_comp).fit(f).transform(f)
cur_f = f_pca
else:
cur_f = f
f_pca = PCA(n_components = 2).fit(f).transform(f)
#N = int(len(f) * 0.005)
#print(N)
N = 100
clusterer = hdbscan.HDBSCAN(min_cluster_size=N, min_samples=1).fit(cur_f)
plt.figure(figsize=(15, 8))
ax = plt.gca()
ax.set_aspect('equal')
plt.xlabel("PC 1")
plt.ylabel("PC 2")
if pca:
plt.title("Clustering %s primary components" % num_of_comp)
else:
plt.title("Clustering initial frequencies")
color_palette = sns.color_palette("Set2", 20)
cluster_colors = [color_palette[x] if x >= 0
else (0.5, 0.5, 0.5)
for x in clusterer.labels_]
cluster_member_colors = [sns.desaturate(x, p) for x, p in
zip(cluster_colors, clusterer.probabilities_)]
plt.scatter(f_pca[:, 0], f_pca[:, 1], s=40, linewidth=0, c=cluster_member_colors, alpha=0.3);
sizes_of_classes = Counter(clusterer.labels_)
print(sizes_of_classes.get(-1, 0), "outliers\n")
labels = [str(x) + ' - ' + str(sizes_of_classes[x]) for x in range(max(clusterer.labels_)+1)]
draw_legend(color_palette, labels, right=True)
print("Medians in clusters:")
for i in range(max(clusterer.labels_)+1):
f_with_labels = f.copy()
f_with_labels = np.hstack([f_with_labels, clusterer.labels_.reshape(len(f_with_labels),1)])
col = f_with_labels[:, -1]
idx = (col == i)
print(i, np.round(np.median(f_with_labels[idx,:-1], axis=0), 2))
"""
Explanation: SNPs that occur in combinations of strains
End of explanation
"""
def filter_by_coverage(cur_r, bad_percent, bad_samples):
def filter_row(row):
num_of_samples = len(row)
valid = np.sum(np.array(([(min_coverage < row) & (row < max_coverage)])))
return num_of_samples - valid <= bad_samples
min_coverage = np.percentile(cur_r, bad_percent, axis=0)
max_coverage = np.percentile(cur_r, 100-bad_percent, axis=0)
good_coverage = np.array([filter_row(row) for row in cur_r])
return good_coverage
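# For example, the call filter_by_coverage(r_0, 15, 2) used below keeps a site only if at
# most 2 samples fall outside their per-sample 15th-85th percentile coverage band.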
r_0 = np.genfromtxt("infant_gut_pure_STRAIN1/matrices/R_all", dtype=int, delimiter=' ')
x_0 = np.genfromtxt("infant_gut_pure_STRAIN1/matrices/X_all", dtype=int, delimiter=' ')
print(len(r_0))
names = ["strain 1", "strain 3", "strain 4"]
r_0 = np.delete(r_0, [i for i in range(len(names))], axis=1)
x_0 = np.delete(x_0, [i for i in range(len(names))], axis=1)
Ncut = 6
print("Delete zero and almost zero profiles:")
good_ind = [i for i in range(np.shape(x_0)[0])
if not ((np.abs(r_0[i, :] - x_0[i, :]) <= Ncut).all() or (x_0[i, :] <= Ncut).all())]
print(len(good_ind), "remained")
x_0 = x_0[good_ind, :]
r_0 = r_0[good_ind, :]
good_coverage = filter_by_coverage(r_0, 15, 2)
r_0 = r_0[good_coverage, :]
x_0 = x_0[good_coverage, :]
print(len(r_0))
r = np.genfromtxt("infant_gut_pure_STRAIN1/matrices/R_filtered", dtype=int, delimiter=' ')
x = np.genfromtxt("infant_gut_pure_STRAIN1/matrices/X_filtered", dtype=int, delimiter=' ')
print("%s sites" % len(r))
mask = np.genfromtxt("infant_gut_pure_STRAIN1/clomial_results/genotypes_3.txt",
dtype=float, delimiter=' ', skip_header=1)
mask = np.delete(mask, [0], axis=1)
mask = np.rint(mask)
names = ["C1", "C2", "C3"]
"""
Explanation: Infant Gut, aligned to Strain 1
(No transformation is applied, since the reference is present in the data)
Strain frequencies in Infant Gut:
strain1 0.73 0.74 0.04 0.13 0.17 0.04 0.32 0.75 0.30 0.20 0.0
strain3 0.24 0.20 0.95 0.80 0.80 0.93 0.52 0.19 0.64 0.65 1.0
strain4 0.03 0.06 0.02 0.07 0.03 0.02 0.16 0.06 0.06 0.15 0.0
End of explanation
"""
f_0 = np.divide(x_0, r_0)
f_0_pca = PCA(n_components = 2).fit(f_0).transform(f_0)
f = np.divide(x, r)
f_pca = PCA(n_components = 2).fit(f_0).transform(f)
plot_shared_snps(f_pca, f_0_pca, mask, names, draw_all=True)
"""
Explanation: Plot the resulting features on the principal components.
End of explanation
"""
r_0 = np.genfromtxt("infant_gut/infant_gut_pure_without_ref/matrices/R_all", dtype=int, delimiter=' ')
x_0 = np.genfromtxt("infant_gut/infant_gut_pure_without_ref/matrices/X_all", dtype=int, delimiter=' ')
print(len(r_0))
names = ["strain 1", "strain 3", "strain 4"]
r_0 = np.delete(r_0, [i for i in range(len(names))], axis=1)
x_0 = np.delete(x_0, [i for i in range(len(names))], axis=1)
Ncut = 6
print("Delete zero and almost zero profiles:")
good_ind = [i for i in range(np.shape(x_0)[0])
if not ((np.abs(r_0[i, :] - x_0[i, :]) <= Ncut).all() or (x_0[i, :] <= Ncut).all())]
print(len(good_ind), "remained")
x_0 = x_0[good_ind, :]
r_0 = r_0[good_ind, :]
good_coverage = filter_by_coverage(r_0, 15, 2)
r_0 = r_0[good_coverage, :]
x_0 = x_0[good_coverage, :]
print(len(r_0))
r = np.genfromtxt("infant_gut/infant_gut_pure_without_ref/matrices/R_filtered", dtype=int, delimiter=' ')
x = np.genfromtxt("infant_gut/infant_gut_pure_without_ref/matrices/X_filtered", dtype=int, delimiter=' ')
r = np.delete(r, [0], axis=1)
r = r / 1.1
r = np.rint(r)
r = r.astype(int)
x = np.delete(x, [0], axis=1)
print("%s sites" % len(r))
mask = np.genfromtxt("infant_gut/infant_gut_pure_without_ref/clomial_results/genotypes_4.txt",
dtype=float, delimiter=' ', skip_header=1)
mask = np.delete(mask, [0, 1], axis=1)
mask = np.rint(mask)
names = ["C2", "C3", "C4"]
"""
Explanation: Infant Gut, aligned to the NCBI reference, with the reference mixed in
(The transformation is applied)
End of explanation
"""
f_0 = np.divide(x_0, r_0)
f_0_pca = PCA(n_components = 2).fit(f_0).transform(f_0)
f = np.divide(x, r)
f_pca = PCA(n_components = 2).fit(f_0).transform(f)
plot_shared_snps(f_pca, f_0_pca, mask, names, draw_all=True)
f_0 = normalize(x_0, r_0)
f_0_pca = PCA(n_components = 2).fit(f_0).transform(f_0)
f = normalize(x, r)
f_pca = PCA(n_components = 2).fit(f_0).transform(f)
plot_shared_snps(f_pca, f_0_pca, mask, names, draw_all=True)
"""
Explanation: Plot the resulting features on the principal components.
End of explanation
"""
|
amcdawes/QMlabs
|
Lab 7 - Time Evolution.ipynb
|
mit
|
import matplotlib.pyplot as plt
from numpy import sqrt,pi,arange,cos,sin
from qutip import *
%matplotlib inline
pz = Qobj([[1],[0]])
mz = Qobj([[0],[1]])
px = Qobj([[1/sqrt(2)],[1/sqrt(2)]])
mx = Qobj([[1/sqrt(2)],[-1/sqrt(2)]])
py = Qobj([[1/sqrt(2)],[1j/sqrt(2)]])
my = Qobj([[1/sqrt(2)],[-1j/sqrt(2)]])
Sx = 1/2.0*sigmax()
Sy = 1/2.0*sigmay()
Sz = 1/2.0*sigmaz()
"""
Explanation: Lab 7 - Time Evolution
Exploring time evolution of quantum states. Run the usual imports, and use the spin-1/2 states as previously defined:
End of explanation
"""
Omega = 5
H = -Omega*Sz
t = arange(0,4*pi/Omega,0.05)
"""
Explanation: Define the Hamiltonian:
$$H= - \mathbf{\mu}\cdot \mathbf{B} =-\gamma S_z B$$
$$\hat{H} = -\Omega \hat{S}_z$$
End of explanation
"""
expect_ops = [Sx,Sy,Sz]
result1 = sesolve(H, px, t, expect_ops)
expect_ops[0] # TODO get name of variable to use in label
labels = ['x','y','z']
for r,l in zip(result1.expect,labels):
plt.plot(result1.times*Omega/pi, r, label="$\langle S_%c \\rangle $" % l)
plt.xlabel("Time ($\Omega t/\pi$)", size=18)
plt.legend(fontsize=16) #TODO fix legend text size
"""
Explanation: The next line calls a Schrödinger equation solver (sesolve). Its arguments are the Hamiltonian, the starting state $\lvert+x\rangle$ (px), the time values, and a list of operators. sesolve returns many things, but the expect attribute is most useful; it gives the expectation values of the three operators in the operator list.
End of explanation
"""
result2 = sesolve(H, pz, t, [Sx,Sy,Sz])
for r,l in zip(result2.expect,labels):
plt.plot(result2.times*Omega/pi, r, label="$\langle S_%c \\rangle $" % l)
plt.xlabel("Time ($\Omega t/\pi$)", size=18)
plt.legend(fontsize=16) #TODO fix legend text size
"""
Explanation: Now what if the system starts in $\lvert+z\rangle$?
End of explanation
"""
psi = 1/sqrt(2)*tensor(pz, mz) + 1/sqrt(2)*tensor(mz, pz)
"""
Explanation: Spin-up stays spin-up (i.e. no precession)
Two particle systems:
$\lvert\psi\rangle = \frac{1}{\sqrt{2}} \lvert+z,-z\rangle + \frac{1}{\sqrt{2}} \lvert-z,+z\rangle$
Use the tensor QuTiP function to form multi-particle states
End of explanation
"""
omega = 5
H = -omega*tensor(Sz,Sz)
expect_op = tensor(mz,pz)*tensor(mz,pz).dag()
result3 = sesolve(H, psi, t, expect_op)
for r,l in zip(result3.expect,labels):
plt.plot(result3.times*omega/pi, r, label="$\langle -z,+z\\rangle$")
plt.xlabel("Time ($\Omega t/\pi$)", size=18)
plt.legend(fontsize=16) #TODO fix legend text size
"""
Explanation: Hamiltonian is the same for both particles so we use the tensor to form $\hat{H}$ from individual operators
End of explanation
"""
omega=2
H = -omega/sqrt(2)*(Sz + Sx)
t = arange(0,2*pi/omega,0.05)
result4 = sesolve(H, px, t, [Sx, Sy, Sz])
for r,l in zip(result4.expect,labels):
plt.plot(result4.times*omega/pi, r, label="$\langle S_%c \\rangle $" % l)
plt.xlabel("Time ($\Omega t/\pi$)", size=18)
plt.legend(fontsize=16) #TODO fix legend text size
"""
Explanation: The value is constant since the state is initially in an eigenstate of $\hat{H}$.
What if the magnetic field is not along an axis?
Notice the Hamiltonian has an $x$ and a $z$ component:
End of explanation
"""
sx, sy, sz = result4.expect
b = Bloch()
b.add_points([sx,sy,sz])
b.zlabel = ['$\\left|+z\\right>$', '$\\left|-z\\right>$']
b.view = [-45,20]
b.add_vectors([1/sqrt(2),0,1/sqrt(2)])
b.show()
"""
Explanation: Harder to interpret, so we'll use the Bloch sphere:
End of explanation
"""
omega0 = 2.0 * 2 * pi # pick a nice value for a frequency; note this is 2 Hz
omega1 = 0.25 * 2 * pi # 0.25 Hz (12.5% of omega0)
w = 2.0 * 2 * pi # the driving frequency
H0 = - omega0 * Sz # the first term in H
H1 = - omega1 * Sx # the second term in H
omegaR = sqrt((w - omega0)**2 + (omega1/2.0)**2)
t = arange(0,3.0 * 2 * pi / omegaR,0.02) # scale the time by omegaR, plot 3 units of 2pi/omegaR
args = [H0, H1, w] # parts of the Hamiltonian
def H1_coeff(t, args):
return cos(w * t)
H = [H0, [H1, H1_coeff]]
"""
Explanation: Time-dependent Hamiltonian:
We'll explore the parameters of a spin in a time-varying magnetic field. This system is relevant to nuclear magnetic resonance (NMR) which is used in chemistry and as Magnetic Resonance Imaging (MRI) in medicine.
Following Complement 9.A, the Hamiltonian is:
$$\hat{H}= - \Omega_0 \hat{S}_z - \Omega_1 \cos(\omega t)\hat{S}_x$$
We then solve for a certain amount of time after the state starts in $|\psi(0)\rangle = |+z\rangle$
We also use the definition of the Rabi frequency: $\Omega_R = \sqrt{(\omega - \Omega_0)^2 + (\Omega_1/2)^2}$ as in (9.A.28)
Note that the time span is 3 units of $\frac{2\pi}{\Omega_R}$. Leave the scaling in place, but to plot a longer time period, change 3.0 to something larger. This lets us match the units in Fig. 9.A.1.
End of explanation
"""
result5 = sesolve(H, pz, t, [Sx, Sy, Sz, mz*mz.dag()],args)
sx, sy, sz, Pmz = result5.expect
"""
Explanation: The next line calls a Schrödinger equation solver (sesolve). The arguments are the Hamiltonian, the starting state $\lvert+z\rangle$ (pz), the time values, a list of operators, and the args passed to the time-dependent coefficient function (H1_coeff). sesolve returns many things, but the expect attribute is most useful; it gives the expectation values of the four operators in the operator list. Notice the fourth operator is the $\lvert-z\rangle$ projection operator. Its expectation value is $P(\lvert-z\rangle,t)$
End of explanation
"""
b2 = Bloch()
b2.add_points([sx,sy,sz])
b2.zlabel = ['$\\left|+z\\right>$', '$\\left|-z\\right>$']
b2.show()
"""
Explanation: Look at the Bloch sphere for this system:
End of explanation
"""
plt.tick_params(labelsize=18)
plt.plot(result5.times*omegaR/pi,Pmz)
plt.xlabel("Time ($\Omega_R t/\pi$)", size=18)
plt.ylabel("$P(-z, t)$", size=18)
"""
Explanation: Make a plot analogous to Fig 9.A.1:
End of explanation
"""
omega0 = 1.0 * 2 * pi # pick a nice value for a frequency, note this is 1 Hz
omega1 = 0.05 * 2 * pi # 0.05 Hz (5% of omega0)
w = 1.0 * 2 * pi # the driving frequency
H0 = - omega0 * Sz # the first term in H
H1 = - omega1 * Sx # the second term in H
omegaR = sqrt((w - omega0)**2 + (omega1/2.0)**2)
t = arange(0,3.0 * 2 * pi / omegaR,0.05) # scale the time by omegaR, plot 3 units of 2pi/omegaR
def H1_coeff2(t, args): # this function calculates H at each time step t
if t < 2*pi/omegaR * 0.5: # only add the H1 piece for the first chunk of time.
coeff = cos(w * t)
else:
coeff = 0
return coeff
H = [H0, [H1, H1_coeff2]]
result6 = sesolve(H, pz, t, [Sx, Sy, Sz, mz*mz.dag()],args)
sx, sy, sz, Pz = result6.expect
plt.plot(result6.times,Pz)
plt.ylim(-0.1,1.1)
plt.xlim(-5,125)
plt.xlabel("Time ($\Omega_R t/\pi$)", size=18)
plt.ylabel("$P(-z, t)$", size=18)
"""
Explanation: Q) What happens in each unit of time ($\frac{2\pi}{\Omega_R}$)? Look at the plot of $P(-z,t)$ to interpret this. How is your figure different from the on in Fig. 9.A.1?
Q) How does the evolution change if you double $\Omega_0$?
Q) After doubling $\Omega_0$ what if you double the driving frequency ($\omega$) also? Interpret this observation in terms of Fig. 9.A.2. In practice, what experimental parameter changes $\Omega_0$?
Q) How does $\Omega_1$ influence the dynamics? (Be careful reading the plots since the units are scaled by $\Omega_R$).
Advanced topic: we can change the Hamiltonian so the applied field turns off at a certain time, and it is possible to get the spin to stay in a particular state. This is very useful in quantum optics where certain operations change the atomic state in a very specific way.
End of explanation
"""
|
ozorich/phys202-2015-work
|
assignments/assignment09/IntegrationEx02.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import integrate
"""
Explanation: Integration Exercise 2
Imports
End of explanation
"""
def integrand(x, a):
return 1.0/(x**2 + a**2)
def integral_approx(a):
# Use the args keyword argument to feed extra arguments to your integrand
I, e = integrate.quad(integrand, 0, np.inf, args=(a,))
return I
def integral_exact(a):
return 0.5*np.pi/a
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
"""
Explanation: Definite integrals
Here is a table of definite integrals. Many of these integrals have a number of parameters $a$, $b$, etc.
Find five of these integrals and perform the following steps:
Typeset the integral using LaTeX in a Markdown cell.
Define an integrand function that computes the value of the integrand.
Define an integral_approx function that uses scipy.integrate.quad to perform the integral.
Define an integral_exact function that computes the exact value of the integral.
Call and print the return value of integral_approx and integral_exact for one set of parameters.
Here is an example to show what your solutions should look like:
Example
Here is the integral I am performing:
$$ I_1 = \int_0^\infty \frac{dx}{x^2 + a^2} = \frac{\pi}{2a} $$
End of explanation
"""
def integrand(x,a,b):
return np.sin(a*x)/np.sinh(b*x)
def integrate_approx(a,b):
I,e=integrate.quad(integrand,0,np.inf, args=(a,b))
return I
def integrate_exact(a,b):
return np.pi/(2*b)*np.tanh(a*np.pi/(2*b))
print('Numerical:', integrate_approx(1.0,2.0))
print('Exact:', integrate_exact(1.0,2.0))
assert True # leave this cell to grade the above integral
"""
Explanation: Integral 1
Here is an integral from the hyperbolic subsection:
\begin{equation}
\int_{0}^{\infty} \frac{\sin ax}{\sinh bx} dx = \frac{\pi}{2b}\tanh \frac{a\pi}{2b}
\end{equation}
End of explanation
"""
def integrand(x,a,b):
return np.exp(-a*x)*np.cos(b*x)
def integrate_approx(a,b):
I,e=integrate.quad(integrand,0,np.inf, args=(a,b))
return I
def integrate_exact(a,b):
return a/(a**2+b**2)
print('Numerical:', integrate_approx(1.0,2.0))
print('Exact:', integrate_exact(1.0,2.0))
assert True # leave this cell to grade the above integral
"""
Explanation: Integral 2
Here is an integral from the exponential functions subsection:
\begin{equation}
\int_{0}^{\infty} e^{-ax} \cos bx \space dx = \frac{a}{a^{2}+b^{2}}
\end{equation}
End of explanation
"""
def integrand(x,p):
return (1-np.cos(p*x))/x**2
def integrate_approx(p):
I,e=integrate.quad(integrand,0,np.inf, args=(p))
return I
def integrate_exact(p):
return p*np.pi/2
print('Numerical:', integrate_approx(4.0))
print('Exact:', integrate_exact(4.0))
assert True # leave this cell to grade the above integral
"""
Explanation: Integral 3
Here is an integral from the trigonometric functions subsection:
\begin{equation}
\int_{0}^{\infty} \frac{1-\cos px}{x^{2}} dx = \frac{\pi p}{2}
\end{equation}
End of explanation
"""
def integrand(x,a,b):
return np.log(a**2+x**2)/(b**2+x**2)
def integrate_approx(a,b):
I,e=integrate.quad(integrand,0,np.inf, args=(a,b))
return I
def integrate_exact(a,b):
return np.pi/b*np.log(a+b)
print('Numerical:', integrate_approx(3.0,4.0))
print('Exact:', integrate_exact(3.0,4.0))
assert True # leave this cell to grade the above integral
"""
Explanation: Integral 4
Here is an integral from the logarithmic functions subsection:
\begin{equation}
\int_{0}^{\infty} \frac{\ln (a^{2}+x^{2})}{b^{2}+x^{2}} dx = \frac{\pi}{b}\ln(a+b), \quad a,b>0
\end{equation}
End of explanation
"""
def integrand(x,a,b):
return np.sqrt(a**2-x**2)
def integrate_approx(a,b):
I,e=integrate.quad(integrand,0,a, args=(a,b))
return I
def integrate_exact(a,b):
return np.pi*a**2/4
print('Numerical:', integrate_approx(1.0,2.0))
print('Exact:', integrate_exact(1.0,2.0))
assert True # leave this cell to grade the above integral
"""
Explanation: Integral 5
Here is an integral from the rational and irrational functions subsection:
\begin{equation}
\int_{0}^{a} \sqrt{a^{2}-x^{2}} dx = \frac{\pi a^{2}}{4}
\end{equation}
End of explanation
"""
|
palandatarxcom/sklearn_tutorial_cn
|
notebooks/03.1-Classification-SVMs.ipynb
|
bsd-3-clause
|
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# use some of seaborn's default settings
import seaborn as sns; sns.set()
"""
Explanation: This notebook was compiled by Jake Vanderplas. The source code and license file are on GitHub. The Chinese translation was produced by 派兰数据 on the 派兰 big-data analysis platform, and its source code is also on GitHub.
In-Depth Supervised Learning: Support Vector Machines
We have already introduced supervised learning. There are many supervised learning algorithms; here we explore in depth one of the most powerful and most interesting of them: Support Vector Machines (SVMs).
End of explanation
"""
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=50, centers=2,
random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring');
"""
Explanation: Support Vector Machines
Support Vector Machines (SVMs) are among the most powerful supervised learning algorithms for classification or regression. An SVM is a discriminative classifier: it draws a dividing line through a collection of data.
Let's look at a simple example of SVM classification. First we need to create a dataset:
End of explanation
"""
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b in [(1, 0.65), (0.5, 1.6), (-0.2, 2.9)]:
# draw a candidate separating line
plt.plot(xfit, m * xfit + b, '-k')
plt.xlim(-1, 3.5);
"""
Explanation: A discriminative classifier tries to draw a line between the two sets of data. We immediately face a problem: the position of this line is hard to pin down. For example, we can find many candidate lines that perfectly separate the two groups:
End of explanation
"""
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
yfit = m * xfit + b
# draw the separating line
plt.plot(xfit, yfit, '-k')
# shade the margin region on both sides of the line
plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none', color='#AAAAAA', alpha=0.4)
plt.xlim(-1, 3.5);
"""
Explanation: The figure above shows three very different separating lines, each of which divides the dataset perfectly. A new data point could be assigned a completely different label depending on which line you choose.
How can we improve on this?
Support Vector Machines: Maximizing the Margin
Support vector machines offer one way to resolve this. What an SVM does is not just draw a line; it also considers the 'region' on either side of that line. Here is an example of what that region looks like:
End of explanation
"""
from sklearn.svm import SVC # "Support Vector Classifier"
clf = SVC(kernel='linear')
clf.fit(X, y)
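# Once fitted, the classifier can label new points; a quick illustration on a made-up point
# (the coordinates here are arbitrary, chosen only to show the call):
print(clf.predict([[2.0, 2.0]]))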
"""
Explanation: Notice that if we want the region around the line to be as wide as possible, the middle line is the best choice. This is exactly the defining property of a support vector machine: it optimizes the separating line so that the margin, the perpendicular distance from the line to the data, is maximized.
Fitting a support vector machine
Now we fit our support vector machine model to these points. The mathematical details of how the model is fit are interesting, but we'll let you learn about them elsewhere. Here, we'll have you use scikit-learn's black-box algorithm to accomplish the task above.
End of explanation
"""
def plot_svc_decision_function(clf, ax=None):
"""绘制一个 2D SVC 的决策函数"""
if ax is None:
ax = plt.gca()
x = np.linspace(plt.xlim()[0], plt.xlim()[1], 30)
y = np.linspace(plt.ylim()[0], plt.ylim()[1], 30)
Y, X = np.meshgrid(y, x)
P = np.zeros_like(X)
for i, xi in enumerate(x):
for j, yj in enumerate(y):
P[i, j] = clf.decision_function([[xi, yj]])
# plot the decision boundary and margins
ax.contour(X, Y, P, colors='k',
levels=[-1, 0, 1], alpha=0.5,
linestyles=['--', '-', '--'])
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
"""
Explanation: To get a better idea of what is happening, let's create a simple convenience function that plots the decision boundary produced by the SVM:
End of explanation
"""
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, alpha=0.3);
"""
Explanation: Notice that the dashed lines touch a few of the points: these points are the pivotal pieces of this fit, and are known as the support vectors.
In scikit-learn, the support vectors are stored in the classifier's support_vectors_ attribute:
End of explanation
"""
from ipywidgets import interact
def plot_svm(N=10):
X, y = make_blobs(n_samples=200, centers=2,
random_state=0, cluster_std=0.60)
X = X[:N]
y = y[:N]
clf = SVC(kernel='linear')
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plt.xlim(-1, 4)
plt.ylim(-1, 6)
plot_svc_decision_function(clf, plt.gca())
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, alpha=0.3)
interact(plot_svm, N=[10, 200], kernel='linear');
"""
Explanation: Let's use IPython's interact functionality to explore how the distribution of points affects the support vectors and the resulting discriminative model.
(This functionality requires IPython 2.0+, and will not work in a static view.)
End of explanation
"""
from sklearn.datasets import make_circles
X, y = make_circles(100, factor=.1, noise=.1)
clf = SVC(kernel='linear').fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
"""
Explanation: Notice that only the support vectors influence the SVM: if you move any point that is not a support vector, the classification result does not change, as long as the point does not cross the margin.
Going further: Kernel Methods
Things get really interesting when SVMs are combined with kernels. To explain what a 'kernel' is, let's look at some data that cannot be separated linearly.
End of explanation
"""
r = np.exp(-(X[:, 0] ** 2 + X[:, 1] ** 2))
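# i.e. r(x, y) = exp(-(x**2 + y**2)): a radial basis function centered at the origin, close
# to 1 for the inner circle and roughly exp(-1) for the outer one.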
"""
Explanation: Clearly, no linear separation can split this data apart. We can change that by applying a kernel method, which is a way of transforming the input data.
For example, we can use a simple radial basis function
End of explanation
"""
from mpl_toolkits import mplot3d
def plot_3D(elev=30, azim=30):
ax = plt.subplot(projection='3d')
ax.scatter3D(X[:, 0], X[:, 1], r, c=y, s=50, cmap='spring')
# ax.view_init(elev=elev, azim=azim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('r')
# interact(plot_3D, elev=[-90, 90], azip=(-180, 180));
plot_3D()
"""
Explanation: If we plot it together with the data, we can see its effect:
End of explanation
"""
clf = SVC(kernel='rbf')
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, alpha=0.3);
"""
Explanation: We can see that this added dimension makes the data linearly separable! This is a relatively simple kernel; SVMs have far more sophisticated, integrated versions of this approach available. It can be used by setting kernel='rbf', where rbf is short for radial basis function:
End of explanation
"""
|
IST256/learn-python
|
content/lessons/13-Visualization/Slides.ipynb
|
mit
|
import pandas as pd
x = [ { 'a' :2, 'b' : 'x', 'c' : 10},
{ 'a' :4, 'b' : 'y', 'c' : 3},
{ 'a' :1, 'b' : 'x', 'c' : 6} ]
y = pd.DataFrame(x)
"""
Explanation: IST256 Lesson 13
Visualizations
Zybook Ch10
Links
Participation: https://poll.ist256.com
Zoom Chat!
Agenda
Last Lecture... but we ain't gone!
Go over the homework
Project
Introduction to Data Visualization
Matplotlib Basics
Plot.ly Basics
Folium Basics
Project P2 Deliverable
Your rough draft is due Tuesday 5/11.
You must make a 10 minute appointment with your SG prof between 5/12 and 5/14 to go over your project and get verbal feedback.
Take notes at the meeting; we will expect you to take our feedback into consideration for your final submission.
Exam 4
Covers Lessons 11,12,13.
Issued on Monday 5/10 During our LARGE GROUP.
You are expected to sign into Zoom at 3:45pm.
Exam starts at 4pm EDT. There will be an exam password posted.
Students in Alex Smith's online section who cannot take the exam at 4PM EDT will need to arrange another time within 24 hours of 4PM EDT on 5/10.
FEQT (Future Exam Questions Training) 1
Only show part of the data frame where column b is an 'x' ?
End of explanation
"""
import pandas as pd
x = [ { 'a' :2, 'b' : 'x', 'c' : 10},
{ 'a' :4, 'b' : 'y', 'c' : 3},
{ 'a' :1, 'b' : 'x', 'c' : 6} ]
y = pd.DataFrame(x)
y[ ['a','b'] ]
"""
Explanation: A. y[ ['b'] == 'x' ]
B. y[ y['b'] == 'x' ]
C. y['b'] == 'x'
D. y[ y['b'] == y['x'] ]
Vote Now: https://poll.ist256.com
FEQT (Future Exam Questions Training) 2
Only show columns a and c ?
End of explanation
"""
import pandas as pd
x = [ { 'a' :2, 'b' : 'x', 'c' : 10},
{ 'a' :4, 'b' : 'y', 'c' : 3},
{ 'a' :1, 'b' : 'x', 'c' : 6} ]
y = pd.DataFrame(x)
for z in y.to_records():
if z['a']>2:
print(z['c'])
"""
Explanation: A. y[ 'a','c' ]
B. y[ 'ac' ]
C. y[ ['a'],['c'] ]
D. y[ ['a','c'] ]
Vote Now: https://poll.ist256.com
FEQT (Future Exam Questions Training) 3
What is the output of the following code:
End of explanation
"""
import pandas as pd
x = [ { 'a' : {'b' : 'x', 'name' : 'mike'} , 'c' : 10},
{ 'a' : {'b' : 'y'}, 'c' : 3},
{ 'a' : {'b' : 'x'}, 'c' : 6} ]
y = pd.json_normalize(x)
y
"""
Explanation: A. 10
B. 3
C. 6
D. 4
Vote Now: https://poll.ist256.com
FEQT (Future Exam Questions Training) 4
Which code will output the 2nd row in this data frame?
End of explanation
"""
|
calebmadrigal/radio-hacking-scripts
|
auto_crop.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy
#import scipy.io.wavfile
def setup_graph(title='', x_label='', y_label='', fig_size=None):
fig = plt.figure()
if fig_size != None:
fig.set_size_inches(fig_size[0], fig_size[1])
ax = fig.add_subplot(111)
ax.set_title(title)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
"""
Explanation: Auto Crop signal
Usually, when recording either radio waves or sound waves, you start recording, the signal comes, and then, as the slow human being that you are (slow relative to computers), you stop recording a while after the signal transmission completed.
But usually, you just want the signal and don't care about the surrounding dead air/noise.
To this end, let's develop an algorithm to auto-crop the signal of interest. We will do this by:
* breaking the signal into 16 chunks
* calculating the power in each chunk
* finding where the largest increase in power happens
* finding where the largest decrease in power happens
saving only the portion between the largest power increase and decrease
Since this is mostly meant for use with radio signals, we will experiment with a radio signal in the PCM raw wave format - which is essentially the same as a WAV file without the headers.
Graphing boilerplate code
End of explanation
"""
def auto_crop_signal(signal_data, margin_percent=1, num_chunks=16):
""" Break the signal into chunks, and find the chunk there is the largest
jump from quiet to loud (start index), and the largest jump from
loud to quiet (stop index). """
chunk_size = int(len(signal_data) / num_chunks)
largest_increase_index = 0
largest_increase_size = -999999999
largest_decrease_index = chunk_size * num_chunks
largest_decrease_size = 999999999
last_chunk_sum = sum([abs(i) for i in signal_data[0:chunk_size]])
for chunk_start in range(0, len(signal_data), chunk_size):
chunk = signal_data[chunk_start:chunk_start+chunk_size]
# Don't consider the last chunk if it's not a full chunk,
# since that will likely yield the smallest sum
if len(chunk) < chunk_size:
continue
chunk_sum = sum([abs(i) for i in chunk])
chunk_diff = chunk_sum - last_chunk_sum
last_chunk_sum = chunk_sum
if chunk_diff > largest_increase_size:
largest_increase_size = chunk_diff
largest_increase_index = chunk_start
if chunk_diff < largest_decrease_size:
largest_decrease_size = chunk_diff
largest_decrease_index = chunk_start
margin = int((largest_decrease_index - largest_increase_index) * (margin_percent / 100))
return signal_data[largest_increase_index-margin:largest_decrease_index+margin]
"""
Explanation: Auto crop algorithm
End of explanation
"""
in_signal = scipy.fromfile(open('raw_signal_to_crop.pcm'), dtype=scipy.complex64)
plt.plot(in_signal)
cropped_sig = auto_crop_signal(in_signal)
plt.plot(cropped_sig)
"""
Explanation: Read in PCM file
End of explanation
"""
def write_pcm_file(signal_data, file_path):
np.array(signal_data).astype('complex64').tofile(file_path)
write_pcm_file(cropped_sig, 'cropped_sig.pcm')
"""
Explanation: Write file
End of explanation
"""
read_cropped = scipy.fromfile(open('cropped_sig.pcm'), dtype=scipy.complex64)
plt.plot(read_cropped)
"""
Explanation: Verify write worked by reading back in
End of explanation
"""
|
Quadrocube/rep
|
howto/03-howto-gridsearch(Higgs).ipynb
|
apache-2.0
|
%pylab inline
"""
Explanation: About
This notebook demonstrates several additional tools for optimizing a classification model, provided by the Reproducible experiment platform (REP) package:
grid search for the best classifier hyperparameters
different optimization algorithms
different scoring models (optimization of an arbitrary figure of merit)
End of explanation
"""
import numpy, pandas
from rep.utils import train_test_split
from sklearn.metrics import roc_auc_score
data = pandas.read_csv('toy_datasets/Higgs.csv', sep='\t')
labels = data['Label'].values
labels = labels == 's'
sample_weight = data['Weight'].values
train_data, test_data, train_labels, test_labels, train_weight, test_weight = train_test_split(data, labels, sample_weight)
list(data.columns)
"""
Explanation: Loading data for Higgs Boson Challenge
End of explanation
"""
features = list(set(data.columns) - set(['Weight', 'Label', 'EventId']))
"""
Explanation: Variables used in training
End of explanation
"""
from rep.report import metrics
def AMS(s, b):
br = 10.0
radicand = 2 *( (s+b+br) * numpy.log (1.0 + s/(b+br)) - s)
return numpy.sqrt(radicand)
optimal_AMS = metrics.OptimalMetric(AMS, expected_s=692., expected_b=410999.)
"""
Explanation: Metric definition
In the Higgs challenge the aim is to maximize the AMS metric. <br />
To measure the quality one should choose not only a classifier, but also an optimal threshold,
where the maximal value of AMS is achieved.
Such metrics (which require a threshold) are called threshold-based.
rep.utils contains the class OptimalMetric, which computes the maximal value of a threshold-based metric (and may itself be used as a metric).
Use this class to generate the metric and use it in the grid search.
Prepare quality metric
First we define the AMS metric, and utils.OptimalMetric generates a threshold-optimized version of it:
End of explanation
"""
probs_rand = numpy.ndarray((1000, 2))
probs_rand[:, 1] = numpy.random.random(1000)
probs_rand[:, 0] = 1 - probs_rand[:, 1]
labels_rand = numpy.random.randint(0, high=2, size=1000)
optimal_AMS.plot_vs_cut(labels_rand, probs_rand)
"""
Explanation: Compute threshold vs metric quality
random predictions for signal and background were used here
End of explanation
"""
optimal_AMS(labels_rand, probs_rand)
"""
Explanation: The best quality
End of explanation
"""
from rep.metaml import GridOptimalSearchCV
from rep.metaml.gridsearch import RandomParameterOptimizer, FoldingScorer
from rep.estimators import SklearnClassifier
from sklearn.ensemble import AdaBoostClassifier
from collections import OrderedDict
"""
Explanation: Hyperparameters optimization algorithms
AbstractParameterGenerator is an abstract class to generate new points, where the scorer function will be computed. It is used in the grid search to get a new set of parameters on which to train the classifier.
Properties:
best_params_ - return the best grid point
best_score_ - return the best quality
print_results(self, reorder=True) - print all points with corresponding quality
The following algorithms inherit from AbstractParameterGenerator:
RandomParameterOptimizer - generates random point in parameters space
RegressionParameterOptimizer - generate next point using regression algorithm, which was trained on previous results
SubgridParameterOptimizer - uses subgrids if the grid is huge, plus an annealing-like technique (see REP for details)
Grid search
GridOptimalSearchCV implements an optimal search over specified parameter values for an estimator. Its parameters are:
estimator - object of a type that implements the "fit" and "predict" methods
params_generator - generator for the grid search algorithm (AbstractParameterGenerator)
scorer - an object which implements a call method with kwargs: "base_estimator", "params", "X", "y", "sample_weight"
Important members are "fit", "fit_best_estimator"
End of explanation
"""
# define grid parameters
grid_param = OrderedDict()
grid_param['n_estimators'] = [10, 20, 30]
grid_param['learning_rate'] = [0.1, 0.05]
# use random hyperparameter optimization algorithm
generator = RandomParameterOptimizer(grid_param)
# define folding scorer
scorer = FoldingScorer(optimal_AMS, folds=4, fold_checks=2)
grid_sk = GridOptimalSearchCV(SklearnClassifier(AdaBoostClassifier(), features=features), generator, scorer)
grid_sk.fit(data, labels)
"""
Explanation: Grid search with folding scorer
FoldingScorer provides folding cross-validation on the training dataset:
folds - k, the number of folds (train on k-1 folds, test on 1 fold)
fold_checks - the number of times the model will be tested
score_function - function to calculate quality with interface "function(y_true, proba, sample_weight=None)"
NOTE: if fold_checks > 1, the quality is averaged over tests.
End of explanation
"""
grid_sk.generator.best_params_
"""
Explanation: Print best parameters
End of explanation
"""
grid_sk.generator.print_results()
"""
Explanation: Print all qualities for used parameters
End of explanation
"""
def normed_weight(y, weight):
weight[y == 1] *= sum(weight[y == 0]) / sum(weight[y == 1])
return weight
"""
Explanation: Grid search with user-defined scorer
You can define your own scorer with specific logic in a simple way. The scorer just needs the following signature:
scorer(base_estimator, params, X, y, sample_weight)
Prepare reweight function
End of explanation
"""
from sklearn import clone
def generate_scorer(test, labels, test_weight=None):
""" Generate scorer which calculate metric on fixed test dataset """
def custom(base_estimator, params, X, y, sample_weight=None):
cl = clone(base_estimator)
cl.set_params(**params)
cl.fit(X, y)
res = optimal_AMS(labels, cl.predict_proba(test), sample_weight=test_weight)
return res
return custom
# define grid parameters
grid_param = OrderedDict()
grid_param['n_estimators'] = [10, 20, 30]
grid_param['learning_rate'] = [0.1, 0.05]
grid_param['features'] = [features[:5], features[:10]]
# define random hyperparameter optimization algorithm
generator = RandomParameterOptimizer(grid_param)
# define specific scorer
scorer = generate_scorer(test_data, test_labels, test_weight)
grid = GridOptimalSearchCV(SklearnClassifier(clf=AdaBoostClassifier(), features=features), generator, scorer)
grid.fit(train_data, train_labels, train_weight)
len(train_data), len(test_data)
"""
Explanation: Define scorer, which will be train model on all dataset and test it on the pre-defined dataset
End of explanation
"""
grid.generator.print_results()
"""
Explanation: Print all tried combinations of parameters and quality
End of explanation
"""
from rep.report import ClassificationReport
from rep.data.storage import LabeledDataStorage
lds = LabeledDataStorage(test_data, test_labels, test_weight)
classifiers = {'grid_fold': grid_sk.fit_best_estimator(train_data[features], train_labels, train_weight),
'grid_test_dataset': grid.fit_best_estimator(train_data[features], train_labels, train_weight) }
report = ClassificationReport(classifiers, lds)
"""
Explanation: Results comparison
End of explanation
"""
report.roc().plot()
"""
Explanation: ROCs
End of explanation
"""
report.metrics_vs_cut(AMS, metric_label='AMS').plot()
"""
Explanation: Metric
End of explanation
"""
|
vaibhavi-r/CSE-415
|
Assignment 7 - Part A.ipynb
|
mit
|
import re
from time import time
import string
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pprint import pprint
#Sklearn Imports
from sklearn import metrics
from sklearn.datasets import fetch_20newsgroups
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_auc_score, auc
from nltk import PorterStemmer
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
import nltk
nltk.download('stopwords') #download the latest stopwords
"""
Explanation: Assignment 7 - Part A
Student UW NetID : vaibhavi@uw.edu
Student Name : Vaibhavi Rangarajan
Import Libraries
End of explanation
"""
all_newsgroups= fetch_20newsgroups()
pprint(list(all_newsgroups.target_names))
"""
Explanation: Load Dataset
End of explanation
"""
cats = ['sci.med' , 'rec.motorcycles']
newsgroups_train = fetch_20newsgroups(subset='train', categories=cats, remove=('headers', 'footers', 'quotes'))
newsgroups_test = fetch_20newsgroups(subset='test', categories=cats, remove=('headers', 'footers', 'quotes'))
print("Categories to classify\n-----------------------")
print(list(newsgroups_train.target_names))
print("TRAIN DATA\n---------------")
print("Data Type:", type(newsgroups_train))
print("%d documents" % len(newsgroups_train.filenames))
print("%d categories" % len(newsgroups_train.target_names))
print("X shape :", newsgroups_train.filenames.shape)
print("Y shape :",newsgroups_train.target.shape)
print("Y head :", newsgroups_train.target[:10])
print("TEST DATA\n---------------")
print("Data Type:", type(newsgroups_test))
print("%d documents" % len(newsgroups_test.filenames))
print("%d categories" % len(newsgroups_test.target_names))
print("X shape :", newsgroups_test.filenames.shape)
print("Y shape :",newsgroups_test.target.shape)
print("Y head :", newsgroups_test.target[:10])
"""
Explanation: Create Train and Test Data [from categories-medical and automobiles]
End of explanation
"""
print(newsgroups_train.data[0])
print(newsgroups_test.data[0])
print(type(newsgroups_test.data))
print(type(newsgroups_test.data[0]))
"""
Explanation: Explore the data
End of explanation
"""
train_labels = newsgroups_train.target #0, 1 array
#print(train_labels)
test_labels = newsgroups_test.target
#print(test_labels)
RE_PREPROCESS = r'\W+|\d+' #the regular expressions that matches all non-characters
#train_corpus = np.array( [re.sub(RE_PREPROCESS, ' ', text).lower() for text in df_train.jobDescription.values])
#test_corpus = np.array( [re.sub(RE_PREPROCESS, ' ', text).lower() for text in df_test.jobDescription.values])
labels = np.append(train_labels, test_labels)
"""
Explanation: Pre-process Data
End of explanation
"""
vectorizer = TfidfVectorizer()
vectors_train = vectorizer.fit_transform(newsgroups_train.data)
vectors_train.shape
vectors_train.nnz / float(vectors_train.shape[0])
vectors_test = vectorizer.transform(newsgroups_test.data)
"""
Explanation: Transform Data (Vectorize)
End of explanation
"""
clf = MultinomialNB(alpha=.01)
clf.fit(vectors_train, newsgroups_train.target)
"""
Explanation: There are 18000+ features for each document. And on average, 87 out of 18000 features are non-zeros. This is a sparse matrix
Train Classifier
End of explanation
"""
y_true = newsgroups_test.target
y_pred = clf.predict(vectors_test)
metrics.f1_score(y_true, y_pred, average='macro')
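# A fuller per-class breakdown (an optional extra check using sklearn's classification_report):
print(metrics.classification_report(y_true, y_pred, target_names=newsgroups_test.target_names))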
"""
Explanation: Evaluate Classifier
End of explanation
"""
cm = confusion_matrix(y_true, y_pred)
"""
Explanation: Interpretation: An F-1 score of 0.94 is high. Our model is performant.
Plot Confusion Matrix
End of explanation
"""
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
# Print header
print(" " + empty_cell, end=" ")
for label in labels:
print("%{0}s".format(columnwidth) % label, end=" ")
print()
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=" ")
for j in range(len(labels)):
cell = "%{0}.1f".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=" ")
print()
print_cm(cm, labels = ['Automobiles', 'Medical'])
pd.crosstab(y_true, y_pred, rownames=['True'], colnames=['Predicted'], margins=True)
"""
Explanation: Pretty Print Confusion Matrix
End of explanation
"""
def plot_precision_recall(y_true,y_score):
"""
Plot a precision recall curve
Parameters
----------
y_true: ls
ground truth labels
y_score: ls
score output from model
"""
precision_curve, recall_curve, pr_thresholds = precision_recall_curve(y_true,y_score[:,1])
plt.plot(recall_curve, precision_curve)
plt.xlabel('Recall')
plt.ylabel('Precision')
auc_val = auc(recall_curve,precision_curve)
print('AUC-PR: {0:1f}'.format(auc_val))
plt.show()
plt.clf()
y_score = clf.predict_proba(vectors_test)
plot_precision_recall(y_true, y_score)
"""
Explanation: Interpretation:
From 398 automobile-related articles, we classified 385 correctly.
From 396 medicine-related articles, we classified 363 correctly.
Plot: Precision-Recall Curve
End of explanation
"""
#Params - NOT tuned
ANALYZER = "word" #unit of features are single words rather then phrases of words
STRIP_ACCENTS = 'unicode'
TOKENIZER = None
MAX_DF = (1.0) # Exclude words that have a frequency greater than the threshold
STOP_WORDS = (stopwords.words('english'), None)
#Params - TUNED
NGRAM_RANGE = ((0,1), (0,2)) # Range for phrases of words
MIN_DF = (0, 0.01) # Exclude words that have a frequency less than the threshold
ALPHA = (0.01, 0.1, 1)
pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('clf', MultinomialNB())])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'tfidf__ngram_range':NGRAM_RANGE,
'tfidf__min_df':MIN_DF,
'clf__alpha': ALPHA,
}
def optimize_pipeline(pipeline):
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=True)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(newsgroups_train.data, newsgroups_train.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
optimize_pipeline(pipeline)
"""
Explanation: Interpretation:
The area under the curve is 0.98, just shy of the ideal 1.0.
The trained classifier generalizes to the test set well.
Improve: Grid Search CV for Classifier
Let's play with parameters for the TFIDF Vectorizer, and Alpha (Laplace smoothing) for the Naive Bayes Classifier
http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html
End of explanation
"""
|
wanderer2/pymc3
|
docs/source/notebooks/Euler-Maruyama and SDEs.ipynb
|
apache-2.0
|
%pylab inline
import pymc3 as pm
import theano.tensor as tt
import scipy
from pymc3.distributions.timeseries import EulerMaruyama
"""
Explanation: Inferring parameters of SDEs using a Euler-Maruyama scheme
This notebook is derived from a presentation prepared for the Theoretical Neuroscience Group, Institute of Systems Neuroscience at Aix-Marseille University.
End of explanation
"""
# parameters
λ = -0.78
σ2 = 5e-3
N = 200
dt = 1e-1
# time series
x = 0.1
x_t = []
# simulate
for i in range(N):
x += dt * λ * x + sqrt(dt) * σ2 * randn()
x_t.append(x)
x_t = array(x_t)
# z_t noisy observation
z_t = x_t + randn(x_t.size) * 5e-3
figure(figsize=(10, 3))
subplot(121)
plot(x_t[:30], 'k', label='$x(t)$', alpha=0.5), plot(z_t[:30], 'r', label='$z(t)$', alpha=0.5)
title('Transient'), legend()
subplot(122)
plot(x_t[30:], 'k', label='$x(t)$', alpha=0.5), plot(z_t[30:], 'r', label='$z(t)$', alpha=0.5)
title('All time');
tight_layout()
"""
Explanation: Toy model 1
Here's a scalar linear SDE in symbolic form
$ dX_t = \lambda X_t \, dt + \sigma^2 dW_t $
discretized with the Euler-Maruyama scheme
End of explanation
"""
def lin_sde(x, lam):
return lam * x, σ2
"""
Explanation: What is the inference we want to make? Since we've made a noisy observation of the generated time series, we need to estimate both $x(t)$ and $\lambda$.
First, we rewrite our SDE as a function returning a tuple of the drift and diffusion coefficients
End of explanation
"""
with pm.Model() as model:
# uniform prior, but we know it must be negative
lam = pm.Flat('lam')
# "hidden states" following a linear SDE distribution
# parametrized by time step (det. variable) and lam (random variable)
xh = EulerMaruyama('xh', dt, lin_sde, (lam, ), shape=N, testval=x_t)
# predicted observation
zh = pm.Normal('zh', mu=xh, sd=5e-3, observed=z_t)
"""
Explanation: Next, we describe the probability model as a set of three stochastic variables, lam, xh, and zh:
End of explanation
"""
with model:
# optimize to find the mode of the posterior as starting point for prob. mass
start = pm.find_MAP(vars=[xh], fmin=scipy.optimize.fmin_l_bfgs_b)
# "warm up" to transition from mode to prob. mass
step = pm.NUTS(scaling=start)
trace = pm.sample(1000, step, progressbar=True)
# sample from the prob. mass
step = pm.NUTS(scaling=trace[-1], gamma=.25)
trace = pm.sample(2000, step, start=trace[-1], progressbar=True)
"""
Explanation: Once the model is constructed, we perform inference, i.e. sample from the posterior distribution, in the following steps:
End of explanation
"""
figure(figsize=(10, 3))
subplot(121)
plot(percentile(trace[xh], [2.5, 97.5], axis=0).T, 'k', label='$\hat{x}_{95\%}(t)$')
plot(x_t, 'r', label='$x(t)$')
legend()
subplot(122)
hist(trace[lam], 30, label='$\hat{\lambda}$', alpha=0.5)
axvline(λ, color='r', label='$\lambda$', alpha=0.5)
legend();
"""
Explanation: Next, we plot some basic statistics on the samples from the posterior,
End of explanation
"""
# generate trace from posterior
ppc_trace = pm.sample_ppc(trace, model=model)
# plot with data
figure(figsize=(10, 3))
plot(percentile(ppc_trace['zh'], [2.5, 97.5], axis=0).T, 'k', label=r'$z_{95\% PP}(t)$')
plot(z_t, 'r', label='$z(t)$')
legend()
"""
Explanation: A model can fit the data precisely and still be wrong; we need to use posterior predictive checks to assess whether, under our fitted model, the data are likely.
In other words, we
- assume the model is correct
- simulate new observations
- check that the new observations fit with the original data
End of explanation
"""
N, τ, a, m, σ2 = 200, 3.0, 1.05, 0.2, 1e-1
xs, ys = [0.0], [1.0]
for i in range(N):
x, y = xs[-1], ys[-1]
dx = τ * (x - x**3.0/3.0 + y)
dy = (1.0 / τ) * (a - x)
xs.append(x + dt * dx + sqrt(dt) * σ2 * randn())
ys.append(y + dt * dy + sqrt(dt) * σ2 * randn())
xs, ys = array(xs), array(ys)
zs = m * xs + (1 - m) * ys + randn(xs.size) * 0.1
figure(figsize=(10, 2))
plot(xs, label='$x(t)$')
plot(ys, label='$y(t)$')
plot(zs, label='$z(t)$')
legend()
"""
Explanation: Note that
inference also estimates the initial conditions
the observed data $z(t)$ lies fully within the 95% interval of the PPC.
there are many other ways of evaluating fit
Toy model 2
As the next model, let's use a 2D deterministic oscillator,
\begin{align}
\dot{x} &= \tau (x - x^3/3 + y) \\
\dot{y} &= \frac{1}{\tau} (a - x)
\end{align}
with noisy observation $z(t) = m x + (1 - m) y + N(0, 0.05)$.
End of explanation
"""
def osc_sde(xy, τ, a):
x, y = xy[:, 0], xy[:, 1]
dx = τ * (x - x**3.0/3.0 + y)
dy = (1.0 / τ) * (a - x)
dxy = tt.stack([dx, dy], axis=0).T
return dxy, σ2
"""
Explanation: Now, estimate the hidden states $x(t)$ and $y(t)$, as well as parameters $\tau$, $a$ and $m$.
As before, we rewrite our SDE as a function returned drift & diffusion coefficients:
End of explanation
"""
xys = c_[xs, ys]
with pm.Model() as model:
τh = pm.Uniform('τh', lower=0.1, upper=5.0)
ah = pm.Uniform('ah', lower=0.5, upper=1.5)
mh = pm.Uniform('mh', lower=0.0, upper=1.0)
xyh = EulerMaruyama('xyh', dt, osc_sde, (τh, ah), shape=xys.shape, testval=xys)
zh = pm.Normal('zh', mu=mh * xyh[:, 0] + (1 - mh) * xyh[:, 1], sd=0.1, observed=zs)
"""
Explanation: As before, the Euler-Maruyama discretization of the SDE is written as a prediction of the state at step $i+1$ based on the state at step $i$.
We can now write our statistical model as before, with uninformative priors on $\tau$, $a$ and $m$:
End of explanation
"""
with model:
# optimize to find the mode of the posterior as starting point for prob. mass
start = pm.find_MAP(vars=[xyh], fmin=scipy.optimize.fmin_l_bfgs_b)
# "warm up" to transition from mode to prob. mass
step = pm.NUTS(scaling=start)
trace = pm.sample(100, step, progressbar=True)
# sample from the prob. mass
step = pm.NUTS(scaling=trace[-1], gamma=.25)
trace = pm.sample(2000, step, start=trace[-1], progressbar=True)
"""
Explanation: As with the linear SDE, we 1) find a MAP estimate, 2) warm up and 3) sample from the probability mass:
End of explanation
"""
figure(figsize=(10, 6))
subplot(211)
plot(percentile(trace[xyh][..., 0], [2.5, 97.5], axis=0).T, 'k', label='$\hat{x}_{95\%}(t)$')
plot(xs, 'r', label='$x(t)$')
legend(loc=0)
subplot(234), hist(trace['τh']), axvline(τ), xlim([1.0, 4.0]), title('τ')
subplot(235), hist(trace['ah']), axvline(a), xlim([0, 2.0]), title('a')
subplot(236), hist(trace['mh']), axvline(m), xlim([0, 1]), title('m')
tight_layout()
"""
Explanation: Again, the result is a set of samples from the posterior, including not only our parameters of interest but also the hidden states
End of explanation
"""
# generate trace from posterior
ppc_trace = pm.sample_ppc(trace, model=model)
# plot with data
figure(figsize=(10, 3))
plot(percentile(ppc_trace['zh'], [2.5, 97.5], axis=0).T, 'k', label=r'$z_{95\% PP}(t)$')
plot(zs, 'r', label='$z(t)$')
legend()
"""
Explanation: Again, we can perform a posterior predictive check to verify that our data are likely given the fitted model
End of explanation
"""
|
saketkc/notebooks
|
python/Expectation Maximisation.ipynb
|
bsd-2-clause
|
%matplotlib notebook
from __future__ import division
from collections import OrderedDict
from scipy.stats import binom as binomial
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#from ipywidgets import StaticInteract, RangeWidget
import pandas as pd
from IPython.display import display, Image
from scipy.spatial.distance import euclidean
from sympy import init_printing, symbols, Eq
init_printing()
Image('images/nbt1406-F1.png')
coin_toss = []
coin_toss.append('H T T T H H T H T H'.split())
coin_toss.append('H H H H T H H H H H'.split())
coin_toss.append('H T H H H H H T H H'.split())
coin_toss.append('H T H T T T H H T T'.split())
coin_toss.append('T H H H T H H H T H'.split())
columns = range(1,11)
df = pd.DataFrame(coin_toss, index=None, columns=columns)
df.index.rename('Toss')
"""
Explanation: Expectation Maximisation with Python : Coin Toss
This notebook implements the example that I consider a classic for understanding Expectation Maximisation.
See: http://www.nature.com/nbt/journal/v26/n8/full/nbt1406.html
Notations:
\begin{align}
\theta_A &= \text{Probability of a Heads showing up given the coin tossed is A}\\
\theta_B &= \text{Probability of a Heads showing up given the coin tossed is B}
\end{align}
End of explanation
"""
df
"""
Explanation: Our configuration looks like this:
End of explanation
"""
thetaA, thetaB = symbols('theta_A theta_B')
a,b = thetaA, thetaB # Hack to display
## Observed Case
observed = ['B', 'A', 'A', 'B', 'A']
index_A = [i for i,x in enumerate(observed) if x=='A']
index_B = [i for i,x in enumerate(observed) if x=='B']
total_tosses = df.size
A_tosses = df.iloc[index_A].unstack()
B_tosses = df.iloc[index_B].unstack()
A_heads = A_tosses.value_counts()['H']
B_heads = B_tosses.value_counts()['H']
theta_A = A_heads/A_tosses.size
theta_B = B_heads/B_tosses.size
(a, theta_A)
(b, theta_B)
"""
Explanation: Case 1: Identity of coin being tossed known
If the identity of the coin being tossed is known and is observed = ['B', 'A', 'A', 'B', 'A'] it is not so difficult to calculate the corresponding values of $\theta_A$ and $\theta_B$:
$$
\theta_A = \frac{\text{Total Heads when coin tossed is A}}{\text{Total tosses for coin A}}
$$
$$
\theta_B = \frac{\text{Total Heads when coin tossed is B}}{\text{Total tosses for coin B}}
$$
End of explanation
"""
thetaA = 0.6
thetaB = 0.5
def em(theta_old):
row_prob = []
## Expectation
for row in coin_toss:
count_heads = row.count('H')
p_a = binomial.pmf(count_heads, len(row), theta_old['A'])
p_b = binomial.pmf(count_heads, len(row), theta_old['B'])
p_t = p_a+p_b
p_a = p_a/p_t
p_b = p_b/p_t
row_prob.append({'A': p_a, 'B': p_b, 'count_heads': count_heads, 'total_tosses': len(row)})
## Maximisation
new_coin_toss = []
for row in row_prob:
total_tosses = row['total_tosses']
total_heads = row['count_heads']
A_heads = row['A']*total_heads
A_tails = row['A']*(total_tosses-total_heads)
B_heads = row['B']*total_heads
B_tails = row['B']*(total_tosses-total_heads)
new_coin_toss.append([A_heads, A_tails, B_heads, B_tails])
df = pd.DataFrame(new_coin_toss, columns=['A Heads', 'A Tails', 'B Heads', 'B Tails'])
new_pa = df['A Heads'].sum()/(df['A Heads'].sum()+df['A Tails'].sum())
new_pb = df['B Heads'].sum()/(df['B Heads'].sum()+df['B Tails'].sum())
new_theta = OrderedDict({'A': new_pa, 'B': new_pb})
display(df)
return new_theta
theta = OrderedDict({'A': thetaA, 'B': thetaB})
max_iterations = 10000
iterations = 0
diff = 1
tolerance = 1e-6
while (iterations < max_iterations) and (diff > tolerance):
    new_theta = em(theta)
    diff = euclidean(list(new_theta.values()), list(theta.values()))
    theta = new_theta
    iterations += 1
(a, new_theta['A'])
(b, new_theta['B'])
"""
Explanation: Case 2: Identity of coin being tossed is unknown
When the identity of the coin being tossed is unknown we rely on Expectation Maximisation to give us estimates of $\theta_A$ and $\theta_B$. We start with initial values of $\theta_A, \theta_B$ and then, given the observed data (the 50 coin tosses), run the 'E-step', calculating the probability of coin $A$ or $B$ being used for each series of tosses. Remember that each of the 5 sets is in reality generated by a single coin; the Expectation step simply treats each set as having come from a 'mixture' of coins 'A' and 'B'. Given initial values $\theta_A=0.6$ and $\theta_B=0.5$, let's calculate the 'weights' associated with our mixture model. Rather than simply estimating which coin is more likely to have generated the tosses, we calculate the probability of each possible 'completion' of the missing data, the missing data here being the label of the coin that the tosses came from.
Numerically this involves the following:
Consider the series of toss to be H T T T H H T H T H
$$
\begin{eqnarray}
\theta_A = 0.6\
\theta_B = 0.5\
n_{heads} = 5\
n_{tails} = 5\
P(5H | \text{Coin A}) = {10 \choose 5} \theta_A^5(1-\theta_A)^{10-5} = 0.2000\
P(5H | \text{Coin B}) = {10 \choose 5} \theta_B^5(1-\theta_B)^{10-5} = 0.2460\
w_A = \frac{P(5H | \text{Coin A})}{P(5H | \text{Coin A})+P(5H | \text{Coin B})} = 0.4484\
w_B = \frac{P(5H | \text{Coin B})}{P(5H | \text{Coin A})+P(5H | \text{Coin B})} = 0.5516\
\end{eqnarray}
$$
Mathematically:
$(X_1, X_2, X_3 \dots X_{5})$ : Coin tosses (Observed) for the 5 sets of 10 coin tosses, where $X_i$ is a $10 \times 1$ vector representing the 10 coin tosses in set $i$.
$(Z_1, Z_2, Z_3, Z_4, Z_5)$: Unobserved(hidden) label of coins
Complete Data: $Y={(X_1,Z_1), (X_2, Z_2) \dots (X_5, Z_5)}$
Parameter $\theta = (\theta_A, \theta_B)$
Likelihood: $L(\theta|X) = P(X|\theta) = \sum_{Z}P(X,Z|\theta)$
Observed data likelihood (marginalising over the unknown coin labels $Z_i$): $L(\theta|X) = \prod_{i=1}^5 \sum_{z\in\{A,B\}} P(X_i,Z_i=z | \theta)$
$$
\log(L(\theta|X)) = \sum_{i=1}^5 \log\big( \sum_{z\in\{A,B\}} P(X_i,Z_i=z | \theta) \big)
$$
Let $n_i$ be the number of heads in the $i^{th}$ set.
$$
\begin{align}
P(X_i,Z=z|\theta) &= {10 \choose n_i} \theta_z^{n_i}(1-\theta_z)^{10-n_i} \text{ where } z \in {A,B} \
P(X_i|\theta) &= {10 \choose n_i} \theta_A^{n_i}(1-\theta_A)^{10-n_i} + {10 \choose n_i} \theta_B^{n_i}(1-\theta_B)^{10-n_i}
\end{align}
$$
$$
\begin{align}
P(Z_i=z|X_i,\theta) &= \frac{P(X_i,Z_i=z|\theta)}{P(X_i|\theta)}\
&= \frac{P(X_i,Z_i=z|\theta)}{{10 \choose n_i} \theta_A^{n_i}(1-\theta_A)^{10-n_i} + {10 \choose n_i} \theta_B^{n_i}(1-\theta_B)^{10-n_i}}
\end{align}
$$
Thus,
$$
\begin{align}
P(Z_i=A|X_i,\theta) &= \frac{P(X_i,Z_i=A|\theta)}{{10 \choose n_i} \theta_A^{n_i}(1-\theta_A)^{10-n_i} + {10 \choose n_i} \theta_B^{n_i}(1-\theta_B)^{10-n_i}}
\end{align}
$$
For a binomial distribution $Bin(N,p)$ , $E[heads] = Np$
$$
E(\text{Number of heads of coin A} |X_i, \theta) = n_i \times P(Z_i=A|X_i,\theta)
$$
Now, the $M$ step:
In order to maximise the log likelihood, with respect to $\theta_A,\theta_B$:
End of explanation
"""
|
stijnvanhoey/flexible_vhm_implementation
|
vhm_run_examples.ipynb
|
bsd-3-clause
|
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from matplotlib.ticker import LinearLocator
sns.set_style('whitegrid')
mpl.rcParams['font.size'] = 16
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
from VHM import VHM_flexible
import brewer2mpl
setblue = brewer2mpl.get_map('Greys', 'Sequential', 6,
reverse = True).mpl_colors
"""
Explanation: VHM model structure implemented in Python
Import libraries and set image properties
End of explanation
"""
data = pd.read_csv("/media/DATA/Githubs/project_breach_pdm_application/data/data_brach_case_nete.csv",
parse_dates=True, index_col=0)
data.head()
"""
Explanation: Load observations
End of explanation
"""
# Parameters
umax = 280.0
uevap = 150.0
c1s = 1.8
c2s = 0.4
c3s = 1.0
c1o = -3.9
c2o = 1.59
c3o = 0.0
c4o = 0.0
c1i = -2.7
c2i = 1.
c3i = 0.0
c4i = 0.0
nso = 50
nsi = 50
Kg = 2400.0
Ki = 120.0
Ko = 10.0
# Define the constants
area = 361.
timestep = 1.
# Define the initial conditions
u = 170.0
qg = 1.0
cg = 0.0
qo = 0.0
co = 0.0
qi = 1.0
ci = 0.0
pars = [umax,uevap,c1s,c2s,c3s,c1o,c2o,c3o,c4o,c1i,c2i,c3i,c4i,nso,nsi,Kg,Ki,Ko]
constants = [area,timestep]
init_conditions = [u, qg, cg, qo, co, qi, ci]
"""
Explanation: Model simulation
Parameter values, initial conditions and constant values
End of explanation
"""
structure_options=['relative', 'nonlinear', True, True, '211']
"""
Explanation: Structural options
- fracthand: 'relative' or 'sequentialx' with x in [1-4]
- storhand: 'linear' or 'nonlinear'
- interflowhand: True or False
- infexcesshand: True or False
- nres_g/nres_i/nres_o: a string of 3 options, each in [1-2], e.g. '211', '121', ...
An alternative combination is sketched in the next cell for illustration.
End of explanation
"""
rain = data['rain'].values
pet = data['evapotranspiration'].values
vhm_output = VHM_flexible(pars, constants, init_conditions,
structure_options, rain, pet)
outflows, fractions, moisture = vhm_output
# create dataframe with
data['modtot'] = outflows[:, 0]
data['modover'] = outflows[:, 1]
data['modinter'] = outflows[:, 2]
data['modbase'] = outflows[:, 3]
data['fracover'] = fractions[:, 0]
data['fracinter'] = fractions[:, 1]
data['fracbase'] = fractions[:, 2]
data['fracsoil'] = fractions[:, 3]
data['fractotal'] = data['fracover'] + data['fracinter'] + data['fracbase'] + data['fracsoil']
data['soil'] = moisture
"""
Explanation: Run the model
End of explanation
"""
data2plot = data['2003':'2005']
"""
Explanation: Focus on a subperiod to plot
End of explanation
"""
fig, axs = plt.subplots(1, 1, figsize=(14, 6), sharex=True)
axs.plot(data2plot.index, data2plot['modtot'], label='modelled')
axs.plot(data2plot.index, data2plot['meas'], label='observed')
axs.set_ylabel("flow ($m^3s^{-1}$)")
axs.yaxis.labelpad = 15
axs.xaxis.set_major_locator(
mpl.dates.MonthLocator(interval = 12))
axs.xaxis.set_major_formatter(
mpl.dates.DateFormatter('%d %b \n %Y'))
axs.tick_params(axis = 'x', pad = 15, direction='out')
# y-axis
axs.tick_params(axis = 'y', pad = 5, direction='out')
#remove spines
axs.spines['bottom'].set_visible(False)
axs.spines['top'].set_visible(False)
# set grid
axs.grid(which='both', axis='both', color='0.7',
linestyle='--', linewidth=0.8)
# line colors of the plots
axs.lines[0].set_color(setblue[0])
axs.lines[1].set_color(setblue[2])
# line widths
for line in axs.lines:
line.set_linewidth(1.2)
axs.legend(loc='upper right', fontsize=16, ncol=2, bbox_to_anchor=(1., 1.1))
#plt.savefig('vhm_flow_example.pdf', dpi=300)
#plt.savefig('vhm_flow_example.png', dpi=300)
"""
Explanation: Plot the model output and observations to evaluate the fit
End of explanation
"""
overf = pd.read_csv("Filter_Overlandflow3.txt", index_col=0, sep='\t', parse_dates=True, dayfirst=True)
overf.columns = ['overland flow']
interf = pd.read_csv("Filter_Interflow3.txt", index_col=0, sep='\t', parse_dates=True, dayfirst=True)
interf.columns = ['interflow']
basef = pd.read_csv("Filter_Baseflow3.txt", index_col=0, sep='\t', parse_dates=True, dayfirst=True)
basef.columns = ['baseflow']
subflow_data = overf.join(interf).join(basef)
subflow2plot = subflow_data['2003':'2005']
fig, axs = plt.subplots(3, 1, figsize=(14, 6), sharex=True)
fig.subplots_adjust(hspace = 0.2)
#first plot
axs[0].plot(data2plot.index, data2plot['modover'], label='subflow modelled')
axs[0].plot(subflow2plot.index, subflow2plot['overland flow'].values, label='subflow separation')
axs[0].set_ylabel("overland flow \n ($m^3s^{-1}$)")
axs[0].yaxis.labelpad = 15
#second plot
axs[1].plot(data2plot.index, data2plot['modinter'])
axs[1].plot(subflow2plot.index, subflow2plot['interflow'].values)
axs[1].yaxis.tick_right()
axs[1].yaxis.set_label_position("right")
axs[1].set_ylabel("interflow \n ($m^3s^{-1}$)")
axs[1].yaxis.labelpad = 15
# third plot
axs[2].plot(data2plot.index, data2plot['modbase'])
axs[2].plot(subflow2plot.index, subflow2plot['baseflow'].values)
axs[2].xaxis.set_major_locator(
mpl.dates.MonthLocator(interval = 12))
axs[2].xaxis.set_major_formatter(
mpl.dates.DateFormatter('%d %b \n %Y'))
axs[2].tick_params(axis = 'x', pad = 15, direction='out')
axs[2].set_ylabel("baseflow \n($m^3s^{-1}$)")
axs[2].yaxis.labelpad = 10
#editing of the style:
for ax in axs:
# y-axis
ax.tick_params(axis = 'y', pad = 5, direction='out')
ax.yaxis.set_major_locator(LinearLocator(3))
#remove spines
ax.spines['bottom'].set_visible(False)
ax.spines['top'].set_visible(False)
# set grid
ax.grid(which='both', axis='both', color='0.7',
linestyle='--', linewidth=0.8)
# line colors of the plots
ax.lines[0].set_color(setblue[0])
ax.lines[1].set_color(setblue[2])
# line widths
for line in ax.lines:
line.set_linewidth(1.2)
# remove ticklabels if redundant
if not ax.is_last_row():
ax.set_xlabel('')
plt.setp(axs[1].get_xminorticklabels(), visible=False)
plt.setp(axs[1].get_xmajorticklabels(), visible=False)
temp = axs[0]
temp.legend(loc='upper right', fontsize=16, ncol=2, bbox_to_anchor=(1., 1.4))
fig.savefig('vhm_subflow_example.pdf')
fig.savefig('vhm_subflow_example.png')
"""
Explanation: Plot modelled and filtered subflows as a function of time
End of explanation
"""
fig, axs = plt.subplots(1, 1, figsize=(14, 6), sharex=True)
axs.plot(data2plot.index, data2plot['fracover'],'-', label='fraction overland flow')
axs.plot(data2plot.index, data2plot['fracinter'],'-.', label='fraction interflow')
axs.plot(data2plot.index, data2plot['fracbase'],':', label='fraction base flow')
axs.plot(data2plot.index, data2plot['fracsoil'],'-', label='fraction infiltration')
axs.plot(data2plot.index, data2plot['fractotal'],'-', label='total fractions')
axs.set_ylabel("fractions")
axs.yaxis.labelpad = 15
axs.xaxis.set_major_locator(
mpl.dates.MonthLocator(interval = 12))
axs.xaxis.set_major_formatter(
mpl.dates.DateFormatter('%d %b \n %Y'))
axs.tick_params(axis = 'x', pad = 15, direction='out')
# y-axis
axs.tick_params(axis = 'y', pad = 5, direction='out')
axs.yaxis.set_ticks([0,0.5,1.])
axs.set_ylim([0., 1.05])
#remove spines
axs.spines['bottom'].set_visible(False)
axs.spines['top'].set_visible(False)
# set grid
axs.grid(which='both', axis='both', color='0.7',
linestyle='--', linewidth=0.8)
# line colors of the plots
axs.lines[0].set_color(setblue[0])
axs.lines[1].set_color(setblue[0])
axs.lines[2].set_color(setblue[1])
axs.lines[3].set_color(setblue[1])
axs.lines[4].set_color(setblue[3])
# line widths
for line in axs.lines:
line.set_linewidth(1.2)
axs.legend(loc='upper right', fontsize=16, ncol=3, bbox_to_anchor=(1., 0.95))
#plt.savefig('vhm_fractions_example_noante.pdf', dpi=300)
#plt.savefig('vhm_fractions_example_noante.png', dpi=300)
"""
Explanation: Plot an overview of the fractions over time
End of explanation
"""
fig, axs = plt.subplots(1, 1, figsize=(14, 6), sharex=True)
axs.plot(data2plot.index, data2plot['soil'],'-')
axs.set_ylabel(r"soil moisture ($mm$)")
axs.yaxis.labelpad = 15
axs.xaxis.set_major_locator(
mpl.dates.MonthLocator(interval = 12))
axs.xaxis.set_major_formatter(
mpl.dates.DateFormatter('%d %b \n %Y'))
axs.tick_params(axis = 'x', pad = 15, direction='out')
# y-axis
axs.tick_params(axis = 'y', pad = 5, direction='out')
#remove spines
axs.spines['bottom'].set_visible(False)
axs.spines['top'].set_visible(False)
# set grid
axs.grid(which='both', axis='both', color='0.7',
linestyle='--', linewidth=0.8)
# line colors of the plots
axs.lines[0].set_color(setblue[0])
# line widths
for line in axs.lines:
line.set_linewidth(1.2)
#plt.savefig('vhm_moisture_example.pdf', dpi=300)
#plt.savefig('vhm_moisture_example.png', dpi=300)
"""
Explanation: Soil moisture plot
End of explanation
"""
|
google/uncertainty-baselines
|
baselines/notebooks/Hyperparameter_Ensembles.ipynb
|
apache-2.0
|
import os  # used by parse_checkpoint_dir below

import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import uncertainty_baselines as ub
def _ensemble_accuracy(labels, logits_list):
"""Compute the accuracy resulting from the ensemble prediction."""
per_probs = tf.nn.softmax(logits_list)
probs = tf.reduce_mean(per_probs, axis=0)
acc = tf.keras.metrics.SparseCategoricalAccuracy()
acc.update_state(labels, probs)
return acc.result()
def _ensemble_cross_entropy(labels, logits):
logits = tf.convert_to_tensor(logits)
ensemble_size = float(logits.shape[0])
labels = tf.cast(labels, tf.int32)
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1]),
logits=logits)
nll = -tf.reduce_logsumexp(-ce, axis=0) + tf.math.log(ensemble_size)
return tf.reduce_mean(nll)
def greedy_selection(val_logits, val_labels, max_ens_size, objective='nll'):
"""Greedy procedure from Caruana et al. 2004, with replacement."""
assert_msg = 'Unknown objective type (received {}).'.format(objective)
assert objective in ('nll', 'acc', 'nll-acc'), assert_msg
# Objective that should be optimized by the ensemble. Arbitrary objectives,
# e.g., based on nll, acc or calibration error (or combinations of those) can
# be used.
  if objective == 'nll':
    get_objective = lambda acc, nll: nll
  elif objective == 'acc':
    # Negated so that a smaller objective is always better.
    get_objective = lambda acc, nll: -acc
  else:
    get_objective = lambda acc, nll: nll-acc
best_acc = 0.
best_nll = np.inf
best_objective = np.inf
ens = []
def get_ens_size():
return len(set(ens))
while get_ens_size() < max_ens_size:
current_val_logits = [val_logits[model_id] for model_id in ens]
best_model_id = None
for model_id, logits in enumerate(val_logits):
acc = _ensemble_accuracy(val_labels, current_val_logits + [logits])
nll = _ensemble_cross_entropy(val_labels, current_val_logits + [logits])
obj = get_objective(acc, nll)
if obj < best_objective:
best_acc = acc
best_nll = nll
best_objective = obj
best_model_id = model_id
if best_model_id is None:
print('Ensemble could not be improved: Greedy selection stops.')
break
ens.append(best_model_id)
return ens, best_acc, best_nll
def parse_checkpoint_dir(checkpoint_dir):
"""Parse directory of checkpoints."""
paths = []
subdirectories = tf.io.gfile.glob(os.path.join(checkpoint_dir, '*'))
is_checkpoint = lambda f: ('checkpoint' in f and '.index' in f)
print('Load checkpoints')
for subdir in subdirectories:
for path, _, files in tf.io.gfile.walk(subdir):
if any(f for f in files if is_checkpoint(f)):
latest_checkpoint = tf.train.latest_checkpoint(path)
paths.append(latest_checkpoint)
print('.', end='')
break
print('')
return paths
DATASET = 'cifar10'
TRAIN_PROPORTION = 0.95
BATCH_SIZE = 64
ENSEMBLE_SIZE = 4
CHECKPOINT_DIR = 'gs://gresearch/reliable-deep-learning/checkpoints/baselines/cifar/hyper_ensemble/'
# Load data.
ds_info = tfds.builder(DATASET).info
num_classes = ds_info.features['label'].num_classes
# Test set.
steps_per_eval = ds_info.splits['test'].num_examples // BATCH_SIZE
test_dataset = ub.datasets.get(
DATASET,
split=tfds.Split.TEST).load(batch_size=BATCH_SIZE)
# Validation set.
validation_percent = 1 - TRAIN_PROPORTION
val_dataset = ub.datasets.get(
dataset_name=DATASET,
split=tfds.Split.VALIDATION,
validation_percent=validation_percent,
drop_remainder=False).load(batch_size=BATCH_SIZE)
steps_per_val_eval = int(ds_info.splits['train'].num_examples *
validation_percent) // BATCH_SIZE
"""
Explanation: Hyperparameter Ensembles for Robustness and Uncertainty Quantification
Florian Wenzel, April 8th 2021. Licensed under the Apache License, Version 2.0.
Recently, we proposed Hyper-deep Ensembles (Wenzel et al., NeurIPS 2020), a simple yet powerful extension of deep ensembles. The approach works with any given deep network architecture and can therefore be easily integrated into (and improve) a machine learning system that is already used in production.
Hyper-deep ensembles improve the performance of a given deep network by forming an ensemble over multiple variants of that architecture where each member uses different hyperparameters. In this notebook we consider a ResNet-20 architecture with block-wise $\ell_2$-regularization parameters and a label smoothing parameter. We construct an ensemble of 4 members where each member uses a different set of hyperparameters. This leads to an ensemble of diverse members, i.e., members that are complementary in their predictions. The final ensemble greatly improves the prediction performance and the robustness of the model, e.g., in out-of-distribution settings.
Let's start with some boilerplate code for data loading and the model definition.
Requirements:
bash
!pip install "git+https://github.com/google/uncertainty-baselines.git#egg=uncertainty_baselines"
End of explanation
"""
# The model architecture we want to form the ensemble over
# here, we use the original ResNet-20 model by He et al. 2015.
model = ub.models.wide_resnet(
input_shape=ds_info.features['image'].shape,
depth=22,
width_multiplier=1,
num_classes=num_classes,
l2=0.,
version=1)
# Load checkpoints:
# These are 100 checkpoints and loading will take a few minutes.
ensemble_filenames = parse_checkpoint_dir(CHECKPOINT_DIR)
model_pool_size = len(ensemble_filenames)
checkpoint = tf.train.Checkpoint(model=model)
print('Model pool size: {}'.format(model_pool_size))
"""
Explanation: Let's construct the hyper-deep ensemble over a ResNet-20 architecture
This is the (simplified) hyper-deep ensembles construction pipeline
1. Random search: train several models on the train set using different (random) hyperparameters.
2. Ensemble construction: on a validation set using a greedy selection method.
Remark:
In this notebook we use a slightly simplified version of the pipeline compared to the approach of the original paper (where an additional stratification step is used). Additionally, after selecting the optimal hyperparameters the ensemble performance can be improved even more by retraining the selected models on the full train set (i.e., this time not reserving a portion for validation). The simplified pipeline in this notebook is slightly less performant but easier to implement. The simplified pipeline is similar to the ones used by Caruana et al., 2004 and Zaidi et al., 2020 in the context of neural architecture search.
Step 1: Random Hyperparameter Search
We start by training 100 different versions of the ResNet-20 using different $\ell_2$-regularization parameters and label smoothing parameters. Since this would take some time we have already trained the models using a standard training script (which can be found here) and directly load the checkpoints (which can be browsed here).
End of explanation
"""
# Compute the logits on the validation set.
val_logits, val_labels = [], []
for m, ensemble_filename in enumerate(ensemble_filenames):
# Enforce memory clean-up.
tf.keras.backend.clear_session()
checkpoint.restore(ensemble_filename)
val_iterator = iter(val_dataset)
val_logits_m = []
for _ in range(steps_per_val_eval):
inputs = next(val_iterator)
features = inputs['features']
labels = inputs['labels']
val_logits_m.append(model(features, training=False))
if m == 0:
val_labels.append(labels)
val_logits.append(tf.concat(val_logits_m, axis=0))
if m == 0:
val_labels = tf.concat(val_labels, axis=0)
if m % 10 == 0 or m == model_pool_size - 1:
percent = (m + 1.) / model_pool_size
message = ('{:.1%} completion for prediction on validation set: '
'model {:d}/{:d}.'.format(percent, m + 1, model_pool_size))
print(message)
"""
Explanation: Step 2: Construction of the hyperparameter ensemble on the validation set
First we compute the logits of all models in our model pool on the validation set.
End of explanation
"""
# Ensemble construction by greedy member selection on the validation set.
selected_members, val_acc, val_nll = greedy_selection(val_logits, val_labels,
ENSEMBLE_SIZE,
objective='nll')
unique_selected_members = list(set(selected_members))
message = ('Members selected by greedy procedure: model ids = {} (with {} '
'unique member(s)).').format(
selected_members, len(unique_selected_members))
print(message)
"""
Explanation: Now we are ready to construct the ensemble.
* In the first step, we take the best model (on the validation set) -> model_1.
* In the second step, we fix model_1 and try all models in our model pool and construct the ensemble [model_1, model_2]. We select the model model_2 that leads to the highest performance gain.
* In the third step, we fix model_1, model_2 and choose model_3 to construct an ensemble [model_1, model_2, model_3] that leads to the highest performance gain over step 2.
* ... and so on, until the desired ensemble size is reached or no performance gain could be achieved anymore.
End of explanation
"""
# Evaluate the following metrics on the test set.
metrics = {
'ensemble/negative_log_likelihood': tf.keras.metrics.Mean(),
'ensemble/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
}
metrics_single = {
'single/negative_log_likelihood': tf.keras.metrics.SparseCategoricalCrossentropy(),
'single/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
}
# Compute logits for each ensemble member on the test set.
logits_test = []
for m, member_id in enumerate(unique_selected_members):
ensemble_filename = ensemble_filenames[member_id]
checkpoint.restore(ensemble_filename)
logits = []
test_iterator = iter(test_dataset)
for _ in range(steps_per_eval):
features = next(test_iterator)['features']
logits.append(model(features, training=False))
logits_test.append(tf.concat(logits, axis=0))
logits_test = tf.convert_to_tensor(logits_test)
print('Completed computation of member logits on the test set.')
# Compute test metrics.
test_iterator = iter(test_dataset)
for step in range(steps_per_eval):
labels = next(test_iterator)['labels']
logits = logits_test[:, (step*BATCH_SIZE):((step+1)*BATCH_SIZE)]
labels = tf.cast(labels, tf.int32)
negative_log_likelihood = _ensemble_cross_entropy(labels, logits)
# Per member output probabilities.
per_probs = tf.nn.softmax(logits)
  # Ensemble output probabilities.
probs = tf.reduce_mean(per_probs, axis=0)
metrics['ensemble/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['ensemble/accuracy'].update_state(labels, probs)
# For comparison compute performance of the best single model,
# this is by definition the first model that was selected by the greedy
# selection method.
logits_single = logits_test[0, (step*BATCH_SIZE):((step+1)*BATCH_SIZE)]
probs_single = tf.nn.softmax(logits_single)
metrics_single['single/negative_log_likelihood'].update_state(labels, logits_single)
metrics_single['single/accuracy'].update_state(labels, probs_single)
percent = (step + 1) / steps_per_eval
if step % 25 == 0 or step == steps_per_eval - 1:
    message = ('{:.1%} completion of final test prediction'.format(percent))
print(message)
ensemble_results = {name: metric.result() for name, metric in metrics.items()}
single_results = {name: metric.result() for name, metric in metrics_single.items()}
"""
Explanation: Evaluation on the test set
Let's see how the hyper-deep ensemble performs on the test set.
End of explanation
"""
print('Ensemble performance:')
for m, val in ensemble_results.items():
print(' {}: {}'.format(m, val))
print('\nFor comparison:')
for m, val in single_results.items():
print(' {}: {}'.format(m, val))
"""
Explanation: Here is the final ensemble performance
We gained almost 2 percentage points in terms of accuracy over the best single model!
End of explanation
"""
|
jviada/QuantEcon.py
|
solutions/lakemodel_solutions.ipynb
|
bsd-3-clause
|
%pylab inline
import LakeModel
alpha = 0.012
lamb = 0.2486
b = 0.001808
d = 0.0008333
g = b-d
N0 = 100.
e0 = 0.92
u0 = 1-e0
T = 50
"""
Explanation: Lake Model Solutions
Exercise 1
We begin by importing the necessary modules and initializing the variables.
End of explanation
"""
LM0 = LakeModel.LakeModel(lamb,alpha,b,d)
x0 = LM0.find_steady_state()  # initial conditions
print "Initial Steady State: ", x0
"""
Explanation: Now instantiate the class with the initial conditions of the problem
End of explanation
"""
LM1 = LakeModel.LakeModel(0.2,alpha,b,d)
xbar = LM1.find_steady_state() # new steady state
X_path = vstack(LM1.simulate_stock_path(x0*N0,T)) # simulate stocks
x_path = vstack(LM1.simulate_rate_path(x0,T)) # simulate rates
print "New Steady State: ", xbar
"""
Explanation: New legislation changes $\lambda$ to $0.2$
End of explanation
"""
figure(figsize=[10,9])
subplot(3,1,1)
plot(X_path[:,0])
title(r'Employment')
subplot(3,1,2)
plot(X_path[:,1])
title(r'Unemployment')
subplot(3,1,3)
plot(X_path.sum(1))
title(r'Labor Force')
"""
Explanation: Now plot stocks
End of explanation
"""
figure(figsize=[10,6])
subplot(2,1,1)
plot(x_path[:,0])
hlines(xbar[0],0,T,'r','--')
title(r'Employment Rate')
subplot(2,1,2)
plot(x_path[:,1])
hlines(xbar[1],0,T,'r','--')
title(r'Unemployment Rate')
"""
Explanation: And how the rates evolve:
End of explanation
"""
bhat = 0.003
T_hat = 20
LM1 = LakeModel.LakeModel(lamb,alpha,bhat,d)
"""
Explanation: We see that it takes 20 periods for the economy to converge to its new steady-state levels
Exercise 2
This next exercise has the economy experiencing a boom in entries to the labor market and then later returning to the original levels. For 20 periods the economy has a new entry rate into the labor market
End of explanation
"""
X_path1 = vstack(LM1.simulate_stock_path(x0*N0,T_hat)) # simulate stocks
x_path1 = vstack(LM1.simulate_rate_path(x0,T_hat)) # simulate rates
"""
Explanation: We simulate for 20 periods at the new parameters
End of explanation
"""
X_path2 = vstack(LM0.simulate_stock_path(X_path1[-1,:2],T-T_hat+1)) # simulate stocks
x_path2 = vstack(LM0.simulate_rate_path(x_path1[-1,:2],T-T_hat+1)) # simulate rates
"""
Explanation: Now, using the state after 20 periods as the new initial conditions, we simulate the additional 30 periods
End of explanation
"""
x_path = vstack([x_path1,x_path2[1:]]) # note [1:] to avoid doubling period 20
X_path = vstack([X_path1,X_path2[1:]]) # note [1:] to avoid doubling period 20
figure(figsize=[10,9])
subplot(3,1,1)
plot(X_path[:,0])
title(r'Employment')
subplot(3,1,2)
plot(X_path[:,1])
title(r'Unemployment')
subplot(3,1,3)
plot(X_path.sum(1))
title(r'Labor Force')
"""
Explanation: Finally we combine these two paths and plot
End of explanation
"""
figure(figsize=[10,6])
subplot(2,1,1)
plot(x_path[:,0])
hlines(x0[0],0,T,'r','--')
title(r'Employment Rate')
subplot(2,1,2)
plot(x_path[:,1])
hlines(x0[1],0,T,'r','--')
title(r'Unemployment Rate')
"""
Explanation: And the rates:
End of explanation
"""
|
fastai/fastai
|
nbs/41_tabular.data.ipynb
|
apache-2.0
|
#|export
class TabularDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for tabular data"
@classmethod
@delegates(Tabular.dataloaders, but=["dl_type", "dl_kwargs"])
def from_df(cls,
df:pd.DataFrame,
path:(str,Path)='.', # Location of `df`, defaults to current working directory
procs:list=None, # List of `TabularProc`s
cat_names:list=None, # Column names pertaining to categorical variables
cont_names:list=None, # Column names pertaining to continuous variables
y_names:list=None, # Names of the dependent variables
y_block:TransformBlock=None, # `TransformBlock` to use for the target(s)
valid_idx:list=None, # List of indices to use for the validation set, defaults to a random split
**kwargs
):
"Create `TabularDataLoaders` from `df` in `path` using `procs`"
if cat_names is None: cat_names = []
if cont_names is None: cont_names = list(set(df)-set(L(cat_names))-set(L(y_names)))
splits = RandomSplitter()(df) if valid_idx is None else IndexSplitter(valid_idx)(df)
to = TabularPandas(df, procs, cat_names, cont_names, y_names, splits=splits, y_block=y_block)
return to.dataloaders(path=path, **kwargs)
@classmethod
def from_csv(cls,
csv:(str,Path,io.BufferedReader), # A csv of training data
skipinitialspace:bool=True, # Skip spaces after delimiter
**kwargs
):
"Create `TabularDataLoaders` from `csv` file in `path` using `procs`"
return cls.from_df(pd.read_csv(csv, skipinitialspace=skipinitialspace), **kwargs)
@delegates(TabDataLoader.__init__)
def test_dl(self,
test_items, # Items to create new test `TabDataLoader` formatted the same as the training data
rm_type_tfms=None, # Number of `Transform`s to be removed from `procs`
process:bool=True, # Apply validation `TabularProc`s to `test_items` immediately
inplace:bool=False, # Keep separate copy of original `test_items` in memory if `False`
**kwargs
):
"Create test `TabDataLoader` from `test_items` using validation `procs`"
to = self.train_ds.new(test_items, inplace=inplace)
if process: to.process()
return self.valid.new(to, **kwargs)
Tabular._dbunch_type = TabularDataLoaders
TabularDataLoaders.from_csv = delegates(to=TabularDataLoaders.from_df)(TabularDataLoaders.from_csv)
"""
Explanation: Tabular data
Helper functions to get data in a DataLoaders in the tabular application and higher class TabularDataLoaders
The main class to get your data ready for model training is TabularDataLoaders and its factory methods. Checkout the tabular tutorial for examples of use.
TabularDataLoaders -
End of explanation
"""
show_doc(TabularDataLoaders.from_df)
"""
Explanation: This class should not be used directly, one of the factory methods should be preferred instead. All those factory methods accept as arguments:
cat_names: the names of the categorical variables
cont_names: the names of the continuous variables
y_names: the names of the dependent variables
y_block: the TransformBlock to use for the target
valid_idx: the indices to use for the validation set (defaults to a random split otherwise)
bs: the batch size
val_bs: the batch size for the validation DataLoader (defaults to bs)
shuffle_train: if we shuffle the training DataLoader or not
n: overrides the numbers of elements in the dataset
device: the PyTorch device to use (defaults to default_device())
End of explanation
"""
path = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(path/'adult.csv', skipinitialspace=True)
df.head()
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
cont_names = ['age', 'fnlwgt', 'education-num']
procs = [Categorify, FillMissing, Normalize]
dls = TabularDataLoaders.from_df(df, path, procs=procs, cat_names=cat_names, cont_names=cont_names,
y_names="salary", valid_idx=list(range(800,1000)), bs=64)
dls.show_batch()
show_doc(TabularDataLoaders.from_csv)
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
cont_names = ['age', 'fnlwgt', 'education-num']
procs = [Categorify, FillMissing, Normalize]
dls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, procs=procs, cat_names=cat_names, cont_names=cont_names,
y_names="salary", valid_idx=list(range(800,1000)), bs=64)
show_doc(TabularDataLoaders.test_dl)
"""
Explanation: Let's have a look on an example with the adult dataset:
End of explanation
"""
test_data = {
'age': [49],
'workclass': ['Private'],
'fnlwgt': [101320],
'education': ['Assoc-acdm'],
'education-num': [12.0],
'marital-status': ['Married-civ-spouse'],
'occupation': [''],
'relationship': ['Wife'],
'race': ['White'],
}
input = pd.DataFrame(test_data)
tdl = dls.test_dl(input)
test_ne(0, tdl.dataset.iloc[0]['workclass'])
"""
Explanation: External structured data files can contain unexpected spaces, e.g. after a comma. We can see that in the first row of adult.csv "49, Private,101320, ...". Often trimming is needed. Pandas has a convenient parameter skipinitialspace that is exposed by TabularDataLoaders.from_csv(). Otherwise category labels used for inference later, such as workclass:Private, will be wrongly categorized to 0 or "#na#" if the training label was read as " Private". Let's test this feature.
End of explanation
"""
#|hide
from nbdev.export import notebook2script
notebook2script()
"""
Explanation: Export -
End of explanation
"""
|
rvperry/phys202-2015-work
|
assignments/assignment05/InteractEx01.ipynb
|
mit
|
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
"""
Explanation: Interact Exercise 01
Import
End of explanation
"""
def print_sum(a, b):
    """Print the sum of the arguments a and b."""
    print(a + b)
"""
Explanation: Interact basics
Write a print_sum function that prints the sum of its arguments a and b.
End of explanation
"""
interact(print_sum, a=(-10.0, 10.0, 0.1), b=(-8, 8, 2))
assert True # leave this for grading the print_sum exercise
"""
Explanation: Use the interact function to interact with the print_sum function.
a should be a floating point slider over the interval [-10., 10.] with step sizes of 0.1
b should be an integer slider the interval [-8, 8] with step sizes of 2.
End of explanation
"""
def print_string(s, length=False):
    """Print the string s and optionally its length."""
    print(s)
    if length:
        print(len(s))
"""
Explanation: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
End of explanation
"""
interact(print_string, s='Hello World!', length=True)
assert True # leave this for grading the print_string exercise
"""
Explanation: Use the interact function to interact with the print_string function.
s should be a textbox with the initial value "Hello World!".
length should be a checkbox with an initial value of True.
End of explanation
"""
|
lknelson/text-analysis-2017
|
05-TextExploration/00-IntroductionToTopicModeling_ExerciseSolutions.ipynb
|
bsd-3-clause
|
import pandas
import numpy as np
import matplotlib.pyplot as plt
df_lit = pandas.read_csv("../Data/childrens_lit.csv.bz2", sep='\t', index_col=0, encoding = 'utf-8', compression='bz2')
#drop rows where the text is missing.
df_lit = df_lit.dropna(subset=['text'])
#view the dataframe
df_lit
"""
Explanation: Introduction to Topic Modeling
Today we'll implement the most basic, and the original, topic modeling algorithm, LDA, using Python's scikit-learn. The other major topic modeling package is Gensim.
Learning Goals
Implement a basic topic modeling algorithm and learn how to tweak it
Learn how to use different methods to calculate topic prevalence
Learn how to create some simple graphs with this output
Think though how and why you might use topic modeling in a text analysis project
Outline
<ol start="0">
<li>[The Pandas Dataframe: Children's Literature](#df)</li>
<li>[Fit an LDA Topic Model using scikit-learn](#fit)</li>
<li>[Document by Topic Distribution](#dtd)</li>
<li>[Words Aligned with each Topic](#words)</li>
<li>[Topic Prevalence](#prev)</li>
<li>[Topics Over Time](#time)</li>
</ol>
Key Terms
Topic Modeling:
A statistical model to uncover abstract topics within a text. It uses the co-occurrence of words within documents, compared to their distribution across documents, to uncover these abstract themes. The output is a list of weighted words, which indicate the subject of each topic, and a weight distribution across topics for each document.
LDA:
Latent Dirichlet Allocation. A implementation of topic modeling that assumes a Dirichlet prior. It does not take document order into account, unlike other topic modeling algorithms.
Further Resources
More detailed description of implementing LDA using scikit-learn.
<a id='df'></a>
0. The Pandas Dataframe: Children's Literature
First, we read our children's literature corpus, which is stored as a .csv file on our hard drive, into a Pandas dataframe.
End of explanation
"""
####Adapted From:
#Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
n_samples = 2000
n_topics = 4
n_top_words = 50
##This is a function to print out the top words for each topic in a pretty way.
#Don't worry too much about understanding every line of this code.
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("\nTopic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Vectorize our text using CountVectorizer
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.80, min_df=50,
max_features=None,
stop_words='english'
)
tf = tf_vectorizer.fit_transform(df_lit.text)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_topics=%d..."
% (n_samples, n_topics))
#define the lda function, with desired options
#Check the documentation, linked above, to look through the options
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=20,
learning_method='online',
learning_offset=80.,
total_samples=n_samples,
random_state=0)
#fit the model
lda.fit(tf)
#print the top words per topic, using the function defined above.
#Unlike R, which has a built-in function to print top words, we have to write our own for scikit-learn
#I think this demonstrates the different aims of the two packages: R is for social scientists, Python for computer scientists
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
####Exercise:
###Copy and paste the above code and fit a new model, lda_new, by changing some of the parameters. How does this change the output?
###Suggestions:
## 1. Change the number of topics.
## 2. Do not remove stop words.
## 3. Change other options, either in the vectorize stage or the LDA model
lda_new = LatentDirichletAllocation(n_topics=10, max_iter=20,
learning_method='online',
learning_offset=80.,
total_samples=n_samples,
random_state=0)
#fit the model
lda_new.fit(tf)
"""
Explanation: <a id='fit'></a>
1. Fit a Topic Model, using LDA
Now we're ready to fit the model. This requires the use of CountVecorizer, which we've already used, and the scikit-learn function LatentDirichletAllocation.
See here for more information about this function.
End of explanation
"""
topic_dist = lda.transform(tf)
topic_dist
"""
Explanation: <a id='dtd'></a>
2. Document by Topic Distribution
One thing we may want to do with the output is find the most representative texts for each topic. A simple way to do this (but not memory efficient), is to merge the topic distribution back into the Pandas dataframe.
First get the topic distribution array.
End of explanation
"""
topic_dist_df = pandas.DataFrame(topic_dist)
df_w_topics = topic_dist_df.join(df_lit)
df_w_topics
"""
Explanation: Merge back in with the original dataframe.
End of explanation
"""
print(df_w_topics[['title', 'author gender', 0]].sort_values(by=[0], ascending=False))
print(df_w_topics[['title', 'author gender', 1]].sort_values(by=[1], ascending=False))
#EX: What is the average topic weight by author gender, for each topic?
### Graph these results
#Hint: You can use the python 'range' function and a for-loop
grouped_mean=df_w_topics.groupby('author gender').mean()
grouped_mean[[0,1,2,3]].plot(kind='bar')
plt.show()
"""
Explanation: Now we can sort the dataframe for the topic of interest, and view the top documents for the topics.
Below we sort the documents first by Topic 0 (looking at the top words for this topic I think it's about family, health, and domestic activities), and next by Topic 1 (again looking at the top words I think this topic is about children playing outside in nature). These topics may be a family/nature split?
Look at the titles for the two different topics. Look at the gender of the author. Hypotheses?
End of explanation
"""
#first create word count column
df_w_topics['word_count'] = df_w_topics['text'].apply(lambda x: len(str(x).split()))
df_w_topics['word_count']
#multiple topic weight by word count
df_w_topics['0_wc'] = df_w_topics[0] * df_w_topics['word_count']
df_w_topics['0_wc']
#create a for loop to do this for every topic
topic_columns = range(0, n_topics)
col_list = []
for num in topic_columns:
col = "%d_wc" % num
col_list.append(col)
#Solution
df_w_topics[col] = df_w_topics[num] * df_w_topics['word_count']
df_w_topics
#EX: What is the total number of words aligned with each topic, by author gender?
###Solution
grouped = df_w_topics.groupby("author gender")
grouped.sum()
#EX: What is the proportion of total words aligned with each topic, by author gender?
wc_columns = ['0_wc', '1_wc', '2_wc', '3_wc']
for n in wc_columns:
print(n)
print(grouped[n].sum()/grouped['word_count'].sum())
"""
Explanation: <a id='words'></a>
3. Words Aligned with each Topic
Following DiMaggio et al., we can calculate the total number of words aligned with each topic, and compare by author gender.
End of explanation
"""
###EX:
# Find the most prevalent topic in the corpus.
# Find the least prevalent topic in the corpus.
# Hint: How do we define prevalence? What are different ways of measuring this,
# and the benefits/drawbacks of each?
for e in col_list:
print(e)
print(df_w_topics[e].sum()/df_w_topics['word_count'].sum())
for e in topic_columns:
print(e)
print(df_w_topics[e].mean())
"""
Explanation: Question: Why might we prefer one calculation over the other, i.e., the average topic weight per document versus the average number of words aligned with each topic?
This brings us to...
<a id='prev'></a>
4. Topic Prevalence
End of explanation
"""
grouped_year = df_w_topics.groupby('year')
fig3 = plt.figure()
chrt = 0
for e in col_list:
chrt += 1
ax2 = fig3.add_subplot(2,3, chrt)
(grouped_year[e].sum()/grouped_year['word_count'].sum()).plot(kind='line', title=e)
fig3.tight_layout()
plt.show()
"""
Explanation: <a id='time'></a>
5. Prevalence over time
We can do the same as above, but by year, to graph the prevalence of each topic over time.
End of explanation
"""
|
RyanSkraba/beam
|
examples/notebooks/documentation/transforms/python/elementwise/keys-py.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License")
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Explanation: <a href="https://colab.research.google.com/github/apache/beam/blob/master//Users/dcavazos/src/beam/examples/notebooks/documentation/transforms/python/elementwise/keys-py.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a>
<table align="left"><td><a target="_blank" href="https://beam.apache.org/documentation/transforms/python/elementwise/keys"><img src="https://beam.apache.org/images/logos/full-color/name-bottom/beam-logo-full-color-name-bottom-100.png" width="32" height="32" />View the docs</a></td></table>
End of explanation
"""
!pip install --quiet -U apache-beam
"""
Explanation: Keys
<script type="text/javascript">
localStorage.setItem('language', 'language-py')
</script>
<table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html#apache_beam.transforms.util.Keys"><img src="https://beam.apache.org/images/logos/sdks/python.png" width="32px" height="32px" alt="Pydoc"/> Pydoc</a>
</td>
</table>
<br/><br/><br/>
Takes a collection of key-value pairs and returns the key of each element.
Setup
To run a code cell, you can click the Run cell button at the top left of the cell,
or select it and press Shift+Enter.
Try modifying a code cell and re-running it to see what happens.
To learn more about Colab, see
Welcome to Colaboratory!.
First, let's install the apache-beam module.
End of explanation
"""
import apache_beam as beam
with beam.Pipeline() as pipeline:
icons = (
pipeline
| 'Garden plants' >> beam.Create([
('🍓', 'Strawberry'),
('🥕', 'Carrot'),
('🍆', 'Eggplant'),
('🍅', 'Tomato'),
('🥔', 'Potato'),
])
| 'Keys' >> beam.Keys()
| beam.Map(print)
)
"""
Explanation: Example
In the following example, we create a pipeline with a PCollection of key-value pairs.
Then, we apply Keys to extract the keys and discard the values.
End of explanation
"""
|
0x4a50/udacity-0x4a50-deep-learning-nanodegree
|
tv-script-generation/dlnd_tv_script_generation.ipynb
|
mit
|
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
view_sentence_range = (100, 110)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    from collections import Counter
    counts = Counter(text)
    # Sort the unique words by frequency so that common words get small ids
    vocab = sorted(counts, key=counts.get, reverse=True)
    vocab_to_int = {word: i for i, word in enumerate(vocab)}
    int_to_vocab = {i: word for i, word in enumerate(vocab)}
    return (vocab_to_int, int_to_vocab)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
"""
tokens = {
".": "||Period||",
",": "||Comma||",
'"': "||Quotation_Mark||",
";": "||Semicolon||",
"!": "||Exclamation_Mark||",
"?": "||Question_Mark||",
"(": "||Left_Parentheses||",
")": "||Right_Parentheses||",
"--": "||Dash||",
"\n": "||Return||"
}
return tokens
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
input = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None], name='targets')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
return (input, targets, learning_rate)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
"""
lstm_layers = 1
keep_prob = 0.7
# Your basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
# Getting an initial state of all zeros
initial_state = tf.identity(cell.zero_state(batch_size, tf.float32), name="initial_state")
return (cell, initial_state)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The Rnn size should be set using rnn_size
- Initalize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
"""
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
# TODO: Implement Function
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
return outputs, tf.identity(final_state, name="final_state")
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
"""
embedding = get_embed(input_data, vocab_size, embed_dim)
outputs, final_state = build_rnn(cell, embedding)
logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
return (logits, final_state)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
batch_length = batch_size * seq_length
n_batches = int(len(int_text) / batch_length)
inputs = np.array(int_text[: n_batches * batch_length])
targets = np.array(int_text[1: n_batches * batch_length + 1])
targets[-1] = inputs[0]
input_batches = np.split(inputs.reshape(batch_size, -1), n_batches, 1)
target_batches = np.split(targets.reshape(batch_size, -1), n_batches, 1)
batches = np.array(list(zip(input_batches, target_batches)))
return batches
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2], [ 7 8], [13 14]]
# Batch of targets
[[ 2 3], [ 8 9], [14 15]]
]
# Second Batch
[
# Batch of Input
[[ 3 4], [ 9 10], [15 16]]
# Batch of targets
[[ 4 5], [10 11], [16 17]]
]
# Third Batch
[
# Batch of Input
[[ 5 6], [11 12], [17 18]]
# Batch of targets
[[ 6 7], [12 13], [18 1]]
]
]
```
Notice that the last target value in the last batch is the first input value of the first batch. In this case, 1. This is a common technique used when creating sequence batches, although it is rather unintuitive.
End of explanation
"""
# Number of Epochs
num_epochs = 40
# Batch Size
batch_size = 64
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 256
# Sequence Length
seq_length = 48
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 1
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
InputTensor = loaded_graph.get_tensor_by_name("input:0")
InitialStateTensor = loaded_graph.get_tensor_by_name("initial_state:0")
FinalStateTensor = loaded_graph.get_tensor_by_name("final_state:0")
ProbsTensor = loaded_graph.get_tensor_by_name("probs:0")
return (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
max_prob = np.argmax(probabilities)
return int_to_vocab[max_prob]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
phoebe-project/phoebe2-docs
|
development/tutorials/plotting_advanced.ipynb
|
gpl-3.0
|
#!pip install -I "phoebe>=2.4,<2.5"
"""
Explanation: Advanced: Plotting Options
For basic plotting usage, see the plotting tutorial
PHOEBE 2.4 uses autofig 1.1 as an intermediate layer for highend functionality to matplotlib.
Setup
Let's first make sure we have the latest version of PHOEBE 2.4 installed (uncomment this line if running in an online notebook session such as colab).
End of explanation
"""
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
logger = phoebe.logger()
"""
Explanation: This first line is only necessary for ipython noteboooks - it allows the plots to be shown on this page instead of in interactive mode. Depending on your version of Jupyter, Python, and matplotlib - you may or may not need this line in order to see plots in the notebook.
End of explanation
"""
b = phoebe.default_binary()
b.add_dataset('lc', compute_phases=phoebe.linspace(0,1,101))
b.run_compute(irrad_method='none')
times = b.get_value('times', context='model')
fluxes = b.get_value('fluxes', context='model') + np.random.normal(size=times.shape) * 0.01
sigmas = np.ones_like(times) * 0.05
"""
Explanation: First we're going to create some fake observations so that we can show how to plot observational data. In real life, we would use something like np.loadtxt to get arrays from a data file instead.
End of explanation
"""
b = phoebe.default_binary()
b.set_value('q', 0.8)
b.set_value('ecc', 0.1)
b.set_value('irrad_method', 'none')
b.add_dataset('orb', compute_times=np.linspace(0,4,1000), dataset='orb01', component=['primary', 'secondary'])
b.add_dataset('lc', times=times, fluxes=fluxes, sigmas=sigmas, dataset='lc01')
"""
Explanation: Now we'll create a new Bundle and attach an orbit dataset (without observations) and a light curve dataset (with our "fake" observations - see Datasets for more details):
End of explanation
"""
b.set_value(qualifier='incl', kind='orbit', value=90)
b.run_compute(model='run_with_incl_90')
b.set_value(qualifier='incl', kind='orbit', value=85)
b.run_compute(model='run_with_incl_85')
b.set_value(qualifier='incl', kind='orbit', value=80)
b.run_compute(model='run_with_incl_80')
"""
Explanation: And run several forward models. See Computing Observables for more details.
End of explanation
"""
afig, mplfig = b['orb@run_with_incl_80'].plot(time=1.0, show=True)
"""
Explanation: Time (highlight and uncover)
The built-in plot method also provides convenience options to either highlight the interpolated point for a given time, or only show the dataset up to a given time.
Highlight
The highlight option is enabled by default so long as a time (or times) is passed to plot. It simply adds an extra marker at the sent time - interpolating in the synthetic model if necessary.
End of explanation
"""
afig, mplfig = b['orb@run_with_incl_80'].plot(time=1.0,
highlight_marker='s',
highlight_color='g',
highlight_ms=20,
show=True)
"""
Explanation: To change the style of the "highlighted" points, you can pass matplotlib recognized markers, colors, and markersizes to the highlight_marker, highlight_color, and highlight_ms keywords, respectively.
End of explanation
"""
afig, mplfig = b['orb@run_with_incl_80'].plot(time=1.0,
highlight=False,
show=True)
"""
Explanation: To disable highlighting, simply send highlight=False
End of explanation
"""
afig, mplfig = b['orb@run_with_incl_80'].plot(time=0.5,
uncover=True,
show=True)
"""
Explanation: Uncover
Uncover shows the observations or synthetic model up to the provided time and is disabled by default, even when a time is provided, but is enabled simply by providing uncover=True. There are no additional options available for uncover.
End of explanation
"""
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(xunit='AU', yunit='AU', show=True)
"""
Explanation: Units
Likewise, each array that is plotted is automatically plotted in its default units. To override these defaults, simply provide the unit (as a string or as an astropy units object) for a given axis.
End of explanation
"""
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(xlabel='X POS', ylabel='Z POS', show=True)
"""
Explanation: WARNING: when plotting two arrays with the same dimensions, PHOEBE attempts to set the aspect ratio to equal, but overriding to use two different units will result in undesired results. This may be fixed in the future, but for now can be avoided by using consistent units for the x and y axes when they have the same dimensions.
Axes Labels
Axes labels are automatically generated from the qualifier of the array and the plotted units. To override these defaults, simply pass a string for the label of a given axis.
End of explanation
"""
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(xlim=(-2,2), show=True)
"""
Explanation: Axes Limits
Axes limits are determined by the data automatically. To set custom axes limits, either use matplotlib methods on the returned axes objects, or pass limits as a list or tuple.
End of explanation
"""
afig, mplfig = b['lc01@dataset'].plot(yerror='sigmas', show=True)
"""
Explanation: Errorbars
In the cases of observational data, errorbars can be added by passing the name of the column.
End of explanation
"""
afig, mplfig = b['lc01@dataset'].plot(yerror=None, show=True)
"""
Explanation: To disable the errorbars, simply set yerror=None.
End of explanation
"""
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(c='r', show=True)
"""
Explanation: Colors
Colors of points and lines, by default, cycle according to matplotlib's color policy. To manually set the color, simply pass a matplotlib recognized color to the 'c' keyword.
End of explanation
"""
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(x='times', c='vws', show=True)
"""
Explanation: In addition, you can point to an array in the dataset to use as color.
End of explanation
"""
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(x='times', c='vws', cmap='spring', show=True)
"""
Explanation: Choosing colors works slightly differently for meshes (ie you can set fc for facecolor and ec for edgecolor). For more details, see the tutorial on the MESH dataset.
Colormaps
The colormap is determined automatically based on the parameter used for coloring (e.g. RVs will use a red-blue colormap). To override this, pass a matplotlib recognized colormap to the cmap keyword.
End of explanation
"""
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(x='times', c='vws', draw_sidebars=True, show=True)
"""
Explanation: Adding a Colorbar
To add a colorbar (or sizebar, etc), send draw_sidebars=True to the plot call.
End of explanation
"""
afig, mplfig = b['orb@run_with_incl_80'].plot(show=True, legend=True)
"""
Explanation: Labels and Legends
To add a legend, include legend=True.
For details on placement and formatting of the legend see matplotlib's documentation.
End of explanation
"""
afig, mplfig = b['primary@orb@run_with_incl_80'].plot(label='primary')
afig, mplfig = b['secondary@orb@run_with_incl_80'].plot(label='secondary', legend=True, show=True)
"""
Explanation: The legend labels are generated automatically, but can be overriden by passing a string to the label keyword.
End of explanation
"""
afig, mplfig = b['orb@run_with_incl_80'].plot(show=True, legend=True, legend_kwargs={'loc': 'center', 'facecolor': 'r'})
"""
Explanation: To override the position or styling of the legend, you can pass valid options to legend_kwargs which will be passed on to plt.legend
End of explanation
"""
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(linestyle=':', s=0.1, show=True)
"""
Explanation: Other Plotting Options
Valid plotting options that are directly passed to matplotlib include:
- linestyle
- marker
Note that sizes (markersize, linewidth) should be handled by passing the size to 's' and attempting to set markersize or linewidth directly will raise an error. See also the autofig documention on size scales.
End of explanation
"""
afig, mplfig = b['orb@run_with_incl_80'].plot(time=0, projection='3d', show=True)
"""
Explanation: 3D Axes
To plot in 3D, simply pass projection='3d' to the plot call. To override the defaults for the z-direction, pass a twig or array just as you would for x or y.
End of explanation
"""
|
goodwordalchemy/thinkstats_notes_and_exercises
|
code/chap06_Pdfs_notes.ipynb
|
gpl-3.0
|
%matplotlib inline
import thinkstats2
import thinkplot
import pandas as pd
import numpy as np
import math, random
mean, var = 163, 52.8
std = math.sqrt(var)
pdf = thinkstats2.NormalPdf(mean, std)
print "Density:",pdf.Density(mean + std)
thinkplot.Pdf(pdf, label='normal')
thinkplot.Show()
#by default, makes pmf stetching 3*sigma in either direction
pmf = pdf.MakePmf()
thinkplot.Pmf(pmf,label='normal')
thinkplot.Show()
"""
Explanation: probability density function - the derivative of a CDF. Evaluating it at x gives a probability density, or "the probability per unit of x". To get a probability mass, you have to integrate over x.
The Pdf class provides...
* Density: takes a value x and returns the density at x
* Render: evaluates the density at a discrete set of values and returns a pair of sequences: the sorted values, xs, and their probability densities
* MakePmf: evaluates Density at a discrete set of values and returns a normalized Pmf that approximates the Pdf
* GetLinspace: returns the default set of points used by Render and MakePmf
...but they are implemented in children classes
End of explanation
"""
sample = [random.gauss(mean, std) for i in range(500)]
sample_pdf = thinkstats2.EstimatedPdf(sample)
thinkplot.Pdf(sample_pdf, label='sample PDF made by KDE')
##Evaluates PDF at 101 points
pmf = sample_pdf.MakePmf()
thinkplot.Pmf(pmf, label='sample PMF')
thinkplot.Show()
"""
Explanation: Kernel density estimation - an algorithm that takes a sample and finds an appropriately smooth PDF that fits the data.
End of explanation
"""
def RawMoment(xs, k):
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
"""
Explanation: Advantages of KDE:
Visualization - an estimated PDF makes the shape of a distribution easy to see at a glance.
Interpolation - if you believe the distribution is smooth, you can use KDE to estimate densities at values that don't appear in the sample.
Simulation - KDE smooths out a small sample, allowing a wider range of outcomes during simulation.
discretizing a PMF - if you evaluate a PDF at discrete points, you can generate a PMF that is an approximation of the PDF.
statistic - any time you take a sample and reduce it to a single number, that number is a statistic.
raw moment - if you have a sample of values, $x_i$, the $k$th raw moment is:
$$
m'_k = \frac{1}{n} \sum_i x_i^k
$$
when k = 1 the result is the sample mean.
central moments are more useful...
End of explanation
"""
##normalized so there are no units
def StandardizedMoment(xs, k):
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
return StandardizedMoment(xs, 3)
"""
Explanation: ...note that when k = 2, the second central moment is variance.
If we attach a weight along a ruler at each location, $x_i$, and then spin the ruler around the mean, the moment of inertia of the spinning weights is the variance of the values.
Skewness describes the shape of a distribution. Negative means the distribution skews left; positive means it skews right. To compute the sample skewness $g_1$...
End of explanation
"""
def Median(xs):
cdf = thinkstats2.Cdf(xs)
return cdf.Value(0.5)
def PearsonMedianSkewness(xs):
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
"""
Explanation: Pearson's median skewness coefficient is a measure of the skewness based on the difference between the sample mean and median:
$$
g_p = 3(\bar{x}-m)/S
$$
It is a more robust statistic than sample skewness because it is less sensitive to outliers.
End of explanation
"""
import hinc, hinc2
print "starting..."
df = hinc.ReadData()
log_sample = hinc2.InterpolateSample(df)
log_cdf = thinkstats2.Cdf(log_sample)
print "done"
# thinkplot.Cdf(log_cdf)
# thinkplot.Show(xlabel='household income',
# ylabel='CDF')
"""
Explanation: To summarize the moments:
the mean is a raw moment with k = 1
the variance is a central moment with k = 2
the sample skewness is a standardized moment with k = 3
note that Pearson's median skewness is a more robust measure of skewness.
Exercise
End of explanation
"""
import density
sample = np.power(10,log_sample)
mean, median = density.Summarize(sample)
log_pdf = thinkstats2.EstimatedPdf(log_sample)
thinkplot.Pdf(log_pdf, label='KDE of income')
thinkplot.Show(xlabel='log10 $',
ylabel='PDF')
thinkplot.PrePlot(2, rows=2)
thinkplot.SubPlot(1)
sample_cdf = thinkstats2.Cdf(sample, label='SampleCdf')
thinkplot.Cdf(sample_cdf)
thinkplot.SubPlot(2)
sample_pdf = thinkstats2.EstimatedPdf(sample)
thinkplot.Pdf(sample_pdf)
pctBelowMean = sample_cdf.Prob(mean) * 100
print "%d%% of households report taxable incomes below the mean" % pctBelowMean
"""
Explanation: Compute the mean, median, skewness, and Pearson's skewness. What fraction of households report a taxable income below the mean?
End of explanation
"""
|
mdeff/ntds_2016
|
toolkit/04_sol_visualization.ipynb
|
mit
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Random time series.
n = 1000
rs = np.random.RandomState(42)
data = rs.randn(n, 4).cumsum(axis=0)
plt.figure(figsize=(15,5))
plt.plot(data[:, 0], label='A')
plt.plot(data[:, 1], '.-k', label='B')
plt.plot(data[:, 2], '--m', label='C')
plt.plot(data[:, 3], ':', label='D')
plt.legend(loc='upper left')
plt.xticks(range(0, 1000, 50))
plt.ylabel('Value')
plt.xlabel('Day')
plt.grid()
idx = pd.date_range('1/1/2000', periods=n)
df = pd.DataFrame(data, index=idx, columns=list('ABCD'))
df.plot(figsize=(15,5));
"""
Explanation: A Python Tour of Data Science: Data Visualization
Michaël Defferrard, PhD student, EPFL LTS2
Exercise
Data visualization is a key aspect of exploratory data analysis.
During this exercise we'll gradually build more and more complex visualizations. We'll do this by replicating plots. Try to reproduce the lines but also the axis labels, legends or titles.
Goal of data visualization: clearly and efficiently communicate information through visual representations. While tables are generally used to look up a specific measurement, charts are used to show patterns or relationships.
Means: mainly statistical graphics for exploratory analysis, e.g. scatter plots, histograms, probability plots, box plots, residual plots, but also infographics for communication.
Data visualization is both an art and a science. It should combine both aesthetic form and functionality.
1 Time series
To start slowly, let's make a static line plot from some time series. Reproduce the plots below using:
1. The procedural API of matplotlib, the main data visualization library for Python. Its procedural API is similar to MATLAB and convenient for interactive work.
2. Pandas, which wraps matplotlib around his DataFrame format and makes many standard plots easy to code. It offers many helpers for data visualization.
Hint: to plot with pandas, you first need to create a DataFrame, pandas' tabular data format.
End of explanation
"""
data = [10, 40, 25, 15, 10]
categories = list('ABCDE')
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
axes[1].pie(data, explode=[0,.1,0,0,0], labels=categories, autopct='%1.1f%%', startangle=90)
axes[1].axis('equal')
pos = range(len(data))
axes[0].bar(pos, data, align='center')
axes[0].set_xticks(pos)
axes[0].set_xticklabels(categories)
axes[0].set_xlabel('Category')
axes[0].set_title('Allotment');
"""
Explanation: 2 Categories
Categorical data is best represented by bar or pie charts. Reproduce the plots below using the object-oriented API of matplotlib, which is recommended for programming.
Question: What are the pros / cons of each plot?
Tip: the matplotlib gallery is a convenient starting point.
End of explanation
"""
import seaborn as sns
import os
df = sns.load_dataset('iris', data_home=os.path.join('..', 'data'))
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
g = sns.distplot(df['petal_width'], kde=True, rug=False, ax=axes[0])
g.set(title='Distribution of petal width')
g = sns.boxplot('species', 'petal_width', data=df, ax=axes[1])
g.set(title='Distribution of petal width by species');
import ggplot
ggplot.ggplot(df, ggplot.aes(x='petal_width', fill='species')) + \
ggplot.geom_histogram() + \
ggplot.ggtitle('Distribution of Petal Width by Species')
import altair
altair.Chart(df).mark_bar(opacity=.75).encode(
x=altair.X('petal_width', bin=altair.Bin(maxbins=30)),
y='count(*)',
color=altair.Color('species')
)
"""
Explanation: 3 Frequency
A frequency plot is a graph that shows the pattern in a set of data by plotting how often particular values of a measure occur. They often take the form of an histogram or a box plot.
Reproduce the plots with the following three libraries, which provide high-level declarative syntax for statistical visualization as well as a convenient interface to pandas:
* Seaborn is a statistical visualization library based on matplotlib. It provides a high-level interface for drawing attractive statistical graphics. Its advantage is that you can modify the produced plots with matplotlib, so you loose nothing.
* ggplot is a (partial) port of the popular ggplot2 for R. It has his roots in the influencial book the grammar of graphics. Convenient if you know ggplot2 already.
* Vega is a declarative format for statistical visualization based on D3.js, a low-level javascript library for interactive visualization. Vincent (discontinued) and altair are Python libraries to vega. Altair is quite new and does not provide all the needed functionality yet, but it is promising !
Hints:
* Seaborn, look at distplot() and boxplot().
* ggplot, we are interested by the geom_histogram geometry.
End of explanation
"""
sns.pairplot(df, hue="species");
"""
Explanation: 4 Correlation
Scatter plots are very much used to assess the correlation between 2 variables. Pair plots are then a useful way of displaying the pairwise relations between variables in a dataset.
Use the seaborn pairplot() function to analyze how separable is the iris dataset.
End of explanation
"""
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
pca = PCA(n_components=2)
X = pca.fit_transform(df.values[:, :4])
df['pca1'] = X[:, 0]
df['pca2'] = X[:, 1]
tsne = TSNE(n_components=2)
X = tsne.fit_transform(df.values[:, :4])
df['tsne1'] = X[:, 0]
df['tsne2'] = X[:, 1]
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.swarmplot(x='pca1', y='pca2', data=df, hue='species', ax=axes[0])
sns.swarmplot(x='tsne1', y='tsne2', data=df, hue='species', ax=axes[1]);
"""
Explanation: 5 Dimensionality reduction
Humans can only comprehend up to 3 dimensions (in space, then there is e.g. color or size), so dimensionality reduction is often needed to explore high dimensional datasets. Analyze how separable is the iris dataset by visualizing it in a 2D scatter plot after reduction from 4 to 2 dimensions with two popular methods:
1. The classical principal componant analysis (PCA).
2. t-distributed stochastic neighbor embedding (t-SNE).
Hints:
* t-SNE is a stochastic method, so you may want to run it multiple times.
* The easiest way to create the scatter plot is to add columns to the pandas DataFrame, then use the Seaborn swarmplot().
End of explanation
"""
|
tensorflow/workshops
|
tfx_colabs/TFX_Workshop_Colab.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright © 2019 The TensorFlow Authors.
End of explanation
"""
!pip install -q -U \
tensorflow==2.0.0 \
tfx==0.15.0 \
pyarrow==0.14.1
!pip install -U grpcio==1.24.3
"""
Explanation: TensorFlow Extended (TFX) Workshop
Run this notebook in Colab
Running a simple pipeline manually in a Colab Notebook
This notebook demonstrates how to use Jupyter/Colab notebooks for TFX iterative development. Here, we walk through the Chicago Taxi example in an interactive notebook.
Working in an interactive notebook is a useful way to become familiar with the structure of a TFX pipeline. It's also useful when doing development of your own pipelines as a lightweight development environment, but you should be aware that there are differences in the way interactive notebooks are orchestrated, and how they access metadata artifacts.
Orchestration
In a production deployment of TFX you will use an orchestrator such as Apache Airflow, Kubeflow, or Apache Beam. In an interactive notebook the notebook itself is the orchestrator, running each TFX component as you execute the notebook cells.
Metadata
In a production deployment of TFX you will access metadata through the ML Metadata (MLMD) API. MLMD stores metadata properties in a database such as MySQL, and stores the metadata payloads in a persistent store such as on your filesystem. In an interactive notebook, both properties and payloads are stored in the /tmp directory on the Jupyter notebook or Colab server.
Setup
First, install the necessary packages, download data, import modules and set up paths.
Install TFX and TensorFlow
Note
Because of some of the updates to packages you must use the button at the bottom of the output of this cell to restart the runtime. Following restart, you should rerun this cell.
End of explanation
"""
import os
import pprint
import tempfile
import urllib
import tensorflow as tf
pp = pprint.PrettyPrinter()
import tfx
from tfx.components.evaluator.component import Evaluator
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.example_validator.component import ExampleValidator
from tfx.components.model_validator.component import ModelValidator
from tfx.components.pusher.component import Pusher
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.components.trainer.component import Trainer
from tfx.components.transform.component import Transform
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import external_input
from tensorflow.core.example import example_pb2
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
import tensorflow_data_validation as tfdv
import tensorflow_transform as tft
import tensorflow_model_analysis as tfma
"""
Explanation: Import packages
Import necessary packages, including standard TFX component classes.
End of explanation
"""
print('TensorFlow version: {}'.format(tf.__version__))
print('TFX version: {}'.format(tfx.__version__))
print('TFT version: {}'.format(tft.__version__))
print('TFDV version: {}'.format(tfdv.__version__))
print('TFMA version: {}'.format(tfma.VERSION_STRING))
"""
Explanation: Check the versions
End of explanation
"""
# Download the example data.
_data_root = tempfile.mkdtemp(prefix='tfx-data')
DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv'
_data_filepath = os.path.join(_data_root, "data.csv")
urllib.request.urlretrieve(DATA_PATH, _data_filepath)
"""
Explanation: Download example data
Download the sample dataset for use in our TFX pipeline.
The data comes from the Taxi Trips dataset released by the City of Chicago. You will develop a binary classification model to predict whether or not customers will tip their taxi drivers more or less than 20%.
The columns in the dataset are:
<table>
<tr><td>pickup_community_area</td><td>fare</td><td>trip_start_month</td></tr>
<tr><td>trip_start_hour</td><td>trip_start_day</td><td>trip_start_timestamp</td></tr>
<tr><td>pickup_latitude</td><td>pickup_longitude</td><td>dropoff_latitude</td></tr>
<tr><td>dropoff_longitude</td><td>trip_miles</td><td>pickup_census_tract</td></tr>
<tr><td>dropoff_census_tract</td><td>payment_type</td><td>company</td></tr>
<tr><td>trip_seconds</td><td>dropoff_community_area</td><td>tips</td></tr>
</table>
End of explanation
"""
!head {_data_filepath}
"""
Explanation: Take a quick look at the CSV file.
End of explanation
"""
# Here, we create an InteractiveContext using default parameters. This will
# use a temporary directory with an ephemeral ML Metadata database instance.
# To use your own pipeline root or database, the optional properties
# `pipeline_root` and `metadata_connection_config` may be passed to
# InteractiveContext.
context = InteractiveContext()
"""
Explanation: Create the InteractiveContext
An interactive context is used to provide global context when running a TFX pipeline in a notebook without using a runner or orchestrator such as Apache Airflow or Kubeflow. This style of development is only useful when developing the code for a pipeline, and cannot currently be used to deploy a working pipeline to production.
End of explanation
"""
# Use the packaged CSV input data.
input_data = external_input(_data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=input_data)
context.run(example_gen)
"""
Explanation: Run TFX Components Interactively
In the cells that follow you will construct TFX components and run each one interactively within the InteractiveContext to obtain ExecutionResult objects. This mirrors the process of an orchestrator running components in a TFX DAG based on when the dependencies for each component are met.
The ExampleGen Component
In any ML development process the first step when starting code development is to ingest the training and test datasets. The ExampleGen component brings data into the TFX pipeline.
Create an ExampleGen component and run it.
End of explanation
"""
for artifact in example_gen.outputs['examples'].get():
print(artifact.split, artifact.uri)
"""
Explanation: ExampleGen's outputs include 2 artifacts: the training examples and the eval examples (by default, split 2/3 training, 1/3 eval):
End of explanation
"""
train_uri = example_gen.outputs['examples'].get()[0].uri
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
decoder = tfdv.TFExampleDecoder()
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = decoder.decode(serialized_example)
pp.pprint(example)
"""
Explanation: Take a peek at the output training examples to see what they look like.
Get the URI of the output artifact representing the training examples, which is a directory
Get the list of files in this directory (all compressed TFRecord files), and create a TFRecordDataset to read these files
Iterate over the first 3 records and decode them using a TFExampleDecoder to check the results:
End of explanation
"""
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(
input_data=example_gen.outputs['examples'])
context.run(statistics_gen)
"""
Explanation: The StatisticsGen Component
The StatisticsGen component computes descriptive statistics for your dataset. The statistics that it generates can be visualized for review, and are used for example validation and to infer a schema.
Create a StatisticsGen component and run it.
End of explanation
"""
train_uri = statistics_gen.outputs['statistics'].get()[0].uri
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
dataset = tf.data.TFRecordDataset(tfrecord_filenames)
for tfrecord in dataset.take(1):
serialized_example = tfrecord.numpy()
stats = statistics_pb2.DatasetFeatureStatisticsList()
stats.ParseFromString(serialized_example)
"""
Explanation: Again, let's take a peek at the output training artifact. Note that this time it is a TFRecord file containing a single record with a serialized DatasetFeatureStatisticsList protobuf:
End of explanation
"""
tfdv.visualize_statistics(stats)
"""
Explanation: The statistics can be visualized using the tfdv.visualize_statistics() function:
End of explanation
"""
# Generates schema based on statistics files.
infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])
context.run(infer_schema)
"""
Explanation: The SchemaGen Component
The SchemaGen component generates a schema for your data based on the statistics from StatisticsGen. It tries to infer the data types of each of your features, and the ranges of legal values for categorical features.
Create a SchemaGen component and run it.
End of explanation
"""
train_uri = infer_schema.outputs['schema'].get()[0].uri
schema_filename = os.path.join(train_uri, "schema.pbtxt")
schema = tfx.utils.io_utils.parse_pbtxt_file(file_name=schema_filename,
message=schema_pb2.Schema())
"""
Explanation: The generated artifact is just a schema.pbtxt containing a text representation of a schema_pb2.Schema protobuf:
End of explanation
"""
tfdv.display_schema(schema)
"""
Explanation: It can be visualized using tfdv.display_schema():
End of explanation
"""
# Performs anomaly detection based on statistics and data schema.
validate_stats = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=infer_schema.outputs['schema'])
context.run(validate_stats)
"""
Explanation: The ExampleValidator Component
The ExampleValidator performs anomaly detection, based on the statistics from StatisticsGen and the schema from SchemaGen. It looks for problems such as missing values, values of the wrong type, or categorical values outside of the domain of acceptable values.
Create an ExampleValidator component and run it.
End of explanation
"""
train_uri = validate_stats.outputs['anomalies'].get()[0].uri
anomalies_filename = os.path.join(train_uri, "anomalies.pbtxt")
anomalies = tfx.utils.io_utils.parse_pbtxt_file(
file_name=anomalies_filename,
message=anomalies_pb2.Anomalies())
"""
Explanation: The output artifact of ExampleValidator is an anomalies.pbtxt file describing an anomalies_pb2.Anomalies protobuf:
End of explanation
"""
tfdv.display_anomalies(anomalies)
"""
Explanation: This can be visualized using the tfdv.display_anomalies() function. Did it find any anomalies?
End of explanation
"""
_constants_module_file = 'chicago_taxi_constants.py'
%%writefile {_constants_module_file}
# Categorical features are assumed to each have a maximum value in the dataset.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
CATEGORICAL_FEATURE_KEYS = [
'trip_start_hour', 'trip_start_day', 'trip_start_month',
'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
'dropoff_community_area'
]
DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
FEATURE_BUCKET_COUNT = 10
BUCKET_FEATURE_KEYS = [
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
VOCAB_FEATURE_KEYS = [
'payment_type',
'company',
]
# Keys
LABEL_KEY = 'tips'
FARE_KEY = 'fare'
def transformed_name(key):
return key + '_xf'
"""
Explanation: The Transform Component
The Transform component performs data transformations and feature engineering. The results include an input TensorFlow graph which is used during both training and serving to preprocess the data before training or inference. This graph becomes part of the SavedModel that is the result of model training. Since the same input graph is used for both training and serving, the preprocessing will always be the same, and only needs to be written once.
The Transform component requires more code than many other components because of the arbitrary complexity of the feature engineering that you may need for the data and/or model that you're working with. It requires code files to be available which define the processing needed.
Define some constants and functions for both the Transform component and the Trainer component. Define them in a Python module, in this case saved to disk using the %%writefile magic command since you are working in a notebook:
End of explanation
"""
_transform_module_file = 'chicago_taxi_transform.py'
%%writefile {_transform_module_file}
import tensorflow_transform as tft
import tensorflow as tf
from tensorflow_transform.tf_metadata import schema_utils  # used by _get_raw_feature_spec below
from chicago_taxi_constants import *
def _transformed_names(keys):
return [transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn(filenames):
"""Small utility returning a record reader that can read gzip'ed files."""
return tf.data.TFRecordDataset(
filenames,
compression_type='GZIP')
def _fill_in_missing(x):
"""Replace missing values in a SparseTensor.
Fills in missing values of `x` with '' or 0, and converts to a dense tensor.
Args:
x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
in the second dimension.
Returns:
A rank 1 tensor where missing values of `x` have been filled in.
"""
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(
tf.sparse.to_dense(
tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
default_value),
axis=1)
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in DENSE_FLOAT_FEATURE_KEYS:
# Preserve this feature as a dense float, setting nan's to the mean.
outputs[transformed_name(key)] = tft.scale_to_z_score(
_fill_in_missing(inputs[key]))
for key in VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[transformed_name(key)] = tft.compute_and_apply_vocabulary(
_fill_in_missing(inputs[key]),
top_k=VOCAB_SIZE,
num_oov_buckets=OOV_SIZE)
for key in BUCKET_FEATURE_KEYS:
outputs[transformed_name(key)] = tft.bucketize(
_fill_in_missing(inputs[key]), FEATURE_BUCKET_COUNT,
always_return_num_quantiles=False)
for key in CATEGORICAL_FEATURE_KEYS:
outputs[transformed_name(key)] = _fill_in_missing(inputs[key])
# Was this passenger a big tipper?
taxi_fare = _fill_in_missing(inputs[FARE_KEY])
tips = _fill_in_missing(inputs[LABEL_KEY])
outputs[transformed_name(LABEL_KEY)] = tf.where(
tf.math.is_nan(taxi_fare),
tf.cast(tf.zeros_like(taxi_fare), tf.int64),
# Test if the tip was > 20% of the fare.
tf.cast(
tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
return outputs
"""
Explanation: Now define a module containing the preprocessing_fn() function that will be passed to the Transform component:
End of explanation
"""
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=infer_schema.outputs['schema'],
module_file=_transform_module_file)
context.run(transform)
"""
Explanation: Create and run the Transform component, referring to the files that were created above.
End of explanation
"""
transform.outputs
"""
Explanation: The Transform component has 2 types of outputs:
* transform_graph is the graph that can perform the preprocessing operations (this graph will be included in the serving and evaluation models).
* transformed_examples represents the preprocessed training and evaluation data.
End of explanation
"""
train_uri = transform.outputs['transform_graph'].get()[0].uri
os.listdir(train_uri)
"""
Explanation: Take a peek at the transform_graph artifact. It points to a directory containing 3 subdirectories.
End of explanation
"""
train_uri = transform.outputs['transformed_examples'].get()[1].uri
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
decoder = tfdv.TFExampleDecoder()
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = decoder.decode(serialized_example)
pp.pprint(example)
"""
Explanation: The transform_fn subdirectory contains the actual preprocessing graph. The metadata subdirectory contains the schema of the original data. The transformed_metadata subdirectory contains the schema of the preprocessed data.
Take a look at some of the transformed examples and check that they are indeed processed as intended.
End of explanation
"""
# Setup paths.
_trainer_module_file = 'chicago_taxi_trainer.py'
%%writefile {_trainer_module_file}
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from chicago_taxi_constants import *
def transformed_names(keys):
return [transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn(filenames):
"""Small utility returning a record reader that can read gzip'ed files."""
return tf.data.TFRecordDataset(
filenames,
compression_type='GZIP')
def _build_estimator(config, hidden_units=None, warm_start_from=None):
"""Build an estimator for predicting taxi tips
Args:
config: tf.estimator.RunConfig defining the runtime environment for the
estimator (including model_dir).
hidden_units: [int], the layer sizes of the DNN (input layer first)
warm_start_from: Optional directory to warm start from.
Returns:
The estimator that will be used for training and eval.
"""
real_valued_columns = [
tf.feature_column.numeric_column(key, shape=())
for key in transformed_names(DENSE_FLOAT_FEATURE_KEYS)
]
categorical_columns = [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=VOCAB_SIZE + OOV_SIZE, default_value=0)
for key in transformed_names(VOCAB_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=FEATURE_BUCKET_COUNT, default_value=0)
for key in transformed_names(BUCKET_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity(
key,
num_buckets=num_buckets,
default_value=0) for key, num_buckets in zip(
transformed_names(CATEGORICAL_FEATURE_KEYS),
MAX_CATEGORICAL_FEATURE_VALUES)
]
return tf.estimator.DNNLinearCombinedRegressor(
config=config,
linear_feature_columns=categorical_columns,
dnn_feature_columns=real_valued_columns,
dnn_hidden_units=hidden_units or [100, 70, 50, 25],
warm_start_from=warm_start_from)
def _example_serving_receiver_fn(tf_transform_graph, schema):
"""Build the serving in inputs.
Args:
tf_transform_graph: A TFTransformOutput.
schema: the schema of the input data.
Returns:
Tensorflow graph which parses examples, applying tf-transform to them.
"""
raw_feature_spec = _get_raw_feature_spec(schema)
raw_feature_spec.pop(LABEL_KEY)
raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
raw_feature_spec, default_batch_size=None)
serving_input_receiver = raw_input_fn()
transformed_features = tf_transform_graph.transform_raw_features(
serving_input_receiver.features)
return tf.estimator.export.ServingInputReceiver(
transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_graph, schema):
"""Build everything needed for the tf-model-analysis to run the model.
Args:
tf_transform_graph: A TFTransformOutput.
schema: the schema of the input data.
Returns:
EvalInputReceiver function, which contains:
- Tensorflow graph which parses raw untransformed features, applies the
tf-transform preprocessing operators.
- Set of raw, untransformed features.
- Label against which predictions will be compared.
"""
# Notice that the inputs are raw features, not transformed features here.
raw_feature_spec = _get_raw_feature_spec(schema)
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
# Add a parse_example operator to the tensorflow graph, which will parse
# raw, untransformed, tf examples.
features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
# Now that we have our raw examples, process them through the tf-transform
# function computed during the preprocessing step.
transformed_features = tf_transform_graph.transform_raw_features(
features)
# The key name MUST be 'examples'.
receiver_tensors = {'examples': serialized_tf_example}
# NOTE: Model is driven by transformed features (since training works on the
# materialized output of TFT), but slicing will happen on raw features.
features.update(transformed_features)
return tfma.export.EvalInputReceiver(
features=features,
receiver_tensors=receiver_tensors,
labels=transformed_features[transformed_name(LABEL_KEY)])
def _input_fn(filenames, tf_transform_graph, batch_size=200):
"""Generates features and labels for training or evaluation.
Args:
filenames: [str] list of CSV files to read data from.
tf_transform_graph: A TFTransformOutput.
batch_size: int First dimension size of the Tensors returned by input_fn
Returns:
A (features, indices) tuple where features is a dictionary of
Tensors, and indices is a single Tensor of label indices.
"""
transformed_feature_spec = (
tf_transform_graph.transformed_feature_spec().copy())
dataset = tf.data.experimental.make_batched_features_dataset(
filenames, batch_size, transformed_feature_spec, reader=_gzip_reader_fn)
transformed_features = dataset.make_one_shot_iterator().get_next()
# We pop the label because we do not want to use it as a feature while we're
# training.
return transformed_features, transformed_features.pop(
transformed_name(LABEL_KEY))
# TFX will call this function
def trainer_fn(hparams, schema):
"""Build the estimator using the high level API.
Args:
hparams: Holds hyperparameters used to train the model as name/value pairs.
schema: Holds the schema of the training examples.
Returns:
A dict of the following:
- estimator: The estimator that will be used for training and eval.
- train_spec: Spec for training.
- eval_spec: Spec for eval.
- eval_input_receiver_fn: Input function for eval.
"""
# Number of nodes in the first layer of the DNN
first_dnn_layer_size = 100
num_dnn_layers = 4
dnn_decay_factor = 0.7
train_batch_size = 40
eval_batch_size = 40
tf_transform_graph = tft.TFTransformOutput(hparams.transform_output)
train_input_fn = lambda: _input_fn(
hparams.train_files,
tf_transform_graph,
batch_size=train_batch_size)
eval_input_fn = lambda: _input_fn(
hparams.eval_files,
tf_transform_graph,
batch_size=eval_batch_size)
train_spec = tf.estimator.TrainSpec(
train_input_fn,
max_steps=hparams.train_steps)
serving_receiver_fn = lambda: _example_serving_receiver_fn(
tf_transform_graph, schema)
exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
eval_spec = tf.estimator.EvalSpec(
eval_input_fn,
steps=hparams.eval_steps,
exporters=[exporter],
name='chicago-taxi-eval')
run_config = tf.estimator.RunConfig(
save_checkpoints_steps=999, keep_checkpoint_max=1)
run_config = run_config.replace(model_dir=hparams.serving_model_dir)
estimator = _build_estimator(
# Construct layer sizes with exponential decay
hidden_units=[
max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
for i in range(num_dnn_layers)
],
config=run_config,
warm_start_from=hparams.warm_start_from)
# Create an input receiver for TFMA processing
receiver_fn = lambda: _eval_input_receiver_fn(
tf_transform_graph, schema)
return {
'estimator': estimator,
'train_spec': train_spec,
'eval_spec': eval_spec,
'eval_input_receiver_fn': receiver_fn
}
"""
Explanation: The Trainer Component
The Trainer component trains models using TensorFlow.
Create a Python module containing a trainer_fn function, which must return an estimator. If you prefer creating a Keras model, you can do so and then convert it to an estimator using keras.model_to_estimator().
End of explanation
"""
# Uses user-provided Python function that implements a model using TensorFlow.
trainer = Trainer(
module_file=_trainer_module_file,
transformed_examples=transform.outputs['transformed_examples'],
schema=infer_schema.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
context.run(trainer)
"""
Explanation: Create and run the Trainer component.
End of explanation
"""
train_uri = trainer.outputs['model'].get()[0].uri
serving_model_path = os.path.join(train_uri, 'serving_model_dir', 'export', 'chicago-taxi')
latest_serving_model_path = os.path.join(serving_model_path, max(os.listdir(serving_model_path)))
exported_model = tf.saved_model.load(latest_serving_model_path)
exported_model.graph.get_operations()[:10] + ["..."]
"""
Explanation: Take a peek at the trained model which was exported from Trainer.
End of explanation
"""
%load_ext tensorboard
%tensorboard --bind_all --logdir {os.path.join(train_uri, 'serving_model_dir')}
"""
Explanation: Analyze Training with TensorBoard
Use TensorBoard to analyze the model training that was done in Trainer, and see how well our model trained.
End of explanation
"""
# Uses TFMA to compute evaluation statistics over features of a model.
model_analyzer = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
evaluator_pb2.SingleSlicingSpec(
column_for_slicing=['weekday'])
]))
context.run(model_analyzer)
model_analyzer.outputs
"""
Explanation: The Evaluator Component
The Evaluator component analyzes model performance using the TensorFlow Model Analysis library. It runs inference requests on particular subsets of the test dataset, based on which slices are defined by the developer. Knowing which slices should be analyzed requires domain knowledge of what is important in this particular use case or domain.
Create and run an Evaluator component.
End of explanation
"""
import csv
BASE_DIR = tempfile.mkdtemp()
reader = csv.DictReader(open(_data_filepath))
examples = []
for line in reader:
example = tf.train.Example()
for feature in schema.feature:
key = feature.name
if len(line[key]) > 0:
if feature.type == schema_pb2.FLOAT:
example.features.feature[key].float_list.value[:] = [float(line[key])]
elif feature.type == schema_pb2.INT:
example.features.feature[key].int64_list.value[:] = [int(line[key])]
elif feature.type == schema_pb2.BYTES:
example.features.feature[key].bytes_list.value[:] = [line[key].encode('utf8')]
else:
if feature.type == schema_pb2.FLOAT:
example.features.feature[key].float_list.value[:] = []
elif feature.type == schema_pb2.INT:
example.features.feature[key].int64_list.value[:] = []
elif feature.type == schema_pb2.BYTES:
example.features.feature[key].bytes_list.value[:] = []
examples.append(example)
TFRecord_file = os.path.join(BASE_DIR, 'train_data.rio')
with tf.io.TFRecordWriter(TFRecord_file) as writer:
for example in examples:
writer.write(example.SerializeToString())
writer.flush()
writer.close()
!ls {TFRecord_file}
"""
Explanation: Use the Evaluator results to generate model performance data which can be visualized. First create evaluation input data.
End of explanation
"""
def run_and_render(eval_model=None, slice_list=None, slice_idx=0):
"""Runs the model analysis and renders the slicing metrics
Args:
eval_model: An instance of tf.saved_model saved with evaluation data
slice_list: A list of tfma.slicer.SingleSliceSpec giving the slices
slice_idx: An integer index into slice_list specifying the slice to use
Returns:
A SlicingMetricsViewer object if in Jupyter notebook; None if in Colab.
"""
eval_result = tfma.run_model_analysis(eval_shared_model=eval_model,
data_location=TFRecord_file,
file_format='tfrecords',
slice_spec=slice_list,
output_path='sample_data',
extractors=None)
return tfma.view.render_slicing_metrics(eval_result, slicing_spec=slice_list[slice_idx] if slice_list else None)
# Load the TFMA results for the first training run
# This will take a minute
eval_model_base_dir_0 = os.path.join(train_uri, 'eval_model_dir')
eval_model_dir_0 = os.path.join(eval_model_base_dir_0,
max(os.listdir(eval_model_base_dir_0)))
eval_shared_model_0 = tfma.default_eval_shared_model(
eval_saved_model_path=eval_model_dir_0)
# Slice our data by the trip_start_hour feature
slices = [tfma.slicer.SingleSliceSpec(columns=['trip_start_hour'])]
run_and_render(eval_model=eval_shared_model_0, slice_list=slices, slice_idx=0)
"""
Explanation: Run the analysis of a particular slice of data.
End of explanation
"""
evaluation_uri = model_analyzer.outputs['output'].get()[0].uri
eval_result = tfma.load_eval_result(evaluation_uri)
print('{}\n\nslicing_metrics:\n'.format(eval_result))
for metric in eval_result.slicing_metrics:
pp.pprint(metric)
"""
Explanation: Print the slicing metrics.
End of explanation
"""
eval_path_uri = model_analyzer.outputs['output'].get()[0].uri
tfrecord_filenames = [os.path.join(eval_path_uri, name)
for name in os.listdir(eval_path_uri)]
pp.pprint(tfrecord_filenames)
dataset = tf.data.TFRecordDataset(tfrecord_filenames)
pp.pprint(dataset)
"""
Explanation: Examine the output data.
End of explanation
"""
# Performs quality validation of a candidate model (compared to a baseline).
model_validator = ModelValidator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'])
context.run(model_validator)
"""
Explanation: The ModelValidator Component
The ModelValidator component performs validation of your candidate model compared to the previously deployed model (if any) using criteria that you define, or to a baseline value. If the new model scores better than the previous model it will be "blessed" by ModelValidator, approving it for deployment.
End of explanation
"""
model_validator.outputs
blessing_uri = model_validator.outputs['blessing'].get()[0].uri
!ls -l {blessing_uri}
"""
Explanation: Examine the output of ModelValidator.
End of explanation
"""
# Setup serving path
_serving_model_dir = os.path.join(tempfile.mkdtemp(),
'serving_model/chicago_taxi_simple')
"""
Explanation: The Pusher Component
The Pusher component checks whether a model has been "blessed", and if so, deploys it to production by pushing the model to a well known file destination.
End of explanation
"""
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=model_validator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=_serving_model_dir)))
context.run(pusher)
"""
Explanation: Create and run a Pusher component.
End of explanation
"""
pusher.outputs
push_uri = pusher.outputs['pushed_model'].get()[0].uri
latest_version = max(os.listdir(push_uri))
latest_version_path = os.path.join(push_uri, latest_version)
model = tf.saved_model.load(latest_version_path)
for item in model.signatures.items():
pp.pprint(item)
"""
Explanation: Examine the output of Pusher.
End of explanation
"""
latest_pushed_model = os.path.join(_serving_model_dir, max(os.listdir(_serving_model_dir)))
!saved_model_cli show --dir {latest_pushed_model} --all
"""
Explanation: TensorFlow Serving
Now that we have a trained model that has been blessed by ModelValidator, and pushed to our deployment target by Pusher, we can load it into TensorFlow Serving and start serving inference requests.
Examine your saved model
We'll use the command line utility saved_model_cli to look at the MetaGraphDefs (the models) and SignatureDefs (the methods you can call) in our SavedModel. See this discussion of the SavedModel CLI in the TensorFlow Guide.
End of explanation
"""
# This is the same as you would do from your command line, but without the [arch=amd64], and no sudo
# You would instead do:
# echo "deb [arch=amd64] http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" | sudo tee /etc/apt/sources.list.d/tensorflow-serving.list && \
# curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | sudo apt-key add -
!echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" | tee /etc/apt/sources.list.d/tensorflow-serving.list && \
curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -
!apt update
"""
Explanation: That tells us a lot about our model! In this case we just trained our model, so we already know the inputs and outputs, but if we didn't this would be important information. It doesn't tell us everything, but it's a great start.
Add TensorFlow Serving distribution URI as a package source:
We're preparing to install TensorFlow Serving using Aptitude since this Colab runs in a Debian environment. We'll add the tensorflow-model-server package to the list of packages that Aptitude knows about. Note that we're running as root.
Note: This example is running TensorFlow Serving natively, but you can also run it in a Docker container, which is one of the easiest ways to get started using TensorFlow Serving.
End of explanation
"""
!apt-get install tensorflow-model-server
"""
Explanation: Install TensorFlow Serving
This is all you need - one command line! Please note that running TensorFlow Serving in a Docker Container is also a great option, with a lot of advantages.
End of explanation
"""
os.environ["MODEL_DIR"] = os.path.split(latest_pushed_model)[0]
%%bash --bg
nohup tensorflow_model_server \
--rest_api_port=8501 \
--model_name=chicago_taxi_simple \
--model_base_path="${MODEL_DIR}" >server.log 2>&1
!tail server.log
"""
Explanation: Start running TensorFlow Serving
This is where we start running TensorFlow Serving and load our model. After it loads we can start making inference requests using REST. There are some important parameters:
rest_api_port: The port that you'll use for REST requests.
model_name: You'll use this in the URL of REST requests. It can be anything.
model_base_path: This is the path to the directory where you've saved your model. Note that this base_path should not include the model version directory, which is why we split it off below.
End of explanation
"""
import base64
import json
import requests
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
from tensorflow_transform import coders as tft_coders
from chicago_taxi_constants import *
def _get_raw_feature_spec(schema):
return schema_utils.schema_as_feature_spec(schema).feature_spec
def _make_proto_coder(schema):
raw_feature_spec = _get_raw_feature_spec(schema)
raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.ExampleProtoCoder(raw_schema)
def _make_csv_coder(schema, column_names):
"""Return a coder for tf.transform to read csv files."""
raw_feature_spec = _get_raw_feature_spec(schema)
parsing_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.CsvCoder(column_names, parsing_schema)
def make_serialized_examples(examples_csv_file, num_examples, schema):
"""Parses examples from CSV file and returns seralized proto examples."""
filtered_features = [
feature for feature in schema.feature if feature.name != LABEL_KEY
]
del schema.feature[:]
schema.feature.extend(filtered_features)
columns = tfx.utils.io_utils.load_csv_column_names(examples_csv_file)
csv_coder = _make_csv_coder(schema, columns)
proto_coder = _make_proto_coder(schema)
input_file = open(examples_csv_file, 'r')
input_file.readline() # skip header line
serialized_examples = []
for _ in range(num_examples):
one_line = input_file.readline()
if not one_line:
print('End of example file reached')
break
one_example = csv_coder.decode(one_line)
serialized_example = proto_coder.encode(one_example)
serialized_examples.append(serialized_example)
return serialized_examples
"""
Explanation: Prepare data for inference requests
Our example data is stored in a CSV file on disk.
We first have to read the file and decode the examples from CSV, and then encode these as Example protos to feed to Tensorflow Serving.
A few notes:
The regress and classify APIs are higher-level and thus encouraged to be used over predict - here we use the predict API to showcase the more involved route.
While the regress and classify APIs expect and can parse tf.Example, the predict API expects arbitrary TensorProto. This means we will have to construct the tf.Example proto using coders from Tensorflow Transform.
The REST API surface accepts JSON, which uses UTF-8 encoding. Thus to access the model via REST, we will encode our serialized tf.Example using Base64.
This is quite complicated and in general, if using the predict API, you should strongly consider using the gRPC API surface.
End of explanation
"""
def do_inference(server_addr, model_name, serialized_examples):
"""Sends requests to the model and prints the results.
Args:
server_addr: network address of model server in "host:port" format
model_name: name of the model as understood by the model server
serialized_examples: serialized examples of data to do inference on
"""
parsed_server_addr = server_addr.split(':')
host=parsed_server_addr[0]
port=parsed_server_addr[1]
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the guidelines in:
# https://www.tensorflow.org/tfx/serving/api_rest
example_bytes = base64.b64encode(serialized_example).decode('utf-8')
predict_request = '{ "b64": "%s" }' % example_bytes
json_examples.append(predict_request)
json_request = '{ "instances": [' + ','.join(map(str, json_examples)) + ']}'
server_url = 'http://' + host + ':' + port + '/v1/models/' + model_name + ':predict'
response = requests.post(
server_url, data=json_request, timeout=5.0)
response.raise_for_status()
prediction = response.json()
print(json.dumps(prediction, indent=4))
serialized_examples = make_serialized_examples(
examples_csv_file=_data_filepath,
num_examples=3,
schema=schema)
do_inference(server_addr='127.0.0.1:8501',
model_name='chicago_taxi_simple',
serialized_examples=serialized_examples)
"""
Explanation: Perform Inference on example data
Prepare the example data using the utility defined above and batch all requests together to send a single REST API call to Tensorflow Serving.
End of explanation
"""
|
sgratzl/ipython-tutorial-VA2015
|
04_MachineLearning_solution.ipynb
|
cc0-1.0
|
measurements = [
{'city': 'Dubai', 'temperature': 33.},
{'city': 'London', 'temperature': 12.},
{'city': 'San Francisco', 'temperature': 18.},
]
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
tf_measurements = vec.fit_transform(measurements)
tf_measurements.toarray()
vec.get_feature_names()
"""
Explanation: Machine Learning using Scikit Learn
There are two kinds of machine learning we will talk about today: supervised learning and unsupervised learning.
supervised: classification, regression,...
unsupervised: clustering, dimension reduction,...
sklearn estimator API
Scikit-learn strives to have a uniform interface across all objects. Given a scikit-learn estimator named model, the following methods are available:
Available in all Estimators
model.fit() : fit training data. For supervised learning applications,
this accepts two arguments: the data X and the labels y (e.g. model.fit(X, y)).
For unsupervised learning applications, fit takes only a single argument,
the data X (e.g. model.fit(X)).
Available in supervised estimators
model.predict() : given a trained model, predict the label of a new set of data.
This method accepts one argument, the new data X_new (e.g. model.predict(X_new)),
and returns the learned label for each object in the array.
model.fit_predict() : fits the model and returns predictions for the same data in a single step
model.predict_proba() : For classification problems, some estimators also provide
this method, which returns the probability that a new observation has each categorical label.
In this case, the label with the highest probability is returned by model.predict().
model.score() : An indication of how well the model fits the training data. Scores are between 0 and 1, with a larger score indicating a better fit.
Data in scikit-learn
Data in scikit-learn, with very few exceptions, is assumed to be stored as a
two-dimensional array, of size [n_samples, n_features]. Many algorithms also accept scipy.sparse matrices of the same shape.
n_samples: The number of samples: each sample is an item to process (e.g. classify).
A sample can be a document, a picture, a sound, a video, an astronomical object,
a row in database or CSV file,
or whatever you can describe with a fixed set of quantitative traits.
n_features: The number of features or distinct traits that can be used to describe each
item in a quantitative manner. Features are generally real-valued, but may be boolean or
discrete-valued in some cases.
Numerical vs Categorical
What if you have categorical features? For example, imagine there is data on the color of each
iris:
color in [red, blue, purple]
You might be tempted to assign numbers to these features, i.e. red=1, blue=2, purple=3
but in general this is a bad idea. Estimators tend to operate under the assumption that
numerical features lie on some continuous scale, so, for example, 1 and 2 are more alike
than 1 and 3, and this is often not the case for categorical features.
A better strategy is to give each category its own dimension.
The enriched iris feature set would hence be in this case:
sepal length in cm
sepal width in cm
petal length in cm
petal width in cm
color=purple (1.0 or 0.0)
color=blue (1.0 or 0.0)
color=red (1.0 or 0.0)
Note that using many of these categorical features may result in data which is better
represented as a sparse matrix, as we'll see with the text classification example
below.
Using the DictVectorizer to encode categorical features
When the source data is encoded as a list of dicts whose values are either string names for categories or numerical values, you can use the DictVectorizer class to compute the boolean expansion of the categorical features while leaving the numerical features untouched:
End of explanation
"""
#disable some annoying warning
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#load the iris datasets
import sklearn.datasets
data = sklearn.datasets.load_iris()
data.data.shape
from sklearn.cluster import KMeans
iris_pred = KMeans(n_clusters=3, random_state = 102).fit_predict(data.data)
plt.figure(figsize=(12, 12))
colors = sns.color_palette()
plt.subplot(211)
plt.scatter(data.data[:, 0], data.data[:, 1], c=[colors[i] for i in iris_pred], s=40)
plt.title('KMeans-3 clusterer')
plt.xlabel(data.feature_names[0])
plt.ylabel(data.feature_names[1])
plt.subplot(212)
plt.scatter(data.data[:, 0], data.data[:, 1], c=[colors[i] for i in data.target],s=40)
plt.title('Ground Truth')
plt.xlabel(data.feature_names[0])
plt.ylabel(data.feature_names[1])
"""
Explanation: Unsupervised Clustering using K-Means
End of explanation
"""
import sklearn.cross_validation
data_train, data_test, target_train, target_test = sklearn.cross_validation.train_test_split(
data.data, data.target, test_size=0.20, random_state = 5)
print(data.data.shape, data_train.shape, data_test.shape)
"""
Explanation: Supervised Classification using Decision Trees
well not that great. Let's use a supervised classifier
First, split our data in train and test set
End of explanation
"""
from sklearn.tree import DecisionTreeClassifier
instance = DecisionTreeClassifier()
r = instance.fit(data_train, target_train)
target_predict = instance.predict(data_test)
from sklearn.metrics import accuracy_score
print('Prediction accuracy: ',accuracy_score(target_predict, target_test))
"""
Explanation: Now, we use a DecisionTree to learn a model and test our result
End of explanation
"""
from sklearn import manifold
#create mds instance
mds = manifold.MDS(n_components=2, random_state=5)
#fit the model and get the embedded coordinates
pos = mds.fit(data.data).embedding_
plt.scatter(pos[:, 0], pos[:, 1], s=20, c=[colors[i] for i in data.target])
#create a legend: since we just have one plot and not three, fake the legend using patches
import matplotlib.patches as mpatches
patches = [ mpatches.Patch(color=colors[i], label=data.target_names[i]) for i in range(3) ]
plt.legend(handles=patches)
#compare with e.g. PCA
from sklearn import decomposition
pca = decomposition.PCA(n_components=2)
pca_pos = pca.fit(data.data).transform(data.data)
mds_pos = mds.fit(data.data).embedding_
plt.figure(figsize=[20,7])
plt.subplot(121)
plt.scatter(mds_pos[:, 0], mds_pos[:, 1], s=30, c=[colors[i] for i in data.target])
plt.title('MDS')
plt.subplot(122)
plt.scatter(pca_pos[:, 0], pca_pos[:, 1], s=30, c=[colors[i] for i in data.target])
plt.title('PCA')
"""
Explanation: pretty good, isn't it?
Dimension Reduction using MDS and PCA
if we go back to our K-Means example, the clustering doesn't really make sense. However, we are just looking at two out of four dimensions. So, we can't really see the real distances/similarities between items. Dimension reduction techniques reduce the number of dimensions, while preserving the inner structure of the higher dimensions. We take a look at two of them: Multi Dimensional Scaling (MDS) and Principal Component Analysis (PCA).
End of explanation
"""
from IPython.html.widgets import interact
colors = sns.color_palette(n_colors=10)
mds_pos = mds.fit(data.data).embedding_
@interact(n_clusters=(1,10))
def draw_plot(n_clusters):
instance = KMeans(n_clusters=n_clusters, random_state = 102)
clusters_assignment = instance.fit_predict(data.data)
plt.scatter(mds_pos[:, 0], mds_pos[:, 1], s=20, c=[colors[i] for i in clusters_assignment])
"""
Explanation: seems like versicolor and virginica are more similar than setosa
TASK
create an interactive colored plot of the Iris dataset projected in 2D using MDS. The color should correspond to the result of a K-Means clustering algorithm where the user can interactively define the number of clusters between 1 and 10.
End of explanation
"""
|
pfschus/fission_bicorrelation
|
methods/singles_correction_e.ipynb
|
mit
|
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import imageio
import pandas as pd
import seaborn as sns
sns.set(style='ticks')
sys.path.append('../scripts/')
import bicorr as bicorr
import bicorr_e as bicorr_e
import bicorr_plot as bicorr_plot
import bicorr_sums as bicorr_sums
import bicorr_math as bicorr_math
%load_ext autoreload
%autoreload 2
"""
Explanation: Goal: Correct for singles rate with $W$ calculation
In order to correct for differences in detection efficiencies and solid angles, we will divide all of the doubles rates by the singles rates of the two detectors as follows:
$W_{i,j} = \frac{D_{i,j}}{S_i S_j}$
This requires calculating $S_i$ and $S_j$ from the cced files. I need to rewrite my analysis from the beginning, or write another function that parses the cced file.
In this file, I will import the singles and bicorr data and calculate all $D_{i,j}$, $S_i$, $S_j$, and $W_{i,j}$.
This notebook does the analysis in energy space.
End of explanation
"""
det_df = bicorr.load_det_df('../meas_info/det_df_pairs_angles.csv')
det_df.head()
chList, fcList, detList, num_dets, num_det_pairs = bicorr.build_ch_lists()
dict_pair_to_index, dict_index_to_pair, dict_pair_to_angle = bicorr.build_dict_det_pair(det_df)
num_fissions = 2194651200.00
"""
Explanation: Load some data
I'm going to work with the data from the combined data sets. The analysis for this data set is in analysis\Cf072115_to_Cf072215b.
The one limitation here is that this data has already cut out the fission chamber neighbors.
det_df without fission chamber neighbors
End of explanation
"""
e_min = 0.62
e_max = 12
"""
Explanation: Specify energy range
End of explanation
"""
singles_hist_e_n, e_bin_edges, dict_det_to_index, dict_index_to_det = bicorr_e.load_singles_hist_both(filepath = '../analysis/Cf072115_to_Cf072215b/datap/',plot_flag=True, show_flag=True)
bicorr_plot.plot_singles_hist_e_n(singles_hist_e_n, e_bin_edges, show_flag=False, clear_flag=False)
for e in [e_min, e_max]:
plt.axvline(e,c='r')
plt.show()
singles_hist_e_n.shape
"""
Explanation: singles_hist_e_n.npz
End of explanation
"""
bhm_e, e_bin_edges, note = bicorr_e.load_bhm_e('../analysis/Cf072115_to_Cf072215b/datap')
bhm_e.shape
bhp_e = np.zeros((len(det_df),len(e_bin_edges)-1,len(e_bin_edges)-1))
bhp_e.shape
for index in det_df.index.values: # index is same as in `bhm`
bhp_e[index,:,:] = bicorr_e.build_bhp_e(bhm_e,e_bin_edges,pair_is=[index])[0]
bicorr_plot.bhp_e_plot(np.sum(bhp_e,axis=0),e_bin_edges, show_flag=True)
"""
Explanation: Load bhp_nn_e for all pairs
I'm going to skip a few steps in order to save memory. This data was produced in analysis_build_bhp_nn_by_pair_1_ns.ipynb and is stored in datap\bhp_nn_by_pair_1ns.npz. Load it now, as explained in the notebook.
End of explanation
"""
det_df.head()
det_df = bicorr_sums.init_det_df_sums(det_df)
det_df.head()
singles_e_df = bicorr_sums.init_singles_e_df(dict_index_to_det)
singles_e_df.head()
"""
Explanation: Set up det_df columns and singles_df
End of explanation
"""
bhp_e.shape
det_df, energies_real = bicorr_sums.fill_det_df_doubles_e_sums(det_df, bhp_e, e_bin_edges, e_min, e_max, True)
det_df.head()
bicorr_plot.counts_vs_angle_all(det_df, save_flag=False)
"""
Explanation: Calculate and fill doubles sums
End of explanation
"""
singles_e_df.head()
bicorr_plot.Sd_vs_ch_all(singles_e_df, save_flag=False)
det_df = bicorr_sums.fill_det_df_singles_sums(det_df, singles_e_df)
det_df.head()
plt.figure(figsize=(4,4))
ax = plt.gca()
sc = ax.scatter(det_df['d1'],det_df['d2'],s=13,marker='s',
edgecolor = 'none', c=det_df['Cd'],cmap='viridis')
ax.set_xlabel('d1 channel')
ax.set_ylabel('d2 channel')
ax.set_title('Doubles counts')
cbar = plt.colorbar(sc,fraction=0.043,pad=0.1)
cbar.set_label('Doubles counts')
plt.show()
plt.figure(figsize=(4,4))
ax = plt.gca()
sc = ax.scatter(det_df['d1'],det_df['d2'],s=13,marker='s',
edgecolor = 'none', c=det_df['Sd1'],cmap='viridis')
ax.set_xlabel('d1 channel')
ax.set_ylabel('d2 channel')
ax.set_title('D1 singles counts')
cbar = plt.colorbar(sc,fraction=0.043,pad=0.1)
cbar.set_label('D1 singles counts')
plt.show()
plt.figure(figsize=(4,4))
ax = plt.gca()
sc = ax.scatter(det_df['d1'],det_df['d2'],s=13,marker='s',
edgecolor = 'none', c=det_df['Sd2'],cmap='viridis')
ax.set_xlabel('d1 channel')
ax.set_ylabel('d2 channel')
ax.set_title('D2 singles counts')
cbar = plt.colorbar(sc,fraction=0.043,pad=0.1)
cbar.set_label('D2 Singles counts')
plt.show()
"""
Explanation: Calculate singles sums
End of explanation
"""
det_df = bicorr_sums.calc_det_df_W(det_df)
det_df.head()
plt.figure(figsize=(4,4))
ax = plt.gca()
sc = ax.scatter(det_df['d1'],det_df['d2'],s=13,marker='s',
edgecolor = 'none', c=det_df['W'],cmap='viridis')
ax.set_xlabel('d1 channel')
ax.set_ylabel('d2 channel')
ax.set_title('W')
cbar = plt.colorbar(sc,fraction=0.043,pad=0.1)
cbar.set_label('W')
plt.show()
chIgnore = [1,17,33]
det_df_ignore = det_df[~det_df['d1'].isin(chIgnore) & ~det_df['d2'].isin(chIgnore)]
bicorr_plot.W_vs_angle_all(det_df_ignore, save_flag=False)
bicorr_plot.W_vs_angle_all?
"""
Explanation: Calculate W values
End of explanation
"""
angle_bin_edges = np.arange(8,190,10)
print(angle_bin_edges)
by_angle_df = bicorr_sums.condense_det_df_by_angle(det_df_ignore, angle_bin_edges)
by_angle_df.head()
"""
Explanation: Condense to angle bin
End of explanation
"""
bicorr_plot.W_vs_angle(det_df_ignore, by_angle_df, save_flag=False)
"""
Explanation: Plot it
End of explanation
"""
singles_e_df.to_csv('singles_e_df_filled.csv')
det_df.to_csv(r'det_df_e_filled.csv')
by_angle_df.to_csv(r'by_angle_e_df.csv')
"""
Explanation: Save to disk
In order to compare datasets, it would be nice to save these results to disk and reload in another notebook for comparison. These results are pretty easy, format-wise, so I'll just use the built-in pandas methods.
End of explanation
"""
det_df_filled = pd.read_csv(r'det_df_e_filled.csv',index_col=0)
det_df_filled.head()
chIgnore = [1,17,33]
det_df_ignore = det_df_filled[~det_df_filled['d1'].isin(chIgnore) & ~det_df_filled['d2'].isin(chIgnore)]
det_df_ignore.head()
singles_e_df_filled = pd.read_csv(r'singles_e_df_filled.csv',index_col=0)
singles_e_df_filled.head()
by_angle_e_df = pd.read_csv(r'by_angle_e_df.csv',index_col=0)
by_angle_e_df.head()
bicorr_plot.W_vs_angle(det_df_ignore, by_angle_e_df, save_flag=False)
"""
Explanation: Reload
End of explanation
"""
|
jokedurnez/neuropower_extended
|
peakdistribution/chengschwartzman_thresholdfree_distribution_simulation.ipynb
|
mit
|
% matplotlib inline
import numpy as np
import math
import nibabel as nib
import scipy.stats as stats
import matplotlib.pyplot as plt
from nipy.labs.utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset
import palettable.colorbrewer as cb
from nipype.interfaces import fsl
import os
import pandas as pd
import scipy.integrate as integrate
"""
Explanation: Distribution of local maxima in a Gaussian Random Field
In this notebook, I apply the distribution of local maxima of Cheng & Schwartzman. I reproduce the figure with the distribution in 1D, 2D and 3D and then check how well the distribution fits simulated data.
Check code for peak distribution of Cheng&Schwartzman
Below I defined the formulae of Cheng&Schwartzman in arXiv:1503.01328v1. On page 3.3 the density functions are displayed for 1D, 2D and 3D.
Consequently, I apply these formulae to a range of x-values, which reproduces Figure 1.
End of explanation
"""
def peakdens1D(x,k):
f1 = (3-k**2)**0.5/(6*math.pi)**0.5*np.exp(-3*x**2/(2*(3-k**2)))
f2 = 2*k*x*math.pi**0.5/6**0.5*stats.norm.pdf(x)*stats.norm.cdf(k*x/(3-k**2)**0.5)
out = f1+f2
return out
def peakdens2D(x,k):
f1 = 3**0.5*k**2*(x**2-1)*stats.norm.pdf(x)*stats.norm.cdf(k*x/(2-k**2)**0.5)
f2 = k*x*(3*(2-k**2))**0.5/(2*math.pi) * np.exp(-x**2/(2-k**2))
f31 = 6**0.5/(math.pi*(3-k**2))**0.5*np.exp(-3*x**2/(2*(3-k**2)))
f32 = stats.norm.cdf(k*x/((3-k**2)*(2-k**2))**0.5)
out = f1+f2+f31*f32
return out
def peakdens3D(x,k):
fd1 = 144*stats.norm.pdf(x)/(29*6**(0.5)-36)
fd211 = k**2.*((1.-k**2.)**3. + 6.*(1.-k**2.)**2. + 12.*(1.-k**2.)+24.)*x**2. / (4.*(3.-k**2.)**2.)
fd212 = (2.*(1.-k**2.)**3. + 3.*(1.-k**2.)**2.+6.*(1.-k**2.)) / (4.*(3.-k**2.))
fd213 = 3./2.
fd21 = (fd211 + fd212 + fd213)
fd22 = np.exp(-k**2.*x**2./(2.*(3.-k**2.))) / (2.*(3.-k**2.))**(0.5)
fd23 = stats.norm.cdf(2.*k*x / ((3.-k**2.)*(5.-3.*k**2.))**(0.5))
fd2 = fd21*fd22*fd23
fd31 = (k**2.*(2.-k**2.))/4.*x**2. - k**2.*(1.-k**2.)/2. - 1.
fd32 = np.exp(-k**2.*x**2./(2.*(2.-k**2.))) / (2.*(2.-k**2.))**(0.5)
fd33 = stats.norm.cdf(k*x / ((2.-k**2.)*(5.-3.*k**2.))**(0.5))
fd3 = fd31 * fd32 * fd33
fd41 = (7.-k**2.) + (1-k**2)*(3.*(1.-k**2.)**2. + 12.*(1.-k**2.) + 28.)/(2.*(3.-k**2.))
fd42 = k*x / (4.*math.pi**(0.5)*(3.-k**2.)*(5.-3.*k**2)**0.5)
fd43 = np.exp(-3.*k**2.*x**2/(2.*(5-3.*k**2.)))
fd4 = fd41*fd42 * fd43
fd51 = math.pi**0.5*k**3./4.*x*(x**2.-3.)
f521low = np.array([-10.,-10.])
f521up = np.array([0.,k*x/2.**(0.5)])
f521mu = np.array([0.,0.])
f521sigma = np.array([[3./2., -1.],[-1.,(3.-k**2.)/2.]])
fd521,i = stats.mvn.mvnun(f521low,f521up,f521mu,f521sigma)
f522low = np.array([-10.,-10.])
f522up = np.array([0.,k*x/2.**(0.5)])
f522mu = np.array([0.,0.])
f522sigma = np.array([[3./2., -1./2.],[-1./2.,(2.-k**2.)/2.]])
fd522,i = stats.mvn.mvnun(f522low,f522up,f522mu,f522sigma)
fd5 = fd51*(fd521+fd522)
out = fd1*(fd2+fd3+fd4+fd5)
return out
"""
Explanation: Define formulae
End of explanation
"""
xs = np.arange(-4,10,0.01).tolist()
ys_3d_k01 = []
ys_3d_k05 = []
ys_3d_k1 = []
ys_2d_k01 = []
ys_2d_k05 = []
ys_2d_k1 = []
ys_1d_k01 = []
ys_1d_k05 = []
ys_1d_k1 = []
for x in xs:
ys_1d_k01.append(peakdens1D(x,0.1))
ys_1d_k05.append(peakdens1D(x,0.5))
ys_1d_k1.append(peakdens1D(x,1))
ys_2d_k01.append(peakdens2D(x,0.1))
ys_2d_k05.append(peakdens2D(x,0.5))
ys_2d_k1.append(peakdens2D(x,1))
ys_3d_k01.append(peakdens3D(x,0.1))
ys_3d_k05.append(peakdens3D(x,0.5))
ys_3d_k1.append(peakdens3D(x,1))
"""
Explanation: Apply formulae to a range of x-values
End of explanation
"""
plt.figure(figsize=(7,5))
plt.plot(xs,ys_1d_k01,color="black",ls=":",lw=2)
plt.plot(xs,ys_1d_k05,color="black",ls="--",lw=2)
plt.plot(xs,ys_1d_k1,color="black",ls="-",lw=2)
plt.plot(xs,ys_2d_k01,color="blue",ls=":",lw=2)
plt.plot(xs,ys_2d_k05,color="blue",ls="--",lw=2)
plt.plot(xs,ys_2d_k1,color="blue",ls="-",lw=2)
plt.plot(xs,ys_3d_k01,color="red",ls=":",lw=2)
plt.plot(xs,ys_3d_k05,color="red",ls="--",lw=2)
plt.plot(xs,ys_3d_k1,color="red",ls="-",lw=2)
plt.ylim([-0.1,0.55])
plt.xlim([-4,4])
plt.show()
"""
Explanation: Figure 1 from paper
End of explanation
"""
os.chdir("/Users/Joke/Documents/Onderzoek/ProjectsOngoing/Power/WORKDIR/")
sm=1
smooth_FWHM = 3
smooth_sd = smooth_FWHM/(2*math.sqrt(2*math.log(2)))
data = surrogate_3d_dataset(n_subj=1,sk=smooth_sd,shape=(500,500,500),noise_level=1)
minimum = data.min()
newdata = data - minimum #little trick because fsl.model.Cluster ignores negative values
img=nib.Nifti1Image(newdata,np.eye(4))
img.to_filename(os.path.join("RF_"+str(sm)+".nii.gz"))
cl=fsl.model.Cluster()
cl.inputs.threshold = 0
cl.inputs.in_file=os.path.join("RF_"+str(sm)+".nii.gz")
cl.inputs.out_localmax_txt_file=os.path.join("locmax_"+str(sm)+".txt")
cl.inputs.num_maxima=10000000
cl.inputs.connectivity=26
cl.inputs.terminal_output='none'
cl.run()
plt.figure(figsize=(6,4))
plt.imshow(data[1:20,1:20,1])
plt.colorbar()
plt.show()
peaks = pd.read_csv("locmax_"+str(1)+".txt",sep="\t").drop('Unnamed: 5',1)
peaks.Value = peaks.Value + minimum
500.**3/len(peaks)
twocol = cb.qualitative.Paired_12.mpl_colors
plt.figure(figsize=(7,5))
plt.hist(peaks.Value,lw=0,facecolor=twocol[0],normed=True,bins=np.arange(-5,5,0.1),label="observed distribution")
plt.xlim([-2,5])
plt.ylim([0,0.6])
plt.plot(xs,ys_3d_k1,color=twocol[1],lw=3,label="theoretical distribution")
plt.title("histogram")
plt.xlabel("peak height")
plt.ylabel("density")
plt.legend(loc="upper left",frameon=False)
plt.show()
peaks[1:5]
"""
Explanation: Apply the distribution to simulated data, extracted peaks with FSL
I now simulate a random field, extract its peaks with FSL, and compare these simulated peaks with the theoretical distribution.
End of explanation
"""
ss = 10000
smpl = np.random.choice(len(peaks),ss,replace=False)
peaksmpl = peaks.loc[smpl].reset_index()
"""
Explanation: Are the peaks independent?
Below, I take a random sample of peaks to compute distances, for computational ease. With 10K peaks, it already takes 15 minutes to compute all distances.
End of explanation
"""
dist = []
diff = []
for p in range(ss):
for q in range(p+1,ss):
xd = peaksmpl.x[q]-peaksmpl.x[p]
yd = peaksmpl.y[q]-peaksmpl.y[p]
zd = peaksmpl.z[q]-peaksmpl.z[p]
if not any(x > 20 or x < -20 for x in [xd,yd,zd]):
dist.append(np.sqrt(xd**2+yd**2+zd**2))
diff.append(abs(peaksmpl.Value[p]-peaksmpl.Value[q]))
"""
Explanation: Compute distances between peaks and the difference in their height.
End of explanation
"""
mn = []
ds = np.arange(start=2,stop=100)
for d in ds:
mn.append(np.mean(np.array(diff)[np.round(np.array(dist))==d]))
twocol = cb.qualitative.Paired_12.mpl_colors
plt.figure(figsize=(7,5))
plt.plot(dist,diff,"r.",color=twocol[0],linewidth=0,label="combination of 2 points")
plt.xlim([2,20])
plt.plot(ds,mn,color=twocol[1],lw=4,label="average over all points in bins with width 1")
plt.title("Are peaks independent?")
plt.xlabel("Distance between peaks")
plt.ylabel("Difference between peaks heights")
plt.legend(loc="upper left",frameon=False)
plt.show()
np.min(dist)
def nulprobdensEC(exc,peaks):
f0 = exc*np.exp(-exc*(peaks-exc))
return f0
def peakp(x):
y = []
iterator = (x,) if not isinstance(x, (tuple, list)) else x
for i in iterator:
y.append(integrate.quad(lambda x:peakdens3D(x,1),-20,i)[0])
return y
fig,axs=plt.subplots(1,5,figsize=(13,3))
fig.subplots_adjust(hspace = .5, wspace=0.3)
axs=axs.ravel()
thresholds=[2,2.5,3,3.5,4]
bins=np.arange(2,5,0.5)
x=np.arange(2,10,0.1)
twocol=cb.qualitative.Paired_10.mpl_colors
for i in range(5):
thr=thresholds[i]
axs[i].hist(peaks.Value[peaks.Value>thr],lw=0,facecolor=twocol[i*2-2],normed=True,bins=np.arange(thr,5,0.1))
axs[i].set_xlim([thr,5])
axs[i].set_ylim([0,3])
xn = x[x>thr]
ynb = nulprobdensEC(thr,xn)
ycs = []
for n in xn:
ycs.append(peakdens3D(n,1)/(1-peakp(thr)[0]))
axs[i].plot(xn,ycs,color=twocol[i*2-1],lw=3,label="C&S")
axs[i].plot(xn,ynb,color=twocol[i*2-1],lw=3,linestyle="--",label="EC")
axs[i].set_title("threshold:"+str(thr))
axs[i].set_xticks(np.arange(thr,5,0.5))
axs[i].set_yticks([1,2])
axs[i].legend(loc="upper right",frameon=False)
axs[i].set_xlabel("peak height")
axs[i].set_ylabel("density")
plt.show()
"""
Explanation: Take the mean of heights in bins of 1.
End of explanation
"""
|
cleuton/datascience
|
covid19_Brasil/Covid19_no_Brasil.ipynb
|
apache-2.0
|
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv('./covid19-86691a57080d4801a240e49035b292fc.csv')
df.head()
list_cidades = df.groupby("city").count().index.tolist()
list_cidades
"""
Explanation: Covid 19 - Brazil Visualization
Data from https://brasil.io/dataset/covid19/caso
Cleuton Sampaio
End of explanation
"""
import requests
import json
cidades = {}
cidades_nao_encontradas = []
url = 'https://maps.googleapis.com/maps/api/geocode/json'
params = dict(
key='** USE SUA API KEY**'
)
inx = 0
start=None # Inform None to process all cities. Inform a city to begin processing after it
start_saving=False
for city in list_cidades:
if start != None:
if city == start:
start_saving = True
else:
start_saving = True
if start_saving:
params['address'] = city + ',brasil'
resp = requests.get(url=url, params=params)
data = resp.json()
try:
latitude = data['results'][0]['geometry']['location']['lat']
longitude = data['results'][0]['geometry']['location']['lng']
print(city,latitude,longitude)
cidades[city]={'latitude': latitude, 'longitude': longitude}
inx = inx + 1
except:
print("Erro na cidade: ",city)
cidades_nao_encontradas.append(city)
else:
print('Pulando a cidade já processada:',city)
print('Cidades salvas no arquivo:',inx)
print('Cidades não encontradas',cidades_nao_encontradas)
with open('cidades.json', 'w') as fp:
json.dump(cidades, fp)
print(json.dumps(cidades))
"""
Explanation: Let's geocode the cities by looking them up with the Google Geocode API. You need to obtain an API Key: https://console.cloud.google.com/apis/ If you prefer, you can use the geocoded file I saved.
End of explanation
"""
#-13.6593766,-58.6914406
latitude = -13.6593766
longitude = -50.6914406
zoom = 4
size = 530
scale = 2
apikey = "** SUA API KEY**"
gmapas = "https://maps.googleapis.com/maps/api/staticmap?center=" + str(latitude) + "," + str(longitude) + \
"&zoom=" + str(zoom) + \
"&scale=" + str(scale) + \
"&size=" + str(size) + "x" + str(size) + "&key=" + apikey
with open('mapa.png', 'wb') as handle:
response = requests.get(gmapas, stream=True)
if not response.ok:
print(response)
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
"""
Explanation: I saved a file named "cidades.json" with the geocoding, so we can avoid hitting the API again.
Now I need to download a map containing Brazil. I chose the center using Google Maps and adjusted the zoom, size and scale. Note that you will need an API key.
End of explanation
"""
import MercatorProjection
centerLat = latitude
centerLon = longitude
mapWidth = size
mapHeight = size
centerPoint = MercatorProjection.G_LatLng(centerLat, centerLon)
corners = MercatorProjection.getCorners(centerPoint, zoom, mapWidth, mapHeight)
print(corners)
"""
Explanation: Now I need a function that uses the Mercator projection to compute the borders of the map I downloaded. I took it from this StackOverflow answer: https://stackoverflow.com/questions/12507274/how-to-get-bounds-of-a-google-static-map and it works better than the one I was using before.
End of explanation
"""
casos = df.groupby("city")['confirmed'].sum()
df2 = pd.DataFrame.from_dict(cidades,orient='index')
df2['casos'] = casos
print(df2.head())
"""
Explanation: I built a new DataFrame containing the latitudes, longitudes and number of cases:
End of explanation
"""
def calcular_cor(valor):
cor = 'r'
if valor <= 10:
cor = '#ffff00'
elif valor <= 30:
cor = '#ffbf00'
elif valor <= 50:
cor = '#ff8000'
return cor
df2['cor'] = [calcular_cor(codigo) for codigo in df2['casos']]
df2.head()
"""
Explanation: Now I will add an attribute with the color of each point, based on a heuristic over the number of cases: the more cases, the redder the point:
End of explanation
"""
df2 = df2.sort_values(['casos'])
"""
Explanation: Let's sort by the number of cases:
End of explanation
"""
print(df2.loc[(df2['latitude'] > 20) | (df2['longitude']< -93)])
df3 = df2.drop(df2[(df2.latitude > 20) | (df2.longitude < -93)].index)
"""
Explanation: Temos alguns "outliers", ou seja, coordenadas muito fora do país. Provavelmente, problemas de geocodificação. Vamos retirá-las:
End of explanation
"""
import matplotlib.image as mpimg
mapa=mpimg.imread('./mapa.png')
fig, ax = plt.subplots(figsize=(10, 10))
#{'N': 20.88699826581544, 'E': -15.535190599999996, 'S': -43.89198802990045, 'W': -85.84769059999999}
plt.imshow(mapa, extent=[corners['W'],corners['E'],corners['S'],corners['N']], alpha=1.0, aspect='auto')
ax.scatter(df3['longitude'],df3['latitude'], c=df3['cor'],s=8+df3['casos']*0.03)
plt.ylabel("Latitude", fontsize=14)
plt.xlabel("Longitude", fontsize=14)
ax.set_title('Casos de Covid19 em Abril')
plt.show()
"""
Explanation: Now we can plot a chart using the downloaded image. I had to adjust the coordinates according to the limits of the rectangle, computed with the Mercator projection:
End of explanation
"""
|
SIMEXP/Projects
|
metaad/network_level_meta-clusters.ipynb
|
mit
|
#seed_data = pd.read_csv('20160128_AD_Decrease_Meta_Christian.csv')
template_036= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale036.nii.gz')
template_020= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale020.nii.gz')
template_012= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale012.nii.gz')
template_007= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale007.nii.gz')
template = template_007
scale = '7'
#seed_data = pd.read_csv('20160405_AD_Seed_Regions_ForChristian_Revised.csv')
#seed_data = pd.read_csv('20160405_MCI_Seed_Regions_ForChristian_Revised.csv')
seed_data = pd.read_csv('20160405_ADMCI_Seed_Regions_ForChristian_Revised.csv')
#output_stats = 'AD_seedregions_scale'+scale+'_stats.mat'
#output_vol = 'AD_seedregions_hitfreq_scale'+scale+'_vol.nii.gz'
#output_stats = 'MCI_seedregions_scale'+scale+'_stats.mat'
#output_vol = 'MCI_seedregions_hitfreq_scale'+scale+'_vol.nii.gz'
output_stats = 'ADMCI_seedregions_scale'+scale+'_stats.mat'
output_vol = 'ADMCI_seedregions_hitfreq_scale'+scale+'_vol.nii.gz'
seed_data.iloc[1]['Cambridge_Scale7_label']
"""
Explanation: Load data
End of explanation
"""
from numpy.linalg import norm
# find the closest network to the coordo
def get_nearest_net(template,world_coor):
list_coord = np.array(np.where(template.get_data()>0))
mni_coord = apply_affine(template.get_affine(),list_coord.T)
distances = norm(mni_coord-np.array(world_coor),axis=1)
#print distances.shape
idx_nearest_net = np.where(distances == np.min(distances))[0][0]
return int(template.get_data()[list_coord[:,idx_nearest_net][0],list_coord[:,idx_nearest_net][1],list_coord[:,idx_nearest_net][2]])
#get_nearest_net(template,[-15,-10,-10])
# Convert from world MNI space to the EPI voxel space
def get_world2vox(template, mni_coord):
return np.round(apply_affine(npl.inv(template.get_affine()),mni_coord)+[1])
network_votes = np.zeros((np.max(template.get_data().flatten()),1))[:,0]
network_votes
# get the voxel coordinates of the MNI seeds
votes = []
n_outofbrain=0
for i in range(seed_data.shape[0]):
tmp_val = seed_data['Cambridge_Scale7_label'].iloc[i]
if tmp_val == 0:
mni_space_targets = seed_data[['x','y','z']].iloc[i]
vox_corrd = get_world2vox(template,mni_space_targets)
net_class = template.get_data()[vox_corrd[0],vox_corrd[1],vox_corrd[2]]
else:
net_class = tmp_val
if net_class==0:
n_outofbrain+=1
votes.append(get_nearest_net(template,[mni_space_targets[0],mni_space_targets[1],mni_space_targets[2]]))
else:
votes.append(net_class)
print('Out of brain coordinates: '+ str(n_outofbrain))
votes = np.array(votes)
# take one vote for each study only
uni_pmid = np.unique(seed_data['PMID'])
votes.shape
frequency_votes=np.zeros((len(uni_pmid),len(network_votes)))
#for i in range(len(uni_pmid)):
# frequency_votes = np.hstack((frequency_votes,np.unique(votes[(seed_data['PMID']==uni_pmid[i]).values])))
for i in range(len(uni_pmid)):
aa = votes[(seed_data['PMID']==uni_pmid[i]).values]
for j in aa:
frequency_votes[i,j-1] = (aa == j).sum()/float(len(aa))
print frequency_votes
# compile the stats for each network
#for i in range(1,len(network_votes)+1):
# network_votes[i-1] = np.mean(frequency_votes==i)
network_votes = np.mean(frequency_votes,axis=0)
print network_votes
#vox_corrd[np.array(votes)==5,:]
frequency_votes.shape
print votes
seed_data['Cambridge_Scale7_label'] = votes
print seed_data['Cambridge_Scale7_label'].values
seed_data.to_csv('20160314_MCI_Seed_Regions_christian_assigned.csv')
def gen1perm(n_seeds,proba):
ratio_votes_1study = np.zeros_like(proba)
perm_votes = np.random.choice(range(0,len(proba)),size=(n_seeds,1),p=proba)
for j in perm_votes:
ratio_votes_1study[j] = (perm_votes == j).sum()/float(len(perm_votes))
return ratio_votes_1study
# check if the proba is respected
#print proba_networks
#gen1perm(10000,proba_networks)
#ange(0,len(proba_networks))
"""
Explanation: Get the number of coordinates reported for each network
End of explanation
"""
'''
from numpy.random import permutation
def permute_table(frequency_votes,n_iter):
h0_results = []
for n in range(n_iter):
perm_freq = frequency_votes.copy()
#print perm_freq
for i in range(perm_freq.shape[0]):
perm_freq[i,:] = permutation(perm_freq[i,:])
#print perm_freq
h0_results.append(np.mean(perm_freq,axis=0))
return np.array(h0_results).T
'''
def compute_freq(votes,data_ratio_votes,seed_data,proba):
# take one vote for each study only
uni_pmid = np.unique(seed_data['PMID'])
ratio_votes=np.zeros((data_ratio_votes.shape[0],data_ratio_votes.shape[1],10000))
for idx_perm in range(ratio_votes.shape[-1]):
# frequency_votes = np.hstack((frequency_votes,np.unique(votes[(seed_data['PMID']==uni_pmid[i]).values])))
for i in range(len(uni_pmid)):
aa = votes[(seed_data['PMID']==uni_pmid[i]).values]
n_seeds = len(aa)
ratio_votes[i,:,idx_perm] = gen1perm(n_seeds,proba)
#print ratio_votes.shape
# compute the frequency
freq_data = np.mean(ratio_votes,axis=0)
for i in range(freq_data.shape[0]):
freq_data[i,:] = np.sort(freq_data[i,:])[::-1]
return freq_data
# Total volume of the brain
total_volume = np.sum(template.get_data()>0)
# compute the proba of each network
proba_networks=[]
for i in range(1,len(network_votes)+1):
proba_networks.append(np.sum(template.get_data()==i)/(total_volume*1.))
proba_networks = np.array(proba_networks)
print np.sum(proba_networks)
print proba_networks
# generate random values
'''
def gen_rnd_hits(proba,n_seeds):
results_h0 = np.random.choice(range(0,len(proba)),size=(n_seeds,1000),p=proba)
#results_h0 = permute_table(frequency_votes,1000)
print results_h0.shape
ditributions = []
for i in range(frequency_votes.shape[1]):
results_h0[i,:] = np.sort(results_h0[i,:])[::-1]
#ditributions.append(one_way_pdf)
#return ditributions
return results_h0
'''
#dist_data = gen_rnd_hits(proba_networks,np.sum(network_votes))
dist_data = compute_freq(votes,frequency_votes,seed_data,proba_networks)
plt.figure()
plt.hist(dist_data[0],bins=np.arange(0,1,.01))
plt.figure()
plt.plot(dist_data[0].T)
"""
Explanation: Generate random coordinates
The assigned coordinates are generated for each network with a probability equivalent to its volume relative to the total volume of the brain
End of explanation
"""
def getpval_old(nhit,dist_data):
distribution_val = np.histogram(dist_data,bins=np.arange(0,1,0.01))
idx_bin = np.where((distribution_val[1]>=round(nhit,2)) & (distribution_val[1]<=round(nhit,2)))[0][0]
#print distribution_val[1]
return (np.sum(distribution_val[0][idx_bin:-1])+1)/(dist_data.shape[0]+1.)
def getpval(target,dist_data):
dist_sorted = np.sort(np.copy(dist_data))
b = np.sum(dist_sorted > target)
#print b
#print dist_data.shape[0]
#print distribution_val[1]
return ((b+1.)/(dist_data.shape[0]+1.))
print network_votes
pval_results=[]
for i in range(0,len(dist_data)):
pval_results.append(getpval(network_votes[i],dist_data[i,:]))
print pval_results
plt.figure()
plt.bar(np.arange(1,len(pval_results)+1),pval_results,width=0.5,align='center')
plt.xlabel('Networks')
plt.ylabel('p-value')
"""
Explanation: Generate the p-values for each network
End of explanation
"""
from proteus.matrix import tseries as ts
hitfreq_vol = ts.vec2map(network_votes,template)
pval_vol = ts.vec2map(1-np.array(pval_results),template)
plt.figure()
plotting.plot_stat_map(hitfreq_vol,cut_coords=(0,0,0),draw_cross=False)
plt.figure()
plotting.plot_stat_map(pval_vol,cut_coords=(0,0,0),draw_cross=False)
"""
Explanation: Map the p-values to the template
End of explanation
"""
# correct for FRD
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
fdr_test,fdr_pval=fdrcorrection0(pval_results,alpha=0.05)
print network_votes
print fdr_test
print fdr_pval
# save the results
path_output = '/home/cdansereau/git/Projects/metaad/maps_results/'
stats_results = {'Hits':network_votes ,'pvalues':pval_results,'fdr_test':fdr_test,'fdr_pval':fdr_pval,'n_outofbrain':n_outofbrain}
scipy.io.savemat(path_output + output_stats, stats_results)
hitfreq_vol.to_filename(os.path.join(path_output,output_vol))
#hitfreq_vol.to_filename(os.path.join('/home/cdansereau/git/Projects/metaad/maps_results/','AD_pval_vol.nii.gz'))
"""
Explanation: FDR correction of the p-values
End of explanation
"""
|
mikecassell/Deep-Learning-ND
|
first-neural-network/.ipynb_checkpoints/Your_first_neural_network-checkpoint.ipynb
|
mit
|
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
End of explanation
"""
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
"""
Explanation: Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
End of explanation
"""
rides[:24*10].plot(x='dteday', y='cnt')
"""
Explanation: Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower overall ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
End of explanation
"""
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
"""
Explanation: Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().
End of explanation
"""
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
"""
Explanation: Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
End of explanation
"""
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
"""
Explanation: Splitting the data into training, testing, and validation sets
We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
End of explanation
"""
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
"""
Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
End of explanation
"""
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
self.activation_function = lambda x : 1 / (1+np.exp(-x))
self.derivative = lambda x : x * (1 - x)
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
        #### Implement the forward pass here ####
        ### Forward pass ###
        # Make sure targets arrive as a column vector (a 1D array or Series works too)
        targets = np.array(targets).reshape(-1, 1)
        # Hidden layer
        hidden_inputs = np.dot(features, self.weights_input_to_hidden)
        hidden_outputs = self.activation_function(hidden_inputs)
        # Output layer: the regression output uses f(x) = x, so no sigmoid here
        final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
        final_outputs = final_inputs
        #### Implement the backward pass here ####
        ### Backward pass ###
        # Output error; the derivative of f(x) = x is 1, so the error term is the error itself
        error = targets - final_outputs
        output_error_term = error
        # Hidden layer's contribution to the error
        hidden_error = np.dot(output_error_term, self.weights_hidden_to_output.T)
        hidden_error_term = hidden_error * self.derivative(hidden_outputs)
        # Weight step (input to hidden)
        delta_weights_i_h += np.dot(features.T, hidden_error_term)
        # Weight step (hidden to output)
        delta_weights_h_o += np.dot(hidden_outputs.T, output_error_term)
        # Update the weights with the averaged gradient descent step
        self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights
        self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = features.dot(self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
        # Output layer: identity activation for the regression output
        final_inputs = hidden_outputs.dot(self.weights_hidden_to_output)
        final_outputs = final_inputs
        return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
"""
Explanation: Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
<img src="assets/neural_network.png" width=300px>
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.
Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.
2. Implement the forward pass in the train method.
3. Implement the backpropagation algorithm in the train method, including calculating the output error.
4. Implement the forward pass in the run method.
End of explanation
"""
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
"""
Explanation: Unit tests
Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly before you start trying to train it. These tests must all be successful to pass the project.
End of explanation
"""
import sys
### Set the hyperparameters here ###
iterations = 100
learning_rate = 0.1
hidden_nodes = 2
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']  # .loc replaces the deprecated .ix
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
"""
Explanation: Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
Choose the number of iterations
This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model will not generalize well to other data; this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase.
Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
End of explanation
"""
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
"""
Explanation: Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
End of explanation
"""
|
pastas/pastas
|
examples/groundwater_paper/Ex2_monitoring_network/Example2.ipynb
|
mit
|
# Import the packages
import pandas as pd
import pastas as ps
import numpy as np
import os
import matplotlib.pyplot as plt
ps.show_versions()
ps.set_log_level("ERROR")
"""
Explanation: Example 2: Analysis of groundwater monitoring networks using Pastas
This notebook is supplementary material to the following paper submitted to Groundwater:
Collenteur, R.A., Bakker, M., Caljé, R., Klop, S.A., Schaars, F. (2019) Pastas: open source software for the analysis of groundwater time series. Groundwater. doi: 10.1111/gwat.12925.
In this second example, it is demonstrated how scripts can be used to analyze a large number of time series. Consider a pumping well field surrounded by a number of observations wells. The pumping wells are screened in the middle aquifer of a three-aquifer system. The objective is to estimate the drawdown caused by the groundwater pumping in each observation well.
1. Import the packages
End of explanation
"""
# Dictionary to hold all heads
heads = {}
# Load a metadata-file with xy-coordinates from the groundwater heads
metadata_heads = pd.read_csv("data/metadata_heads.csv", index_col=0)
distances = pd.read_csv("data/distances.csv", index_col=0)
# Add the groundwater head observations to the database
for fname in os.listdir("./data/heads/"):
fname = os.path.join("./data/heads/", fname)
obs = pd.read_csv(fname, parse_dates=True, index_col=0, squeeze=True)
heads[obs.name] = obs
# Load a metadata-file with xy-coordinates from the explanatory variables
metadata = pd.read_csv("data/metadata_stresses.csv", index_col=0)
# Import the precipitation, evaporation and well time series
rain = pd.read_csv("data/rain.csv", parse_dates=True, index_col=0, squeeze=True)
evap = pd.read_csv("data/evap.csv", parse_dates=True, index_col=0, squeeze=True)
well = pd.read_csv("data/well.csv", parse_dates=True, index_col=0, squeeze=True)
# Plot the stresses
fig, [ax1, ax2, ax3] = plt.subplots(3, 1, figsize=(10,5), sharex=True)
rain.plot(ax=ax1)
evap.plot(ax=ax2)
well.plot(ax=ax3)
plt.xlim("1960", "2018");
"""
Explanation: 2. Importing the time series
In this codeblock the time series are imported. The following time series are imported:
44 time series with head observations [m] from the monitoring network;
precipitation [m/d] from KNMI station Oudenbosch;
potential evaporation [m/d] from KNMI station de Bilt;
Total pumping rate [m3/d] from well field Seppe.
End of explanation
"""
# Create folder to save the model figures
mls = {}
mlpath = "models"
if not os.path.exists(mlpath):
os.mkdir(mlpath)
# Choose the calibration period
tmin = "1970"
tmax = "2017-09"
num = 0
for name, head in heads.items():
# Create a Model for each time series and add a StressModel2 for the recharge
ml = ps.Model(head, name=name)
# Add the RechargeModel to simulate the effect of rainfall and evaporation
rm = ps.RechargeModel(rain, evap, rfunc=ps.Gamma, name="recharge")
ml.add_stressmodel(rm)
# Add a StressModel to simulate the effect of the groundwater extractions
sm = ps.StressModel(well / 1e6, rfunc=ps.Hantush, name="well", settings="well", up=False)
ml.add_stressmodel(sm)
# Estimate the model parameters
ml.solve(tmin=tmin, tmax=tmax, report=False, solver=ps.LmfitSolve)
# Check if the estimated effect of the groundwater extraction is significant.
# If not, delete the stressmodel and calibrate the model again.
gain, stderr = ml.parameters.loc["well_A", ["optimal", "stderr"]]
if stderr is None:
stderr = 10
if 1.96 * stderr > -gain:
num += 1
ml.del_stressmodel("well")
ml.solve(tmin=tmin, tmax=tmax, report=False)
# Plot the results and store the plot
mls[name] = ml
ml.plots.results()
path = os.path.join(mlpath, name + ".png")
plt.savefig(path, bbox_inches="tight")
plt.close()
print(f"The number of models where the well is dropped from the model is {num}")
"""
Explanation: 3/4/5. Creating and optimizing the Time Series Model
For each time series of groundwater head observations a TFN model is constructed with the following model components:
- A Constant
- A NoiseModel
- A RechargeModel object to simulate the effect of recharge
- A StressModel object to simulate the effect of groundwater extraction
Calibrating all models can take a couple of minutes!!
End of explanation
"""
try:
from timml import ModelMaq, Well
plot_timml = True
# Values from REGIS II v2.2 (Site id B49F0240)
z = [9, -25, -83, -115, -190] # Reference to NAP
kv = np.array([1e-3, 5e-3]) # Min-Max of Vertical hydraulic conductivity for both leaky layer
D1 = z[0]-z[1] # Estimated thickness of leaky layer
c1 = D1/kv # Estimated resistance
D2 = z[2] - z[3]
c2 = D2 / kv
kh1 = np.array([1e0, 2.5e0]) # Min-Max of Horizontal hydraulic conductivity for aquifer 1
kh2 = np.array([1e1, 2.5e1]) # Min-Max of Horizontal hydraulic conductivity for aquifer 2
mlm = ModelMaq(kaq=[kh1.mean(), 35], z=z, c=[c1.max(), c2.mean()], \
topboundary='semi', hstar=0)
w = Well(mlm, 0, 0, 34791, layers=1)
mlm.solve()
x = np.linspace(100, 5000, 100)
h = mlm.headalongline(x, 0)
except:
plot_timml = False
# Get the parameters and distances to plot
params = pd.DataFrame(index=mls.keys(), columns=["optimal", "stderr"], dtype=float)
for name, ml in mls.items():
if "well" in ml.stressmodels.keys():
params.loc[name] = ml.parameters.loc["well_A", ["optimal", "stderr"]] * well.loc["2007":].mean() /1e6
# Select model per aquifer
shallow = metadata_heads.z.loc[(metadata_heads.z<96)].index
aquifer = metadata_heads.z.loc[(metadata_heads.z<186) & (metadata_heads.z>96)].index
# Make the plot
fig = plt.figure(figsize=(8,5))
plt.grid(zorder=-10)
display_error_bars = True
if display_error_bars:
plt.errorbar(distances.loc[shallow, "Seppe"], params.loc[shallow, "optimal"],
yerr=1.96*params.loc[shallow, "stderr"], linestyle="",
elinewidth=2, marker="", markersize=10, capsize=4)
plt.errorbar(distances.loc[aquifer, "Seppe"], params.loc[aquifer, "optimal"],
yerr=1.96*params.loc[aquifer, "stderr"], linestyle="",
elinewidth=2, marker="", capsize=4)
plt.scatter(distances.loc[shallow], params.loc[shallow, "optimal"], marker="^", s=80)
plt.scatter(distances.loc[aquifer], params.loc[aquifer, "optimal"], marker="s", s=80)
# Plot two-layer TimML model for comparison
if plot_timml:
plt.plot(x, h[0], color="C0", linestyle="--" )
plt.plot(x, h[1], color="C1", linestyle="--" )
legend = ["TimML L1", "TimML L2", "aquifer 1", "aquifer 2"]
else:
legend = ["aquifer 1", "aquifer 2"]
plt.ylabel("steady drawdown (m)")
plt.xlabel("radial distance from the center of the well field (m)")
plt.xlim(0, 4501)
plt.legend(legend, loc=4)
"""
Explanation: Make plots for publication
In the next codeblocks the Figures used in the Pastas paper are created. The following figures are created:
Figure of the drawdown estimated for each observations well;
Figure of the decomposition of the different contributions;
Figure of the pumping rate of the well field.
Figure of the drawdown estimated for each observations well
End of explanation
"""
# Select a model to plot
ml = mls["B49F0232_5"]
# Create the figure
[ax1, ax2, ax3] = ml.plots.decomposition(split=False, figsize=(7,6), ytick_base=1, tmin="1985")
plt.xticks(rotation=0)
ax1.set_yticks([2, 0, -2])
ax1.set_ylabel("head (m)")
ax1.legend().set_visible(False)
ax3.set_yticks([-4, -6])
ax2.set_ylabel("contributions (m) ") # Little trick to get the label right
ax3.set_xlabel("year")
ax3.set_title("pumping well")
"""
Explanation: Example figure of a TFN model
End of explanation
"""
fig, ax = plt.subplots(1,1, figsize=(8,2.5), sharex=True)
ax.plot(well, color="k")
ax.set_ylabel("pumping rate\n[m$^3$/day]")
ax.set_xlabel("year")
ax.set_xlim(pd.Timestamp("1951"), pd.Timestamp("2018"))
"""
Explanation: Figure of the pumping rate of the well field
End of explanation
"""
|
mdpiper/dakota-tutorial
|
notebooks/3-Python.ipynb
|
mit
|
%pylab inline
"""
Explanation: <img src="images/csdms_logo.jpg">
Example 3
Use the CSDMS Dakota interface in Python to perform a centered parameter study on HydroTrend and evaluate the output.
Use pylab magic:
End of explanation
"""
import os
import shutil
"""
Explanation: And include other necessary imports:
End of explanation
"""
from dakota.core import Dakota
"""
Explanation: Set up and run the experiment
Start by importing the Dakota class:
End of explanation
"""
d = Dakota(method='centered_parameter_study')
"""
Explanation: Next, create a new Dakota instance to perform a centered parameter study:
End of explanation
"""
d.__dict__
"""
Explanation: Note that method string follows the syntax of Dakota keywords; e.g., centered_parameter_study.
The Dakota instance comes with a few predefined attributes, including the method attribute, which is used to set up the experiment.
End of explanation
"""
m = d.method
m.__dict__
"""
Explanation: Pull out the method attribute to save some time typing in the following steps:
End of explanation
"""
m.component = 'hydrotrend'
m.interface = 'fork'
m.run_directory = '../examples/3-Python/'
"""
Explanation: Configure the Dakota instance to run an experiment on HydroTrend
in the examples/3-Python directory of this project:
End of explanation
"""
m.variable_descriptors = ['river_mean_velocity', 'base_flow']
m.initial_point = [1.0, 1.0]
m.step_vector = [0.2, 0.25]
m.steps_per_variable = [3, 2] # in each direction
"""
Explanation: The fork interface is used when Dakota calls an executable on the file system.
In this experiment, let's explore the effects of mean river velocity $u$ [$m\,s^{-1}$] and the constant annual base flow $q_0$ [$m^3 s^{-1}$] on the median daily bedload at the river mouth $Q_b$ [$kg\,s^{-1}$] for one year of simulation time. All HydroTrend parameters that are not included in the parameter study are held constant.
First, configure the inputs:
End of explanation
"""
def calc_vector(start, step_size, n_steps_per_direction):
"""Calculate a vector from a center, step size and number of steps."""
v = []
for i in range(len(start)):
v_start = start[i] - step_size[i]*n_steps_per_direction[i]
v_stop = start[i] + step_size[i]*n_steps_per_direction[i]
v.append(numpy.linspace(v_start, v_stop, 2*n_steps_per_direction[i]+1))
return v
u, q_0 = calc_vector(m.initial_point, m.step_vector, m.steps_per_variable)
print 'u =', u
print 'q_0 =', q_0
"""
Explanation: Note (again) that the attribute names match the Dakota keyword names for a centered parameter study.
The steps_per_variable attribute is tricky: this is the number of steps in each direction from the initial point; e.g, river_mean_velocity will be evaluated at a total of 7 locations.
From these attributes, calculate the parameter values at which HydroTrend will be evaluated. Use the helper function calc_vector.
End of explanation
"""
x = [mean(q_0)]*len(q_0)
y = [mean(u)]*len(u)
plot(x, q_0, 'ro')
plot(u, y, 'ro')
xlim((0, 2))
ylim((0, 2))
xlabel('$u (m\,s^{-1})$')
ylabel('$q_0 (m^3\,s^{-1})$')
title('Centered parameter study')
"""
Explanation: Make a quick plot to visualize the evaluation nodes in parameter space:
End of explanation
"""
m.response_descriptors = 'Qb_median', # must be list or tuple
m.response_statistics = 'median',
m.response_files = 'HYDROASCII.QB',
"""
Explanation: Next, set up the responses from HydroTrend used by Dakota. Each of these must be a list or a tuple. (This is a little clumsy.)
End of explanation
"""
m.input_files = os.path.join(m.run_directory, 'HYDRO0.HYPS'),
"""
Explanation: HydroTrend requires a hypsometry file. The default Waipaoa hypsometry file is included in the run directory. Link it to the input_files attribute of the centered parameter study object, which also must be a tuple or a list:
End of explanation
"""
base_tmpl_file = os.path.join(m.run_directory, 'hydrotrend.in.tmpl')
base_input_file = os.path.join(m.run_directory, 'HYDRO.IN.defaults')
"""
Explanation: HydroTrend requires an input file that provides all of the parameter values needed for a run.
Dakota requires a modified version of this file,
called a template file,
with the values of the parameters Dakota is operating on
replaced with their corresponding descriptors.
When Dakota runs, it replaces the descriptors with actual values.
All parameters that are not included in the parameter study are held constant.
Make a template file for this parameter study using:
the HydroTrend component template file
a base HydroTrend input file, with default parameter values
the parameters to replace
The first two files are included in the run directory for this study:
End of explanation
"""
from dakota.plugins.hydrotrend import HydroTrend
tmpl_file = HydroTrend.write_tmpl_file(base_tmpl_file, base_input_file, m.variable_descriptors)
shutil.move(tmpl_file, m.run_directory)
m.template_file = os.path.join(m.run_directory, tmpl_file)
"""
Explanation: Create the template file and move it to the run directory:
End of explanation
"""
m.analysis_driver = 'dakota_run_plugin'
"""
Explanation: In the final setup step, set the Dakota analysis driver. This is easy, because it's always the same:
End of explanation
"""
d.method.__dict__
"""
Explanation: The dakota_run_plugin script automates the actions needed for the analysis driver.
This might be a good time to review the settings for the experiment:
End of explanation
"""
os.chdir(m.run_directory)
"""
Explanation: All of the parameters for the experiment have been configured.
For the remaining steps leading up to running the experiment,
let's switch to the run directory:
End of explanation
"""
d.write_configuration_file('config.yaml')
"""
Explanation: To help Dakota and Hydrotrend communicate,
a configuration file,
containing all the experiment parameters,
is used.
Create it with:
End of explanation
"""
d.write_input_file()
"""
Explanation: Finally, use the information added to the Dakota object to write the Dakota input file:
End of explanation
"""
d.run()
"""
Explanation: Run Dakota!
End of explanation
"""
%ls
"""
Explanation: Evaluate the results
Get a directory listing:
End of explanation
"""
%cat dakota.dat
"""
Explanation: View the tabular data file:
End of explanation
"""
data = numpy.loadtxt(m.data_file, skiprows=1, unpack=True, usecols=[0,2,3,4])
"""
Explanation: Read the tabular data into this notebook:
End of explanation
"""
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(data[1,], data[2,], data[3,], depthshade=False)
ax.set_xlabel('$u \,(m \, s^{-1})$')
ax.set_ylabel('$q_0 \,(m^3 s^{-1})$')
ax.set_zlabel('$Q_b \,(kg \, s^{-1})$')
"""
Explanation: Make a plot of the experimental results:
End of explanation
"""
e = Dakota.from_file_like('config.yaml')
"""
Explanation: Modify the experiment
Use the configuration file created in the initial experiment to create a new Dakota instance.
Modify its settings.
Save configuration.
Write new Dakota input file.
Run modified experiment.
The configuration file created above can be used to make a new Dakota instance; the remaining steps are sketched in the short code block that follows:
End of explanation
"""
|
franzpl/StableGrid
|
jupyter_notebooks/mains_frequency_measurement_one_day.ipynb
|
mit
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
data = np.genfromtxt('frequency_data.txt')
frequency_data = data[:, 0]
hour = data[:, 1]
%pylab inline
pylab.rcParams['figure.figsize'] = (15, 10)
fig, ax = plt.subplots()
plt.title("Frequency characteristic 16/06/2017 in Rostock",fontsize=22)
plt.xlabel("Time of day",fontsize=22)
plt.ylabel("f / Hz",fontsize=22)
x_ticks_labels = ["00:00", "01:00", "02:00", "03:00", "04:00", "05:00", "06:00",
"07:00", "08:00", "09:00", "10:00", "11:00", "12:00",
"13:00", "14:00", "15:00", "16:00", "17:00", "18:00",
"19:00", "20:00", "21:00", "22:00", "23:00", "00:00"]
ax.set_xticklabels(x_ticks_labels)
start, end = ax.get_xlim()
ax.xaxis.set_ticks([0, 240, 479, 718, 958, 1197, 1436, 1675, 1914, 2153, 2393,
2632, 2871, 3110, 3349, 3588, 3828, 4067, 4306, 4545,
4784, 5023, 5263, 5502, 5741])
ax.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
fig.autofmt_xdate()
plt.grid()
plt.plot(frequency_data)
plt.show()
"""
Explanation: Mains frequency measurement for one day
End of explanation
"""
# Mean mains frequency for each hour of the day (01:00 ... 23:00, then 00:00).
# The slice boundaries are sample indices and match the tick positions used above.
hour_bounds = [0, 240, 479, 718, 958, 1197, 1436, 1675, 1914, 2153, 2393,
               2632, 2871, 3110, 3349, 3588, 3828, 4067, 4306, 4545,
               4784, 5023, 5263, 5502, 5741]
hour_array = np.array([np.mean(frequency_data[lo:hi])
                       for lo, hi in zip(hour_bounds[:-1], hour_bounds[1:])])
fig, ax2 = plt.subplots()
plt.title("Frequency characteristic 16/06/2017 in Rostock",fontsize=22)
plt.xlabel("Time of day",fontsize=22)
plt.ylabel("f / Hz",fontsize=22)
x_ticks_labels = ["01:00", "02:00", "03:00", "04:00", "05:00", "06:00",
"07:00", "08:00", "09:00", "10:00", "11:00", "12:00",
"13:00", "14:00", "15:00", "16:00", "17:00", "18:00",
"19:00", "20:00", "21:00", "22:00", "23:00", "00:00"]
ax2.set_xticklabels(x_ticks_labels)
start, end = ax2.get_xlim()
ax2.xaxis.set_ticks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24])
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
fig.autofmt_xdate()
plt.grid()
plt.ylim([49.97,50.04])
plt.plot(hour_array, linewidth=2, label='temporal average of mains frequency')
ax2.axhline(y=50.00, color="k",linewidth=1)
axhspan(49.99, 50.01 , facecolor='g', alpha=0.5, label='dead band')
axhspan(50.01, 50.04 , facecolor='r', alpha=0.5, label='adjustment')
axhspan(49.99, 49.97 , facecolor='r', alpha=0.5)
ax2.legend(loc='lower right')
plt.show()
hour_array
bins = np.array([ 49.934 , 49.93697959, 49.93995918, 49.94293878,
49.94591837, 49.94889796, 49.95187755, 49.95485714,
49.95783673, 49.96081633, 49.96379592, 49.96677551,
49.9697551 , 49.97273469, 49.97571429, 49.97869388,
49.98167347, 49.98465306, 49.98763265, 49.99061224,
49.99359184, 49.99657143, 49.99955102, 50.00253061,
50.0055102 , 50.0084898 , 50.01146939, 50.01444898,
50.01742857, 50.02040816, 50.02338776, 50.02636735,
50.02934694, 50.03232653, 50.03530612, 50.03828571,
50.04126531, 50.0442449 , 50.04722449, 50.05020408,
50.05318367, 50.05616327, 50.05914286, 50.06212245,
50.06510204, 50.06808163, 50.07106122, 50.07404082,
50.07702041, 50.08 ])
my = np.mean(frequency_data)
sig = np.std(frequency_data)
plt.hist(frequency_data, bins=bins)
plt.title(" Histogram $\mu$ = %.3f, $\sigma$ = %.3f" % (my, sig),fontsize=22)
plt.xlabel("f / Hz",fontsize=22)
plt.ylabel("Absolute Frequency",fontsize=22)
plt.grid()
plt.show()
weights = np.ones_like(frequency_data)/float(len(frequency_data))
counts, bin_edges = np.histogram(frequency_data, bins=bins, weights=weights)
cdf = np.cumsum(counts)
plt.title("Cumulative Density Function" ,fontsize=22)
plt.xlabel("f / Hz",fontsize=22)
plt.ylabel("CDF",fontsize=22)
plt.grid()
plt.plot(bin_edges[1:], cdf, linewidth=1.6)
plt.show()
from scipy import stats
stats.anderson(frequency_data)
stats.kstest(frequency_data, 'norm')
x = np.random.normal(0,1,1000)
test_stat = stats.kstest(x, 'norm')
stats.anderson(x)
poisson = np.random.poisson(5, 10000)
stats.kstest(poisson, 'norm')
"""
Explanation: Averaging mains frequency per hour
End of explanation
"""
frequency_data_normalized = (frequency_data - np.mean(frequency_data)) / np.std(frequency_data)
from scipy.stats import norm
counts, bin_edges = np.histogram(frequency_data_normalized, weights=weights, bins = 139)
cdf = np.cumsum(counts)
plt.title("Visual comparison between CDF of data and standard normal" ,fontsize=22)
plt.xlabel("f / Hz",fontsize=22)
plt.ylabel("CDF",fontsize=22)
plt.grid()
plt.plot(bin_edges[1:], cdf, linewidth=3, color='b', label='Data CDF')
standard = norm.cdf(bin_edges)
plt.plot(bin_edges, standard, linewidth=3, color='r', label='Standard Normal CDF')
plt.ylim([0,1])
plt.legend(loc='lower right')
plt.show()
"""
Explanation: "If the K-S statistic is small or the p-value is high, then we cannot reject the hypothesis that the distributions of the two samples are the same."
Normalizing Data
End of explanation
"""
stats.kstest(frequency_data_normalized, 'norm')
"""
Explanation: Kolmogorov-Smirnov-Test
End of explanation
"""
|
Kreiswolke/gensim
|
docs/notebooks/doc2vec-IMDB.ipynb
|
lgpl-2.1
|
import locale
import glob
import os.path
import requests
import tarfile
import sys
import codecs
dirname = 'aclImdb'
filename = 'aclImdb_v1.tar.gz'
locale.setlocale(locale.LC_ALL, 'C')
if sys.version > '3':
control_chars = [chr(0x85)]
else:
control_chars = [unichr(0x85)]
# Convert text to lower-case and strip punctuation/symbols from words
def normalize_text(text):
norm_text = text.lower()
# Replace breaks with spaces
norm_text = norm_text.replace('<br />', ' ')
# Pad punctuation with spaces on both sides
for char in ['.', '"', ',', '(', ')', '!', '?', ';', ':']:
norm_text = norm_text.replace(char, ' ' + char + ' ')
return norm_text
import time
start = time.clock()
if not os.path.isfile('aclImdb/alldata-id.txt'):
if not os.path.isdir(dirname):
if not os.path.isfile(filename):
# Download IMDB archive
url = u'http://ai.stanford.edu/~amaas/data/sentiment/' + filename
r = requests.get(url)
with open(filename, 'wb') as f:
f.write(r.content)
tar = tarfile.open(filename, mode='r')
tar.extractall()
tar.close()
# Concat and normalize test/train data
folders = ['train/pos', 'train/neg', 'test/pos', 'test/neg', 'train/unsup']
alldata = u''
for fol in folders:
temp = u''
output = fol.replace('/', '-') + '.txt'
# Is there a better pattern to use?
txt_files = glob.glob('/'.join([dirname, fol, '*.txt']))
for txt in txt_files:
with codecs.open(txt, 'r', encoding='utf-8') as t:
t_clean = t.read()
for c in control_chars:
t_clean = t_clean.replace(c, ' ')
temp += t_clean
temp += "\n"
temp_norm = normalize_text(temp)
with codecs.open('/'.join([dirname, output]), 'w', encoding='utf-8') as n:
n.write(temp_norm)
alldata += temp_norm
with codecs.open('/'.join([dirname, 'alldata-id.txt']), 'w', encoding='utf-8') as f:
for idx, line in enumerate(alldata.splitlines()):
num_line = u"_*{0} {1}\n".format(idx, line)
f.write(num_line)
end = time.clock()
print ("total running time: ", end-start)
import os.path
assert os.path.isfile("aclImdb/alldata-id.txt"), "alldata-id.txt unavailable"
"""
Explanation: gensim doc2vec & IMDB sentiment dataset
TODO: section on introduction & motivation
TODO: prerequisites + dependencies (statsmodels, patsy, ?)
Requirements
Following are the dependencies for this tutorial:
- testfixtures
- statsmodels
Load corpus
Fetch and prep exactly as in Mikolov's go.sh shell script. (Note this cell tests for existence of required files, so steps won't repeat once the final summary file (aclImdb/alldata-id.txt) is available alongside this notebook.)
End of explanation
"""
import gensim
from gensim.models.doc2vec import TaggedDocument
from collections import namedtuple
SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')
alldocs = [] # will hold all docs in original order
with open('aclImdb/alldata-id.txt', encoding='utf-8') as alldata:
for line_no, line in enumerate(alldata):
tokens = gensim.utils.to_unicode(line).split()
words = tokens[1:]
tags = [line_no] # `tags = [tokens[0]]` would also work at extra memory cost
split = ['train','test','extra','extra'][line_no//25000] # 25k train, 25k test, 25k extra
sentiment = [1.0, 0.0, 1.0, 0.0, None, None, None, None][line_no//12500] # [12.5K pos, 12.5K neg]*2 then unknown
alldocs.append(SentimentDocument(words, tags, split, sentiment))
train_docs = [doc for doc in alldocs if doc.split == 'train']
test_docs = [doc for doc in alldocs if doc.split == 'test']
doc_list = alldocs[:] # for reshuffling per pass
print('%d docs: %d train-sentiment, %d test-sentiment' % (len(doc_list), len(train_docs), len(test_docs)))
"""
Explanation: The data is small enough to be read into memory.
End of explanation
"""
from gensim.models import Doc2Vec
import gensim.models.doc2vec
from collections import OrderedDict
import multiprocessing
cores = multiprocessing.cpu_count()
assert gensim.models.doc2vec.FAST_VERSION > -1, "this will be painfully slow otherwise"
simple_models = [
# PV-DM w/concatenation - window=5 (both sides) approximates paper's 10-word total window size
Doc2Vec(dm=1, dm_concat=1, size=100, window=5, negative=5, hs=0, min_count=2, workers=cores),
# PV-DBOW
Doc2Vec(dm=0, size=100, negative=5, hs=0, min_count=2, workers=cores),
# PV-DM w/average
Doc2Vec(dm=1, dm_mean=1, size=100, window=10, negative=5, hs=0, min_count=2, workers=cores),
]
# speed setup by sharing results of 1st model's vocabulary scan
simple_models[0].build_vocab(alldocs) # PV-DM/concat requires one special NULL word so it serves as template
print(simple_models[0])
for model in simple_models[1:]:
model.reset_from(simple_models[0])
print(model)
models_by_name = OrderedDict((str(model), model) for model in simple_models)
"""
Explanation: Set-up Doc2Vec Training & Evaluation Models
Approximating experiment of Le & Mikolov "Distributed Representations of Sentences and Documents", also with guidance from Mikolov's example go.sh:
./word2vec -train ../alldata-id.txt -output vectors.txt -cbow 0 -size 100 -window 10 -negative 5 -hs 0 -sample 1e-4 -threads 40 -binary 0 -iter 20 -min-count 1 -sentence-vectors 1
Parameter choices below vary:
100-dimensional vectors, as the 400d vectors of the paper don't seem to offer much benefit on this task
similarly, frequent word subsampling seems to decrease sentiment-prediction accuracy, so it's left out
cbow=0 means skip-gram which is equivalent to the paper's 'PV-DBOW' mode, matched in gensim with dm=0
added to that DBOW model are two DM models, one which averages context vectors (dm_mean) and one which concatenates them (dm_concat, resulting in a much larger, slower, more data-hungry model)
a min_count=2 saves quite a bit of model memory, discarding only words that appear in a single doc (and are thus no more expressive than the unique-to-each doc vectors themselves)
End of explanation
"""
from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
models_by_name['dbow+dmm'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[2]])
models_by_name['dbow+dmc'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[0]])
"""
Explanation: Following the paper, we also evaluate models in pairs. These wrappers return the concatenation of the vectors from each model. (Only the singular models are trained.)
End of explanation
"""
import numpy as np
import statsmodels.api as sm
from random import sample
# for timing
from contextlib import contextmanager
from timeit import default_timer
import time
@contextmanager
def elapsed_timer():
start = default_timer()
elapser = lambda: default_timer() - start
yield lambda: elapser()
end = default_timer()
elapser = lambda: end-start
def logistic_predictor_from_data(train_targets, train_regressors):
logit = sm.Logit(train_targets, train_regressors)
predictor = logit.fit(disp=0)
#print(predictor.summary())
return predictor
def error_rate_for_model(test_model, train_set, test_set, infer=False, infer_steps=3, infer_alpha=0.1, infer_subsample=0.1):
"""Report error rate on test_doc sentiments, using supplied model and train_docs"""
train_targets, train_regressors = zip(*[(doc.sentiment, test_model.docvecs[doc.tags[0]]) for doc in train_set])
train_regressors = sm.add_constant(train_regressors)
predictor = logistic_predictor_from_data(train_targets, train_regressors)
test_data = test_set
if infer:
if infer_subsample < 1.0:
test_data = sample(test_data, int(infer_subsample * len(test_data)))
test_regressors = [test_model.infer_vector(doc.words, steps=infer_steps, alpha=infer_alpha) for doc in test_data]
else:
test_regressors = [test_model.docvecs[doc.tags[0]] for doc in test_docs]
test_regressors = sm.add_constant(test_regressors)
# predict & evaluate
test_predictions = predictor.predict(test_regressors)
corrects = sum(np.rint(test_predictions) == [doc.sentiment for doc in test_data])
errors = len(test_predictions) - corrects
error_rate = float(errors) / len(test_predictions)
return (error_rate, errors, len(test_predictions), predictor)
"""
Explanation: Predictive Evaluation Methods
Helper methods for evaluating error rate.
End of explanation
"""
from collections import defaultdict
best_error = defaultdict(lambda :1.0) # to selectively-print only best errors achieved
from random import shuffle
import datetime
alpha, min_alpha, passes = (0.025, 0.001, 20)
alpha_delta = (alpha - min_alpha) / passes
print("START %s" % datetime.datetime.now())
for epoch in range(passes):
shuffle(doc_list) # shuffling gets best results
for name, train_model in models_by_name.items():
# train
duration = 'na'
train_model.alpha, train_model.min_alpha = alpha, alpha
with elapsed_timer() as elapsed:
train_model.train(doc_list)
duration = '%.1f' % elapsed()
# evaluate
eval_duration = ''
with elapsed_timer() as eval_elapsed:
err, err_count, test_count, predictor = error_rate_for_model(train_model, train_docs, test_docs)
eval_duration = '%.1f' % eval_elapsed()
best_indicator = ' '
if err <= best_error[name]:
best_error[name] = err
best_indicator = '*'
print("%s%f : %i passes : %s %ss %ss" % (best_indicator, err, epoch + 1, name, duration, eval_duration))
if ((epoch + 1) % 5) == 0 or epoch == 0:
eval_duration = ''
with elapsed_timer() as eval_elapsed:
infer_err, err_count, test_count, predictor = error_rate_for_model(train_model, train_docs, test_docs, infer=True)
eval_duration = '%.1f' % eval_elapsed()
best_indicator = ' '
if infer_err < best_error[name + '_inferred']:
best_error[name + '_inferred'] = infer_err
best_indicator = '*'
print("%s%f : %i passes : %s %ss %ss" % (best_indicator, infer_err, epoch + 1, name + '_inferred', duration, eval_duration))
print('completed pass %i at alpha %f' % (epoch + 1, alpha))
alpha -= alpha_delta
print("END %s" % str(datetime.datetime.now()))
"""
Explanation: Bulk Training
Using explicit multiple-pass, alpha-reduction approach as sketched in gensim doc2vec blog post – with added shuffling of corpus on each pass.
Note that vector training is occurring on all documents of the dataset, which includes all TRAIN/TEST/DEV docs.
Evaluation of each model's sentiment-predictive power is repeated after each pass, as an error rate (lower is better), to see the rates-of-relative-improvement. The base numbers reuse the TRAIN and TEST vectors stored in the models for the logistic regression, while the inferred results use newly-inferred TEST vectors.
(On a 4-core 2.6Ghz Intel Core i7, these 20 passes training and evaluating 3 main models takes about an hour.)
End of explanation
"""
# print best error rates achieved
for rate, name in sorted((rate, name) for name, rate in best_error.items()):
print("%f %s" % (rate, name))
"""
Explanation: Achieved Sentiment-Prediction Accuracy
End of explanation
"""
doc_id = np.random.randint(simple_models[0].docvecs.count) # pick random doc; re-run cell for more examples
print('for doc %d...' % doc_id)
for model in simple_models:
inferred_docvec = model.infer_vector(alldocs[doc_id].words)
print('%s:\n %s' % (model, model.docvecs.most_similar([inferred_docvec], topn=3)))
"""
Explanation: In my testing, unlike the paper's report, DBOW performs best. Concatenating vectors from different models only offers a small predictive improvement. The best results I've seen are still just under 10% error rate, still a ways from the paper's 7.42%.
Examining Results
Are inferred vectors close to the precalculated ones?
End of explanation
"""
import random
doc_id = np.random.randint(simple_models[0].docvecs.count) # pick random doc, re-run cell for more examples
model = random.choice(simple_models) # and a random model
sims = model.docvecs.most_similar(doc_id, topn=model.docvecs.count) # get *all* similar documents
print(u'TARGET (%d): «%s»\n' % (doc_id, ' '.join(alldocs[doc_id].words)))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(alldocs[sims[index][0]].words)))
"""
Explanation: (Yes, here the stored vector from 20 epochs of training is usually one of the closest to a freshly-inferred vector for the same words. Note the defaults for inference are very abbreviated – just 3 steps starting at a high alpha – and likely need tuning for other applications.)
Do close documents seem more related than distant ones?
End of explanation
"""
word_models = simple_models[:]
import random
from IPython.display import HTML
# pick a random word with a suitable number of occurences
while True:
word = random.choice(word_models[0].wv.index2word)
if word_models[0].wv.vocab[word].count > 10:
break
# or uncomment below line, to just pick a word from the relevant domain:
#word = 'comedy/drama'
similars_per_model = [str(model.most_similar(word, topn=20)).replace('), ','),<br>\n') for model in word_models]
similar_table = ("<table><tr><th>" +
"</th><th>".join([str(model) for model in word_models]) +
"</th></tr><tr><td>" +
"</td><td>".join(similars_per_model) +
"</td></tr></table>")
print("most similar words for '%s' (%d occurences)" % (word, simple_models[0].wv.vocab[word].count))
HTML(similar_table)
"""
Explanation: (Somewhat, in terms of reviewer tone, movie genre, etc... the MOST cosine-similar docs usually seem more like the TARGET than the MEDIAN or LEAST.)
Do the word vectors show useful similarities?
End of explanation
"""
# assuming something like
# https://word2vec.googlecode.com/svn/trunk/questions-words.txt
# is in local directory
# note: this takes many minutes
for model in word_models:
sections = model.accuracy('questions-words.txt')
correct, incorrect = len(sections[-1]['correct']), len(sections[-1]['incorrect'])
print('%s: %0.2f%% correct (%d of %d)' % (model, float(correct*100)/(correct+incorrect), correct, correct+incorrect))
"""
Explanation: Do the DBOW words look meaningless? That's because the gensim DBOW model doesn't train word vectors – they remain at their random initialized values – unless you ask with the dbow_words=1 initialization parameter. Concurrent word-training slows DBOW mode significantly, and offers little improvement (and sometimes a little worsening) of the error rate on this IMDB sentiment-prediction task.
Words from DM models tend to show meaningfully similar words when there are many examples in the training data (as with 'plot' or 'actor'). (All DM modes inherently involve word vector training concurrent with doc vector training.)
Are the word vectors from this dataset any good at analogies?
End of explanation
"""
This cell left intentionally erroneous.
"""
Explanation: Even though this is a tiny, domain-specific dataset, it shows some meager capability on the general word analogies – at least for the DM/concat and DM/mean models which actually train word vectors. (The untrained random-initialized words of the DBOW model of course fail miserably.)
Slop
End of explanation
"""
from gensim.models import KeyedVectors
w2v_g100b = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True)
w2v_g100b.compact_name = 'w2v_g100b'
word_models.append(w2v_g100b)
"""
Explanation: To mix the Google dataset (if locally available) into the word tests...
End of explanation
"""
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.INFO)
"""
Explanation: To get copious logging output from above steps...
End of explanation
"""
%load_ext autoreload
%autoreload 2
"""
Explanation: To auto-reload python code while developing...
End of explanation
"""
|
deepmind/dm_pix
|
examples/image_augmentation.ipynb
|
apache-2.0
|
%%capture
!pip install dm-pix
!git clone https://github.com/deepmind/dm_pix.git
import dm_pix as pix
import jax.numpy as jnp
import numpy as np
import PIL.Image as pil
from jax import random
IMAGE_PATH = '/content/dm_pix/examples/assets/jax_logo.jpg'
# Helper functions to read images and display them
def get_image(img_path) -> jnp.ndarray:
return jnp.array(pil.open(img_path), dtype=jnp.float32) / 255.
def imshow(image: jnp.ndarray) -> None:
"""Shows the input image using PIL/Pillow backend."""
image = pil.fromarray(np.asarray(image * 255.).astype(np.uint8), "RGB")
display(image)
"""
Explanation: <a href="https://colab.research.google.com/github/SupreethRao99/dm_pix/blob/master/examples/image_augmentation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
PIX
PIX is an image processing library in JAX, for JAX.
Overview
JAX is a library resulting from the union of Autograd and XLA for high-performance machine learning research. It provides NumPy, SciPy, automatic differentiation and first-class GPU/TPU support.
PIX is a library built on top of JAX that provides image processing functions and tools which can be optimized and parallelised through jax.jit(), jax.vmap(), and jax.pmap().
End of explanation
"""
image = get_image(IMAGE_PATH)
delta = 0.42 #@param {type: "slider", min: 0, max: 1}
new_image = pix.adjust_brightness(
image=image,
delta=delta)
imshow(new_image)
"""
Explanation: adjust_brightness
Shifts the brightness of an RGB image by a given amount
End of explanation
"""
image = get_image(IMAGE_PATH)
factor = 0.42 #@param {type: "slider", min: 0, max: 1}
new_image = pix.adjust_contrast(
image=image,
factor=factor)
imshow(new_image)
"""
Explanation: adjust_contrast
Adjusts the contrast of an RGB image by a given multiplicative amount.
End of explanation
"""
image = get_image(IMAGE_PATH)
gamma = 3 #@param {type: "slider", min: 0, max: 10}
gain = 4 #@param{type: "slider",min:0, max:10}
new_image = pix.adjust_gamma(
image=image,
gain=gain,
gamma=gamma)
imshow(new_image)
"""
Explanation: adjust_gamma
Adjusts the gamma of an RGB image
End of explanation
"""
image = get_image(IMAGE_PATH)
delta = 0.7 #@param {type: "slider", min: 0, max: 1}
new_image = pix.adjust_hue(
image=image,
delta=delta)
imshow(new_image)
"""
Explanation: adjust_hue
Adjusts the hue of an RGB image by a given amount (delta)
End of explanation
"""
image = get_image(IMAGE_PATH)
factor = 0.42 #@param {type: "slider", min: 0, max: 1}
new_image = pix.adjust_saturation(
image=image,
factor=factor)
imshow(new_image)
"""
Explanation: adjust_saturation
Adjusts the saturation of an RGB image by a given multiplicative amount
End of explanation
"""
image = get_image(IMAGE_PATH)
new_image = pix.flip_left_right(
image=image)
imshow(new_image)
"""
Explanation: flip_left_right
Flips an image along the horizontal axis.
Assumes that the image is either ...HWC or ...CHW and flips the W axis
End of explanation
"""
image = get_image(IMAGE_PATH)
new_image = pix.flip_up_down(
image=image)
imshow(new_image)
"""
Explanation: flip_up_down
Flips an image along the vertical axis.
Assumes that the image is either ...HWC or ...CHW and flips the H axis
End of explanation
"""
image = get_image(IMAGE_PATH)
sigma = 5 #@param {type: "slider", min: 0, max: 10}
kernel_size = 5 #@param{type: "slider",min:0, max:10}
new_image = pix.gaussian_blur(
image=image,
sigma=sigma,
kernel_size=kernel_size)
imshow(new_image)
"""
Explanation: gaussian_blur
Applies a Gaussian blur to the image, controlled by the kernel size and the standard deviation sigma.
End of explanation
"""
key = random.PRNGKey(0) # change to see different brightness
image = get_image(IMAGE_PATH)
delta = 0.9
new_image = pix.random_brightness(
key=key,
image=image,
max_delta=delta)
imshow(new_image)
"""
Explanation: random_brightness
adjust_brightness(...) with random delta in [-max_delta, max_delta]
End of explanation
"""
key = random.PRNGKey(0) # change to see different contrast
image = get_image(IMAGE_PATH)
new_image = pix.random_contrast(
key=key,
image=image,
lower=0,
upper=5)
imshow(new_image)
"""
Explanation: random_contrast
adjust_contrast(...) with random factor in [lower, upper).
End of explanation
"""
key = random.PRNGKey(5) #change to see different crop
image = get_image(IMAGE_PATH)
new_image = pix.random_crop(
key=key,
image=image,
crop_sizes=(128,128,3))
imshow(new_image)
"""
Explanation: random_crop
Crop images randomly to specified sizes.
Given an input image, it crops the image to the specified crop_sizes. If the crop_sizes are smaller than the image's size, the offset for cropping is chosen at random.
End of explanation
"""
key = random.PRNGKey(1) #change to see different views
image = get_image(IMAGE_PATH)
new_image = pix.random_flip_left_right(
key=key,
image=image
)
imshow(new_image)
"""
Explanation: random_flip_left_right
50% chance of flip_left_right(...), otherwise returns the image unchanged.
End of explanation
"""
key = random.PRNGKey(0) #change to see different views
image = get_image(IMAGE_PATH)
new_image = pix.random_flip_up_down(
key=key,
image=image
)
imshow(new_image)
"""
Explanation: random_flip_up_down
50% chance of flip_up_down(...) otherwise returns image unchanged.
End of explanation
"""
key = random.PRNGKey(0) #change to see different views
image = get_image(IMAGE_PATH)
delta = 0.7
new_image = pix.random_hue(
key=key,
image=image,
max_delta=delta)
imshow(new_image)
"""
Explanation: random_hue
adjust_hue(...) with random delta in [-max_delta, max_delta).
End of explanation
"""
key = random.PRNGKey(0) # change to see different saturation
image = get_image(IMAGE_PATH)
new_image = pix.random_saturation(
key=key,
image=image,
lower=0,
upper=5)
imshow(new_image)
"""
Explanation: random_saturation
adjust_saturation(...) with random factor in [lower, upper)
End of explanation
"""
image = get_image(IMAGE_PATH)
new_image = pix.rot90(
k=1,#number of times the rotation is applied
image=image)
imshow(new_image)
"""
Explanation: rot90
Rotate an image counter-clockwise by 90 degrees.
Assumes that the image is either ...HWC or ...CHW
End of explanation
"""
image = get_image(IMAGE_PATH)
new_image = pix.solarize(
threshold=0.6,
image=image)
imshow(new_image)
"""
Explanation: solarize
Applies solarization to an image.
All values above a given threshold will be inverted
End of explanation
"""
|
kimkipyo/dss_git_kkp
|
통계, 머신러닝 복습/160601수_11일차_데이터 전처리 Data Preprocessing, (결정론적)선형 회귀 분석 Linear Regression Analysis/2.회귀 분석용 가상 데이터 생성 방법.ipynb
|
mit
|
from sklearn.datasets import make_regression
X, y, c = make_regression(n_samples=10, n_features=1, bias=0, noise=0, coef=True, random_state=0)
print("X\n", X)
print("y\n", y)
print("c\n", c)
plt.scatter(X, y, s=100)
plt.show()
"""
Explanation: Generating synthetic data for regression analysis
The datasets subpackage of Scikit-learn provides make_regression(), a command that generates synthetic data for testing regression analysis.
http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html
Inputs and outputs
make_regression() takes the following inputs and returns the following outputs.
Inputs
n_samples : integer (optional, default 100)
number of samples
n_features : integer (optional, default 100)
number (dimension) of independent variables (features)
n_targets : integer (optional, default 1)
number (dimension) of dependent variables (targets)
n_informative : integer (optional, default 10)
number of independent variables (features) that are actually correlated with the dependent variable
effective_rank: integer or None (optional, default None)
number of independent variables (features) that are mutually independent; if None, all are independent
tail_strength : real number between 0 and 1 (optional, default 0.5)
when effective_rank is not None, determines the shape of the correlation among the independent variables
bias : real number (optional, default 0.0)
intercept
noise : real number (optional, default 0.0)
standard deviation of the Gaussian noise added to the output, i.e. the dependent variable
coef : boolean (optional, default False)
if True, the coefficients of the linear model are also returned
random_state : integer (optional, default None)
seed for the random number generator
Outputs
X : 2-dimensional array of shape [n_samples, n_features]
sample data of the independent variables
y : 1-dimensional array of shape [n_samples] or 2-dimensional array of shape [n_samples, n_targets]
sample data of the dependent variable
coef : 1-dimensional array of shape [n_features] or 2-dimensional array of shape [n_features, n_targets] (optional)
coefficients of the linear model, returned only when the input argument coef is True
For example, when there is one independent variable and one dependent variable, i.e. the linear model is
$$ y = C_0 + C_1 x + e $$
sample data satisfying this relationship can be generated as follows.
End of explanation
"""
X, y, c = make_regression(n_samples=50, n_features=1, bias=100, noise=10, coef=True, random_state=0)
plt.scatter(X, y, s=100)
plt.show()
print("c\n", c)
"""
Explanation: The linear model above is
$$
y = 100 + 79.1725 x
$$
Increasing the noise argument increases $\text{Var}[e]$, and increasing the bias argument increases the y-intercept.
End of explanation
"""
X, y, c = make_regression(n_samples=300, n_features=2, noise=10, coef=True, random_state=0)
plt.scatter(X[:,0], X[:,1], c=y, s=100)
plt.xlabel("x1")
plt.ylabel("x2")
plt.axis("equal")
plt.show()
"""
Explanation: This time we generate sample data with n_features set so that there are two independent variables, and draw the scatter plot below. The value of the dependent variable is indicated by the shading of the points.
End of explanation
"""
X, y, c = make_regression(n_samples=300, n_features=2, n_informative=1, noise=0, coef=True, random_state=0)
plt.scatter(X[:,0], X[:,1], c=y, s=100)
plt.xlabel("x1")
plt.ylabel("x2")
plt.axis("equal")
plt.show()
"""
Explanation: If only one independent variable actually affects the value of y, the data can be generated as follows.
End of explanation
"""
X, y, c = make_regression(n_samples=300, n_features=2, effective_rank=1, noise=0, tail_strength=0, coef=True, random_state=0)
plt.scatter(X[:,0], X[:,1], c=y, s=100)
plt.xlabel("x1")
plt.ylabel("x2")
plt.axis("equal")
plt.show()
X, y, c = make_regression(n_samples=300, n_features=2, effective_rank=1, noise=0, tail_strength=1, coef=True, random_state=0)
plt.scatter(X[:,0], X[:,1], c=y, s=100)
plt.xlabel("x1")
plt.ylabel("x2")
plt.axis("equal")
plt.show()
"""
Explanation: If the two independent variables are correlated, the data can be generated as follows, and the correlation is also visible in the scatter plot.
End of explanation
"""
|
willettk/insight
|
notebooks/Kyle_Willett_BenignOrNot.ipynb
|
apache-2.0
|
# Load some basic plotting and data analysis packages from Python
%matplotlib inline
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns;
"""
Explanation: Benign or not?
Predicting the incidence of breast cancer diagnosis using multiple cytological characteristics
Kyle Willett (12 Jul 2016)
This project focuses on analyzing results of a study measuring several different physical characteristics of a tumor, and then building a classifier to predict whether the tumor is ultimately benign (good for the patient) or malignant (bad). The data were originally collected from a study at the University of Wisconsin and retrieved from the Machine Learning Repository at UC Irvine.
The data schema are described in detail on the UCI site. The dataset is a single file containing 699 rows, each corresponding to properties of a tumor measured in a particular patient. For each row (instance), there is a (presumably) random ID, nine attributes that measure properties of the tumor, and a class label that indicates whether the tumor was benign or malignant. Each of the nine attributes is scaled to be an integer between [1,10]. The goal is to use the values of these attributes to predict the class.
Wolberg & Mangasarian (1990) achieved an accuracy of 93.5% using two pairs of parallel hyperplanes to separate the data. Our minimum goal is to improve upon that benchmark.
Initial inspection and plotting
End of explanation
"""
# Read in the cleaned dataset as a pandas dataframe.
names = ["sample", "clump_thickness", "uniformity_size", "uniformity_shape",
"adhesion", "single_epithelial", "bare_nuclei", "bland_chromatin",
"normal_nucleoli", "mitoses", "class"]
data = pd.read_csv("../dc/breast-cancer-wisconsin-cleaned.data",names=names)
"""
Explanation: The original data set had 16 rows with at least one missing attribute (designated as "?") in the data. These instances included 14 benign and 2 malignant tumors, which is a significantly different ratio than the roughly 65%-35% distribution of benign-to-malignant classifiers over the entire dataset. This could be an indicator that missing experimental values are correlated with the class label.
Since this is a very small fraction of the total dataset (16/699 = 2.2%), the dataset used in this notebook simply eliminates any row where "?" appears.
End of explanation
"""
# Plot the relationships in the full dataset on a large grid
with sns.plotting_context("notebook", font_scale=2):
g = sns.PairGrid(data[names[1:-1]], diag_sharey=False)
try:
g.map_lower(sns.kdeplot, cmap="Blues_d",dropna=True)
except ValueError:
pass
try:
g.map_upper(plt.scatter)
except ValueError:
pass
try:
g.map_diag(sns.distplot)
except ValueError:
pass
"""
Explanation: As an initial look, let's plot the data using a version of the Seaborn plotting package. Since there are nine attributes, this plots each against each other to visualize the relative correlations.
Plots in the lower left of the grid are 2-D kernel-density estimates (KDE), which show a smoothed version of the relationship between the attributes. Plots in the upper right show the same data as a scatter plot; this is more difficult to interpret since each value can only be an integer from 1 to 10 and most of the points overlap. The plots along the diagonal show both a histogram and a 1-D KDE of each distribution.
End of explanation
"""
# Import the modules for the SVM from scikit-learn
import numpy as np
from sklearn.svm import LinearSVC
from sklearn import cross_validation
"""Load the data
SVM expects the attributes to be an array in the shape (N,M)
and the labels as an array with shape (N,)
where N = number of rows (samples)
and M = number of attributes
"""
X = np.array(data[names[1:-1]])
y = np.array(data[['class']]).ravel()
"""
Explanation: The plot above shows the relationships between all attributes (with the exception of the mitoses vector, which had undetermined mathematical issues measuring a unique KDE). Looking along the diagonals, most of the attributes are relatively unbalanced, having the majority of their values $\leq 2$. The exceptions are clump_thickness and bland_chromatin, which have higher fractions of values $>5$.
Creating a classifier
Selection of an estimator is done in part by assessing the traits of the dataset and the desired predictions. The ultimate goal is to predict categorical data (benign or malignant), so regression models aren't appropriate. The data are pre-labeled with their class, so clustering isn't necessary. The total amount of data is $\lesssim1000$ samples, so a simple implementation of a support vector machine classifier (SVC) will be able to handle it.
SVCs should be useful because they can classify high-dimensional data ($N=9$ here) and have a number of different parameters that can be optimized for the kernel function. The probability estimates for each class can be estimated later using $k$-fold cross-validation.
End of explanation
"""
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
X, y, test_size=0.4, random_state=1)
print X_train.shape, y_train.shape
print X_test.shape, y_test.shape
"""
Explanation: Simple training/test split at 40%
To avoid overfitting the model, the data are split into a training sample on which the model is fit and a test sample on which it is evaluated. This begins with a 60%-40% split between the training and test data.
End of explanation
"""
clf = LinearSVC(C=1).fit(X_train, y_train)
print("Accuracy: %0.2f" % clf.score(X_test, y_test))
"""
Explanation: So there are 409 samples in the training data and 274 in the test data. Now let's fit the model to the training data and assess the accuracy on the test.
End of explanation
"""
clf = LinearSVC(C=1)
scores = cross_validation.cross_val_score(clf, X, y, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
"""
Explanation: This is excellent; without any fine-tuning, our 97% accuracy already exceeds the 93.5% benchmark from the published paper.
Cross-validate
To do a better job of assessing the model accuracy than just using a single training-test split (which could be biased), cross-validation can be used to run the same comparison many times using different splits. The implementation below runs 5-fold validation; the variance between the results gives an estimate on the uncertainty of the accuracy.
End of explanation
"""
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
"""tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
"""
# Limit to a linear kernel; this will allow relative ranking of
# the features
tuned_parameters = [{'C': np.logspace(-2,0,25)}]
scores = ['precision', 'recall']
for score in scores:
print("\nTuning hyper-parameters for %s" % score)
clf = GridSearchCV(LinearSVC(), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print(clf.best_params_)
best_params = clf.best_params_
"""
Explanation: This is a very close result to the single test above, indicating that the initial split was fairly unbiased.
Parameter estimation through grid search
This is initially a good result, but can be fine-tuned. The SVM has several parameters that control the features of the separating hyperplanes, including the nature of the kernel, the kernel coefficient, and the penalty parameter of the error term. These can be optimized to provide a better fit.
To prevent overfitting through optimization, the data set is now split into three sets: training, test, and validation. The SVM parameters will be optimized on the training and test sets, and the accuracy will be evaluated on the validation set.
End of explanation
"""
# Test again with the new parameters and cross-validation
clf = LinearSVC(C=best_params['C']).fit(X_train,y_train)
scores = cross_validation.cross_val_score(clf, X, y, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
"""
Explanation: Tuning the parameters in the grid can optimize for different predictive metrics; the grid above examines both precision and recall. For this particular dataset, the best parameters are the same no matter which metric is optimized, which is convenient. The overall accuracy is still the same as in the initial run.
End of explanation
"""
benign_test = X_test[y_test == 2]
malignant_test = X_test[y_test == 4]
n = len(X_test)
predicted_benign = clf.predict(benign_test)
predicted_malignant = clf.predict(malignant_test)
print "True positive rate: {}/{}".format(sum(predicted_benign == 2),len(benign_test))
print "True negative rate: {}/{}".format(sum(predicted_malignant == 4),len(malignant_test))
print "\nFalse positive rate: {}/{} ({:.1f}% of all cases)".format(
sum(predicted_benign == 4),len(benign_test),sum(predicted_benign == 4)/float(n)*100.)
print "False negative rate: {}/{} ({:.1f}% of all cases)".format(
sum(predicted_malignant == 2),len(malignant_test),sum(predicted_malignant == 2)/float(n)*100.)
"""
Explanation: Results
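For reference, the same counts can be cross-checked with scikit-learn's confusion_matrix (a short sketch using the classifier and test arrays defined above):
python
from sklearn.metrics import confusion_matrix
# Rows are the true classes (2 = benign, 4 = malignant), columns are the predicted classes.
print confusion_matrix(y_test, clf.predict(X_test), labels=[2, 4])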
End of explanation
"""
|
pligor/predicting-future-product-prices
|
04_time_series_prediction/.ipynb_checkpoints/07_price_history_varlen_rnn_cells-checkpoint.ipynb
|
agpl-3.0
|
from __future__ import division
import tensorflow as tf
from os import path
import numpy as np
import pandas as pd
import csv
from sklearn.model_selection import StratifiedShuffleSplit
from time import time
from matplotlib import pyplot as plt
import seaborn as sns
from mylibs.jupyter_notebook_helper import show_graph
from tensorflow.contrib import rnn
from tensorflow.contrib import learn
import shutil
from tensorflow.contrib.learn.python.learn import learn_runner
from IPython.display import Image
from IPython.core.display import HTML
from mylibs.tf_helper import getDefaultGPUconfig
from data_providers.binary_shifter_varlen_data_provider import \
BinaryShifterVarLenDataProvider
from data_providers.price_history_varlen_data_provider import PriceHistoryVarLenDataProvider
from models.model_05_price_history_rnn_varlen import PriceHistoryRnnVarlen
from sklearn.metrics import r2_score
from mylibs.py_helper import factors
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from statsmodels.tsa.stattools import coint
dtype = tf.float32
seed = 16011984
random_state = np.random.RandomState(seed=seed)
config = getDefaultGPUconfig()
%matplotlib inline
from common import get_or_run_nn
"""
Explanation: https://r2rt.com/recurrent-neural-networks-in-tensorflow-iii-variable-length-sequences.html
End of explanation
"""
num_epochs = 10
series_max_len = 60
num_features = 1 #just one here, the function we are predicting is one-dimensional
state_size = 400
target_len = 30
batch_size = 47
"""
Explanation: Step 0 - hyperparams
End of explanation
"""
csv_in = '../price_history_03a_fixed_width.csv'
npz_path = '../price_history_03_dp_60to30_from_fixed_len.npz'
# XX, YY, sequence_lens, seq_mask = PriceHistoryVarLenDataProvider.createAndSaveDataset(
# csv_in=csv_in,
# npz_out=npz_path,
# input_seq_len=60, target_seq_len=30)
# XX.shape, YY.shape, sequence_lens.shape, seq_mask.shape
dp = PriceHistoryVarLenDataProvider(filteringSeqLens = lambda xx : xx >= target_len,
npz_path=npz_path)
dp.inputs.shape, dp.targets.shape, dp.sequence_lengths.shape, dp.sequence_masks.shape
"""
Explanation: Step 1 - collect data (and/or generate them)
End of explanation
"""
model = PriceHistoryRnnVarlen(rng=random_state, dtype=dtype, config=config)
graph = model.getGraph(batch_size=batch_size, state_size=state_size,
rnn_cell= PriceHistoryRnnVarlen.RNN_CELLS.GRU,
target_len=target_len, series_max_len=series_max_len)
show_graph(graph)
"""
Explanation: Step 2 - Build model
End of explanation
"""
rnn_cell = PriceHistoryRnnVarlen.RNN_CELLS.GRU
num_epochs, state_size, batch_size
def experiment():
dynStats, predictions_dict = model.run(epochs=num_epochs,
rnn_cell=rnn_cell,
state_size=state_size,
series_max_len=series_max_len,
target_len=target_len,
npz_path=npz_path,
batch_size=batch_size)
return dynStats, predictions_dict
dyn_stats, preds_dict = get_or_run_nn(experiment,
filename='002_rnn_gru_60to30')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
coint(preds, reals)
cur_ind = np.random.randint(len(dp.targets))
reals = dp.targets[cur_ind]
preds = preds_dict[cur_ind]
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
"""
Explanation: Step 3 training the network
GRU cell
End of explanation
"""
rnn_cell = PriceHistoryRnnVarlen.RNN_CELLS.GRU
num_epochs = 50
state_size, batch_size
def experiment():
dynStats, predictions_dict = model.run(epochs=num_epochs,
rnn_cell=rnn_cell,
state_size=state_size,
series_max_len=series_max_len,
target_len=target_len,
npz_path=npz_path,
batch_size=batch_size)
return dynStats, predictions_dict
dyn_stats, preds_dict = get_or_run_nn(experiment,
filename='002_rnn_gru_60to30_50epochs')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
coint(preds, reals)
cur_ind = np.random.randint(len(dp.targets))
reals = dp.targets[cur_ind]
preds = preds_dict[cur_ind]
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
"""
Explanation: Conclusion
GRU has performed much better than basic RNN
GRU cell - 50 epochs
End of explanation
"""
|
tensorflow/docs-l10n
|
site/en-snapshot/lite/guide/model_analyzer.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2021 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(128, 128)),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model)
"""
Explanation: TensorFlow Lite Model Analyzer
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/guide/model_analyzer"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/guide/model_analyzer.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/guide/model_analyzer.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/guide/model_analyzer.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
TensorFlow Lite Model Analyzer API helps you analyze models in TensorFlow Lite format by listing a model's structure.
Model Analyzer API
The following API is available for the TensorFlow Lite Model Analyzer.
tf.lite.experimental.Analyzer.analyze(model_path=None,
model_content=None,
gpu_compatibility=False)
You can find the API details from https://www.tensorflow.org/api_docs/python/tf/lite/experimental/Analyzer or run help(tf.lite.experimental.Analyzer.analyze) from a Python terminal.
Basic usage with simple Keras model
The following code shows basic usage of Model Analyzer. It shows the contents of the converted Keras model (the TFLite model content), formatted as a flatbuffer object.
End of explanation
"""
model = tf.keras.applications.MobileNetV3Large()
fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model)
"""
Explanation: Basic usage with MobileNetV3Large Keras model
This API works with large models such as MobileNetV3Large. Since the output is large, you might want to browse it with your favorite text editor.
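Assuming the analyzer writes its report through Python's standard output (which is how it appears in a notebook), the report could be redirected to a file for easier browsing; this is only a sketch and has not been verified against every TensorFlow version:
python
import contextlib
with open('mobilenet_v3_analysis.txt', 'w') as report_file:
    with contextlib.redirect_stdout(report_file):
        tf.lite.experimental.Analyzer.analyze(model_content=fb_model)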
End of explanation
"""
import tensorflow as tf
@tf.function(input_signature=[
tf.TensorSpec(shape=[4, 4], dtype=tf.float32)
])
def func(x):
return tf.cosh(x) + tf.slice(x, [1, 1], [1, 1])
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[func.get_concrete_function()], func)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS,
tf.lite.OpsSet.SELECT_TF_OPS,
]
fb_model = converter.convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model, gpu_compatibility=True)
"""
Explanation: Check GPU delegate compatibility
The ModelAnalyzer API provides a way to check the GPU delegate compatibility of the given model by passing the gpu_compatibility=True option.
Case 1: When the model is incompatible
The following code shows how to use the gpu_compatibility=True option for a simple tf.function which uses tf.slice with a 2D tensor and tf.cosh, both of which are not compatible with the GPU delegate.
You will see GPU COMPATIBILITY WARNING for every node which has compatibility issue(s).
End of explanation
"""
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(128, 128)),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model, gpu_compatibility=True)
"""
Explanation: Case 2: When the model is compatible
In this example, the given model is compatible with the GPU delegate.
Note: Even though the tool doesn't find any compatibility issue, it doesn't guarantee that your model works well with the GPU delegate on every device. There could still be runtime incompatibilities, such as a missing CL_DEVICE_IMAGE_SUPPORT feature in the target OpenGL backend.
End of explanation
"""
|
IBMDecisionOptimization/docplex-examples
|
examples/mp/jupyter/lifegame.ipynb
|
apache-2.0
|
import sys
try:
import docplex.mp
except:
raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
"""
Explanation: Using logical constraints: Conway's Game of Life
This tutorial includes everything you need to set up decision optimization engines, build a mathematical programming model, leveraging logical constraints.
When you finish this tutorial, you'll have a foundational knowledge of Prescriptive Analytics.
It requires either an installation of CPLEX Optimizers or it can be run on IBM Cloud Pak for Data as a Service (Sign up for a free IBM Cloud account
and you can start using IBM Cloud Pak for Data as a Service right away).
CPLEX is available on <i>IBM Cloud Pak for Data</i> and <i>IBM Cloud Pak for Data as a Service</i>:
- <i>IBM Cloud Pak for Data as a Service</i>: Depends on the runtime used:
- <i>Python 3.x</i> runtime: Community edition
- <i>Python 3.x + DO</i> runtime: full edition
- <i>Cloud Pak for Data</i>: Community edition is installed by default. Please install the DO add-on in Watson Studio Premium for the full edition
This model is larger than the size allowed in the trial mode of CPLEX.
Table of contents:
Describe the business problem
How decision optimization (prescriptive analytics) can help
Use decision optimization
Step 1: Import the library
Step 2: Set up the prescriptive model
Step 3: Solve the problem with default CPLEX algorithm
Summary
This example demonstrates the Life Game model from Robert Bosch and Michael Trick (CP 2001, CPAIOR 2002) using CPLEX.
The original paper can be found here
It is based on Conway's Game of Life and is a basic integer program with birth constraints.
To begin the game, the player places checkers on some of the cells of the board, creating an initial pattern.
A cell with a checker in it is living and those without are dead.
The pattern is then modified by applying the following rules over and over again (a small code sketch of this update rule is shown right after the list).
* If a cell has exactly two living neighbors, then its state remains the same in the new pattern (if living, it remains living; if dead, it remains dead).
* If a cell has exactly three living neighbors, then it is living in the next pattern. This is a birth condition.
* If a cell has fewer than 2 or more than 3 living neighbors, then it is dead in the next pattern. These are the death by isolation and death by overcrowding conditions, respectively.
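A small pure-Python sketch of this update rule (purely illustrative, and independent of the CPLEX model built below):
python
def next_state(alive, living_neighbors):
    # Return 1 if the cell is alive in the next pattern, 0 otherwise.
    if living_neighbors == 3:
        return 1      # birth, or survival with three neighbors
    if living_neighbors == 2:
        return alive  # exactly two living neighbors: the state is preserved
    return 0          # fewer than 2 or more than 3: death by isolation or overcrowding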
How decision optimization can help
Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes.
Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes.
Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage.
<br/>
<u>With prescriptive analytics, you can:</u>
Automate the complex decisions and trade-offs to better manage your limited resources.
Take advantage of a future opportunity or mitigate a future risk.
Proactively update recommendations based on changing events.
Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes.
Use decision optimization
Step 1: Import the library
Run the following code to import Decision Optimization CPLEX Modeling library. The DOcplex library contains the two modeling packages, Mathematical Programming and Constraint Programming, referred to earlier.
End of explanation
"""
from docplex.mp.model import Model
import math
from collections import namedtuple
Tdv = namedtuple('Tdv', ['dx', 'dy'])
neighbors = [Tdv(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1) if i or j]
assert len(neighbors) == 8
n = 6
assert Model.supports_logical_constraints(), "This model requires logical constraints cplex.version must be 12.80 or higher"
lm = Model(name='game_of_life_{0}'.format(n))
border = range(0, n + 2)
inside = range(1, n + 1)
# one binary var per cell
life = lm.binary_var_matrix(border, border, name=lambda rc: 'life_%d_%d' % rc)
# store the sum of alive neighbors for each interior cell
# (loop variable renamed to nb to avoid shadowing the board size n)
sum_of_neighbors = {(i, j): lm.sum(life[i + nb.dx, j + nb.dy] for nb in neighbors) for i in inside for j in inside}
# all borderline cells are dead
for j in border:
life[0, j].ub = 0
life[j, 0].ub = 0
life[j, n + 1].ub = 0
life[n + 1, j].ub = 0
"""
Explanation: A restart of the kernel might be needed if you updated docplex.
Step 2: Set up the prescriptive model
End of explanation
"""
for i in inside:
for j in inside:
lm.add(2 * life[i, j] <= sum_of_neighbors[i, j])
"""
Explanation: The sum of alive neighbors for an alive cell must be at least 2
End of explanation
"""
for i in inside:
for j in inside:
lm.add(5 * life[i, j] + sum_of_neighbors[i, j] <= 8)
"""
Explanation: The sum of alive neighbors for an alive cell can be at most 3
End of explanation
"""
for i in inside:
for j in inside:
ct3 = sum_of_neighbors[i, j] == 3
lm.add(ct3 <= life[i, j]) # use logical cts here
"""
Explanation: For a dead cell, the sum of alive neighbors cannot be 3
End of explanation
"""
for i in border:
if i < n:
for d in [1, n]:
lm.add(life[i, d] + life[i + 1, d] + life[i + 2, d] <= 2)
lm.add(life[d, i] + life[d, i + 1] + life[d, i + 2] <= 2)
"""
Explanation: Satisfy the 'no 3 consecutive alive cells' condition on the extreme rows and columns
End of explanation
"""
n2 = int(math.ceil(n/2))
half1 = range(1, n2 + 1)
half2 = range(n2 + 1, n)
# there are more alive cells in left side
lm.add(lm.sum(life[i1, j1] for i1 in half1 for j1 in inside) >= lm.sum(life[i2, j2] for i2 in half2 for j2 in inside))
# there are more alive cells in upper side
lm.add(lm.sum(life[i1, j1] for i1 in inside for j1 in half1) >= lm.sum(life[i2, j2] for i2 in inside for j2 in half2))
"""
Explanation: Symmetry breaking
End of explanation
"""
lm.maximize(lm.sum(life))
# add a dummy kpi
nlines = lm.sum( (lm.sum(life[i,j] for j in inside) >= 1) for i in inside)
lm.add_kpi(nlines, 'nlines')
# parameters: branch up, use heuristics, emphasis on opt, threads free
lm.parameters.mip.strategy.branch = 1
lm.parameters.mip.strategy.heuristicfreq = 10
lm.parameters.emphasis.mip = 2
lm.parameters.threads = 0
# store data items as fields
lm.size = n
lm.life = life
border3 = range(1, lm.size-1, 3)
life_vars = lm.life
vvmap = {}
for i in border3:
for j in border3:
vvmap[life_vars[i, j]] = 1
vvmap[life_vars[i+1, j]] = 1
vvmap[life_vars[i, j+1]] = 1
vvmap[life_vars[i+1, j+1]] = 1
ini_s = lm.new_solution(vvmap)
assert ini_s.is_valid_solution(), 'error in initial solution'
lm.add_mip_start(ini_s)
"""
Explanation: Setting up the objective: find maximum number of alive cells
End of explanation
"""
assert lm.solve(log_output=True), "!!! Solve of the model fails"
lm.report()
def lifegame_solution_to_matrix(mdl):
rr = range(0, mdl.size+2)
life_vars = mdl.life
array2 = [[life_vars[i, j].solution_value for j in rr] for i in rr]
return array2
print(lifegame_solution_to_matrix(lm))
"""
Explanation: Step 3: Solve the problem with default CPLEX algorithm
End of explanation
"""
|
EnergyID/opengrid
|
scripts/SynchronizeData.ipynb
|
gpl-2.0
|
import os, sys
import inspect
# Obtain path of the opengrid codebase and import opengrid libraries
script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(os.path.join(script_dir, os.pardir, os.pardir))
from opengrid.library import fluksoapi
from opengrid.library import config
c = config.Config()
# for tmpo, only use the user-specified path if a correct path is provided
# (default to None first, so the variable is defined even when the configured path does not exist)
path_to_tmpo_data = None
try:
    if os.path.exists(c.get('tmpo', 'data')):
        path_to_tmpo_data = c.get('tmpo', 'data')
except:
    path_to_tmpo_data = None
"""
Explanation: Synchronize the opengrid data to your computer.
There are two different solutions.
The first approach is based on the tmpo data format and uses a local database. This approach is more elegant, but does not cover historic data from before 23rd of October 2014.
The second is based on csv files that are stored on the opengrid webserver. These csv files are extracted from the flukso webserver and stored as a zip file per day. This script will only download missing files and convert all the data into a single csv per sensor. This approach covers all available opengrid data for now.
End of explanation
"""
sys.path.append(c.get('tmpo', 'folder'))
import tmpo
from opengrid.library import houseprint
"""
Explanation: TMPO-based synchronization
End of explanation
"""
%run cache_anonymous_houseprint.py
"""
Explanation: Run the cell below if you want to obtain the latest houseprint information
End of explanation
"""
tmpos = tmpo.Session(path_to_tmpo_data)
tmpos.debug = True
hp = houseprint.load_houseprint_from_file('new_houseprint.pkl')
hp.init_tmpo(tmpos)
"""
Explanation: Create a tmpo session and load the houseprint
End of explanation
"""
hp.sync_tmpos()
"""
Explanation: Make sure all known sensors of the houseprint are added to the tmpo database. Then synchronize all data up to now.
End of explanation
"""
# path to data is stored in opengrid.cfg.
# for syncing with csv
path_to_csv_data = c.get('data', 'folder')
# This synchronization can take a while...
fluksoapi.synchronize(path_to_csv_data)
# see the other notebooks on how to import the data and start analysing.
"""
Explanation: CSV-based synchronization
End of explanation
"""
|
azhurb/deep-learning
|
sentiment_network/Sentiment Classification - How to Best Frame a Problem for a Neural Network (Project 4).ipynb
|
mit
|
def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
"""
Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network
by Andrew Trask
Twitter: @iamtrask
Blog: http://iamtrask.github.io
What You Should Already Know
neural networks, forward and back-propagation
stochastic gradient descent
mean squared error
and train/test splits
Where to Get Help if You Need it
Re-watch previous Udacity Lectures
Leverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17)
Shoot me a tweet @iamtrask
Tutorial Outline:
Intro: The Importance of "Framing a Problem"
Curate a Dataset
Developing a "Predictive Theory"
PROJECT 1: Quick Theory Validation
Transforming Text to Numbers
PROJECT 2: Creating the Input/Output Data
Putting it all together in a Neural Network
PROJECT 3: Building our Neural Network
Understanding Neural Noise
PROJECT 4: Making Learning Faster by Reducing Noise
Analyzing Inefficiencies in our Network
PROJECT 5: Making our Network Train and Run Faster
Further Noise Reduction
PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary
Analysis: What's going on in the weights?
Lesson: Curate a Dataset
End of explanation
"""
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
"""
Explanation: Lesson: Develop a Predictive Theory
End of explanation
"""
from collections import Counter
import numpy as np
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
positive_counts.most_common()
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt > 100):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
"""
Explanation: Project 1: Quick Theory Validation
End of explanation
"""
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
"""
Explanation: Transforming Text into Numbers
End of explanation
"""
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)
list(vocab)
import numpy as np
layer_0 = np.zeros((1,vocab_size))
layer_0
from IPython.display import Image
Image(filename='sentiment_network.png')
word2index = {}
for i,word in enumerate(vocab):
word2index[word] = i
word2index
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
def get_target_for_label(label):
if(label == 'POSITIVE'):
return 1
else:
return 0
labels[0]
get_target_for_label(labels[0])
labels[1]
get_target_for_label(labels[1])
"""
Explanation: Project 2: Creating the Input/Output Data
End of explanation
"""
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
# set our random number generator
np.random.seed(1)
self.pre_process_data(reviews, labels)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
if(word in self.word2index.keys()):
self.layer_0[0][self.word2index[word]] += 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
self.update_input_layer(review)
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# TODO: Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# TODO: Update the weights
self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
if(i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: Project 3: Building a Neural Network
Start with your neural network from the last chapter
3 layer neural network
no non-linearity in hidden layer
use our functions to create the training data
create a "pre_process_data" function to create vocabulary for our training data generating functions
modify "train" to train over the entire corpus
Where to Get Help if You Need it
Re-watch previous week's Udacity Lectures
Chapters 3-5 - Grokking Deep Learning - (40% Off: traskud17)
End of explanation
"""
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
review_counter = Counter()
for word in reviews[0].split(" "):
review_counter[word] += 1
review_counter.most_common()
"""
Explanation: Understanding Neural Noise
End of explanation
"""
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
# set our random number generator
np.random.seed(1)
self.pre_process_data(reviews, labels)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
if(word in self.word2index.keys()):
self.layer_0[0][self.word2index[word]] = 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
self.update_input_layer(review)
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# TODO: Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# TODO: Update the weights
self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
if(i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
"""
Explanation: Project 4: Reducing Noise in our Input Data
End of explanation
"""
|
mbeyeler/opencv-machine-learning
|
notebooks/09.02-Implementing-a-Multi-Layer-Perceptron-in-OpenCV.ipynb
|
mit
|
from sklearn.datasets.samples_generator import make_blobs
X_raw, y_raw = make_blobs(n_samples=100, centers=2,
cluster_std=5.2, random_state=42)
"""
Explanation: <!--BOOK_INFORMATION-->
<a href="https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv" target="_blank"><img align="left" src="data/cover.jpg" style="width: 76px; height: 100px; background: white; padding: 1px; border: 1px solid black; margin-right:10px;"></a>
This notebook contains an excerpt from the book Machine Learning for OpenCV by Michael Beyeler.
The code is released under the MIT license,
and is available on GitHub.
Note that this excerpt contains only the raw code - the book is rich with additional explanations and illustrations.
If you find this content useful, please consider supporting the work by
buying the book!
<!--NAVIGATION-->
< Understanding Perceptrons | Contents | Getting Acquainted with Deep Learning >
Implementing a Multi-Layer Perceptron (MLP) in OpenCV
In order to create nonlinear decision boundaries, we can combine multiple perceptrons to
form a larger network. This is also known as a multilayer perceptron (MLP). MLPs usually
consist of at least three layers, where the first layer has a node (or neuron) for every input
feature of the dataset, and the last layer has a node for every class label. The layer in
between is called the hidden layer.
Loading and preprocessing the data
Implementing an MLP in OpenCV uses the same syntax that we have seen at least a dozen
times before. In order to see how an MLP compares to a single perceptron, we will operate
on the same toy data as before:
End of explanation
"""
import numpy as np
X = X_raw.astype(np.float32)
"""
Explanation: Preprocessing the data
However, since we are working with OpenCV, this time, we want to make sure the input
matrix is made up of 32-bit floating point numbers, otherwise the code will break:
End of explanation
"""
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(sparse=False, dtype=np.float32)
y = enc.fit_transform(y_raw.reshape(-1, 1))
"""
Explanation: Furthermore, we need to think back to Chapter 4, Representing Data and Engineering Features, and remember how to represent categorical variables. We need to find a way to
represent target labels, not as integers but with a one-hot encoding. The easiest way to
achieve this is by using scikit-learn's preprocessing module:
End of explanation
"""
import cv2
mlp = cv2.ml.ANN_MLP_create()
"""
Explanation: Creating an MLP classifier in OpenCV
The syntax to create an MLP in OpenCV is the same as for all the other classifiers:
End of explanation
"""
n_input = 2
n_hidden = 8
n_output = 2
mlp.setLayerSizes(np.array([n_input, n_hidden, n_output]))
"""
Explanation: However, now we need to specify how many layers we want in the network and how many
neurons there are per layer. We do this with a list of integers, which specify the number of
neurons in each layer. Since the data matrix X has two features, the first layer should also
have two neurons in it (n_input). Since the output has two different values, the last layer
should also have two neurons in it (n_output). In between these two layers, we can put as
many hidden layers with as many neurons as we want. Let's choose a single hidden layer
with an arbitrary number of eight neurons in it (n_hidden):
End of explanation
"""
mlp.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2.5, 1.0)
"""
Explanation: Customizing the MLP classifier
Before we move on to training the classifier, we can customize the MLP classifier via a
number of optional settings:
- mlp.setActivationFunction: This defines the activation function to be used for every neuron in the network
- mlp.setTrainMethod: This defines a suitable training method
- mlp.setTermCriteria: This sets the termination criteria of the training phase
Whereas our home-brewed perceptron classifier used a linear activation function, OpenCV
provides two additional options:
- cv2.ml.ANN_MLP_IDENTITY: This is the linear activation function, $f(x) = x$.
- cv2.ml.ANN_MLP_SIGMOID_SYM: This is the symmetrical sigmoid function (also known as hyperbolic tangent), $f(x) = \beta (1 - \exp(-\alpha x)) / (1 + \exp(-\alpha x))$. Whereas $\alpha$ controls the slope of the function, $\beta$ defines the upper and lower bounds of the output.
- cv2.ml.ANN_MLP_GAUSSIAN: This is the Gaussian function (also known as the bell curve), $f(x) = \beta \exp(-\alpha x^2)$. Whereas $\alpha$ controls the slope of the function, $\beta$ defines the upper bound of the output.
In this example, we will use a proper sigmoid function that squashes the input values into
the range [-1, 1]. We do this by choosing $\alpha = 2.5$ and $\beta = 1.0$:
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
alpha = 2.5
beta = 1.0
x_sig = np.linspace(-1.0, 1.0, 100)
y_sig = beta * (1.0 - np.exp(-alpha * x_sig))
y_sig /= (1 + np.exp(-alpha * x_sig))
plt.figure(figsize=(10, 6))
plt.plot(x_sig, y_sig, linewidth=3)
plt.xlabel('x')
plt.ylabel('y')
"""
Explanation: If you are curious what this activation function looks like, we can take a short excursion
with Matplotlib:
End of explanation
"""
mlp.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
"""
Explanation: As mentioned in the preceding part, a training method can be set via
mlp.setTrainMethod. The following methods are available:
- cv2.ml.ANN_MLP_BACKPROP: This is the backpropagation algorithm we talked about previously. You can set additional scaling factors via mlp.setBackpropMomentumScale and mlp.setBackpropWeightScale.
- cv2.ml.ANN_MLP_RPROP: This is the Rprop algorithm, which is short for resilient backpropagation. We won't have time to discuss this algorithm, but you can set additional parameters of this algorithm via mlp.setRpropDW0, mlp.setRpropDWMax, mlp.setRpropDWMin, mlp.setRpropDWMinus, and mlp.setRpropDWPlus.
In this example, we will choose backpropagation:
End of explanation
"""
term_mode = cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS
term_max_iter = 300
term_eps = 0.01
mlp.setTermCriteria((term_mode, term_max_iter, term_eps))
"""
Explanation: Lastly, we can specify the criteria that must be met for training to end via
mlp.setTermCriteria. This works the same for every classifier in OpenCV and is closely
tied to the underlying C++ functionality. We first tell OpenCV which criteria we are going to
specify (for example, the maximum number of iterations). Then we specify the value for this
criterion. All values must be delivered in a tuple.
End of explanation
"""
mlp.train(X, cv2.ml.ROW_SAMPLE, y)
"""
Explanation: Training and testing the MLP classifier
This is the easy part. Training the MLP classifier is the same as with all other classifiers:
End of explanation
"""
_, y_hat = mlp.predict(X)
"""
Explanation: The same goes for predicting target labels:
End of explanation
"""
from sklearn.metrics import accuracy_score
accuracy_score(y_hat.round(), y)
"""
Explanation: The easiest way to measure accuracy is by using scikit-learn's helper function:
End of explanation
"""
def plot_decision_boundary(classifier, X_test, y_test):
# create a mesh to plot in
h = 0.02 # step size in mesh
x_min, x_max = X_test[:, 0].min() - 1, X_test[:, 0].max() + 1
y_min, y_max = X_test[:, 1].min() - 1, X_test[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
X_hypo = np.c_[xx.ravel().astype(np.float32),
yy.ravel().astype(np.float32)]
_, zz = classifier.predict(X_hypo)
zz = np.argmax(zz, axis=1)
zz = zz.reshape(xx.shape)
plt.contourf(xx, yy, zz, cmap=plt.cm.coolwarm, alpha=0.8)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, s=200)
plt.figure(figsize=(10, 6))
plot_decision_boundary(mlp, X, y_raw)
"""
Explanation: It looks like we were able to increase our performance from 81% with a single perceptron to
84% with an MLP consisting of eight hidden-layer neurons and two output neurons. In order
to see what changed, we can look at the decision boundary one more time:
End of explanation
"""
|
huajianmao/learning
|
coursera/deep-learning/5.nlp-sequence-models/week1/Dinosaurus Island -- Character level language model final - v3.ipynb
|
mit
|
import numpy as np
from utils import *
import random
"""
Explanation: Character level language model - Dinosaurus land
Welcome to Dinosaurus Island! 65 million years ago, dinosaurs existed, and in this assignment they are back. You are in charge of a special task. Leading biology researchers are creating new breeds of dinosaurs and bringing them to life on earth, and your job is to give names to these dinosaurs. If a dinosaur does not like its name, it might go berserk, so choose wisely!
<table>
<td>
<img src="images/dino.jpg" style="width:250;height:300px;">
</td>
</table>
Luckily you have learned some deep learning and you will use it to save the day. Your assistant has collected a list of all the dinosaur names they could find, and compiled them into this dataset. (Feel free to take a look by clicking the previous link.) To create new dinosaur names, you will build a character level language model to generate new names. Your algorithm will learn the different name patterns, and randomly generate new names. Hopefully this algorithm will keep you and your team safe from the dinosaurs' wrath!
By completing this assignment you will learn:
How to store text data for processing using an RNN
How to synthesize data, by sampling predictions at each time step and passing it to the next RNN-cell unit
How to build a character-level text generation recurrent neural network
Why clipping the gradients is important
We will begin by loading in some functions that we have provided for you in rnn_utils. Specifically, you have access to functions such as rnn_forward and rnn_backward which are equivalent to those you've implemented in the previous assignment.
End of explanation
"""
data = open('dinos.txt', 'r').read()
data = data.lower()
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('There are %d total characters and %d unique characters in your data.' % (data_size, vocab_size))
"""
Explanation: 1 - Problem Statement
1.1 - Dataset and Preprocessing
Run the following cell to read the dataset of dinosaur names, create a list of unique characters (such as a-z), and compute the dataset and vocabulary size.
End of explanation
"""
char_to_ix = { ch:i for i,ch in enumerate(sorted(chars)) }
ix_to_char = { i:ch for i,ch in enumerate(sorted(chars)) }
print(ix_to_char)
"""
Explanation: The characters are a-z (26 characters) plus the "\n" (or newline character), which in this assignment plays a role similar to the <EOS> (or "End of sentence") token we had discussed in lecture, only here it indicates the end of the dinosaur name rather than the end of a sentence. In the cell below, we create a python dictionary (i.e., a hash table) to map each character to an index from 0-26. We also create a second python dictionary that maps each index back to the corresponding character. This will help you figure out what index corresponds to what character in the probability distribution output of the softmax layer. Below, char_to_ix and ix_to_char are the python dictionaries.
End of explanation
"""
### GRADED FUNCTION: clip
def clip(gradients, maxValue):
'''
Clips the gradients' values between minimum and maximum.
Arguments:
gradients -- a dictionary containing the gradients "dWaa", "dWax", "dWya", "db", "dby"
maxValue -- everything above this number is set to this number, and everything less than -maxValue is set to -maxValue
Returns:
gradients -- a dictionary with the clipped gradients.
'''
dWaa, dWax, dWya, db, dby = gradients['dWaa'], gradients['dWax'], gradients['dWya'], gradients['db'], gradients['dby']
### START CODE HERE ###
# clip to mitigate exploding gradients, loop over [dWax, dWaa, dWya, db, dby]. (≈2 lines)
for gradient in [dWax, dWaa, dWya, db, dby]:
np.clip(gradient, -maxValue, maxValue, out=gradient)
### END CODE HERE ###
gradients = {"dWaa": dWaa, "dWax": dWax, "dWya": dWya, "db": db, "dby": dby}
return gradients
np.random.seed(3)
dWax = np.random.randn(5,3)*10
dWaa = np.random.randn(5,5)*10
dWya = np.random.randn(2,5)*10
db = np.random.randn(5,1)*10
dby = np.random.randn(2,1)*10
gradients = {"dWax": dWax, "dWaa": dWaa, "dWya": dWya, "db": db, "dby": dby}
gradients = clip(gradients, 10)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
"""
Explanation: 1.2 - Overview of the model
Your model will have the following structure:
Initialize parameters
Run the optimization loop
Forward propagation to compute the loss function
Backward propagation to compute the gradients with respect to the loss function
Clip the gradients to avoid exploding gradients
Using the gradients, update your parameter with the gradient descent update rule.
Return the learned parameters
<img src="images/rnn1.png" style="width:450;height:300px;">
<caption><center> Figure 1: Recurrent Neural Network, similar to what you had built in the previous notebook "Building a RNN - Step by Step". </center></caption>
At each time-step, the RNN tries to predict what is the next character given the previous characters. The dataset $X = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is a list of characters in the training set, while $Y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$ is such that at every time-step $t$, we have $y^{\langle t \rangle} = x^{\langle t+1 \rangle}$.
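As a concrete illustration of this convention (a sketch only; the exact construction used later in the assignment may differ, and the example name is made up), one (X, Y) pair could be built from a single name with the char_to_ix mapping defined earlier:
python
example = "turiasaurus"                          # a made-up dinosaur name
X = [None] + [char_to_ix[ch] for ch in example]  # None stands in for the initial zero input
Y = X[1:] + [char_to_ix["\n"]]                   # labels = inputs shifted by one step, terminated by the newline character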
2 - Building blocks of the model
In this part, you will build two important blocks of the overall model:
- Gradient clipping: to avoid exploding gradients
- Sampling: a technique used to generate characters
You will then apply these two functions to build the model.
2.1 - Clipping the gradients in the optimization loop
In this section you will implement the clip function that you will call inside of your optimization loop. Recall that your overall loop structure usually consists of a forward pass, a cost computation, a backward pass, and a parameter update. Before updating the parameters, you will perform gradient clipping when needed to make sure that your gradients are not "exploding," meaning taking on overly large values.
In the exercise below, you will implement a function clip that takes in a dictionary of gradients and returns a clipped version of gradients if needed. There are different ways to clip gradients; we will use a simple element-wise clipping procedure, in which every element of the gradient vector is clipped to lie between some range [-N, N]. More generally, you will provide a maxValue (say 10). In this example, if any component of the gradient vector is greater than 10, it would be set to 10; and if any component of the gradient vector is less than -10, it would be set to -10. If it is between -10 and 10, it is left alone.
<img src="images/clip.png" style="width:400;height:150px;">
<caption><center> Figure 2: Visualization of gradient descent with and without gradient clipping, in a case where the network is running into slight "exploding gradient" problems. </center></caption>
Exercise: Implement the function below to return the clipped gradients of your dictionary gradients. Your function takes in a maximum threshold and returns the clipped versions of your gradients. You can check out this hint for examples of how to clip in numpy. You will need to use the argument out = ....
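For instance, np.clip can clip an array in place through the out argument (a minimal illustration):
python
a = np.array([-12., 3., 15.])
np.clip(a, -10, 10, out=a)   # a is now array([-10., 3., 10.])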
End of explanation
"""
# GRADED FUNCTION: sample
def sample(parameters, char_to_ix, seed):
"""
Sample a sequence of characters according to a sequence of probability distributions output of the RNN
Arguments:
parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b.
char_to_ix -- python dictionary mapping each character to an index.
seed -- used for grading purposes. Do not worry about it.
Returns:
indices -- a list of length n containing the indices of the sampled characters.
"""
# Retrieve parameters and relevant shapes from "parameters" dictionary
Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']
vocab_size = by.shape[0]
n_a = Waa.shape[1]
### START CODE HERE ###
# Step 1: Create the one-hot vector x for the first character (initializing the sequence generation). (≈1 line)
x = np.zeros((vocab_size, 1))
# Step 1': Initialize a_prev as zeros (≈1 line)
a_prev = np.zeros((n_a, 1))
# Create an empty list of indices, this is the list which will contain the list of indices of the characters to generate (≈1 line)
indices = []
# Idx is a flag to detect a newline character, we initialize it to -1
idx = -1
# Loop over time-steps t. At each time-step, sample a character from a probability distribution and append
# its index to "indices". We'll stop if we reach 50 characters (which should be very unlikely with a well
# trained model), which helps debugging and prevents entering an infinite loop.
counter = 0
newline_character = char_to_ix['\n']
while (idx != newline_character and counter != 50):
# Step 2: Forward propagate x using the equations (1), (2) and (3)
a = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)
z = np.dot(Wya, a) + by
y = softmax(z)
# for grading purposes
np.random.seed(counter + seed)
# Step 3: Sample the index of a character within the vocabulary from the probability distribution y
idx = np.random.choice(list(range(vocab_size)), p=y.ravel())
# Append the index to "indices"
indices.append(idx)
# Step 4: Overwrite the input character as the one corresponding to the sampled index.
x = np.zeros((vocab_size, 1))
x[idx] = 1
# Update "a_prev" to be "a"
a_prev = a
# for grading purposes
seed += 1
counter +=1
### END CODE HERE ###
if (counter == 50):
indices.append(char_to_ix['\n'])
return indices
np.random.seed(2)
_, n_a = 20, 100
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
indices = sample(parameters, char_to_ix, 0)
print("Sampling:")
print("list of sampled indices:", indices)
print("list of sampled characters:", [ix_to_char[i] for i in indices])
"""
Explanation: Expected output:
<table>
<tr>
<td>
**gradients["dWaa"][1][2] **
</td>
<td>
10.0
</td>
</tr>
<tr>
<td>
**gradients["dWax"][3][1]**
</td>
<td>
-10.0
</td>
</td>
</tr>
<tr>
<td>
**gradients["dWya"][1][2]**
</td>
<td>
0.29713815361
</td>
</tr>
<tr>
<td>
**gradients["db"][4]**
</td>
<td>
[ 10.]
</td>
</tr>
<tr>
<td>
**gradients["dby"][1]**
</td>
<td>
[ 8.45833407]
</td>
</tr>
</table>
2.2 - Sampling
Now assume that your model is trained. You would like to generate new text (characters). The process of generation is explained in the picture below:
<img src="images/dinos3.png" style="width:500;height:300px;">
<caption><center> Figure 3: In this picture, we assume the model is already trained. We pass in $x^{\langle 1\rangle} = \vec{0}$ at the first time step, and have the network then sample one character at a time. </center></caption>
Exercise: Implement the sample function below to sample characters. You need to carry out 4 steps:
Step 1: Pass the network the first "dummy" input $x^{\langle 1 \rangle} = \vec{0}$ (the vector of zeros). This is the default input before we've generated any characters. We also set $a^{\langle 0 \rangle} = \vec{0}$
Step 2: Run one step of forward propagation to get $a^{\langle 1 \rangle}$ and $\hat{y}^{\langle 1 \rangle}$. Here are the equations:
$$ a^{\langle t+1 \rangle} = \tanh(W_{ax} x^{\langle t \rangle } + W_{aa} a^{\langle t \rangle } + b)\tag{1}$$
$$ z^{\langle t + 1 \rangle } = W_{ya} a^{\langle t + 1 \rangle } + b_y \tag{2}$$
$$ \hat{y}^{\langle t+1 \rangle } = softmax(z^{\langle t + 1 \rangle })\tag{3}$$
Note that $\hat{y}^{\langle t+1 \rangle }$ is a (softmax) probability vector (its entries are between 0 and 1 and sum to 1). $\hat{y}^{\langle t+1 \rangle}_i$ represents the probability that the character indexed by "i" is the next character. We have provided a softmax() function that you can use.
Step 3: Carry out sampling: Pick the next character's index according to the probability distribution specified by $\hat{y}^{\langle t+1 \rangle }$. This means that if $\hat{y}^{\langle t+1 \rangle }_i = 0.16$, you will pick the index "i" with 16% probability. To implement it, you can use np.random.choice.
Here is an example of how to use np.random.choice():
python
np.random.seed(0)
p = np.array([0.1, 0.0, 0.7, 0.2])
index = np.random.choice([0, 1, 2, 3], p = p.ravel())
This means that you will pick the index according to the distribution:
$P(index = 0) = 0.1, P(index = 1) = 0.0, P(index = 2) = 0.7, P(index = 3) = 0.2$.
Step 4: The last step to implement in sample() is to overwrite the variable x, which currently stores $x^{\langle t \rangle }$, with the value of $x^{\langle t + 1 \rangle }$. You will represent $x^{\langle t + 1 \rangle }$ by creating a one-hot vector corresponding to the character you've chosen as your prediction. You will then forward propagate $x^{\langle t + 1 \rangle }$ in Step 1 and keep repeating the process until you get a "\n" character, indicating you've reached the end of the dinosaur name.
End of explanation
"""
# GRADED FUNCTION: optimize
def optimize(X, Y, a_prev, parameters, learning_rate = 0.01):
"""
Execute one step of the optimization to train the model.
Arguments:
X -- list of integers, where each integer is a number that maps to a character in the vocabulary.
Y -- list of integers, exactly the same as X but shifted one index to the left.
a_prev -- previous hidden state.
parameters -- python dictionary containing:
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
b -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
learning_rate -- learning rate for the model.
Returns:
loss -- value of the loss function (cross-entropy)
gradients -- python dictionary containing:
dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)
dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)
dWya -- Gradients of hidden-to-output weights, of shape (n_y, n_a)
db -- Gradients of bias vector, of shape (n_a, 1)
dby -- Gradients of output bias vector, of shape (n_y, 1)
a[len(X)-1] -- the last hidden state, of shape (n_a, 1)
"""
### START CODE HERE ###
# Forward propagate through time (≈1 line)
loss, cache = rnn_forward(X, Y, a_prev, parameters)
# Backpropagate through time (≈1 line)
gradients, a = rnn_backward(X, Y, parameters, cache)
# Clip your gradients between -5 (min) and 5 (max) (≈1 line)
gradients = clip(gradients, 5)
# Update parameters (≈1 line)
parameters = update_parameters(parameters, gradients, learning_rate)
### END CODE HERE ###
return loss, gradients, a[len(X)-1]
np.random.seed(1)
vocab_size, n_a = 27, 100
a_prev = np.random.randn(n_a, 1)
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
X = [12,3,5,11,22,3]
Y = [4,14,11,22,25, 26]
loss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)
print("Loss =", loss)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("np.argmax(gradients[\"dWax\"]) =", np.argmax(gradients["dWax"]))
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
print("a_last[4] =", a_last[4])
"""
Explanation: Expected output:
<table>
<tr>
<td>
**list of sampled indices:**
</td>
<td>
[12, 17, 24, 14, 13, 9, 10, 22, 24, 6, 13, 11, 12, 6, 21, 15, 21, 14, 3, 2, 1, 21, 18, 24, <br>
7, 25, 6, 25, 18, 10, 16, 2, 3, 8, 15, 12, 11, 7, 1, 12, 10, 2, 7, 7, 11, 5, 6, 12, 25, 0, 0]
</td>
</tr><tr>
<td>
**list of sampled characters:**
</td>
<td>
['l', 'q', 'x', 'n', 'm', 'i', 'j', 'v', 'x', 'f', 'm', 'k', 'l', 'f', 'u', 'o', <br>
'u', 'n', 'c', 'b', 'a', 'u', 'r', 'x', 'g', 'y', 'f', 'y', 'r', 'j', 'p', 'b', 'c', 'h', 'o', <br>
'l', 'k', 'g', 'a', 'l', 'j', 'b', 'g', 'g', 'k', 'e', 'f', 'l', 'y', '\n', '\n']
</td>
</tr>
</table>
3 - Building the language model
It is time to build the character-level language model for text generation.
3.1 - Gradient descent
In this section you will implement a function performing one step of stochastic gradient descent (with clipped gradients). You will go through the training examples one at a time, so the optimization algorithm will be stochastic gradient descent. As a reminder, here are the steps of a common optimization loop for an RNN:
Forward propagate through the RNN to compute the loss
Backward propagate through time to compute the gradients of the loss with respect to the parameters
Clip the gradients if necessary
Update your parameters using gradient descent
Exercise: Implement this optimization process (one step of stochastic gradient descent).
We provide you with the following functions:
```python
def rnn_forward(X, Y, a_prev, parameters):
""" Performs the forward propagation through the RNN and computes the cross-entropy loss.
It returns the loss' value as well as a "cache" storing values to be used in the backpropagation."""
....
return loss, cache
def rnn_backward(X, Y, parameters, cache):
""" Performs the backward propagation through time to compute the gradients of the loss with respect
to the parameters. It returns also all the hidden states."""
...
return gradients, a
def update_parameters(parameters, gradients, learning_rate):
""" Updates parameters using the Gradient Descent Update Rule."""
...
return parameters
```
End of explanation
"""
# GRADED FUNCTION: model
def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27):
"""
Trains the model and generates dinosaur names.
Arguments:
data -- text corpus
ix_to_char -- dictionary that maps the index to a character
char_to_ix -- dictionary that maps a character to an index
num_iterations -- number of iterations to train the model for
n_a -- number of units of the RNN cell
dino_names -- number of dinosaur names you want to sample at each iteration.
vocab_size -- number of unique characters found in the text, size of the vocabulary
Returns:
parameters -- learned parameters
"""
# Retrieve n_x and n_y from vocab_size
n_x, n_y = vocab_size, vocab_size
# Initialize parameters
parameters = initialize_parameters(n_a, n_x, n_y)
# Initialize loss (this is required because we want to smooth our loss, don't worry about it)
loss = get_initial_loss(vocab_size, dino_names)
# Build list of all dinosaur names (training examples).
with open("dinos.txt") as f:
examples = f.readlines()
examples = [x.lower().strip() for x in examples]
# Shuffle list of all dinosaur names
np.random.seed(0)
np.random.shuffle(examples)
# Initialize the hidden state of your LSTM
a_prev = np.zeros((n_a, 1))
# Optimization loop
for j in range(num_iterations):
### START CODE HERE ###
# Use the hint above to define one training example (X,Y) (≈ 2 lines)
index = j % len(examples)
X = [None] + [char_to_ix[ch] for ch in examples[index]]
Y = X[1:] + [char_to_ix["\n"]]
# Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters
# Choose a learning rate of 0.01
curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters)
### END CODE HERE ###
# Use a latency trick to keep the loss smooth. It happens here to accelerate the training.
loss = smooth(loss, curr_loss)
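# (Note: the provided smooth() helper is not shown in this notebook; it is
# presumably an exponentially weighted moving average of the loss, e.g.
# roughly loss = 0.999 * loss + 0.001 * curr_loss.)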
# Every 2000 Iteration, generate "n" characters thanks to sample() to check if the model is learning properly
if j % 2000 == 0:
print('Iteration: %d, Loss: %f' % (j, loss) + '\n')
# The number of dinosaur names to print
seed = 0
for name in range(dino_names):
# Sample indices and print them
sampled_indices = sample(parameters, char_to_ix, seed)
print_sample(sampled_indices, ix_to_char)
seed += 1 # To get the same result for grading purposed, increment the seed by one.
print('\n')
return parameters
"""
Explanation: Expected output:
<table>
<tr>
<td>
**Loss **
</td>
<td>
126.503975722
</td>
</tr>
<tr>
<td>
**gradients["dWaa"][1][2]**
</td>
<td>
0.194709315347
</td>
<tr>
<td>
**np.argmax(gradients["dWax"])**
</td>
<td> 93
</td>
</tr>
<tr>
<td>
**gradients["dWya"][1][2]**
</td>
<td> -0.007773876032
</td>
</tr>
<tr>
<td>
**gradients["db"][4]**
</td>
<td> [-0.06809825]
</td>
</tr>
<tr>
<td>
**gradients["dby"][1]**
</td>
<td>[ 0.01538192]
</td>
</tr>
<tr>
<td>
**a_last[4]**
</td>
<td> [-1.]
</td>
</tr>
</table>
3.2 - Training the model
Given the dataset of dinosaur names, we use each line of the dataset (one name) as one training example. Every 2000 steps of stochastic gradient descent, you will print 7 sampled names to see how the algorithm is doing. Remember to shuffle the dataset, so that stochastic gradient descent visits the examples in random order.
Exercise: Follow the instructions and implement model(). When examples[index] contains one dinosaur name (string), to create an example (X, Y), you can use this:
python
index = j % len(examples)
X = [None] + [char_to_ix[ch] for ch in examples[index]]
Y = X[1:] + [char_to_ix["\n"]]
Note that we use index = j % len(examples), where j = 1, ..., num_iterations, to make sure that examples[index] is always a valid expression (index is smaller than len(examples)).
The first entry of X being None will be interpreted by rnn_forward() as setting $x^{\langle 0 \rangle} = \vec{0}$. Further, this ensures that Y is equal to X but shifted one step to the left, and with an additional "\n" appended to signify the end of the dinosaur name.
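As a concrete (hypothetical) illustration: if char_to_ix maps '\n' to 0, 'a' to 1, 'b' to 2 and 'c' to 3, and examples[index] is "abc", then
```python
X = [None, 1, 2, 3]
Y = [1, 2, 3, 0]   # X shifted one step to the left, with "\n" (index 0) appended
```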
End of explanation
"""
parameters = model(data, ix_to_char, char_to_ix)
"""
Explanation: Run the following cell; you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names.
End of explanation
"""
from __future__ import print_function
from keras.callbacks import LambdaCallback
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking
from keras.layers import LSTM
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from shakespeare_utils import *
import sys
import io
"""
Explanation: Conclusion
You can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implementation generated some really cool names like maconucon, marloralus and macingsersaurus. Your model hopefully also learned that dinosaur names tend to end in saurus, don, aura, tor, etc.
If your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, dromaeosauroides is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest!
This assignment used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the English language requires a much bigger dataset, usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name model for quite some time, and so far our favorite name is the great, undefeatable, and fierce: Mangosaurus!
<img src="images/mangosaurus.jpeg" style="width:250;height:300px;">
4 - Writing like Shakespeare
The rest of this notebook is optional and is not graded, but we hope you'll do it anyway since it's quite fun and informative.
A similar (but more complicated) task is to generate Shakespeare poems. Instead of learning from a dataset of dinosaur names you can use a collection of Shakespearian poems. Using LSTM cells, you can learn longer-term dependencies that span many characters in the text--e.g., where a character appearing somewhere in a sequence can influence what should be a different character much later in the sequence. These long-term dependencies were less important with dinosaur names, since the names were quite short.
<img src="images/shakespeare.jpg" style="width:500;height:400px;">
<caption><center> Let's become poets! </center></caption>
We have implemented a Shakespeare poem generator with Keras. Run the following cell to load the required packages and models. This may take a few minutes.
End of explanation
"""
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
model.fit(x, y, batch_size=128, epochs=1, callbacks=[print_callback])
# Run this cell to try with different inputs without having to re-train the model
generate_output()
"""
Explanation: To save you some time, we have already trained a model for ~1000 epochs on a collection of Shakespearian poems called "The Sonnets".
Let's train the model for one more epoch. When it finishes training for an epoch---this will also take a few minutes---you can run generate_output, which will prompt you for an input (<40 characters). The poem will start with your sentence, and our RNN-Shakespeare will complete the rest of the poem for you! For example, try "Forsooth this maketh no sense " (don't enter the quotation marks). Depending on whether you include the space at the end, your results might also differ--try it both ways, and try other inputs as well.
End of explanation
"""
|
kubeflow/code-intelligence
|
Issue_Embeddings/notebooks/09_LangModel_API_Demo.ipynb
|
mit
|
import requests
import json
import numpy as np
from passlib.apps import custom_app_context as pwd_context
API_ENDPOINT = 'https://embeddings.gh-issue-labeler.com/text'
API_KEY = 'YOUR_API_KEY' # Contact maintainers for your api key
"""
Explanation: <h1 align="center">GitHub Issue Embeddings API</h1>
This tutorial shows you how to ping the microservice that allows you to retrieve an embedding given an issue title and body. This notebook is available on GitHub.
Import Dependencies
End of explanation
"""
data = {'title': 'Fix the issue',
'body': 'I am encountering an error\n when trying to push the button.'}
# sending post request and saving response as response object
r = requests.post(url=API_ENDPOINT,
headers={'Token':pwd_context.hash(API_KEY)},
json=data)
"""
Explanation: API Endpoints
Route 1: https://embeddings.gh-issue-labeler.com/text
Allows you to get embeddings for the raw text corresponding to a single GitHub issue. The motivation for this endpoint is to use this at inference time, for example, when you need to perform computation on a new issue.
This endpoint listens to POST requests, with the payload illustrated below:
End of explanation
"""
embeddings = np.frombuffer(r.content, dtype='<f4')
embeddings.shape
"""
Explanation: Convert the byte stream sent over REST back to a numpy array. The numpy array is a 2,400-dimensional embedding representing latent features of the GitHub issue.
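As a usage sketch (not part of the API itself), such embeddings are often compared with cosine similarity; for example, given two hypothetical embedding arrays embedding_a and embedding_b:
```python
def cosine_similarity(a, b):
    # a, b: 1-D numpy arrays of equal length
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

# similarity = cosine_similarity(embedding_a, embedding_b)
```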
End of explanation
"""
|
jtwhite79/pyemu
|
verification/Freyberg/.ipynb_checkpoints/verify_unc_results-checkpoint.ipynb
|
bsd-3-clause
|
%matplotlib inline
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pyemu
"""
Explanation: verify pyEMU results with the Freyberg problem
End of explanation
"""
la = pyemu.Schur("freyberg.jcb",verbose=False,forecasts=[])
la.drop_prior_information()
jco_ord = la.jco.get(la.pst.obs_names,la.pst.par_names)
ord_base = "freyberg_ord"
jco_ord.to_binary(ord_base + ".jco")
la.pst.write(ord_base+".pst")
"""
Explanation: Instantiate the pyemu object and drop the prior information. Then reorder the Jacobian and save it as binary. This is needed because the PEST utilities require strict ordering between the control file and the Jacobian.
End of explanation
"""
pv_names = []
predictions = ["sw_gw_0","sw_gw_1","or28c05_0","or28c05_1"]
for pred in predictions:
pv = jco_ord.extract(pred).T
pv_name = pred + ".vec"
pv.to_ascii(pv_name)
pv_names.append(pv_name)
"""
Explanation: extract and save the forecast sensitivity vectors
End of explanation
"""
prior_uncfile = "pest.unc"
la.parcov.to_uncfile(prior_uncfile,covmat_file=None)
"""
Explanation: save the prior parameter covariance matrix as an uncertainty file
End of explanation
"""
post_mat = "post.cov"
post_unc = "post.unc"
args = [ord_base + ".pst","1.0",prior_uncfile,
post_mat,post_unc,"1"]
pd7_in = "predunc7.in"
f = open(pd7_in,'w')
f.write('\n'.join(args)+'\n')
f.close()
out = "pd7.out"
pd7 = os.path.join("i64predunc7.exe")
os.system(pd7 + " <" + pd7_in + " >"+out)
for line in open(out).readlines():
print(line)
"""
Explanation: PREDUNC7
write a response file to feed stdin to predunc7
End of explanation
"""
post_pd7 = pyemu.Cov.from_ascii(post_mat)
la_ord = pyemu.Schur(jco=ord_base+".jco",predictions=predictions)
post_pyemu = la_ord.posterior_parameter
#post_pyemu = post_pyemu.get(post_pd7.row_names)
"""
Explanation: load the posterior matrix written by predunc7
End of explanation
"""
post_pd7.x
post_pyemu.x
delta = (post_pd7 - post_pyemu).x
(post_pd7 - post_pyemu).to_ascii("delta.cov")
print(delta.sum())
print(delta.max(),delta.min())
delta = np.ma.masked_where(np.abs(delta) < 0.0001,delta)
plt.imshow(delta)
df = (post_pd7 - post_pyemu).to_dataframe().apply(np.abs)
df /= la_ord.pst.parameter_data.parval1
df *= 100.0
print(df.max())
delta
"""
Explanation: The cumulative difference between the two posterior matrices:
End of explanation
"""
print((delta.sum()/post_pyemu.x.sum()) * 100.0)
print(np.abs(delta).sum())
"""
Explanation: A few more metrics ...
End of explanation
"""
args = [ord_base + ".pst", "1.0", prior_uncfile, None, "1"]
pd1_in = "predunc1.in"
pd1 = os.path.join("i64predunc1.exe")
pd1_results = {}
for pv_name in pv_names:
args[3] = pv_name
f = open(pd1_in, 'w')
f.write('\n'.join(args) + '\n')
f.close()
out = "predunc1" + pv_name + ".out"
os.system(pd1 + " <" + pd1_in + ">" + out)
f = open(out,'r')
for line in f:
if "pre-cal " in line.lower():
pre_cal = float(line.strip().split()[-2])
elif "post-cal " in line.lower():
post_cal = float(line.strip().split()[-2])
f.close()
pd1_results[pv_name.split('.')[0].lower()] = [pre_cal, post_cal]
"""
Explanation: PREDUNC1
write a response file to feed stdin. Then run predunc1 for each forecast
End of explanation
"""
# save the results for verification testing
pd.DataFrame(pd1_results).to_csv("predunc1_results.dat")
pyemu_results = {}
for pname in la_ord.prior_prediction.keys():
pyemu_results[pname] = [np.sqrt(la_ord.prior_prediction[pname]),
np.sqrt(la_ord.posterior_prediction[pname])]
"""
Explanation: organize the pyemu results into a structure for comparison
End of explanation
"""
f = open("predunc1_textable.dat",'w')
for pname in pd1_results.keys():
print(pname)
f.write(pname+"&{0:6.5f}&{1:6.5}&{2:6.5f}&{3:6.5f}\\\n"\
.format(pd1_results[pname][0],pyemu_results[pname][0],
pd1_results[pname][1],pyemu_results[pname][1]))
print("prior",pname,pd1_results[pname][0],pyemu_results[pname][0])
print("post",pname,pd1_results[pname][1],pyemu_results[pname][1])
f.close()
"""
Explanation: compare the results:
End of explanation
"""
f = open("pred_list.dat",'w')
out_files = []
for pv in pv_names:
out_name = pv+".predvar1b.out"
out_files.append(out_name)
f.write(pv+" "+out_name+"\n")
f.close()
args = [ord_base+".pst","1.0","pest.unc","pred_list.dat"]
for i in range(36):
args.append(str(i))
args.append('')
args.append("n")
args.append("n")
args.append("y")
args.append("n")
args.append("n")
f = open("predvar1b.in", 'w')
f.write('\n'.join(args) + '\n')
f.close()
os.system("predvar1b.exe <predvar1b.in")
pv1b_results = {}
for out_file in out_files:
pred_name = out_file.split('.')[0]
f = open(out_file,'r')
for _ in range(3):
f.readline()
arr = np.loadtxt(f)
pv1b_results[pred_name] = arr
"""
Explanation: PREDVAR1b
write the necessary files to run predvar1b
End of explanation
"""
omitted_parameters = [pname for pname in la.pst.parameter_data.parnme if pname.startswith("wf")]
la_ord_errvar = pyemu.ErrVar(jco=ord_base+".jco",
predictions=predictions,
omitted_parameters=omitted_parameters,
verbose=False)
df = la_ord_errvar.get_errvar_dataframe(np.arange(36))
df
"""
Explanation: now for pyemu
End of explanation
"""
fig = plt.figure(figsize=(6,8))
max_idx = 13
idx = np.arange(max_idx)
for ipred,pred in enumerate(predictions):
arr = pv1b_results[pred][:max_idx,:]
first = df[("first", pred)][:max_idx]
second = df[("second", pred)][:max_idx]
third = df[("third", pred)][:max_idx]
ax = plt.subplot(len(predictions),1,ipred+1)
#ax.plot(arr[:,1],color='b',dashes=(6,6),lw=4,alpha=0.5)
#ax.plot(first,color='b')
#ax.plot(arr[:,2],color='g',dashes=(6,4),lw=4,alpha=0.5)
#ax.plot(second,color='g')
#ax.plot(arr[:,3],color='r',dashes=(6,4),lw=4,alpha=0.5)
#ax.plot(third,color='r')
ax.scatter(idx,arr[:,1],marker='x',s=40,color='g',
label="PREDVAR1B - first term")
ax.scatter(idx,arr[:,2],marker='x',s=40,color='b',
label="PREDVAR1B - second term")
ax.scatter(idx,arr[:,3],marker='x',s=40,color='r',
label="PREVAR1B - third term")
ax.scatter(idx,first,marker='o',facecolor='none',
s=50,color='g',label='pyEMU - first term')
ax.scatter(idx,second,marker='o',facecolor='none',
s=50,color='b',label="pyEMU - second term")
ax.scatter(idx,third,marker='o',facecolor='none',
s=50,color='r',label="pyEMU - third term")
ax.set_ylabel("forecast variance")
ax.set_title("forecast: " + pred)
if ipred == len(predictions) -1:
ax.legend(loc="lower center",bbox_to_anchor=(0.5,-0.75),
scatterpoints=1,ncol=2)
ax.set_xlabel("singular values")
else:
ax.set_xticklabels([])
#break
plt.savefig("predvar1b_ver.eps")
"""
Explanation: generate some plots to verify
End of explanation
"""
cmd_args = [os.path.join("i64identpar.exe"),ord_base,"5",
"null","null","ident.out","/s"]
cmd_line = ' '.join(cmd_args)+'\n'
print(cmd_line)
print(os.getcwd())
os.system(cmd_line)
identpar_df = pd.read_csv("ident.out",delim_whitespace=True)
la_ord_errvar = pyemu.ErrVar(jco=ord_base+".jco",
predictions=predictions,
verbose=False)
df = la_ord_errvar.get_identifiability_dataframe(5)
df
"""
Explanation: Identifiability
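As a reminder, parameter identifiability is commonly defined (following Doherty and Hunt, 2009) as the sum of the squared components of the first $K$ right singular vectors for each parameter,
$$ d_i = \sum_{k=1}^{K} V_{ik}^2, $$
and here both identpar and get_identifiability_dataframe use $K=5$ singular vectors.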
End of explanation
"""
diff = identpar_df["identifiability"].values - df["ident"].values
diff.max()
fig = plt.figure()
ax = plt.subplot(111)
axt = plt.twinx()
ax.plot(identpar_df["identifiability"])
ax.plot(df.ident.values)
ax.set_xlim(-10,600)
diff = identpar_df["identifiability"].values - df["ident"].values
#print(diff)
axt.plot(diff)
axt.set_ylim(-1,1)
ax.set_xlabel("parameter")
ax.set_ylabel("identifiability")
axt.set_ylabel("difference")
"""
Explanation: cheap plot to verify
End of explanation
"""
|
sony/nnabla
|
tutorial/vat_semi_supervised_learning.ipynb
|
apache-2.0
|
!pip install nnabla-ext-cuda100
!git clone https://github.com/sony/nnabla-examples.git
%cd nnabla-examples
"""
Explanation: Deep learning frequently requires a large amount of labeled data, but in practice it can be very costly to collect data with labels. The semi-supervised setting has gained attention since it can leverage unlabeled data to train a model.
In this tutorial, we will show you how to perform semi-supervised learning on MNIST with NNabla, using the method known as virtual adversarial training (VAT). Although MNIST is fully labeled, we will assume a setting where most of the labels are missing.
End of explanation
"""
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solver as S
from nnabla.logger import logger
import nnabla.utils.save as save
from nnabla.utils.data_iterator import data_iterator_simple
from utils.neu.save_nnp import save_nnp
import numpy as np
import time
import os
"""
Explanation: As always, let's start by importing dependencies.
End of explanation
"""
import struct
import zlib
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download
def load_mnist(train=True):
'''
Load MNIST dataset images and labels from the original page by Yann LeCun or the cache file.
Args:
train (bool): The testing dataset will be returned if False. Training data has 60000 images, while testing has 10000 images.
Returns:
numpy.ndarray: A shape of (#images, 1, 28, 28). Values in [0.0, 1.0].
numpy.ndarray: A shape of (#images, 1). Values in {0, 1, ..., 9}.
'''
if train:
image_uri = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'
label_uri = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'
else:
image_uri = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'
label_uri = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'
logger.info('Getting label data from {}.'.format(label_uri))
r = download(label_uri)
data = zlib.decompress(r.read(), zlib.MAX_WBITS | 32)
_, size = struct.unpack('>II', data[0:8])
labels = np.frombuffer(data[8:], np.uint8).reshape(-1, 1)
r.close()
logger.info('Getting label data done.')
logger.info('Getting image data from {}.'.format(image_uri))
r = download(image_uri)
data = zlib.decompress(r.read(), zlib.MAX_WBITS | 32)
_, size, height, width = struct.unpack('>IIII', data[0:16])
images = np.frombuffer(data[16:], np.uint8).reshape(
size, 1, height, width)
r.close()
logger.info('Getting image data done.')
return images, labels
class MnistDataSource(DataSource):
'''
Get data directly from MNIST dataset from Internet(yann.lecun.com).
'''
def _get_data(self, position):
image = self._images[self._indexes[position]]
label = self._labels[self._indexes[position]]
return (image, label)
def __init__(self, train=True, shuffle=False, rng=None):
super(MnistDataSource, self).__init__(shuffle=shuffle)
self._train = train
self._images, self._labels = load_mnist(train)
self._size = self._labels.size
self._variables = ('x', 'y')
if rng is None:
rng = np.random.RandomState(313)
self.rng = rng
self.reset()
def reset(self):
if self._shuffle:
self._indexes = self.rng.permutation(self._size)
else:
self._indexes = np.arange(self._size)
super(MnistDataSource, self).reset()
@property
def images(self):
"""Get copy of whole data with a shape of (N, 1, H, W)."""
return self._images.copy()
@property
def labels(self):
"""Get copy of whole label with a shape of (N, 1)."""
return self._labels.copy()
def data_iterator_mnist(batch_size,
train=True,
rng=None,
shuffle=True,
with_memory_cache=False,
with_file_cache=False):
'''
Provide DataIterator with :py:class:`MnistDataSource`
with_memory_cache and with_file_cache option's default value is all False,
because :py:class:`MnistDataSource` is able to store all data into memory.
For example,
.. code-block:: python
with data_iterator_mnist(True, batch_size) as di:
for data in di:
SOME CODE TO USE data.
'''
return data_iterator(MnistDataSource(train=train, shuffle=shuffle, rng=rng),
batch_size,
rng,
with_memory_cache,
with_file_cache)
"""
Explanation: Let's also define a data iterator for MNIST. You can disregard the details for now.
End of explanation
"""
def mlp_net(x, n_h, n_y, test=False):
"""
Args:
x(`~nnabla.Variable`): N-D array
n_h(int): number of units in an intermediate layer
n_y(int): number of classes
test: operation type train=True, test=False
Returns:
~nnabla.Variable: h
"""
h = x
with nn.parameter_scope("fc1"):
h = F.relu(PF.batch_normalization(
PF.affine(h, n_h), batch_stat=not test), inplace=True)
with nn.parameter_scope("fc2"):
h = F.relu(PF.batch_normalization(
PF.affine(h, n_h), batch_stat=not test), inplace=True)
with nn.parameter_scope("fc3"):
h = PF.affine(h, n_y)
return h
"""
Explanation: We now define a multi-layer perceptron (MLP) network to be used later. Our MLP consists of 3 fully-connected layers, two of which are followed by batch normalization and non-linear activation.
End of explanation
"""
def distance(y0, y1):
"""
Distance function is Kullback-Leibler Divergence for categorical distribution
"""
return F.kl_multinomial(F.softmax(y0), F.softmax(y1))
"""
Explanation: Let's also define a function to measure the distance between two distributions. In this example, we use the Kullback-Leibler divergence for multinomial (categorical) distributions, commonly known as KL-divergence.
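For two categorical distributions $p = \mathrm{softmax}(y_0)$ and $q = \mathrm{softmax}(y_1)$, the KL-divergence is
$$ D_{KL}(p \,\|\, q) = \sum_{i} p_i \log \frac{p_i}{q_i}. $$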
End of explanation
"""
def calc_validation_error(di_v, xv, tv, err, val_iter):
"""
Calculate validation error rate
Args:
di_v; validation dataset
xv: variable for input
tv: variable for label
err: variable for error estimation
val_iter: number of iteration
Returns:
error rate
"""
ve = 0.0
for j in range(val_iter):
xv.d, tv.d = di_v.next()
xv.d = xv.d / 255
err.forward(clear_buffer=True)
ve += err.d
return ve / val_iter
"""
Explanation: Before we get into the main computational graph, let's also define a function to evaluate the network. This function simply returns the validation error rate, averaged over the given number of iterations.
End of explanation
"""
# Get context.
from nnabla.ext_utils import get_extension_context
ctx = get_extension_context('cudnn')
nn.set_default_context(ctx)
# Load MNIST Dataset
images, labels = load_mnist(train=True)
rng = np.random.RandomState(706)
inds = rng.permutation(len(images))
"""
Explanation: Now we get into the main computational graph. We start by setting context to use cuDNN, and loading data iterator for MNIST.
End of explanation
"""
def feed_labeled(i):
j = inds[i]
return images[j], labels[j]
def feed_unlabeled(i):
j = inds[i]
return images[j], labels[j]
shape_x = (1, 28, 28)
n_h = 1200 #number of units
n_y = 10 #number of classes
n_labeled = 100
n_train = 60000
batchsize_l = 100
batchsize_u = 250
batchsize_v = 100
di_l = data_iterator_simple(feed_labeled, n_labeled,
batchsize_l, shuffle=True, rng=rng, with_file_cache=False)
di_u = data_iterator_simple(feed_unlabeled, n_train,
batchsize_u, shuffle=True, rng=rng, with_file_cache=False)
di_v = data_iterator_mnist(batchsize_v, train=False)
"""
Explanation: Let's define two functions for loading data for the labeled and unlabeled settings respectively. Although the feed_unlabeled function also returns labels, we will see later that the labels are disregarded in the graph.
After declaring some hyperparameters, we also define data iterator variables using the two load functions we just defined, separately for labeled and unlabeled settings. Let's also define a data iterator variable for validation.
End of explanation
"""
# Create networks
# feed-forward-net building function
def forward(x, test=False):
return mlp_net(x, n_h, n_y, test)
# Net for learning labeled data
xl = nn.Variable((batchsize_l,) + shape_x, need_grad=False)
yl = forward(xl, test=False)
tl = nn.Variable((batchsize_l, 1), need_grad=False)
loss_l = F.mean(F.softmax_cross_entropy(yl, tl))
# Net for learning unlabeled data
xu = nn.Variable((batchsize_u,) + shape_x, need_grad=False)
yu = forward(xu, test=False)
y1 = yu.get_unlinked_variable()
y1.need_grad = False
"""
Explanation: We first define a simple forward function that calls the multi-layer perceptron network that we defined above.
We then define the variables separately for labeled and unlabeled data. xl, xu and yl, yu refer to the inputs and outputs of the MLP network. In the labeled setting, we also have a teacher variable tl, from which we can calculate the loss by applying softmax cross entropy. Note that this loss is for labeled data only; we will define a separate loss variable for unlabeled data later.
Also, notice that we do not have a teacher variable for the unlabeled setting, because we assume that the labels are missing. Instead, we define an unlinked variable from yu.
End of explanation
"""
xi_for_vat = 10.0
eps_for_vat = 1.5
noise = nn.Variable((batchsize_u,) + shape_x, need_grad=True)
r = noise / (F.sum(noise ** 2, [1, 2, 3], keepdims=True)) ** 0.5
r.persistent = True
y2 = forward(xu + xi_for_vat * r, test=False)
y3 = forward(xu + eps_for_vat * r, test=False)
loss_k = F.mean(distance(y1, y2))
loss_u = F.mean(distance(y1, y3))
# Net for evaluating validation data
xv = nn.Variable((batchsize_v,) + shape_x, need_grad=False)
hv = forward(xv, test=True)
tv = nn.Variable((batchsize_v, 1), need_grad=False)
err = F.mean(F.top_n_error(hv, tv, n=1))
"""
Explanation: We now define variables for noise, which are added to the input variable xu and fed to MLP. The KL-divergence between the MLP outputs of noisy variable and noise-free variable is used to compute loss. Of the two losses, one is used to perform power method iteration, and another one is loss for unlabeled data.
End of explanation
"""
# Create solver
solver = S.Adam(2e-3)
solver.set_parameters(nn.get_parameters())
# Monitor training and validation stats.
model_save_path = 'tmp.monitor.vat'
import nnabla.monitor as M
monitor = M.Monitor(model_save_path)
monitor_verr = M.MonitorSeries("Test error", monitor, interval=240)
monitor_time = M.MonitorTimeElapsed("Elapsed time", monitor, interval=240)
"""
Explanation: We define our solver and monitor variables. We will use Adam as our solver.
End of explanation
"""
# Training Loop.
t0 = time.time()
max_iter = 24000
val_interval = 240
val_iter = 100
weight_decay = 0
n_iter_for_power_method = 1
iter_per_epoch = 240
learning_rate_decay = 0.9
for i in range(max_iter):
# Validation Test
if i % val_interval == 0:
valid_error = calc_validation_error(
di_v, xv, tv, err, val_iter)
monitor_verr.add(i, valid_error)
#################################
## Training by Labeled Data #####
#################################
# forward, backward and update
xl.d, tl.d = di_l.next()
xl.d = xl.d / 255
solver.zero_grad()
loss_l.forward(clear_no_need_grad=True)
loss_l.backward(clear_buffer=True)
solver.weight_decay(weight_decay)
solver.update()
#################################
## Training by Unlabeled Data ###
#################################
# Calculate y without noise, only once.
xu.d, _ = di_u.next()
xu.d = xu.d / 255
yu.forward(clear_buffer=True)
##### Calculate Adversarial Noise #####
# Do power method iteration
noise.d = np.random.normal(size=xu.shape).astype(np.float32)
for k in range(n_iter_for_power_method):
r.grad.zero()
loss_k.forward(clear_no_need_grad=True)
loss_k.backward(clear_buffer=True)
noise.data.copy_from(r.grad)
##### Calculate loss for unlabeled data #####
# forward, backward and update
solver.zero_grad()
loss_u.forward(clear_no_need_grad=True)
loss_u.backward(clear_buffer=True)
solver.weight_decay(weight_decay)
solver.update()
##### Learning rate update #####
if i % iter_per_epoch == 0:
solver.set_learning_rate(
solver.learning_rate() * learning_rate_decay)
monitor_time.add(i)
"""
Explanation: Now we get into our training loop. We will have separate training stages for labeled and unlabeled data. We first start with labeled data, which is pretty much the same as the usual training graph.
Then, we define our training graph for unlabeled data. Note that we ignore the label returned by the data iterator, assigning it to a throwaway variable _. We first forward the noise-free variable, then calculate the adversarial noise by generating random noise followed by power method iterations. Finally, we compute the loss for the unlabeled data.
End of explanation
"""
# Evaluate the final model by the error rate with validation dataset
valid_error = calc_validation_error(di_v, xv, tv, err, val_iter)
print(valid_error)
# If you need to save the model, please comment out the following lines:
# parameter_file = os.path.join(
# model_save_path, 'params_%06d.h5' % max_iter)
# nn.save_parameters(parameter_file)
"""
Explanation: Finally, we evaluate our model on the validation dataset. If the model was trained correctly, we should get an error rate of around 1.5%.
End of explanation
"""
|
cuttlefishh/papers
|
red-sea-single-cell-genomes/code/singlecell_tara_heatmap_histogram.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import re
import math
from sys import argv
"""
Explanation: Single-cell Paper: Tara Heatmap and Histogram
Histograms for Proch and Pelag of all gene clusters and those missing in Tara metagenomes
Heatmaps for Proch and Pelag of Tara count vs cluster size
End of explanation
"""
sns.set_style("whitegrid")
sns.set_style("ticks")
sns.set_context("poster")
"""
Explanation: Seaborn settings
End of explanation
"""
def read_clusters_tsv(path):
df = pd.read_csv(path, sep='\t', header=0, index_col=0)
df = df.rename(columns={'Unnamed: 1': 'cluster_size'})
col = [int(re.sub(r'SIZE:([0-9]*)', r'\1', i)) for i in df['cluster_size']]
df['cluster_size'] = col
return df
"""
Explanation: Function to read tsv of clusters and fix cluster size column
End of explanation
"""
def plot_hist_tara(df_all_clusters, df_missing_clusters, bin_max, bin_size, max_frac, num_genomes, ymax, fig_path):
fig, ax = plt.subplots()
sns.distplot(df_all_clusters.cluster_size, kde=False, color='b', bins=np.arange(0,bin_max+bin_size,bin_size), label='All gene clusters')
sns.distplot(df_missing_clusters.cluster_size, kde=False, color='r', bins=np.arange(0,bin_max+bin_size,bin_size), label='Missing from Tara metagenomes')
sns.despine(offset=10)
xticks = np.array(np.arange(0, max_frac, 0.2) * num_genomes)
xticklabels = xticks / num_genomes
plt.xticks(xticks, xticklabels)
plt.xlim(0, max_frac*num_genomes)
plt.xlabel('Cluster copy number (per genome)')
plt.yscale('log')
plt.ylim(0.5, 1e4)
yticks = np.array([1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000])
yticklabels = ['1', '2', '5', '10', '20', '50', '100', '200', '500', '1000', '2000', '5000', '10000']
plt.yticks(yticks, yticklabels)
plt.ylabel('Number of clusters')
plt.legend()
fig.set_size_inches(12, 8)
plt.savefig(fig_path)
"""
Explanation: Function to plot histograms of cluster size
End of explanation
"""
def merge_cluster_counts(path):
# Paths of input files, containing cluster counts
paths = pd.Series.from_csv(path, header=-1, sep='\t', index_col=None)
# Data frame containing all samples cluster counts (NaN if missing)
pieces = []
for path in paths:
fullpath = "/Users/luke/singlecell/tara/PROK-139/%s" % path
counts = pd.DataFrame.from_csv(fullpath, header=-1, sep='\t', index_col=0)
pieces.append(counts)
frame = pd.concat(pieces, axis=1)
headings = paths.tolist()
frame.columns = headings
return frame
"""
Explanation: Function to merge cluster counts
End of explanation
"""
def make_df_size_count(df_all, df_counts):
df_size_count = pd.DataFrame()
df_size_count['cluster_size'] = df_all.cluster_size
df_size_count['tara_count'] = df_counts.count(axis=1)
df_size_count.fillna(0, inplace=True)
df_size_count.tara_count = df_size_count.tara_count.astype(int)
return(df_size_count)
"""
Explanation: Function to make two-column dataframe of cluster_size and tara_count
End of explanation
"""
def make_groupby_size_count(df_size_count):
# matrix style of cluster_size and tara_count
groupby_size_count = df_size_count.groupby(['cluster_size', 'tara_count']).size().unstack().transpose()
max_size = df_size_count.cluster_size.max()
max_count = int(df_size_count.tara_count.max())
# add empty columns
for i in range(1, max_size+1):
if not i in groupby_size_count:
groupby_size_count[i] = np.nan
# add empty rows (might not be any)
for i in range(1, max_count+1):
if not i in groupby_size_count.index:
groupby_size_count.loc[i] = np.nan
groupby_size_count.sort_index(axis=1, inplace=True)
#groupby_size_count.fillna(0, inplace=True)
return(groupby_size_count, max_size, max_count)
"""
Explanation: Function to make groupby_size_count frame
End of explanation
"""
def plot_heatmap_tara(df, num_genomes, max_size, max_count, max_frac, xinches, yinches, species, fig_path):
fig, ax = plt.subplots()
myfig = sns.heatmap(np.log(df.iloc[::-1]), square=False, robust=True, cmap='inferno')
xticks = np.array(np.arange(0, 5, 0.2) * num_genomes)
xticklabels = xticks / num_genomes
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_xlabel('Cluster copy number (per genome)')
ylabels = np.array(df.index[0::20].tolist() + [max_count])
ax.set_yticks(ylabels+0.5)
ax.set_yticklabels(ylabels)
ax.set_ylabel('Tara metagenomes cluster found in')
ax.set_title('Density heatmap of %s gene clusters' % species)
ax.axis([0, max_frac*num_genomes, 0, max_count+2])
fig.set_size_inches(xinches, yinches)
fig.savefig(fig_path)
"""
Explanation: Function to plot heatmap
End of explanation
"""
# missing clusters
path = '/Users/luke/singlecell/tara/table_missing_proch_all_1e-5.tsv'
num_genomes = 145
fig_full = '/Users/luke/singlecell/tara/hist_missing_proch_all_1e-5_full.pdf'
fig_zoom = '/Users/luke/singlecell/tara/hist_missing_proch_all_1e-5_zoom.pdf'
df_missing_clusters = read_clusters_tsv(path)
# all clusters
path = '/Users/luke/singlecell/tara/table_all_proch.tsv'
df_all_clusters = read_clusters_tsv(path)
# tara counts by cluster
species = 'proch'
evalue = '1e-5'
path = '/Users/luke/singlecell/tara/paths_%s_%s.list' % (species, evalue)
df_counts = merge_cluster_counts(path)
# col_SRF = [col for col in list(df_counts.columns) if 'SRF' in col]
# df_counts = df_counts[col_SRF]
"""
Explanation: PROCHLOROCOCCUS
End of explanation
"""
# full hist
plot_hist_tara(df_all_clusters, df_missing_clusters, 240, 5.8, 1.6001, num_genomes, 10000, fig_full)
# zoom hist
# plot_hist_tara(df_all_clusters, df_missing_clusters, 1.6201, num_genomes, 30, fig_zoom)
"""
Explanation: Histograms of Prochlorococcus gene cluster size among clusters MISSING in all Tara samples
End of explanation
"""
df_size_count = make_df_size_count(df_all_clusters, df_counts)
groupby_size_count, max_size, max_count = make_groupby_size_count(df_size_count)
print groupby_size_count.max().max()
# def fourth_root(num):
# return num**0.25
# def square_root(num):
# return num**0.5
# groupby_size_count_fourth_root = groupby_size_count_nonan.apply(fourth_root)
# groupby_size_count_square_root = groupby_size_count_nonan.apply(square_root)
fig_path = '/Users/luke/singlecell/tara/heatmap_tara_proch.pdf'
plot_heatmap_tara(groupby_size_count, num_genomes, max_size, max_count, 1.1, 12, 8, 'Prochlorococcus', fig_path) # 1.1 was 1.62
# jointplot of cluster_size and tara_count
fig = sns.jointplot(x='cluster_size', y='tara_count', data=df_size_count)
"""
Explanation: Density heatmap of Prochlorococcus gene clusters by cluster size (number of genomes) and presence/absence in 139 Tara prokaryote metagenomes
End of explanation
"""
# missing clusters
path = '/Users/luke/singlecell/tara/table_missing_pelag_all_1e-5.tsv'
num_genomes = 47
fig_full = '/Users/luke/singlecell/tara/hist_missing_pelag_all_1e-5_full.pdf'
fig_zoom = '/Users/luke/singlecell/tara/hist_missing_pelag_all_1e-5_zoom.pdf'
df_missing_clusters = read_clusters_tsv(path)
# all clusters
path = '/Users/luke/singlecell/tara/table_all_pelag.tsv'
df_all_clusters = read_clusters_tsv(path)
# tara counts by cluster
species = 'pelag'
evalue = '1e-5'
path = '/Users/luke/singlecell/tara/paths_%s_%s.list' % (species, evalue)
df_counts = merge_cluster_counts(path)
# col_SRF = [col for col in list(df_counts.columns) if 'SRF' in col]
# df_counts = df_counts[col_SRF]
df_counts.shape
"""
Explanation: PELAGIBACTER
End of explanation
"""
# full hist
plot_hist_tara(df_all_clusters, df_missing_clusters, 100, 1.88, 1.6001, num_genomes, 5000, fig_full)
# zoom hist
#plot_hist_tara(df_all_clusters, df_missing_clusters, 1.6201, num_genomes, 30, fig_zoom)
"""
Explanation: Histograms of Pelagibacter gene cluster size among clusters MISSING in all Tara samples
End of explanation
"""
df_size_count = make_df_size_count(df_all_clusters, df_counts)
groupby_size_count, max_size, max_count = make_groupby_size_count(df_size_count)
print groupby_size_count.max().max()
groupby_size_count.max().max()
fig_path = '/Users/luke/singlecell/tara/heatmap_tara_pelag.pdf'
plot_heatmap_tara(groupby_size_count, num_genomes, max_size, max_count, 1.1, 12, 8, 'Pelagibacter', fig_path) # 1.1 was 1.62
"""
Explanation: Density heatmap of Pelagibacter gene clusters by cluster size (number of genomes) and presence/absence in 139 Tara prokaryote metagenomes
End of explanation
"""
|
knub/master-thesis
|
notebooks/Evaluation Results.ipynb
|
apache-2.0
|
df_tc_results = pnd.DataFrame([
("topic.full.alpha-1-100.256-400.model", 0.469500859375, 0.00617111859067, 0.6463414634146342),
("topic.16-400.model", 0.43805875, 0.00390183951094, 0.5975609756097561),
("topic.256-1000.model", 0.473455351563, 0.00635883046394, 0.5853658536585366),
("topic.64-400.model", 0.45327734375, 0.00385141007263, 0.6341463414634146),
("topic.256-400.model", 0.46836359375, 0.00599032492068, 0.5731707317073171),
("topic.full.fixed-vocabulary.alpha-1-100.256-400.model", 0.468437070312, 0.00562772603243, 0.5975609756097561),
("topic.full.256-400.model", 0.472498945313, 0.00624853749772, 0.5975609756097561),
("topic.256-600.model", 0.478640273437, 0.00685787139094, 0.5609756097560975)
],
columns=["Topic model parameters", "TC_mean", "TC_var", "CC_purity"])
del df_tc_results["CC_purity"]
df_tc_results.sort_values(by="TC_mean", ascending=False)
df_tc_results.sort_values(by="TC_var", ascending=False)
df_tc_results_2 = pnd.read_csv("../models/topic_models_coherence_2.tsv", sep="\t", index_col=None)
df_tc_results_2.sort_values(by="TC_mean", ascending=False)
"""
Explanation: Topic Models → Topic Coherence, Concept Categorization
Evaluated using the Palmetto tool from the paper Exploring the Space of Topic Coherence Measures
Values still seem low compared to example values from the paper
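Most of the coherence measures explored in that paper are built from (normalized) pointwise mutual information between pairs of top topic words, e.g.
$$ \mathrm{NPMI}(w_i, w_j) = \frac{\log \frac{P(w_i, w_j) + \epsilon}{P(w_i)\,P(w_j)}}{-\log \big( P(w_i, w_j) + \epsilon \big)} $$
aggregated over the top-N words of each topic; TC_mean here is presumably such a coherence score averaged over all topics of a model.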
End of explanation
"""
df_ar_results = pnd.DataFrame([
("embedding.skip-gram.size-200.window-5.negative-5.model", 0.481221858371),
("embedding.cbow.size-200.window-5.model", 0.416547277937),
("embedding.google.size-300", 0.735878018829),
],
columns=["Word Embeddings", "Analogy_Reasoning"])
df_ar_results.sort_values(by="Analogy_Reasoning", ascending=False)
"""
Explanation: Word Embeddings → Analogy Reasoning
Using manually set parameters
Using the question word data set (~19k questions) from Efficient Estimation of Word Representations in Vector Space (word2vec).
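As a reminder of what this benchmark measures: each question has the form "a is to b as c is to ?", answered by a nearest-neighbour search around vec(b) - vec(a) + vec(c). A toy sketch (hypothetical word vectors, not the evaluation code used here):
```python
import numpy as np

def answer_analogy(vectors, a, b, c):
    # vectors: dict mapping word -> 1-D numpy array
    target = vectors[b] - vectors[a] + vectors[c]
    candidates = [w for w in vectors if w not in (a, b, c)]
    sims = [np.dot(vectors[w], target) /
            (np.linalg.norm(vectors[w]) * np.linalg.norm(target))
            for w in candidates]
    return candidates[int(np.argmax(sims))]
```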
End of explanation
"""
df_ar_spearmint_results = pnd.read_csv("../code/python/knub/thesis/spearmint_analogy_reasoning/results.csv", index_col="model")
df_ar_spearmint_results.sort_values(by="Analogy_Reasoning", ascending=False)
"""
Explanation: Using Spearmint
Testing only skip-gram architecture.
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.19/_downloads/38f243960dd98f9910f9b981f0b54dd0/plot_fdr_stats_evoked.ipynb
|
bsd-3-clause
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.stats import bonferroni_correction, fdr_correction
print(__doc__)
"""
Explanation: FDR correction on T-test on sensor data
We test whether the evoked response significantly deviates from 0. The multiple comparisons problem is addressed with False Discovery Rate (FDR) correction.
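Briefly, for $m$ tests at level $\alpha$: Bonferroni rejects a test when $p_i \le \alpha / m$, while the Benjamini-Hochberg FDR procedure (the 'indep' method used below) sorts the p-values in ascending order and rejects the $k$ hypotheses with the smallest p-values, where
$$ k = \max \left\{ i : p_{(i)} \le \frac{i}{m}\,\alpha \right\}. $$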
End of explanation
"""
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)[:30]
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
"""
Explanation: Set parameters
End of explanation
"""
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
X = epochs.get_data() # as 3D matrix
X = X[:, 0, :] # take only one channel to get a 2D array
"""
Explanation: Read epochs for the channel of interest
End of explanation
"""
T, pval = stats.ttest_1samp(X, 0)
alpha = 0.05
n_samples, n_tests = X.shape
threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha)
threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
threshold_fdr = np.min(np.abs(T)[reject_fdr])
"""
Explanation: Compute statistic
End of explanation
"""
times = 1e3 * epochs.times
plt.close('all')
plt.plot(times, T, 'k', label='T-stat')
xmin, xmax = plt.xlim()
plt.hlines(threshold_uncorrected, xmin, xmax, linestyle='--', colors='k',
label='p=0.05 (uncorrected)', linewidth=2)
plt.hlines(threshold_bonferroni, xmin, xmax, linestyle='--', colors='r',
label='p=0.05 (Bonferroni)', linewidth=2)
plt.hlines(threshold_fdr, xmin, xmax, linestyle='--', colors='b',
label='p=0.05 (FDR)', linewidth=2)
plt.legend()
plt.xlabel("Time (ms)")
plt.ylabel("T-stat")
plt.show()
"""
Explanation: Plot
End of explanation
"""
|
geekandtechgirls/Women-In-Django
|
Soluciones.ipynb
|
gpl-3.0
|
x1 = int(input("Introduce un número: "))
x2 = int(input("Y ahora otro: "))
x = (20 * x1 - x2)/(x2 + 3)
print("x =",x)
"""
Explanation: Solutions to the proposed exercises
Basic level
1.
Write a small program that asks the user to enter two numbers ($x_1$ and $x_2$), computes the following operation and prints its result ($x$):
$$ x = \frac{20 * x_1 - x_2}{x_2 + 3} $$
If you try to operate on the result of the input function you will get an error telling you that two values of type str cannot be subtracted. Use the int function to convert the data entered from the keyboard into numeric values.
End of explanation
"""
num = int(input("Introduce número de ninjas: "))
if num < 50 and num%2==0:
print("Puedo con ellos!")
else:
print("No me vendría mal una ayudita...")
"""
Explanation: 2.
Write a program that asks the user for a number (of ninjas). If that number is less than 50 and even, the program will print "Puedo con ellos!" ("I can take them!"); otherwise it will print "No me vendría mal una ayudita..." ("I could use a little help...").
Note: to check whether a number is even you must use the $\%$ operator, and to check whether two conditions hold at the same time, the logical operator and
End of explanation
"""
num = int(input("Intoduce un número: "))
# Opción 1: si el usuario introduce un número negativo pedir otro número
while num < 0:
num = int(input("Introduce un número: "))
i = 0
while i <= num:
print(i)
i += 1
num = int(input("Intoduce un número: "))
# Opción 2: si el usuario introduce un número negativo, contar hacia atrás
sign = lambda x: (1, -1)[x < 0]
i = 0
s = sign(num)
while i*s <= num*s:
print(i)
i += s
"""
Explanation: 3.
Write a while loop that prints all the numbers from 0 up to a number entered by the user. If the number entered is negative you can make one of two choices: ask the user to enter a positive number, or count backwards; you choose!
End of explanation
"""
# Para generar del 0 al 10 ambos inclusive:
for i in range(0,11):
print(i)
# Para generar del 2 al 10 sólo con números pares
for i in range(2, 11, 2):
print(i)
"""
Explanation: 4.
Use range to generate the even numbers from 0 to 10, both inclusive. What would you change to generate them from 2 to 10?
End of explanation
"""
for num in range(2,10):
if num % 2 == 0:
print(num, "es par!")
continue
print(num, "es impar!")
"""
Explanation: 5.
What is the difference between the break statement and the continue statement?
When a break or a continue instruction is executed inside a loop, the current iteration is interrupted. However, in the case of break the loop is abandoned, whereas in the case of continue execution moves on to the next iteration. For example, the following loop prints whether a number is even or odd:
End of explanation
"""
lista_compra = ['Leche', 'Chocolate', 'Arroz', 'Macarrones']
print("Penúltimo elemento: ", lista_compra[-2])
print("Del segundo al cuarto elemento: ", lista_compra[1:5])
print("Los tres últimos elementos: ", lista_compra[-3:])
print("Todos: ", lista_compra)
del lista_compra[2]
print(lista_compra)
"""
Explanation: 6.
Make a shopping list and print the following elements:
The second-to-last element
From the second to the fourth element
The last three
All of them!
Finally, remove the third element of the list using the del statement
End of explanation
"""
# solución 1:
[x for x in range(10) if x%2==0]
# solución 2:
list(range(0,10,2))
"""
Explanation: 7.
Create a list with all the even numbers from 0 to 10 in a single line.
End of explanation
"""
[[j for j in range(i*i, i*i+3)] for i in range(1,3)]
"""
Explanation: 8.
Create the following matrix in one line:
$$ M_{2 \times 3} = \left( \begin{matrix} 1 & 2 & 3 \\ 4 & 5 & 6 \end{matrix} \right)$$
End of explanation
"""
tuplas_compra = [('Leche', 2), ('Chocolate', 1), ('Arroz', 1.5),
('Macarrones', 2.1)]
print("Precio del tercer elemento: ", tuplas_compra[2][1])
print("Nombre del último elemento: ", tuplas_compra[-1][0])
print("Nombre y precio del primer elemento", tuplas_compra[0])
"""
Explanation: 9.
Make the shopping list from the previous exercise again, but this time store each element of the shopping list together with its price. Then print the following elements:
The price of the third element.
The name of the last element.
Both the name and the price of the first element.
End of explanation
"""
dict_compra = dict(tuplas_compra)
for compra in dict_compra.items():
print("he comprado {} y me ha costado {}".format(compra[0], compra[1]))
print('He comprado leche?', 'Leche' in dict_compra)
del dict_compra['Arroz']
print(dict_compra)
"""
Explanation: 10.
Is it a good idea to use the set function to remove the duplicate elements of a list?
Using the set function to remove the duplicate elements of a list loses the original order of the list. Moreover, it will not work if our list contains dictionaries or lists, because those are not hashable objects.
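For instance, an order-preserving alternative (Python 3.7+, where dicts preserve insertion order):
```python
items = [3, 1, 3, 2, 1]
deduped = list(dict.fromkeys(items))   # [3, 1, 2] -- duplicates removed, order kept
```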
11.
Using the tuple you created in the exercise on tuples, create a dictionary from your shopping list. Once you have created the dictionary:
Print all the elements you are going to buy, building the following sentence with the format function: "he comprado __ y me ha costado __" (I bought __ and it cost me __).
Check whether you have added a given element (for example a carton of milk) to the shopping list.
Remove an element using the del statement.
End of explanation
"""
import numpy as np
print(np.ones(5, dtype=np.int8))
print(np.random.random(5))
print(np.full(shape=(3,3), fill_value=4, dtype=np.int8))
print(np.arange(6))
print(np.linspace(start=1, stop=6, num=10))
print(np.eye(N=2))
print(np.identity(n=3, dtype=np.int8))
"""
Explanation: Intermediate level
1.
Now that we have seen how to create arrays from an object, as well as functions for creating arrays with preset types, create different 1D, 2D and 3D arrays with the functions above and print them to screen. Try using different types to see how the arrays change. If you have doubts about how to use them, you can check the official documentation.
End of explanation
"""
from time import time
def clona_cols_rows(size=1000, clone=500, print_matrix=False,
create_random=True):
if create_random:
m = np.random.random((size,size))
else:
m = np.arange(size*size).reshape(size,size)
n = np.zeros((size+clone*2, size+clone*2))
antes = time()
    # first, copy m into the centre of n
for i in range(size):
for j in range(size):
n[i+clone, j+clone] = m[i,j]
    # then, copy the first row/column of m into the
    # first `clone` rows/columns of n
for i in range(clone):
n[i,clone:clone+size] = m[0]
n[clone:clone+size, i] = m[:,0]
    # once the first row/column has been copied, move on to
    # copying the last row/column
for i in range(clone+size, size+clone*2):
n[i, clone:clone+size] = m[-1]
n[clone:clone+size, i] = m[:,-1]
    # finally, copy the corner values of m into the corners of n
for i in range(clone):
n[i, :clone] = np.full(clone, m[0,0])
n[i, size+clone:] = np.full(clone, m[0,-1])
n[i+size+clone, :clone] = np.full(clone, m[-1,0])
n[i+size+clone, size+clone:] = np.full(clone, m[-1,-1])
despues = time()
if print_matrix:
print(m)
print(n)
return despues-antes
clona_cols_rows(size=3, clone=2, print_matrix=True, create_random=False)
print("Tiempo con bucle for: ", clona_cols_rows(), " s")
def clona_vec_cols_rows(size=1000, clone=500, print_matrix=False,
create_random=True):
if create_random:
m = np.random.random((size,size))
else:
m = np.arange(size*size).reshape(size,size)
n = np.zeros((size+clone*2, size+clone*2))
antes=time()
    # first, insert m into the centre of n
n[clone:clone+size, clone:clone+size] = m
    # Copy the first row of m into the first rows of n,
    # and the last row of m into the last rows of n
n[:clone, clone:clone+size] = m[0]
n[size+clone:, clone:size+clone] = m[-1]
    # Same thing for the columns
n[:, :clone] = np.repeat(n[:,clone],clone).reshape(2*clone+size, clone)
n[:, size+clone:] = np.repeat(n[:,-(clone+1)],clone).reshape(2*clone+size, clone)
despues=time()
if print_matrix:
print(m)
print(n)
return despues-antes
clona_vec_cols_rows(size=3, clone=2, print_matrix=True, create_random=False)
print("Tiempo vectorizando: ", clona_vec_cols_rows(), " s")
"""
Explanation: 2.
Thanks to the different ways of indexing an array that NumPy offers, we can perform operations in a vectorised way and avoid explicit loops. This makes the code more efficient as well as shorter and more readable. To practise this, we will do the following exercise.
Generate a random square matrix of size 1000. Once created, generate a new matrix in which rows and columns 0 and $n-1$ are repeated 500 times and the centre of the matrix stays exactly equal to the original. An example of this can be seen below:
$$ \left( \begin{matrix}
1 & 2 & 3 \
2 & 3 & 4 \
3 & 4 & 5
\end{matrix} \right) \Longrightarrow \left( \begin{matrix}
1 & 1 & 1 & 2 & 3 & 3 & 3 \
1 & 1 & 1 & 2 & 3 & 3 & 3 \
1 & 1 & 1 & 2 & 3 & 3 & 3 \
2 & 2 & 2 & 3 & 4 & 4 & 4 \
3 & 3 & 3 & 4 & 5 & 5 & 5 \
3 & 3 & 3 & 4 & 5 & 5 & 5 \
3 & 3 & 3 & 4 & 5 & 5 & 5 \end{matrix} \right) $$
Implement it using a for loop and also by vectorising the computation with what we have seen above, so you can compare the running times of both variants. To measure the time, you can use the time module.
End of explanation
"""
R = np.random.random((2,2))
if (R.T == np.linalg.inv(R)).all() and np.linalg.det(R) == 1:
print("Matriz de rotación!")
else:
print("No es matriz de rotación u_u")
"""
Explanation: 3.
A rotation matrix $R$ is a matrix that represents a rotation in Euclidean space. This matrix $R$ is written as
$$ R = \left( \begin{matrix} \cos\theta & -\sin\theta \
\sin\theta & \cos\theta
\end{matrix} \right) $$
where $\theta$ is the angle rotated counter-clockwise.
These matrices are widely used in geometry, computer science and physics. Examples of their use include computing the rotation of an object in a graphics system, the rotation of a camera around a point in space, and so on.
These matrices have the properties that they are orthogonal (their inverse equals their transpose) and their determinant is equal to 1. Therefore, generate an array and show whether that array is a rotation matrix.
End of explanation
"""
array1 = np.array([ -1., 4., -9.])
"""
Explanation: 4.
Given the array shown below, complete the following items:
End of explanation
"""
array2 = np.sin(array1 * np.pi/4)
array2
array3 = array2 * 2 + array1
array3
np.linalg.norm(array3)
"""
Explanation: Multiply array1 by $\frac{\pi}{4}$ and compute the sine of the resulting array.
Generate a new array whose value is twice the previous result plus the vector array1.
Compute the norm of the resulting vector. To do so, check the documentation to see which function performs this task, and pay attention to the parameters it takes.
End of explanation
"""
n_array1 = np.array([[ 1., 3., 5.], [7., -9., 2.], [4., 6., 8.]])
"""
Explanation: 5.
Given the following matrix, complete the following items:
End of explanation
"""
media = np.mean(n_array1)
desv_tipica = np.std(n_array1)
print("Media =", media, " y desv típica =", desv_tipica)
maximo = np.max(n_array1)
minimo = np.min(n_array1)
print("Máximo =", maximo, " y minimo =", minimo)
det = np.linalg.det(n_array1)
traza = np.trace(n_array1)
traspuesta = n_array1.T
U, S, V = np.linalg.svd(n_array1)
print(U)
print(S)
print(V)
result = np.diag(n_array1).sum()
print("Result: ", result)
"""
Explanation: Compute the mean and the standard deviation of the matrix.
Get the minimum and maximum elements of the matrix.
Compute the determinant, the trace and the transpose of the matrix.
Compute the singular value decomposition of the matrix.
Compute the sum of the elements of the main diagonal of the matrix.
End of explanation
"""
a = [1,1,1,2,5,3,4,8,5,8]
b = []
list(filter(lambda x: b.append(x) if not x in b else False, a))
print("Lista original:\t\t", a)
print("Lista sin repetidos:\t", b)
"""
Explanation: 6.
Sometimes our problem requires removing the repeated elements of a list, keeping a single occurrence of each. It is very common for users to call the set function for this task, turning the list into a set without repeated elements (which also sorts them) and then converting the result back into a list. This is not entirely wrong, but in the worst case we may be wasting memory, time and computation only to end up, when there are no repeated elements, with nothing more than a sorted copy of the list.
That is why there is another way to do it. Using what we have already seen, obtain a list without repeated elements that keeps the order of the original list. To make it even more fun, do not use more than 4 lines.
End of explanation
"""
|
wtgme/labeldoc2vec
|
docs/notebooks/doc2vec-wikipedia.ipynb
|
lgpl-2.1
|
from gensim.corpora.wikicorpus import WikiCorpus
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from pprint import pprint
import multiprocessing
"""
Explanation: Doc2Vec to wikipedia articles
We replicate the experiments of Document Embedding with Paragraph Vectors (http://arxiv.org/abs/1507.07998).
In that paper, only DBOW results on Wikipedia data were shown, so we replicate the experiments using not only DBOW but also DM.
Basic Setup
Let's import the Doc2Vec module.
End of explanation
"""
wiki = WikiCorpus("enwiki-latest-pages-articles.xml.bz2")
#wiki = WikiCorpus("enwiki-YYYYMMDD-pages-articles.xml.bz2")
"""
Explanation: Preparing the corpus
First, download the dump of all Wikipedia articles from here (you want the file enwiki-latest-pages-articles.xml.bz2, or enwiki-YYYYMMDD-pages-articles.xml.bz2 for date-specific dumps).
Second, convert the articles to a WikiCorpus. WikiCorpus constructs a corpus from a Wikipedia (or other MediaWiki-based) database dump.
For more details on WikiCorpus, you should access Corpus from a Wikipedia dump.
End of explanation
"""
class TaggedWikiDocument(object):
def __init__(self, wiki):
self.wiki = wiki
self.wiki.metadata = True
def __iter__(self):
for content, (page_id, title) in self.wiki.get_texts():
yield TaggedDocument([c.decode("utf-8") for c in content], [title])
documents = TaggedWikiDocument(wiki)
"""
Explanation: Define a TaggedWikiDocument class to convert the WikiCorpus into a form suitable for Doc2Vec.
End of explanation
"""
pre = Doc2Vec(min_count=0)
pre.scan_vocab(documents)
for num in range(0, 20):
print('min_count: {}, size of vocab: '.format(num), pre.scale_vocab(min_count=num, dry_run=True)['memory']['vocab']/700)
"""
Explanation: Preprocessing
To match the vocabulary size of the original paper, we first calculate the optimal min_count parameter.
End of explanation
"""
cores = multiprocessing.cpu_count()
models = [
# PV-DBOW
Doc2Vec(dm=0, dbow_words=1, size=200, window=8, min_count=19, iter=10, workers=cores),
# PV-DM w/average
Doc2Vec(dm=1, dm_mean=1, size=200, window=8, min_count=19, iter =10, workers=cores),
]
models[0].build_vocab(documents)
print(str(models[0]))
models[1].reset_from(models[0])
print(str(models[1]))
"""
Explanation: In the original paper, the vocabulary size was set to 915,715. We get a similar vocabulary size if we set min_count = 19 (size of vocab = 898,725).
Training the Doc2Vec Model
To train the Doc2Vec model with several methods, DBOW and DM, we define a list of models.
End of explanation
"""
for model in models:
    %time model.train(documents)
"""
Explanation: Now we’re ready to train Doc2Vec on the English Wikipedia.
End of explanation
"""
for model in models:
print(str(model))
pprint(model.docvecs.most_similar(positive=["Machine learning"], topn=20))
"""
Explanation: Similarity interface
After that, let's test both models! The DBOW model shows results similar to the original paper. First, we calculate the cosine similarity of "Machine learning" using the Paragraph Vector. Word Vectors and Document Vectors are stored separately. We have to add .docvecs after the model name to extract a Document Vector from the Doc2Vec model.
End of explanation
"""
for model in models:
print(str(model))
pprint(model.docvecs.most_similar(positive=["Lady Gaga"], topn=10))
"""
Explanation: The DBOW model interprets 'Machine learning' as part of the Computer Science field, and the DM model as a Data Science related field.
Second, we calculate the cosine similarity of "Lady Gaga" using the Paragraph Vector.
End of explanation
"""
for model in models:
print(str(model))
vec = [model.docvecs["Lady Gaga"] - model["american"] + model["japanese"]]
pprint([m for m in model.docvecs.most_similar(vec, topn=11) if m[0] != "Lady Gaga"])
"""
Explanation: The DBOW model reveals similar singers in the U.S., and the DM model understands that many of Lady Gaga's songs are similar to the word "Lady Gaga".
Third, we calculate the cosine similarity of "Lady Gaga" - "American" + "Japanese" using the Document Vector and Word Vectors. "American" and "Japanese" are Word Vectors, not Paragraph Vectors. Word Vectors are already converted to lowercase by WikiCorpus.
End of explanation
"""
|
SSDS-Croatia/SSDS-2017
|
Day-3/3_SSDS_2017_CharLSTMs.ipynb
|
mit
|
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
import random
tf.logging.set_verbosity(tf.logging.ERROR)
"""
Explanation: Data Science Summer School - Split '17
Prerequisites: Please download the following zip archive, which contains the checkpoint you will need in this exercise, and put it in the assets\checkpoints\ssds\ folder.
3. Character-wise language modeling with multi-layer LSTMs
This hands-on session is based on two tutorial notebooks Intro to Recurrent Networks (Character-wise RNN) and Tensorboard from Udacity's Deep Learning Nanodegree Foundation program.
This notebook implements a multi-layer LSTM network for training/sampling from character-level language models. The model takes a text file as input and trains a network that learns to predict the next character in a sequence. The network can then be used to generate text, character by character, that will look like the original training data. This network is based on Andrej Karpathy's post on RNNs, which became a standard example for explaining the peculiarities of RNN models.
A good description of the LSTM architecture can be found in the article Understanding LSTM Networks.
End of explanation
"""
with open('assets/data/trump_tweets_ascii.txt', 'r') as f:
text=f.read()
# get set of characters contained in the loaded text file
vocab = sorted(set(text))
# encoding characters as integers
vocab_to_int = {c: i for i, c in enumerate(vocab)}
encoded_chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
# make dict for decoding intergers to corresponding characters
int_to_vocab = dict(enumerate(vocab))
print('Text size: {}'.format(len(encoded_chars)))
print('Vocabulary size: {}'.format(len(vocab)))
print('*******************************')
print('Number of tweets: {}'.format(len(text.split('\n'))))
print('Median size of a tweet: {}'.format(np.percentile([len(t) for t in text.split('\n')], 50)))
"""
Explanation: 3.1 Data preparation
Loading and encoding text
We will train our language model on a complete collection of Donald Trump's tweets obtained from Trump Twitter Archive, which we already downloaded and made available in PATH-TO-REPOSITORY/Day-3/assets/data/trump_tweets_ascii.txt. First, we will load the text file and encode its characters as integers.
End of explanation
"""
# first 300 characters
"""
Explanation: In the above output, we can see that trump_tweets_ascii.txt contains in total 2 167 951 characters. Tweets contain 92 unique characters which will form a vocabulary for a language model.
Lets see first 300 characters of the provided text:
End of explanation
"""
# first 300 encoded characters
"""
Explanation: And see how they are encoded as integers:
End of explanation
"""
def split_data(arr, batch_size, num_steps, split_frac=0.9):
"""
Split data into batches and training and validation sets.
Arguments
---------
arr: Array of encoded characters as integers
batch_size: Number of sequences per batch
num_steps: Length of the sequence in a batch
split_frac: Fraction of batches to keep in the training set
Returns train_x, train_y, val_x, val_y
"""
slice_size = batch_size * num_steps
n_batches = int(len(arr) / slice_size)
# Drop the last few characters to make only full batches
x = arr[: n_batches*slice_size]
# The targets are the same as the inputs, except shifted one character over.
# number of batches covers full size of arr (no characters dropped)
if(len(arr) == n_batches*slice_size):
# for the last target character use first input character
y = np.roll(x, -1)
else:
        # for the last target character use the first dropped character
y = arr[1: n_batches*slice_size + 1]
# Split the data into batch_size slices and then stack slices
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x (n_batches x num_steps)
# Split into training and validation sets, keep the first split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
"""
Explanation: Making training and validation mini-batches
Neural networks are trained by approximating the gradient of the loss function with respect to the network weights, looking at only a small subset of the data at a time, also known as a mini-batch. Here we will make our mini-batches for training and validation, splitting the data into batches as well as into training and validation sets.
For testing we will observe how the network generates new text, so we will not be using a test set. We will feed a character into the network and sample the next one from the distribution over characters likely to come next. We then feed the sampled character right back in to get the next character. Repeating this process character by character will generate new text, hopefully indistinguishable from Donald Trump's tweets.
End of explanation
"""
# example array generation
batch_size = 5
num_steps = 3
split_frac = 0.9
# split example array into train and validation sets
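# A possible solution sketch for the exercise in the comments above (not the
# official one): build a small integer array and split it with split_data
# using the toy batch_size / num_steps / split_frac defined above.
example_arr = np.arange(batch_size * num_steps * 10)
ex_train_x, ex_train_y, ex_val_x, ex_val_y = split_data(
    example_arr, batch_size, num_steps, split_frac)
print(ex_train_x.shape, ex_train_y.shape, ex_val_x.shape, ex_val_y.shape)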
"""
Explanation: Exercise: Generate an example integer array. Use the function split_data to split example_arr into train and validation sets.
End of explanation
"""
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
"""
Explanation: Next, we will create a generator function to get batches from the arrays made by split_data. This will provide us with the functionality to iterate over batches, which we can feed to our network model. The arrays are of dimension (batch_size, n_batches*num_steps). Each batch is a sliding window on these arrays with size batch_size X num_steps.
End of explanation
"""
# iterate through all train and validation batches
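# A possible solution sketch for this exercise: iterate over the batches that
# get_batch yields for the toy split created above (the ex_train_* / ex_val_*
# names come from the previous sketch and are not part of the original code).
for x_batch, y_batch in get_batch([ex_train_x, ex_train_y], num_steps):
    print('train batch:', x_batch.shape, y_batch.shape)
for x_batch, y_batch in get_batch([ex_val_x, ex_val_y], num_steps):
    print('validation batch:', x_batch.shape, y_batch.shape)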
"""
Explanation: Exercise: Use the for loop to iterate through all train and validation batches.
End of explanation
"""
def build_inputs(batch_size, num_steps, num_classes):
''' Define placeholders for inputs, targets, and dropout.
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
num_classes: Number of classes (target values)
'''
with tf.name_scope('inputs'):
# EXERCISE: Declare placeholder for inputs and one-hot encode inputs
with tf.name_scope('targets'):
# EXERCISE: Declare placeholder for targets and one-hot encode targets
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, x_one_hot, targets, y_one_hot, keep_prob
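# One possible way to fill in the build_inputs exercise above (a sketch, not
# the official solution): integer placeholders plus one-hot encodings of depth
# num_classes, and a scalar keep_prob placeholder for dropout.
def build_inputs_sketch(batch_size, num_steps, num_classes):
    with tf.name_scope('inputs'):
        inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
        x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')
    with tf.name_scope('targets'):
        targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
        y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    return inputs, x_one_hot, targets, y_one_hot, keep_prob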
"""
Explanation: 3.2 Building the model
After having our data prepared and convenience functions split_data and get_batch for handling the data during the training of our model, we can finally start building the model using the TensorFlow library. We will break the model building into five parts:
* building input placeholders for x, y and dropout
* building multi-layer RNN with stacked LSTM cells
* building softmax output layer
* computation for training loss
* building the optimizer for the model parameters
Inputs
First, we will create our input placeholders for the TensorFlow computational graph of the model. As we are building a supervised learning model, we need to declare placeholders for inputs (x) and targets (y). We also need to one-hot encode the input and target tokens; remember, we are getting them as encoded characters. Here, we will also declare the scalar placeholder keep_prob for the output keep probability used by dropout.
New functions used here:
- tf.name_scope
- tf.one_hot
Exercise: Define placeholders for inputs and targets.
End of explanation
"""
def build_cell(lstm_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
lstm_size: Size of the hidden layers in the LSTM cells
keep_prob: Dropout keep probability
'''
# EXERCISE: Use a basic LSTM cell
# EXERCISE: Add dropout to the cell
return drop
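# A possible completion of the build_cell exercise above (a sketch): a basic
# LSTM cell wrapped with dropout on its outputs.
def build_cell_sketch(lstm_size, keep_prob):
    lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
    return drop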
"""
Explanation: Multi-layer LSTM Cell
We first implement build_cell function where we create the LSTM cell we will use in the hidden layer. We will use this cell as a building block for the multi-layer RNN. Afterwards, we implement the build_lstm function to create multiple LSTM cells stacked on each other using build_cell function. We can stack up the LSTM cells into layers with tf.contrib.rnn.MultiRNNCell.
Exercise: Fill in build_cell function for building LSTM cell using:
tf.contrib.rnn.BasicLSTMCell
tf.contrib.rnn.DropoutWrapper.
End of explanation
"""
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build Multi-RNN cell.
Arguments
---------
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
keep_prob: Dropout keep probability
'''
# EXERCISE: Stack up multiple LSTM layers
with tf.name_scope("RNN_init_state"):
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
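# A possible completion of the build_lstm exercise above (a sketch): stack
# num_layers dropout-wrapped cells (reusing build_cell_sketch from the earlier
# sketch) into a MultiRNNCell and expose its zero state.
def build_lstm_sketch(lstm_size, num_layers, batch_size, keep_prob):
    cell = tf.contrib.rnn.MultiRNNCell(
        [build_cell_sketch(lstm_size, keep_prob) for _ in range(num_layers)])
    with tf.name_scope("RNN_init_state"):
        initial_state = cell.zero_state(batch_size, tf.float32)
    return cell, initial_state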
"""
Explanation: Exercise: Fill in build_lstm function by stacking layers using tf.contrib.rnn.MultiRNNCell.
End of explanation
"""
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
lstm_output: Output tensor of previous layer
in_size: Size of the input tensor
out_size: Size of the softmax layer
'''
# Reshape output so it is a bunch of rows, one row for each step for each sequence.
# That is, the shape should be batch_size*num_steps rows by lstm_size columns.
with tf.name_scope('sequence_reshape'):
seq_output = tf.concat(lstm_output, axis=1, name='seq_output')
x = tf.reshape(seq_output, [-1, in_size], name='graph_output')
# Connect the RNN outputs to a softmax layer
with tf.name_scope('logits'):
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
# EXERCISE: Define W and b and multiply inputs with weights and add bias
# Tensorboard
tf.summary.histogram('h_softmax_w', softmax_w)
tf.summary.histogram('h_softmax_b', softmax_b)
with tf.name_scope('predictions'):
# EXERCISE: Use softmax to get the probabilities for predicted characters
# Tensorboard
tf.summary.histogram('h_predictions', predictions)
return predictions, logits
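# A possible way to fill in the logits/softmax part of build_output above (a
# sketch): a fully connected layer from in_size to out_size followed by a
# softmax over the logits.
def output_layer_sketch(x, in_size, out_size):
    softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
    softmax_b = tf.Variable(tf.zeros(out_size))
    logits = tf.matmul(x, softmax_w) + softmax_b
    predictions = tf.nn.softmax(logits, name='predictions')
    return predictions, logits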
"""
Explanation: Building RNN Output Layer
Here we will create the output layer. We need to connect the output of the RNN cells to a fully connected layer with a softmax output. The softmax output gives us a probability distribution we can use to predict the next character. The output 3D tensor with size $(batch_size \times num_steps \times lstm_size)$ has to be reshaped to $((batch_size \times num_steps) \times lstm_size)$, so we can do the matrix multiplication with the softmax weights.
The output is calculated using softmax function
$$
P(y=c\text{ } | \text{ }\mathbf{x}) = \frac{e^{\mathbf{x}^T\mathbf{w}_c+b_c}}{\sum_{k=1}^{|C|}e^{\mathbf{x}^T\mathbf{w}_k+b_k}}
,\
$$
where $\mathbf{x}\in\mathbb{R}^{512}$ is output of the last hidden layer, and $\mathbf{W}\in\mathbb{R}^{512\times 92}$ and $\mathbf{b}\in\mathbb{R}^{92}$ are the model parameters.
Exercise: Fill in build_output function by defining logits and softmax function.
End of explanation
"""
def build_loss(logits, y_one_hot, lstm_size):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
y_one_hot: one hot encoding of target
lstm_size: Number of LSTM hidden units
'''
# Softmax cross entropy loss
with tf.name_scope('loss'):
# EXERCISE: Reshape one-hot encoded targets to match logits (one row per batch_size per step)
# then define loss and cost function
# Tensorboard
tf.summary.scalar('s_cost', cost)
return cost
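# A possible completion of the build_loss exercise above (a sketch): reshape
# the one-hot targets to match the logits, then average the cross-entropy.
def build_loss_sketch(logits, y_one_hot):
    y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
    return tf.reduce_mean(loss)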
"""
Explanation: Training loss
Next we need to calculate the training loss. We get the logits and targets and calculate the softmax cross-entropy loss. First, we need to reshape the one-hot targets so it is a 2D tensor with size $((batch_size \times num_steps) \times num_classes)$, which match logits. Remember that we reshaped the LSTM outputs and ran them through a fully connected layer with $num_classes$ units. Then we run the logits and targets through tf.nn.softmax_cross_entropy_with_logits and find the mean to get the loss.
Exercise: Fill in the build_loss function:
Reshape one-hot encoded targets to match logits
Define loss and cost function using tf.nn.softmax_cross_entropy_with_logits and tf.reduce_mean.
End of explanation
"""
def build_optimizer(loss, learning_rate, grad_clip):
''' Build optmizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
grad_clip: Clipping ratio
'''
# Optimizer for training, using gradient clipping to control exploding gradients
with tf.name_scope('optimizer'):
tvars = tf.trainable_variables()
# EXERCISE: Calculate and clip gradients
# EXERCISE: Use Adam optimizer
# EXERCISE: Apply gradients to trainable variables
return optimizer
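# A possible completion of the build_optimizer exercise above (a sketch): clip
# the gradients by their global norm, then apply them with Adam.
def build_optimizer_sketch(loss, learning_rate, grad_clip):
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
    adam = tf.train.AdamOptimizer(learning_rate)
    return adam.apply_gradients(zip(grads, tvars))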
"""
Explanation: Optimizer
Here we build the optimizer. Traditional RNNs face the vanishing gradient problem. LSTMs fix the vanishing gradients, but the gradients can still grow without bound. To fix this we can clip gradients larger than some threshold. That is, if a gradient is larger than the prespecified threshold, we set it to the threshold value. This ensures the gradients never grow too large. Then we use an AdamOptimizer for the learning step.
Exercise: Fill in the function build_optimizer:
Calculate and clip gradients using functions tf.gradients and tf.clip_by_global_norm
Define Adam optimizer using tf.train.AdamOptimizer
Apply gradients to trainable variables using function tf.train.Optimizer.apply_gradients
End of explanation
"""
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
if sampling == True:
# When we will use the network for sampling later, we will pass in one character at a time
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors, and one-hot encode the input and target tokens
self.inputs, x_one_hot, self.targets, y_one_hot, self.keep_prob = \
build_inputs(batch_size, num_steps, num_classes)
# Build the LSTM cell
cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
with tf.name_scope("RNN_forward"):
# EXERCISE: Run each sequence step through the RNN and collect the outputs
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
self.loss = build_loss(self.logits, y_one_hot, lstm_size)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
self.summary_merged = tf.summary.merge_all()
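# A possible way to fill in the RNN_forward block of CharRNN above (a sketch,
# not the official solution): run the one-hot inputs through the stacked cell
# with tf.nn.dynamic_rnn, e.g.
#
#     outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot,
#                                        initial_state=self.initial_state)
#
# so that `outputs` feeds build_output and `state` becomes self.final_state.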
"""
Explanation: Build the network
Now we can put all the pieces together and build a class for the network. To actually run data through the LSTM cells, we will use tf.nn.dynamic_rnn. This function will pass the hidden and cell states across LSTM cells appropriately for us. It returns the outputs for each LSTM cell at each step for each sequence in the mini-batch. It also gives us the final LSTM state. We want to save this state as final_state so we can pass it to the first LSTM cell in the next mini-batch run. For tf.nn.dynamic_rnn, we pass in the cell and initial state we get from build_lstm, as well as our input sequences.
Exercise: Fill in CharRNN class to run each sequence step through the RNN and collect the outputs using tf.nn.dynamic_rnn.
End of explanation
"""
batch_size = 100 # Sequences per batch
num_steps = 100 # Number of sequence steps per batch
lstm_size = 512 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.001 # Learning rate
keep_prob = 0.5 # Dropout keep probability
"""
Explanation: Hyperparameters
Here we declare the hyperparameters for the network.
batch_size - Number of sequences running through the network in one pass.
num_steps - Number of characters in the sequence the network is trained on. Larger is typically better; the network will learn more long-range dependencies, but it takes longer to train. 100 is usually a good number here.
lstm_size - The number of units in the hidden layers.
num_layers - Number of hidden LSTM layers to use
learning_rate - Learning rate for training
keep_prob - The dropout keep probability when training. If your network is overfitting, try decreasing this.
Here's some good advice from Andrej Karpathy on training the network https://github.com/karpathy/char-rnn#tips-and-tricks.
End of explanation
"""
# create instance of CharRNN class
# print trainable variables
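# A possible completion of the exercise in the comments above (a sketch):
# build the model with the hyperparameters defined earlier and list its
# trainable variables.
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
                lstm_size=lstm_size, num_layers=num_layers,
                learning_rate=learning_rate)
for var in tf.trainable_variables():
    print(var.name, var.get_shape())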
"""
Explanation: Exercise: Create a new instance of the CharRNN class using the parameters defined above. Print the trainable variables in the default graph using the TensorFlow function trainable_variables. Does the number of parameters correspond to what we expect? Hint: The number of parameters in the first hidden layer of the LSTM is equal to:
$4 \times \big[N_{units} \times (N_{inputs}+1) + N_{units}^{2}\big]$,
where $N_{units}$ is the number of units in hidden layer (lstm_size) and $N_{inputs}$ is the length of the vocabulary.
End of explanation
"""
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
file_writer = tf.summary.FileWriter('assets/logs/1', sess.graph)
file_writer.close()
"""
Explanation: Write out the graph for TensorBoard
End of explanation
"""
epochs = 1 #20
save_every_n = 10 #200
train_x, train_y, val_x, val_y = split_data(encoded_chars, batch_size, num_steps)
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Tensorboard
train_writer = tf.summary.FileWriter('assets/logs/2/train', sess.graph)
test_writer = tf.summary.FileWriter('assets/logs/2/test')
#############################################################
# Use the line below to load a checkpoint and resume training
saver.restore(sess, 'assets/checkpoints/ssds/trump_tb_20_i3880_l512_1.327.ckpt')
#############################################################
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
# Train network
for e in range(epochs):
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
start = time.time()
# EXERCISE: Run session and save loss for train batches
end = time.time()
iteration = e*n_batches + b
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
# Tensorboard
train_writer.add_summary(summary, iteration)
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
# EXERCISE: Same as above, run session and append validation loss
# Tensorboard
test_writer.add_summary(summary, iteration)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "assets/checkpoints/trump/trump_new_i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
"""
Explanation: Run tensorboard from command line by issuing command (e.g. from root repository directory):
tensorboard --logdir=Day-3/assets/logs/
3.3 Training model
This is typical training code, passing inputs and targets into the network, then running the optimizer. Here we also get back the final LSTM state for the mini-batch. Then, we pass that state back into the network so the next batch can continue the state from the previous batch. And every so often (set by save_every_n) we calculate the validation loss and save a checkpoint.
Please download the provided trump_tb_20_i3880_l512_1.327.ckpt
checkpoint and place it in the assets/checkpoints/ssds directory in the repository.
Exercise: Fill in the code below:
For all training batches, run the session and accumulate the training loss.
For all validation batches, run the session and append the validation loss.
End of explanation
"""
tf.train.get_checkpoint_state('assets/checkpoints/trump')
"""
Explanation: Saved checkpoints
Read up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables
End of explanation
"""
from IPython.core.display import display, HTML
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample_model(checkpoint, n_samples, lstm_size, vocab_size, num_layers=2, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, num_layers=num_layers, sampling=True)
saver = tf.train.Saver()
states = []
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
states.append(new_state)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
states.append(new_state)
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
states.append(new_state)
return (''.join(samples), states)
"""
Explanation: 3.4 Testing model - sampling from the model
End of explanation
"""
# load the latest checkpoint from assets/checkpoints/trump
# generate text using sample_model function
"""
Explanation: Exercise: Load the latest checkpoint from assets/checkpoints/trump folder and generate text using sample_model function.
End of explanation
"""
# load generated checkpoint
# generate text using sample_model function
"""
Explanation: Exercise: Train the model again starting from the initial state for a few iterations and save a checkpoint, then load it and generate text using sample_model function.
End of explanation
"""
from IPython.core.display import display, HTML
from utils import save_lstm_vis, make_colored_text
"""
Explanation: 3.5 Visualization of memory cell activations
End of explanation
"""
checkpoint = 'assets/checkpoints/ssds/trump_tb_20_i3880_l512_1.327.ckpt'
# sample model from the loaded checkpoint
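# A possible completion of the exercise (a sketch): sample some text from the
# provided checkpoint and keep the per-character LSTM states so they can be
# visualized below; 500 characters is an arbitrary choice.
samp, states = sample_model(checkpoint, n_samples=500, lstm_size=lstm_size,
                            vocab_size=len(vocab), prime="The ")
print(samp[:280])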
"""
Explanation: Exercise: Load the checkpoint trump_tb_20_i3880_l512_1.327.ckpt from assets/checkpoints/ssds/ and generate some sample text using the function sample_model. Then use the utility function make_colored_text to color each character by the cell activations in a certain layer.
End of explanation
"""
# Position in a tweet
HTML(make_colored_text(samp, states, cell_id=4, layer_id=0))
# Beginning of a word
HTML(make_colored_text(samp, states, cell_id=22, layer_id=1))
"""
Explanation: Exercise: Use the utility function make_colored_text and the Jupyter HTML widget to visualize cell activations for the text above. Here are some examples of interesting visualizations.
layer_id = 0
cell_id:
- position in tweet - 4
- short urls - 10, 50, 130, 160, 163, 164, 183, 218, 230
- separate fixed and variable part of short url - 80, 152
- just variable part of short url - 75, 84, 118, 273, 380
- position in short url - 22*, 112, 206, 386
- urls and references - 115, 403, 483
layer_id = 1
cell_id:
- just variable part of short url - 21, 107, 250, 300, 420
- beginning of a word - 22, 112
- urls and references - 51, 273, 438
- position in short url - 202, 326
- quotation marks - 252
- position in a sentence - 413
End of explanation
"""
save_lstm_vis("assets/html/CA_trump_tb_20_i3880_l512_1.327", samp, states)
"""
Explanation: Use the code below to generate an html file that contains the colorings of the text above from all 512 cells.
End of explanation
"""
with open('assets/data/trump_tweets_ascii.txt') as f:
tweets_real = f.readlines()
with open('assets/data/trump_tweets_fake.txt') as f:
tweets_fake = f.readlines()
score = 0
N = 10
for i in range(N):
tweet_label = True
if random.random() <= 0.5:
tweet_text = random.choice(tweets_real)
else:
tweet_text = random.choice(tweets_fake)
tweet_label = False
print("\nTweet " + str(i+1) + "/" + str(N) + ": " + tweet_text)
answer = bool(int(input("true (1) or fake (0): ")))
if answer^tweet_label:
print("WRONG!")
else:
print("RIGHT!")
score = score + 1
print("\nYour score: " + str(score) + "/" + str(N))
"""
Explanation: Guessing game
In this section you will play a short game of guessing whether the tweet you are shown is real or generated.
End of explanation
"""
|
takanory/python-machine-learning
|
Chapter05.ipynb
|
mit
|
from IPython.core.display import display
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
import pandas as pd
# http://archive.ics.uci.edu/ml/datasets/Wine
df_wine = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
# 1. Standardize the d-dimensional dataset
if Version(sklearn_version) < '0.18':
from sklearn.cross_validation import train_test_split
else:
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Extract the features and the class labels separately
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
# Use 30% of the whole dataset as test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
import numpy as np
# 2. Construct the covariance matrix
cov_mat = np.cov(X_train_std.T)
# 3. Compute the eigenvalues and eigenvectors
# linalg.eig performs the eigendecomposition
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
eigen_vals
# Sum of the eigenvalues
tot = sum(eigen_vals)
# Compute the explained variance ratios
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]
display("var_exp:", var_exp)
# Cumulative sum of the explained variance ratios
cum_var_exp = np.cumsum(var_exp)
display("cum_var_exp:", cum_var_exp)
import matplotlib.pyplot as plt
# Bar chart of the individual explained variance ratios
plt.bar(range(1, 14), var_exp, alpha=0.5, align='center', label='individual explained variance')
# Step plot of the cumulative explained variance
plt.step(range(1, 14), cum_var_exp, where='mid', label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.show()
"""
Explanation: 5 Compressing Data via Dimensionality Reduction
Creating a feature subspace (feature extraction)
Principal component analysis (PCA)
Linear discriminant analysis (LDA)
Kernel principal component analysis
5.1 Unsupervised dimensionality reduction via principal component analysis
Standardize the d-dimensional dataset
Construct the covariance matrix of the standardized dataset
Decompose the covariance matrix into its eigenvectors and eigenvalues
Select the k eigenvectors that correspond to the k largest eigenvalues
Construct the projection matrix W from the top k eigenvectors
Transform the d-dimensional input dataset X using the projection matrix W to obtain the new k-dimensional feature subspace
5.1.1 Obtaining the eigenpairs of the covariance matrix
End of explanation
"""
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:,i]) for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from largest to smallest
eigen_pairs.sort(reverse=True)
# 4. Select the k eigenvectors corresponding to the k largest eigenvalues (here k = 2)
# 5. Construct the projection matrix W from the top k eigenvectors
w = np.hstack((eigen_pairs[0][1][:, np.newaxis], eigen_pairs[1][1][:, np.newaxis]))
display("Matrix W:", w)
# x' = xW
display(X_train_std[0].dot(w))
# 6. Transform the d-dimensional input dataset X using the projection matrix W to obtain the new k-dimensional feature subspace
# X' = XW
X_train_pca = X_train_std.dot(w)
# Plot as a 2-dimensional scatter plot
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
# Build combinations of class label, point colour and marker type, then plot
for label, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_pca[y_train==label, 0], X_train_pca[y_train==label, 1], c=c, label=label, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
"""
Explanation: 5.1.2 Feature transformation
End of explanation
"""
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
    # Prepare the markers and the colour map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
    # Plot the decision regions
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    # Generate the grid points
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
    # Flatten each feature into a 1-D array and run the prediction
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    # Reshape the predictions back to the grid shape
Z = Z.reshape(xx1.shape)
    # Draw the filled contour plot over the grid points
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    # Set the axis limits
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
    # Plot the samples for each class
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y==cl, 0], y=X[y==cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
# Create a PCA instance with the number of principal components
pca = PCA(n_components=2)
# Create a logistic regression instance
lr = LogisticRegression()
# Fit PCA on the training data and transform the training and test data
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
# Fit the logistic regression to the transformed training data
lr.fit(X_train_pca, y_train)
# Plot the decision regions
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
# Plot the decision regions
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
pca = PCA(n_components=None)
X_train_pca = pca.fit_transform(X_train_std)
# Compute the explained variance ratios
pca.explained_variance_ratio_
"""
Explanation: 5.1.3 Principal component analysis in scikit-learn
End of explanation
"""
# 1. Standardize the d-dimensional dataset (d is the number of features)
# X_train_std and X_test_std have already been created above
# 2. Compute the d-dimensional mean vectors for each class
np.set_printoptions(precision=4)
mean_vecs = []
for label in range(1, 4):
mean_vecs.append(np.mean(X_train_std[y_train==label], axis=0))
print('MV {}:, {}\n'.format(label, mean_vecs[label - 1]))
# 3. Construct the between-class scatter matrix SB and the within-class scatter matrix SW
d = 13 # number of features
# Within-class scatter matrix SW
S_W = np.zeros((d, d)) # create a 13 x 13 matrix of all zeros
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.zeros((d, d))
for row in X_train_std[y_train == label]:
row, mv = row.reshape(d, 1), mv.reshape(d, 1)
class_scatter += (row - mv).dot((row - mv).T)
S_W += class_scatter
print('Within-class scatter matrix: {}x{}'.format(S_W.shape[0], S_W.shape[1]))
# The class labels are not uniformly distributed
print('Class label distribution: {}'.format(np.bincount(y_train)[1:]))
d = 13
# Scaled within-class scatter matrix SW
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.cov(X_train_std[y_train == label].T)
S_W += class_scatter
print('Scaled within-class scatter matrix: {}x{}'.format(S_W.shape[0], S_W.shape[1]))
# Between-class scatter matrix SB
mean_overall = np.mean(X_train_std, axis=0)
d = 13
S_B = np.zeros((d, d))
for i, mean_vec in enumerate(mean_vecs):
n = X_train[y_train==i + 1, :].shape[0]
mean_vec = mean_vec.reshape(d, 1)
mean_overall = mean_overall.reshape(d, 1)
S_B += n * (mean_vec - mean_overall).dot((mean_vec - mean_overall).T)
print('Between-class scatter matrix: {}x{}'.format(S_B.shape[0], S_B.shape[1]))
"""
Explanation: 5.2 Supervised data compression via linear discriminant analysis
Standardize the d-dimensional dataset (d is the number of features)
Compute the d-dimensional mean vectors for each class
Construct the between-class scatter matrix SB and the within-class scatter matrix SW
Compute the eigenvectors and corresponding eigenvalues of the matrix SW^-1 SB
Select the k eigenvectors that correspond to the k largest eigenvalues to construct a d x k-dimensional transformation matrix W
Project the samples onto the new feature subspace using the transformation matrix W
5.2.1 Computing the scatter matrices
Within-class scatter matrix
Between-class scatter matrix
End of explanation
"""
X_train[y_train==2, :].shape[0]
# 4. Compute the eigenvectors and corresponding eigenvalues of the matrix SW^-1 SB
# inv computes the inverse matrix, dot the matrix product, eig the eigenvalues
eigen_vals, eigen_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i]) for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from largest to smallest
eigen_pairs = sorted(eigen_pairs, key=lambda k: k[0], reverse=True)
for eigen_val in eigen_pairs:
print(eigen_val[0])
# Sum of the real parts of the eigenvalues
tot = sum(eigen_vals.real)
# Compute the "discriminability" ratios and their cumulative sum
discr = [(i / tot) for i in sorted(eigen_vals.real, reverse=True)]
display("discr:", discr)
cum_discr = np.cumsum(discr)
display("cum_discr:", cum_discr)
# Bar chart of the individual "discriminability" ratios
plt.bar(range(1, 14), discr, alpha=0.5, align='center', label='individual "discriminability"')
# Step plot of the cumulative "discriminability"
plt.step(range(1, 14), cum_discr, where='mid', label='cumulative "discriminability"')
plt.ylabel('"discriminability" ratio')
plt.xlabel('Linear Discriminants')
plt.ylim([-0.1, 1.1])
plt.legend(loc='best')
plt.show()
# 6. Project the samples onto the new feature subspace using the transformation matrix W
# Build the transformation matrix from the two leading eigenvectors
w = np.hstack((eigen_pairs[0][1][:, np.newaxis].real, eigen_pairs[1][1][:, np.newaxis].real))
display("Matrix W:", w)
"""
Explanation: 5.2.2 Selecting linear discriminants for the new feature subspace
End of explanation
"""
# Multiply the standardized training data by the transformation matrix
X_train_lda = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
# Build combinations of class label, point colour and marker type, then plot
for label, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_lda[y_train==label, 0] * -1, X_train_lda[y_train==label, 1] * -1, c=c, label=label, marker=m)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
"""
Explanation: 5.2.3 Projecting samples onto the new feature space
End of explanation
"""
if Version(sklearn_version) < '0.18':
from sklearn.lda import LDA
else:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# Create an LDA instance with the number of components
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train_std, y_train)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train_lda, y_train)
# Plot the decision regions
plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
X_test_lda = lda.transform(X_test_std)
plot_decision_regions(X_test_lda, y_test, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
"""
Explanation: 5.2.4 LDA via scikit-learn
End of explanation
"""
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
import numpy as np
def rbf_kernel_pca(X, gamma, n_components):
"""
    Implementation of RBF kernel PCA
    Parameters
    ----------
    X: [NumPy ndarray], shape = [n_samples, n_features]
    gamma: float
        Tuning parameter of the RBF kernel
    n_components: int
        Number of principal components to return
    Returns
    ------
    X_pc: [NumPy ndarray], shape = [n_samples, n_components]
        Projected dataset
"""
    # Compute the pairwise squared Euclidean distances in the M x N dimensional dataset
    sq_dists = pdist(X, 'sqeuclidean')
    # Convert the pairwise distances into a square matrix
    mat_sq_dists = squareform(sq_dists)
    # Compute the symmetric kernel matrix
    K = exp(-gamma * mat_sq_dists)
    # Center the kernel matrix
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # Obtain the eigenpairs from the centered kernel matrix
    # eigh returns them sorted in ascending order
    eigvals, eigvecs = eigh(K)
    # Collect the top k eigenvectors (projected samples)
X_pc = np.column_stack((eigvecs[:, -i] for i in range(1, n_components + 1)))
return X_pc
"""
Explanation: 5.3 Using kernel principal component analysis for nonlinear mappings
Kernelized PCA (kernel PCA)
5.3.1 Kernel functions and the kernel trick
5.3.2 Implementing a kernel principal component analysis in Python
End of explanation
"""
from sklearn.datasets import make_moons
import matplotlib.pyplot as plt
# Create the two half-moon shaped datasets
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='o', alpha=0.5)
plt.show()
# Try standard PCA first
from sklearn.decomposition import PCA
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y==0, 0], X_spca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y==1, 0], X_spca[y==1, 1], color='blue', marker='o', alpha=0.5)
# Plot the scatter plot in the second subplot
ax[1].scatter(X_spca[y==0, 0], np.zeros((50, 1)) + 0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y==1, 0], np.zeros((50, 1)) - 0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
from matplotlib.ticker import FormatStrFormatter
# Use the kernel PCA function
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='o', alpha=0.5)
# Plot the scatter plot in the second subplot
ax[1].scatter(X_kpca[y==0, 0], np.zeros((50, 1)) + 0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y==1, 0], np.zeros((50, 1)) - 0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.show()
"""
Explanation: Example 1: Separating half-moon shapes
End of explanation
"""
from sklearn.datasets import make_circles
import matplotlib.pyplot as plt
# Create and plot the concentric circles data
X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='o', alpha=0.5)
plt.show()
# Try standard PCA first
from sklearn.decomposition import PCA
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y==0, 0], X_spca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y==1, 0], X_spca[y==1, 1], color='blue', marker='o', alpha=0.5)
# Plot the scatter plot in the second subplot
ax[1].scatter(X_spca[y==0, 0], np.zeros((500, 1)) + 0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y==1, 0], np.zeros((500, 1)) - 0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
# Use the kernel PCA function
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='o', alpha=0.5)
# Plot the scatter plot in the second subplot
ax[1].scatter(X_kpca[y==0, 0], np.zeros((500, 1)) + 0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y==1, 0], np.zeros((500, 1)) - 0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
"""
Explanation: Example 2: Separating concentric circles
End of explanation
"""
from sklearn.decomposition import KernelPCA
X, y = make_moons(n_samples=100, random_state=123)
scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
X_skernpca = scikit_kpca.fit_transform(X)
plt.scatter(X_skernpca[y==0, 0], X_skernpca[y==0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X_skernpca[y==1, 0], X_skernpca[y==1, 1], color='blue', marker='o', alpha=0.5)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.show()
"""
Explanation: 5.3.3 Projecting new data points
5.3.4 Kernel principal component analysis in scikit-learn
End of explanation
"""
|
jiaphuan/models
|
research/deeplab/deeplab_demo.ipynb
|
apache-2.0
|
import collections
import os
import StringIO
import sys
import tarfile
import tempfile
import urllib
from IPython import display
from ipywidgets import interact
from ipywidgets import interactive
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
if tf.__version__ < '1.5.0':
raise ImportError('Please upgrade your tensorflow installation to v1.5.0 or newer!')
# Needed to show segmentation colormap labels
sys.path.append('utils')
import get_dataset_colormap
"""
Explanation: DeepLab Demo
This demo will demonstrate the steps to run the DeepLab semantic segmentation model on sample input images.
Prerequisites
Running this demo requires the following libraries:
Jupyter notebook (Python 2)
Tensorflow (>= v1.5.0)
Matplotlib
Pillow
numpy
ipywidgets (follow the setup here)
Imports
End of explanation
"""
_MODEL_URLS = {
'xception_coco_voctrainaug': 'http://download.tensorflow.org/models/deeplabv3_pascal_train_aug_2018_01_04.tar.gz',
'xception_coco_voctrainval': 'http://download.tensorflow.org/models/deeplabv3_pascal_trainval_2018_01_04.tar.gz',
}
Config = collections.namedtuple('Config', 'model_url, model_dir')
def get_config(model_name, model_dir):
return Config(_MODEL_URLS[model_name], model_dir)
config_widget = interactive(get_config, model_name=_MODEL_URLS.keys(), model_dir='')
display.display(config_widget)
# Check configuration and download the model
_TARBALL_NAME = 'deeplab_model.tar.gz'
config = config_widget.result
model_dir = config.model_dir or tempfile.mkdtemp()
tf.gfile.MakeDirs(model_dir)
download_path = os.path.join(model_dir, _TARBALL_NAME)
print 'downloading model to %s, this might take a while...' % download_path
urllib.urlretrieve(config.model_url, download_path)
print 'download completed!'
"""
Explanation: Select and download models
End of explanation
"""
_FROZEN_GRAPH_NAME = 'frozen_inference_graph'
class DeepLabModel(object):
"""Class to load deeplab model and run inference."""
INPUT_TENSOR_NAME = 'ImageTensor:0'
OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
INPUT_SIZE = 513
def __init__(self, tarball_path):
"""Creates and loads pretrained deeplab model."""
self.graph = tf.Graph()
graph_def = None
# Extract frozen graph from tar archive.
tar_file = tarfile.open(tarball_path)
for tar_info in tar_file.getmembers():
if _FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
file_handle = tar_file.extractfile(tar_info)
graph_def = tf.GraphDef.FromString(file_handle.read())
break
tar_file.close()
if graph_def is None:
raise RuntimeError('Cannot find inference graph in tar archive.')
with self.graph.as_default():
tf.import_graph_def(graph_def, name='')
self.sess = tf.Session(graph=self.graph)
def run(self, image):
"""Runs inference on a single image.
Args:
image: A PIL.Image object, raw input image.
Returns:
resized_image: RGB image resized from original input image.
seg_map: Segmentation map of `resized_image`.
"""
width, height = image.size
resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
target_size = (int(resize_ratio * width), int(resize_ratio * height))
resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
batch_seg_map = self.sess.run(
self.OUTPUT_TENSOR_NAME,
feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
seg_map = batch_seg_map[0]
return resized_image, seg_map
model = DeepLabModel(download_path)
"""
Explanation: Load model in TensorFlow
End of explanation
"""
LABEL_NAMES = np.asarray([
'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tv'
])
FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
FULL_COLOR_MAP = get_dataset_colormap.label_to_color_image(FULL_LABEL_MAP)
def vis_segmentation(image, seg_map):
plt.figure(figsize=(15, 5))
grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])
plt.subplot(grid_spec[0])
plt.imshow(image)
plt.axis('off')
plt.title('input image')
plt.subplot(grid_spec[1])
seg_image = get_dataset_colormap.label_to_color_image(
seg_map, get_dataset_colormap.get_pascal_name()).astype(np.uint8)
plt.imshow(seg_image)
plt.axis('off')
plt.title('segmentation map')
plt.subplot(grid_spec[2])
plt.imshow(image)
plt.imshow(seg_image, alpha=0.7)
plt.axis('off')
plt.title('segmentation overlay')
unique_labels = np.unique(seg_map)
ax = plt.subplot(grid_spec[3])
plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')
ax.yaxis.tick_right()
plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
plt.xticks([], [])
ax.tick_params(width=0)
plt.show()
"""
Explanation: Helper methods
End of explanation
"""
# Note that we are using single scale inference in the demo for fast
# computation, so the results may slightly differ from the visualizations
# in README, which uses multi-scale and left-right flipped inputs.
IMAGE_DIR = 'g3doc/img'
def run_demo_image(image_name):
try:
image_path = os.path.join(IMAGE_DIR, image_name)
orignal_im = Image.open(image_path)
except IOError:
print 'Failed to read image from %s.' % image_path
return
print 'running deeplab on image %s...' % image_name
resized_im, seg_map = model.run(orignal_im)
vis_segmentation(resized_im, seg_map)
_ = interact(run_demo_image, image_name=['image1.jpg', 'image2.jpg', 'image3.jpg'])
"""
Explanation: Run on sample images
End of explanation
"""
def get_an_internet_image(url):
if not url:
return
try:
# Prefix with 'file://' for local file.
if os.path.exists(url):
url = 'file://' + url
f = urllib.urlopen(url)
jpeg_str = f.read()
except IOError:
print 'invalid url: ' + url
return
orignal_im = Image.open(StringIO.StringIO(jpeg_str))
print 'running deeplab on image %s...' % url
resized_im, seg_map = model.run(orignal_im)
vis_segmentation(resized_im, seg_map)
_ = interact(get_an_internet_image, url='')
"""
Explanation: Run on internet images
End of explanation
"""
|
cdt15/lingam
|
examples/CausalEffect(LightGBM).ipynb
|
mit
|
import numpy as np
import pandas as pd
import graphviz
import lingam
print([np.__version__, pd.__version__, graphviz.__version__, lingam.__version__])
np.set_printoptions(precision=3, suppress=True)
np.random.seed(0)
"""
Explanation: Causal Effect for Non-linear Regression
Import and settings
In this example, we need to import numpy, pandas, and graphviz in addition to lingam.
End of explanation
"""
def make_graph(adjacency_matrix, labels=None):
idx = np.abs(adjacency_matrix) > 0.01
dirs = np.where(idx)
d = graphviz.Digraph(engine='dot')
names = labels if labels else [f'x{i}' for i in range(len(adjacency_matrix))]
for to, from_, coef in zip(dirs[0], dirs[1], adjacency_matrix[idx]):
d.edge(names[from_], names[to], label=f'{coef:.2f}')
return d
"""
Explanation: Utility function
We define a utility function to draw the directed acyclic graph.
End of explanation
"""
X = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data-original',
delim_whitespace=True, header=None,
names = ['mpg', 'cylinders', 'displacement',
'horsepower', 'weight', 'acceleration',
'model year', 'origin', 'car name'])
X.dropna(inplace=True)
X.drop(['model year', 'origin', 'car name'], axis=1, inplace=True)
print(X.shape)
X.head()
"""
Explanation: Test data
We use 'Auto MPG Data Set' (http://archive.ics.uci.edu/ml/datasets/Auto+MPG)
End of explanation
"""
model = lingam.DirectLiNGAM()
model.fit(X)
labels = [f'{i}. {col}' for i, col in enumerate(X.columns)]
make_graph(model.adjacency_matrix_, labels)
"""
Explanation: Causal Discovery
To run causal discovery, we create a DirectLiNGAM object and call the fit method.
End of explanation
"""
import lightgbm as lgb
target = 0 # mpg
features = [i for i in range(X.shape[1]) if i != target]
reg = lgb.LGBMRegressor(random_state=0)
reg.fit(X.iloc[:, features], X.iloc[:, target])
"""
Explanation: Prediction Model
We create the prediction model. In this example it is a LightGBM regressor (lgb.LGBMRegressor) rather than a linear model.
End of explanation
"""
ce = lingam.CausalEffect(model)
effects = ce.estimate_effects_on_prediction(X, target, reg)
df_effects = pd.DataFrame()
df_effects['feature'] = X.columns
df_effects['effect_plus'] = effects[:, 0]
df_effects['effect_minus'] = effects[:, 1]
df_effects
max_index = np.unravel_index(np.argmax(effects), effects.shape)
print(X.columns[max_index[0]])
"""
Explanation: Identification of Feature with Greatest Causal Influence on Prediction
To identify the feature having the greatest intervention effect on the prediction, we create a CausalEffect object and call the estimate_effects_on_prediction method.
End of explanation
"""
|
santoshphilip/eppy
|
docs/Main_Tutorial.ipynb
|
mit
|
# you would normaly install eppy by doing
# python setup.py install
# or
# pip install eppy
# or
# easy_install eppy
# if you have not done so, uncomment the following three lines
import sys
# pathnameto_eppy = 'c:/eppy'
pathnameto_eppy = '../'
sys.path.append(pathnameto_eppy)
from eppy import modeleditor
from eppy.modeleditor import IDF
iddfile = "../eppy/resources/iddfiles/Energy+V7_2_0.idd"
fname1 = "../eppy/resources/idffiles/V_7_2/smallfile.idf"
IDF.setiddname(iddfile)
idf1 = IDF(fname1)
"""
Explanation: Eppy Tutorial
Authors: Santosh Philip, Leora Tanjuatco
Eppy is a scripting language for E+ idf files and E+ output files. Eppy is written in the programming language Python. As a result it takes full advantage of the rich data structures and idioms that are available in Python. You can programmatically navigate, search, and modify E+ idf files using eppy. The power of using a scripting language allows you to do the following:
Make a large number of changes in an idf file with a few lines of eppy code.
Use conditions and filters when making changes to an idf file
Make changes to multiple idf files.
Read data from the output files of an E+ simulation run.
Based on the results of an E+ simulation run, generate the input file for the next simulation run.
So why does this matter? Here are some of the things you can do with eppy:
Change construction for all north facing walls.
Change the glass type for all windows larger than 2 square meters.
Change the number of people in all the interior zones.
Change the lighting power in all south facing zones.
Change the efficiency and fan power of all rooftop units.
Find the energy use of all the models in a folder (or of models that were run after a certain date)
If a model is using more energy than expected, keep increasing the R-value of the roof until you get to the expected energy use.
Quick Start
Here is a short IDF file that I’ll be using as an example to start us off ::
To use eppy to look at this model, we have to run a little code first:
End of explanation
"""
idf1.printidf()
"""
Explanation: idf1 now holds all the data in your idf file.
Now that the behind-the-scenes work is done, we can print this file.
End of explanation
"""
print(idf1.idfobjects['BUILDING']) # put the name of the object you'd like to look at in brackets
"""
Explanation: Looks like the same file as before, except that all the comments are slightly different.
As you can see, this file has four objects:
VERSION
SIMULATIONCONTROL
BUILDING
SITE:LOCATION
So, let us look take a closer look at the BUILDING object.
We can do this using this command::
End of explanation
"""
building = idf1.idfobjects['BUILDING'][0]
"""
Explanation: We can also zoom in on the object and look just at its individual parts.
For example, let us look at the name of the building.
To do this, we have to do some more behind-the-scenes work, which we'll explain later.
End of explanation
"""
print(building.Name)
"""
Explanation: Now we can do this:
End of explanation
"""
building.Name = "Empire State Building"
print(building.Name)
"""
Explanation: Now that we've isolated the building name, we can change it.
End of explanation
"""
idf1.printidf()
"""
Explanation: Did this actually change the name in the model ? Let us print the entire model and see.
End of explanation
"""
building.Name = "Empire State Building"
import eppy
# import eppy.ex_inits
# reload(eppy.ex_inits)
import ex_inits
"""
Explanation: Yes! It did. So now you have a taste of what eppy can do. Let's get started!
Modifying IDF Fields
That was just a quick example -- we were showing off. Let's look a little closer.
As you might have guessed, changing an IDF field follows this structure::
Plugging the object name (building), the field name (Name) and our new field name ("Empire State Building") into this command gave us this:
End of explanation
"""
import ex_inits #no need to know this code, it just shows the image below
for_images = ex_inits
for_images.display_png(for_images.idfeditor)
"""
Explanation: But how did we know that "Name" is one of the fields in the object "building"?
Are there other fields?
What are they called?
Let's take a look at the IDF editor:
End of explanation
"""
print(building.Terrain)
"""
Explanation: In the IDF Editor, the building object is selected.
We can see all the fields of the object "BUILDING".
They are:
Name
North Axis
Terrain
Loads Convergence Tolerance Value
Temperature Convergence Tolerance Value
Solar Distribution
Maximum Number of Warmup Days
Minimum Number of Warmup Days
Let us try to access the other fields.
End of explanation
"""
print(building.North_Axis)
"""
Explanation: How about the field "North Axis" ?
It is not a single word, but two words.
In a programming language, a variable has to be a single word without any spaces.
To solve this problem, put an underscore where there is a space.
So "North Axis" becomes "North_Axis".
End of explanation
"""
print(building.Name)
print(building.North_Axis)
print(building.Terrain)
print(building.Loads_Convergence_Tolerance_Value)
print(building.Temperature_Convergence_Tolerance_Value)
print(building.Solar_Distribution)
print(building.Maximum_Number_of_Warmup_Days)
print(building.Minimum_Number_of_Warmup_Days)
"""
Explanation: Now we can do:
End of explanation
"""
fruits = ["apple", "orange", "bannana"]
# fruits is a list with three items in it.
"""
Explanation: Where else can we find the field names?
The IDF Editor saves the idf file with the field name commented next to field.
Eppy also does this.
Let us take a look at the "BUILDING" object in the text file that the IDF Editor saves ::
This is a good place to find the field names too.
It is easy to copy and paste from here. You can't do that from the IDF Editor.
We know that in an E+ model, there will be only ONE "BUILDING" object. This will be the first and only item in the list "buildings".
But E+ models are made up of objects such as "BUILDING", "SITE:LOCATION", "ZONE", "PEOPLE", "LIGHTS". There can be a number of "ZONE" objects, a number of "PEOPLE" objects and a number of "LIGHTS" objects.
So how do you know if you're looking at the first "ZONE" object or the second one? Or the tenth one? To answer this, we need to learn about how lists work in python.
Python lesson 1: lists
Eppy holds these objects in a python structure called list. Let us take a look at how lists work in python.
End of explanation
"""
fruits[0]
"""
Explanation: To get the first item in fruits we say:
End of explanation
"""
print(fruits[2])
"""
Explanation: Why "0" ?
Because, unlike us, python starts counting from zero in a list. So, to get the third item in the list we'd need to input 2, like this:
End of explanation
"""
firstfruit = fruits[0]
print(firstfruit)
"""
Explanation: But calling the first fruit "fruits[0]" is rather cumbersome. Why don't we call it firstfruit?
End of explanation
"""
goodfruit = fruits[0]
redfruit = fruits[0]
print(firstfruit)
print(goodfruit)
print(redfruit)
print(fruits[0])
"""
Explanation: We also can say
End of explanation
"""
print(len(fruits))
"""
Explanation: As you see, we can call that item in the list whatever we want.
How many items in the list
To know how many items are in a list, we ask for the length of the list.
The function 'len' will do this for us.
End of explanation
"""
idf1.save()
"""
Explanation: There are 3 fruits in the list.
Saving an idf file
This is easy:
End of explanation
"""
idf1.saveas('something.idf')
"""
Explanation: If you'd like to do a "Save as..." use this:
End of explanation
"""
from eppy import modeleditor
from eppy.modeleditor import IDF
iddfile = "../eppy/resources/iddfiles/Energy+V7_2_0.idd"
try:
IDF.setiddname(iddfile)
except modeleditor.IDDAlreadySetError as e:
pass
fname1 = "../eppy/resources/idffiles/V_7_2/constructions.idf"
idf1 = IDF(fname1)
"""
Explanation: Working with E+ objects
Let us open a small idf file that has only "CONSTRUCTION" and "MATERIAL" objects in it. You can go into "../idffiles/V_7_2/constructions.idf" and take a look at the file. We are not printing it here because it is too big.
So let us open it using the idfreader -
End of explanation
"""
materials = idf1.idfobjects["MATERIAL"]
print(materials)
"""
Explanation: Let us print all the "MATERIAL" objects in this model.
End of explanation
"""
firstmaterial = materials[0]
secondmaterial = materials[1]
print(firstmaterial)
"""
Explanation: As you can see, there are many material objects in this idf file.
The variable "materials" now contains a list of "MATERIAL" objects.
You already know a little about lists, so let us take a look at the items in this list.
End of explanation
"""
print(secondmaterial)
"""
Explanation: Let us print secondmaterial
End of explanation
"""
bad_architects = ["Donald Trump", "Mick Jagger",
"Steve Jobs", "Lady Gaga", "Santa Clause"]
print(bad_architects[3])
"""
Explanation: This is awesome!! Why?
To understand what you can do with your objects organized as lists, you'll have to learn a little more about lists.
Python lesson 2: more about lists
More ways to access items in a list
You should remember that you can access any item in a list by passing in its index.
The tricky part is that python starts counting at 0, so you need to input 0 in order to get the first item in a list.
Following the same logic, you need to input 3 in order to get the fourth item on the list. Like so:
End of explanation
"""
print(bad_architects[-1])
print(bad_architects[-2])
"""
Explanation: But there's another way to access items in a list. If you input -1, it will return the last item. -2 will give you the second-to-last item, etc.
End of explanation
"""
print(bad_architects[1:3]) # slices at 1 and 3
"""
Explanation: Slicing a list
You can also get more than one item in a list:
End of explanation
"""
print(bad_architects[2:-1]) # slices at 2 and -1
print(bad_architects[-3:-1]) # slices at -3 and -1
"""
Explanation: How do I make sense of this?
To understand this you need to see the list in the following manner::
The slice operation bad_architects[1:3] slices right where the numbers are.
Does that make sense?
Let us try a few other slices:
End of explanation
"""
print(bad_architects[3:] )
print(bad_architects[:2] )
print(bad_architects[-3:] )
print(bad_architects[:-2] )
"""
Explanation: You can also slice in the following way:
End of explanation
"""
bad_architects.append("First-year students")
print(bad_architects)
"""
Explanation: I'll let you figure that out on your own.
Adding to a list
This is simple: the append function adds an item to the end of the list.
The following command will add 'something' to the end of the list called listname::
End of explanation
"""
bad_architects.remove("First-year students")
print(bad_architects)
"""
Explanation: Deleting from a list
There are two ways to do this, based on the information you have. If you have the value of the object, you'll want to use the remove function. It looks like this:
An example:
End of explanation
"""
what_i_ate_today = ["coffee", "bacon", "eggs"]
print(what_i_ate_today)
what_i_ate_today.append("vegetables") # adds vegetables to the end of the list
# but I don't like vegetables
print(what_i_ate_today)
# since I don't like vegetables
what_i_ate_today.pop(-1) # use index of -1, since vegetables is the last item in the list
print(what_i_ate_today)
"""
Explanation: What if you know the index of the item you want to remove?
What if you appended an item by mistake and just want to remove the last item in the list?
You should use the pop function. It looks like this:
End of explanation
"""
what_i_ate_today.pop(1)
"""
Explanation: You can also remove the second item.
End of explanation
"""
was_first_item = what_i_ate_today.pop(0)
print('was_first_item =', was_first_item)
print('what_i_ate_today = ', what_i_ate_today)
"""
Explanation: Notice the 'bacon' in the line above.
pop actually 'pops' the value (the one you just removed from the list) back to you.
Let us pop the first item.
End of explanation
"""
materials = idf1.idfobjects["MATERIAL"]
"""
Explanation: what_i_ate_today is just 'eggs'?
That is not much of a breakfast!
Let us get back to eppy.
Continuing to work with E+ objects
Let us get those "MATERIAL" objects again
End of explanation
"""
print(materials[-1])
"""
Explanation: With our newfound knowledge of lists, we can do a lot of things.
Let us get the last material:
End of explanation
"""
print(materials[-2:])
"""
Explanation: How about the last two?
End of explanation
"""
print(len(materials))
"""
Explanation: Pretty good.
Counting all the materials ( or counting all objects )
How many materials are in this model ?
End of explanation
"""
was_last_material = materials.pop(-1)
print(len(materials))
"""
Explanation: Removing a material
Let us remove the last material in the list
End of explanation
"""
print(materials[-1])
"""
Explanation: Success! We have only 9 materials now.
The last material used to be:
'G05 25mm wood'
End of explanation
"""
print(was_last_material)
"""
Explanation: Now the last material in the list is:
'M15 200mm heavyweight concrete'
Adding a material to the list
We still have the old last material
End of explanation
"""
materials.append(was_last_material)
print(len(materials))
"""
Explanation: Let us add it back to the list
End of explanation
"""
print(materials[-1])
"""
Explanation: Once again we have 10 materials and the last material is:
End of explanation
"""
idf1.newidfobject("MATERIAL")
len(materials)
"""
Explanation: Add a new material to the model
So far we have been working only with materials that were already in the list.
What if we want to make a new material?
Obviously we would use the function 'newidfobject'.
End of explanation
"""
print(materials[-1])
"""
Explanation: We have 11 items in the materials list.
Let us take a look at the last material in the list, where this fancy new material was added
End of explanation
"""
materials[-1].Name = 'Peanut Butter'
materials[-1].Roughness = 'MediumSmooth'
materials[-1].Thickness = 0.03
materials[-1].Conductivity = 0.16
materials[-1].Density = 600
materials[-1].Specific_Heat = 1500
print(materials[-1])
"""
Explanation: Looks a little different from the other materials. It does have the name we gave it.
Why do some fields have values and others are blank?
newidfobject puts in all the default values and leaves the others blank. It is up to us to put values in the new fields.
Let's do it now.
End of explanation
"""
Peanutbuttermaterial = materials[-1]
idf1.copyidfobject(Peanutbuttermaterial)
materials = idf1.idfobjects["MATERIAL"]
len(materials)
materials[-1]
"""
Explanation: Copy an existing material
End of explanation
"""
fruits = ["apple", "orange", "bannana"]
"""
Explanation: Python lesson 3: indentation and looping through lists
I'm tired of doing all this work, it's time to make python do some heavy lifting for us!
Python can go through each item in a list and perform an operation on any (or every) item in the list.
This is called looping through the list.
Here's how to tell python to step through each item in a list, and then do something to every single item.
We'll use a 'for' loop to do this. ::
A quick note about the second line. Notice that it's indented? There are 4 blank spaces before the code starts::
It's elegant, but it means that the indentation of the code holds meaning.
So make sure to indent the second (and third and forth) lines of your loops!
Now let's make some fruit loops.
End of explanation
"""
for fruit in fruits:
print(fruit)
"""
Explanation: Given the syntax I gave you before I started rambling about indentation, we can easily print every item in the fruits list by using a 'for' loop.
End of explanation
"""
for fruit in fruits:
print("I am a fruit said the", fruit)
"""
Explanation: That was easy! But it can get complicated pretty quickly...
Let's make it do something more complicated than just print the fruits.
Let's have python add some words to each fruit.
End of explanation
"""
rottenfruits = [] # makes a blank list called rottenfruits
for fruit in fruits: # steps through every item in fruits
rottenfruit = "rotten " + fruit # changes each item to "rotten _____"
rottenfruits.append(rottenfruit) # adds each changed item to the formerly empty list
print(rottenfruits)
# here's a shorter way of writing it
rottenfruits = ["rotten " + fruit for fruit in fruits]
"""
Explanation: Now we'll try to confuse you:
End of explanation
"""
print(rottenfruits)
"""
Explanation: Did you follow all that??
Just in case you didn't, let's review that last one::
End of explanation
"""
fruits = ["apple", "orange", "pear", "berry", "mango", "plum", "peach", "melon", "bannana"]
for fruit in fruits: # steps through every fruit in fruits
if len(fruit) > 5: # checks to see if the length of the word is more than 5
print(fruit) # if true, print the fruit
# if false, python goes back to the 'for' loop
# and checks the next item in the list
"""
Explanation: Filtering in a loop
But what if you don't want to change every item in a list?
We can use an 'if' statement to operate on only some items in the list.
Indentation is also important in 'if' statements, as you'll see::
End of explanation
"""
p_fruits = [] # creates an empty list called p_fruits
for fruit in fruits: # steps through every fruit in fruits
if fruit.startswith("p"): # checks to see if the first letter is 'p', using a built-in function
p_fruits.append(fruit) # if the first letter is 'p', the item is added to p_fruits
# if the first letter is not 'p', python goes back to the 'for' loop
# and checks the next item in the list
print(p_fruits)
# here's a shorter way to write it
p_fruits = [fruit for fruit in fruits if fruit.startswith("p")]
"""
Explanation: Let's say we want to pick only the fruits that start with the letter 'p'.
End of explanation
"""
print(p_fruits)
"""
Explanation: ::
End of explanation
"""
range(4) # makes a list
for i in range(4):
print(i)
len(p_fruits)
for i in range(len(p_fruits)):
print(i)
for i in range(len(p_fruits)):
print(p_fruits[i])
for i in range(len(p_fruits)):
print(i, p_fruits[i])
for item_from_enumerate in enumerate(p_fruits):
print(item_from_enumerate)
for i, fruit in enumerate(p_fruits):
print(i, fruit)
"""
Explanation: Counting through loops
This is not really needed, but it is nice to know. You can safely skip this.
Python's built-in function range() makes a list of numbers within a range that you specify.
This is useful because you can use these lists inside of loops.
End of explanation
"""
for material in materials:
print(material.Name )
[material.Name for material in materials]
[material.Roughness for material in materials]
[material.Thickness for material in materials]
[material.Thickness for material in materials if material.Thickness > 0.1]
[material.Name for material in materials if material.Thickness > 0.1]
thick_materials = [material for material in materials if material.Thickness > 0.1]
thick_materials
# change the names of the thick materials
for material in thick_materials:
material.Name = "THICK " + material.Name
thick_materials
"""
Explanation: Looping through E+ objects
If you have read the python explanation of loops, you are now masters of using loops.
Let us use the loops with E+ objects.
We'll continue to work with the materials list.
End of explanation
"""
for_images.display_png(for_images.material_lists) # display the image below
# here's the same concept, demonstrated with code
# remember, we changed the names of the items in the list thick_materials
# these changes are visible when we print the materials list; the thick materials are also in the materials list
[material.Name for material in materials]
"""
Explanation: So now we're working with two different lists: materials and thick_materials.
But even though the items can be separated into two lists, we're still working with the same items.
Here's a helpful illustration:
End of explanation
"""
# OLD CODE, SHOULD BE DELETED
# from idfreader import idfreader
# iddfile = "../iddfiles/Energy+V7_0_0_036.idd"
# fname = "../idffiles/V_7_0/5ZoneSupRetPlenRAB.idf"
# model, to_print, idd_info = idfreader(fname, iddfile)
# surfaces = model['BUILDINGSURFACE:DETAILED'] # all the surfaces
from eppy import modeleditor
from eppy.modeleditor import IDF
iddfile = "../eppy/resources/iddfiles/Energy+V7_2_0.idd"
try:
IDF.setiddname(iddfile)
except modeleditor.IDDAlreadySetError as e:
pass
fname1 = "../eppy/resources/idffiles/V_7_0/5ZoneSupRetPlenRAB.idf"
idf1 = IDF(fname1)
surfaces = idf1.idfobjects['BUILDINGSURFACE:DETAILED']
# Let us look at the first surface
asurface = surfaces[0]
print("surface azimuth =", asurface.azimuth, "degrees")
print("surface tilt =", asurface.tilt, "degrees")
print("surface area =", asurface.area, "m2")
# all the surface names
s_names = [surface.Name for surface in surfaces]
print(s_names[:5]) # print five of them
# surface names and azimuths
s_names_azm = [(sf.Name, sf.azimuth) for sf in surfaces]
print(s_names_azm[:5]) # print five of them
# or to do that in pretty printing
for name, azimuth in s_names_azm[:5]: # just five of them
print(name, azimuth)
# surface names and tilt
s_names_tilt = [(sf.Name, sf.tilt) for sf in surfaces]
for name, tilt in s_names_tilt[:5]: # just five of them
print(name, tilt)
# surface names and areas
s_names_area = [(sf.Name, sf.area) for sf in surfaces]
for name, area in s_names_area[:5]: # just five of them
print(name, area, "m2")
"""
Explanation: Geometry functions in eppy
Sometimes, we want information about the E+ object that is not in the fields. For example, it would be useful to know the areas and orientations of the surfaces. These attributes of the surfaces are not in the fields of surfaces, but surface objects do have fields that have the coordinates of the surface. The areas and orientations can be calculated from these coordinates.
Eppy has some functions that will do the calculations.
In the present version, eppy will calculate:
surface azimuth
surface tilt
surface area
Let us explore these functions
End of explanation
"""
# just vertical walls
vertical_walls = [sf for sf in surfaces if sf.tilt == 90.0]
print([sf.Name for sf in vertical_walls])
# north facing walls
north_walls = [sf for sf in vertical_walls if sf.azimuth == 0.0]
print([sf.Name for sf in north_walls])
# north facing exterior walls
exterior_nwall = [sf for sf in north_walls if sf.Outside_Boundary_Condition == "Outdoors"]
print([sf.Name for sf in exterior_nwall])
# print out some more details of the north wall
north_wall_info = [(sf.Name, sf.azimuth, sf.Construction_Name) for sf in exterior_nwall]
for name, azimuth, construction in north_wall_info:
print(name, azimuth, construction)
# change the construction in the exterior north walls
for wall in exterior_nwall:
wall.Construction_Name = "NORTHERN-WALL" # make sure such a construction exists in the model
# see the change
north_wall_info = [(sf.Name, sf.azimuth, sf.Construction_Name) for sf in exterior_nwall]
for name, azimuth, construction in north_wall_info:
print(name, azimuth, construction)
# see this in all surfaces
for sf in surfaces:
print(sf.Name, sf.azimuth, sf.Construction_Name)
"""
Explanation: Let us try to isolate the exterior north facing walls and change their constructions
End of explanation
"""
|
phoebe-project/phoebe2-docs
|
2.1/tutorials/optimizing.ipynb
|
gpl-3.0
|
!pip install -I "phoebe>=2.1,<2.2"
import phoebe
b = phoebe.default_binary()
"""
Explanation: Advanced: Optimizing Performance with PHOEBE
Setup
Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
End of explanation
"""
print(phoebe.conf.interactive_checks)
phoebe.interactive_checks_off()
print(phoebe.conf.interactive_checks)
"""
Explanation: Interactivity Options
When running in an interactive Python session, PHOEBE updates all constraints and runs various checks after each command. Although this is convenient, it does take some time, and it can sometimes be advantageous to disable this to save computation time.
Interactive Checks
By default, interactive checks are enabled when PHOEBE is being run in an interactive session (either an interactive python, IPython, or Jupyter notebook session), but disabled when PHOEBE is run as a script directly from the console. When enabled, PHOEBE will re-run the system checks after every single change to the bundle, raising warnings via the logger as soon as they occur.
This default behavior can be changed via phoebe.interactive_checks_on() or phoebe.interactive_checks_off(). The current value can be accessed via phoebe.conf.interactive_checks.
End of explanation
"""
passed, msg = b.run_checks()
print(passed, msg)
b.set_value('requiv', component='primary', value=50)
passed, msg = b.run_checks()
print(passed, msg)
"""
Explanation: If disabled, you can always manually run the checks via b.run_checks().
End of explanation
"""
print(phoebe.conf.interactive_constraints)
print(b.filter('mass', component='primary'))
b.set_value('sma@binary', 10)
print(b.filter('mass', component='primary'))
"""
Explanation: Interactive Constraints
By default, interactive constraints are always enabled in PHOEBE, unless explicitly disabled. Whenever a value is changed in the bundle that affects a constrained value, that constraint is immediately executed and all applicable values are updated. This ensures that all constrained values are "up-to-date".
If disabled, constraints are delayed and only executed when needed by PHOEBE (when calling run_compute, for example). This can save significant time, as each value that needs updating only needs to have its constraint executed once, instead of multiple times.
This default behavior can be changed via phoebe.interactive_constraints_on() or phoebe.interactive_constraints_off(). The current value can be accessed via phoebe.conf.interactive_constraints.
Let's first look at the default behavior with interactive constraints on.
End of explanation
"""
phoebe.interactive_constraints_off()
print(phoebe.conf.interactive_constraints)
print(b.filter('mass', component='primary'))
b.set_value('sma@binary', 15)
print(b.filter('mass', component='primary'))
"""
Explanation: Note that the mass has already updated, according to the constraint, when the value of the semi-major axes was changed. If we disable interactive constraints this will not be the case.
End of explanation
"""
b.run_delayed_constraints()
print(b.filter('mass', component='primary'))
"""
Explanation: No need to worry though - all constraints will be run automatically before passing to the backend. If you need to access the value of a constrained parameter, you can explicitly ask for all delayed constraints to be executed via b.run_delayed_constraints().
End of explanation
"""
|
phoebe-project/phoebe2-docs
|
2.1/examples/binary_spots.ipynb
|
gpl-3.0
|
!pip install -I "phoebe>=2.1,<2.2"
"""
Explanation: Binary with Spots
Setup
Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
End of explanation
"""
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.
End of explanation
"""
b.add_dataset('lc', times=phoebe.linspace(0,1,101))
b.run_compute(irrad_method='none', model='no_spot')
"""
Explanation: Model without Spots
End of explanation
"""
b.add_feature('spot', component='primary', feature='spot01', relteff=0.9, radius=30, colat=45, long=90)
b.run_compute(irrad_method='none', model='with_spot')
"""
Explanation: Adding Spots
Let's add a spot to the primary component in our binary.
The 'colat' parameter defines the colatitude on the star measured from its North (spin) Pole. The 'long' parameter measures the longitude of the spot - with longitude = 0 being defined as pointing towards the other star at t0. See the spots tutorial for more details.
End of explanation
"""
afig, mplfig = b.plot(show=True, legend=True)
"""
Explanation: Comparing Light Curves
End of explanation
"""
|
msanterre/deep_learning
|
tv-script-generation/dlnd_tv_script_generation.ipynb
|
mit
|
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
"""
return {
".": "||Period||",
",": "||Comma||",
"\"": "||Quotation_Mark||",
";": "||Semicolon||",
"!": "||Exclamation_Mark||",
"?": "||Question_Mark||",
"(": "||Left_Parentheses||",
")": "||Right_Parentheses||",
"--": "||Dash||",
'\n': "||Return||"
}
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol into its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused with a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
inputs = tf.placeholder(tf.int32, [None, None], name="input")
targets = tf.placeholder(tf.int32, [None, None], name="targets")
learning_rate = tf.placeholder(tf.float32, name="learning_rate")
return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
"""
num_layers = 2
def get_cell():
return tf.contrib.rnn.BasicLSTMCell(rnn_size)
cell = tf.contrib.rnn.MultiRNNCell([get_cell() for i in range(num_layers)])
init_state = tf.identity(cell.zero_state(batch_size, tf.float32), name="initial_state")
return cell, init_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The RNN size should be set using rnn_size
- Initialize the cell state using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
"""
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
f_state = tf.identity(final_state, name="final_state")
return outputs, f_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
"""
embed = get_embed(input_data, vocab_size, embed_dim)
outputs, final_state = build_rnn(cell, embed)
logits = tf.layers.dense(outputs, vocab_size, activation=None)
# print("Input size: ", input_data.get_shape())
# print("Embed size: ", embed_dim)
# print("RNN Size: ", rnn_size)
# print("Outputs: ", outputs)
# print("logits: ", tf.reshape(logits, [-1]))
return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
x = np.array(int_text)
y = np.array(int_text[1:] + [x[0]])
size = batch_size * seq_length
num_batches = len(int_text) // size
fits = num_batches * batch_size * seq_length
# print("{} - ({}, {})".format(len(int_text), len(x), len(y)))
# print("batch_size: ", batch_size)
# print("seq length: ", seq_length)
# print("num batches: ", num_batches)
# print("X: {}...{}".format(x[:10], x[-10:]))
# print("Y: {}...{}".format(y[:10], y[-10:]))
# print("Fits: ", fits)
batches = np.zeros((num_batches, 2, batch_size, seq_length))
for i in range(fits):
batch_index = (i // seq_length) % num_batches
batch_element = i // (num_batches * seq_length)
elem = i % seq_length
#print("{}-{}-{}={} ".format(batch_index, batch_element, elem, x[i]))
# X
batches[batch_index][0][batch_element][elem] = x[i]
#Y
batches[batch_index][1][batch_element][elem] = y[i]
batches[num_batches-1][1][batch_size-1][seq_length-1] = 0 # last target is 0
return batches
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2], [ 7 8], [13 14]]
# Batch of targets
[[ 2 3], [ 8 9], [14 15]]
]
# Second Batch
[
# Batch of Input
[[ 3 4], [ 9 10], [15 16]]
# Batch of targets
[[ 4 5], [10 11], [16 17]]
]
# Third Batch
[
# Batch of Input
[[ 5 6], [11 12], [17 18]]
# Batch of targets
[[ 6 7], [12 13], [18 1]]
]
]
```
Notice that the last target value in the last batch is the first input value of the first batch. In this case, 1. This is a common technique used when creating sequence batches, although it is rather unintuitive.
End of explanation
"""
# Number of Epochs
num_epochs = 65
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 12
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 100
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
input_t = loaded_graph.get_tensor_by_name("input:0")
initial_state_t = loaded_graph.get_tensor_by_name("initial_state:0")
final_state_t = loaded_graph.get_tensor_by_name("final_state:0")
probs_t = loaded_graph.get_tensor_by_name("probs:0")
return input_t, initial_state_t, final_state_t, probs_t
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
top_n = 20
p = np.squeeze(probabilities)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
word_i = np.random.choice(len(p), 1, p=p)[0]
word = int_to_vocab[word_i]
return word
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[0][dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
5hubh4m/CS231n
|
Assignment1/features.ipynb
|
mit
|
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
"""
Explanation: Image features exercise
Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the assignments page on the course website.
We have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels.
All of your work for this exercise will be done in this notebook.
End of explanation
"""
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
"""
Explanation: Load data
Similar to previous exercises, we will load CIFAR-10 data from disk.
End of explanation
"""
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
"""
Explanation: Extract Features
For each image we will compute a Histogram of Oriented
Gradients (HOG) as well as a color histogram using the hue channel in HSV
color space. We form our final feature vector for each image by concatenating
the HOG and color histogram feature vectors.
Roughly speaking, HOG should capture the texture of the image while ignoring
color information, and the color histogram represents the color of the input
image while ignoring texture. As a result, we expect that using both together
ought to work better than using either alone. Verifying this assumption would
be a good thing to try for the bonus section.
The hog_feature and color_histogram_hsv functions both operate on a single
image and return a feature vector for that image. The extract_features
function takes a set of images and a list of feature functions and evaluates
each feature function on each image, storing the results in a matrix where
each column is the concatenation of all feature vectors for a single image.
End of explanation
"""
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [-9, -7]
regularization_strengths = [5, 7]
results = {}
best_val = -1
best_svm = None
for _ in np.arange(50):
i = 10 ** np.random.uniform(low=learning_rates[0], high=learning_rates[1])
j = 10 ** np.random.uniform(low=regularization_strengths[0], high=regularization_strengths[1])
svm = LinearSVM()
svm.train(X_train_feats, y_train, learning_rate=i, reg=j,
num_iters=500, verbose=False)
y_train_pred = svm.predict(X_train_feats)
y_val_pred = svm.predict(X_val_feats)
accuracy = (np.mean(y_train == y_train_pred), np.mean(y_val == y_val_pred))
results[(i, j)] = accuracy
if accuracy[1] > best_val:
best_val = accuracy[1]
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Get the best hyperparameter from result
best_lr = 0.0
best_reg = 0.0
for lr, reg in results:
if results[(lr, reg)][1] == best_val:
best_lr = lr
best_reg = reg
break
print 'Best learning rate: %f, best regularisation strength: %f' % (best_lr, best_reg, )
# Train the classifier with the best hyperparameters
best_svm = LinearSVM()
loss_hist = best_svm.train(X_train_feats, y_train, learning_rate=best_lr, reg=best_reg,
num_iters=2000, verbose=True)
# plot the loss as a function of iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print test_accuracy
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
"""
Explanation: Train SVM on features
Using the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels.
End of explanation
"""
print X_train_feats.shape
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 200
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
learning = [1e-5, 1]
regularization = [1e0, 1e4]
decay = [0.9, 1]
results = {}
best_val = -1
for _ in np.arange(0, 50):
i = np.random.uniform(low=learning[0], high=learning[1])
j = np.random.uniform(low=regularization[0], high=regularization[1])
k = np.random.uniform(low=decay[0], high=decay[1])
# Train the network
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
num_iters=500, batch_size=200,
learning_rate=i, learning_rate_decay=k,
reg=j, verbose=False)
# Predict on the validation set
val_acc = (net.predict(X_val_feats) == y_val).mean()
results[(i, j, k)] = val_acc
if val_acc > best_val:
best_val = val_acc
best_net = net
for i, j, k in results:
print 'lr: %f, reg: %f, dec: %f -> %f' % (i, j, k, results[(i, j, k)])
print best_val
# Find the best learning rate and regularization strength
best_lr = 0.
best_reg = 0.
best_decay = 0.
for lr, reg, dec in sorted(results):
if results[(lr, reg, dec)] == best_val:
best_lr = lr
best_reg = reg
best_decay = dec
break
print best_lr, best_decay, best_reg
stats = best_net.train(X_train_feats, y_train, X_val_feats, y_val,
num_iters=2000, batch_size=400,
learning_rate=best_lr, learning_rate_decay=best_decay,
reg=best_reg, verbose=True)
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
test_acc = (best_net.predict(X_test_feats) == y_test).mean()
print test_acc
"""
Explanation: Inline question 1:
Describe the misclassification results that you see. Do they make sense?
Neural Network on image features
Earlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels.
For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.
End of explanation
"""
|
jakeret/abcpmc
|
notebooks/2d_gauss.ipynb
|
gpl-3.0
|
samples_size = 1000
sigma = np.eye(2) * 0.25
means = [1.1, 1.5]
data = np.random.multivariate_normal(means, sigma, samples_size)
matshow(sigma)
title("covariance matrix sigma")
colorbar()
"""
Explanation: ABC PMC on a 2D gaussian example
In this example we're looking at a dataset that has been drawn from a 2D gaussian distribution. We're going to assume that we don't have a proper likelihood but that we know the covariance matrix $\Sigma$ of the distribution. Using the ABC PMC algorithm we will approximate the posterior of the distribution of the mean values.
First we generate a new dataset by drawing random variables from a multivariate gaussian around mean=[1.1, 1.5]. This is going to be our observed data set.
End of explanation
"""
def create_new_sample(theta):
return np.random.multivariate_normal(theta, sigma, samples_size)
"""
Explanation: Then we need to define our model/simulation. In this case this is simple: we draw again random variables from a multivariate gaussian distribution using the given mean and the sigma from above
End of explanation
"""
def dist_measure(x, y):
return np.sum(np.abs(np.mean(x, axis=0) - np.mean(y, axis=0)))
"""
Explanation: Next, we need to define a distance measure. We will use the sum of the absolute differences of the means of the simulated and the observed data
End of explanation
"""
distances = [dist_measure(data, create_new_sample(means)) for _ in range(1000)]
sns.distplot(distances, axlabel="distances", )
title("Variablility of distance from simulations")
"""
Explanation: Verification
To verify if everything works and to see the effect of the random samples in the simulation we compute the distance for 1000 simulations at the true mean values
End of explanation
"""
import abcpmc
"""
Explanation: Setup
Now we're going to set up the ABC PMC sampling
End of explanation
"""
prior = abcpmc.GaussianPrior(mu=[1.0, 1.0], sigma=np.eye(2) * 0.5)
"""
Explanation: As a prior we're going to use a gaussian prior using our best guess about the distribution of the means.
End of explanation
"""
alpha = 75
T = 20
eps_start = 1.0
eps = abcpmc.ConstEps(T, eps_start)
"""
Explanation: As threshold $\epsilon$ we're going to use the $\alpha^{th}$ percentile of the sorted distances of the particles of the current iteration. The simplest way to do this is to define a constant $\epsilon$ and iteratively adapt the threshold. As a starting value we're going to define a sufficiently high value so that the acceptance ratio is reasonable, and we will sample for T iterations.
End of explanation
"""
sampler = abcpmc.Sampler(N=5000, Y=data, postfn=create_new_sample, dist=dist_measure, threads=7)
"""
Explanation: Finally, we create an instance of our sampler. We want to use 5000 particles and the functions we defined above. Additionally we're going to make use of the built-in parallelization and use 7 cores for the sampling.
End of explanation
"""
sampler.particle_proposal_cls = abcpmc.OLCMParticleProposal
"""
Explanation: Optionally, we can customize the proposal creation. Here we're going to use an "Optimal Local Covariance Matrix"-kernel (OLCM) as proposed by (Fillipi et al. 2012). This has been shown to yield a high acceptance ratio together with a faster decrease of the threshold.
End of explanation
"""
def launch():
eps = abcpmc.ConstEps(T, eps_start)
pools = []
for pool in sampler.sample(prior, eps):
print("T: {0}, eps: {1:>.4f}, ratio: {2:>.4f}".format(pool.t, eps(pool.eps), pool.ratio))
for i, (mean, std) in enumerate(zip(*abcpmc.weighted_avg_and_std(pool.thetas, pool.ws, axis=0))):
print(u" theta[{0}]: {1:>.4f} \u00B1 {2:>.4f}".format(i, mean,std))
eps.eps = np.percentile(pool.dists, alpha) # reduce eps value
pools.append(pool)
sampler.close()
return pools
import time
t0 = time.time()
pools = launch()
print "took", (time.time() - t0)
"""
Explanation: Sampling
Now we're ready to sample. All we need to do is to iterate over the yielded values of our sampler instance. The sample function returns a namedtuple per iteration that contains all the information that we're interested in.
End of explanation
"""
for i in range(len(means)):
moments = np.array([abcpmc.weighted_avg_and_std(pool.thetas[:,i], pool.ws, axis=0) for pool in pools])
errorbar(range(T), moments[:, 0], moments[:, 1])
hlines(means, 0, T, linestyle="dotted", linewidth=0.7)
_ = xlim([-.5, T])
"""
Explanation: Postprocessing
How did the sampled values evolve over the iterations? As the threshold is decreasing we expect the errors to shrink while the means converge to the true means.
End of explanation
"""
distances = np.array([pool.dists for pool in pools]).flatten()
sns.distplot(distances, axlabel="distance")
"""
Explanation: How does the distribution of the distances look like after we have approximated the posterior? If we're close to the true posterior we expect to have a high bin count around the values we've found in the earlier distribution plot
End of explanation
"""
eps_values = np.array([pool.eps for pool in pools])
plot(eps_values, label=r"$\epsilon$ values")
xlabel("Iteration")
ylabel(r"$\epsilon$")
legend(loc="best")
"""
Explanation: How did our $\epsilon$ values behave over the iterations? Using the $\alpha^{th}$ percentile causes the threshold to decrease relatively fast in the beginning and to plateau later on
End of explanation
"""
acc_ratios = np.array([pool.ratio for pool in pools])
plot(acc_ratios, label="Acceptance ratio")
ylim([0, 1])
xlabel("Iteration")
ylabel("Acceptance ratio")
legend(loc="best")
%pylab inline
rc('text', usetex=True)
rc('axes', labelsize=15, titlesize=15)
"""
Explanation: What about the acceptance ratio? ABC PMC with the OLCM kernel gives us a relatively high acceptance ratio.
End of explanation
"""
import triangle
samples = np.vstack([pool.thetas for pool in pools])
fig = triangle.corner(samples, truths= means)
"""
Explanation: Finally what does our posterior look like? For the visualization we're using triangle.py (https://github.com/dfm/triangle.py)
End of explanation
"""
idx = -1
samples = pools[idx].thetas
fig = triangle.corner(samples, weights=pools[idx].ws, truths= means)
for mean, std in zip(*abcpmc.weighted_avg_and_std(samples, pools[idx].ws, axis=0)):
print(u"mean: {0:>.4f} \u00B1 {1:>.4f}".format(mean,std))
"""
Explanation: Omitting the first couple of iterations..
End of explanation
"""
|
SheffieldML/notebook
|
compbio/periodic/figure2.ipynb
|
bsd-3-clause
|
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
import GPy
np.random.seed(1)
"""
Explanation: Supplementary materials : Details on generating Figure 2
This document is a supplementary material of the article Detecting periodicities with Gaussian
processes by N. Durrande, J. Hensman, M. Rattray and N. D. Lawrence.
The first step is to import the required packages. This tutorial has been written with GPy 0.8.8 which includes the kernels discussed in the article. The latter can be downloaded on the SheffieldML github page.
End of explanation
"""
class AperiodicMatern32(GPy.kern.Kern):
"""
Kernel of the aperiodic subspace (up to a given frequency) of a Matern 3/2 RKHS.
Only defined for input_dim=1.
"""
def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi,
n_freq=10, lower=0., upper=4*np.pi,
active_dims=None, name='aperiodic_Matern32'):
self.per_kern = GPy.kern.PeriodicMatern32(input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name='dummy kernel')
self.whole_kern = GPy.kern.Matern32(input_dim, variance, lengthscale, name='dummy kernel')
GPy.kern.Kern.__init__(self, input_dim, active_dims, name)
self.variance = GPy.core.Param('variance', np.float64(variance), GPy.core.parameterization.transformations.Logexp())
self.lengthscale = GPy.core.Param('lengthscale', np.float64(lengthscale), GPy.core.parameterization.transformations.Logexp())
self.period = GPy.core.Param('period', np.float64(period), GPy.core.parameterization.transformations.Logexp())
self.link_parameters(self.variance, self.lengthscale, self.period)
def parameters_changed(self):
self.whole_kern.variance = self.variance * 1.
self.per_kern.variance = self.variance * 1.
self.whole_kern.lengthscale = self.lengthscale * 1.
self.per_kern.lengthscale = self.lengthscale * 1.
self.per_kern.period = self.period * 1.
def K(self, X, X2=None):
return self.whole_kern.K(X, X2) - self.per_kern.K(X, X2)
def Kdiag(self, X):
return np.diag(self.K(X))
def update_gradients_full(self, dL_dK, X, X2=None):
self.whole_kern.update_gradients_full(dL_dK, X, X2)
self.per_kern.update_gradients_full(-dL_dK, X, X2)
self.variance.gradient = self.whole_kern.variance.gradient + self.per_kern.variance.gradient
self.lengthscale.gradient = self.whole_kern.lengthscale.gradient + self.per_kern.lengthscale.gradient
self.period.gradient = self.per_kern.period.gradient
# Domain Parameters
a = 0. # lower bound of the space
b = 4*np.pi # upper bound
# kernel parameters
per = 2*np.pi # period
var = 1. # variance
N = 20 # max frequency in the decomposition (the number of basis functions is 2N)
"""
Explanation: The boundary limits for the plots are set to $[0,4 \pi]$, and we consider a period of $2 \pi$ :
End of explanation
"""
lenscl=.5
km1 = GPy.kern.Matern32(input_dim=1,variance=var,lengthscale=lenscl)
kp1 = GPy.kern.PeriodicMatern32(input_dim=1, variance=var, lengthscale=lenscl, period=per, n_freq=N, lower=a, upper=b)
ka1 = AperiodicMatern32(input_dim=1, variance=var, lengthscale=lenscl, period=per, n_freq=N, lower=a, upper=b)
lenscl=2.
km2 = GPy.kern.Matern32(input_dim=1,variance=var,lengthscale=lenscl)
kp2 = GPy.kern.PeriodicMatern32(input_dim=1, variance=var, lengthscale=lenscl, period=per, n_freq=N, lower=a, upper=b)
ka2 = AperiodicMatern32(input_dim=1, variance=var, lengthscale=lenscl, period=per, n_freq=N, lower=a, upper=b)
lenscl=5.
km3 = GPy.kern.Matern32(input_dim=1,variance=var,lengthscale=lenscl)
kp3 = GPy.kern.PeriodicMatern32(input_dim=1, variance=var, lengthscale=lenscl, period=per, n_freq=N, lower=a, upper=b)
ka3 = AperiodicMatern32(input_dim=1, variance=var, lengthscale=lenscl, period=per, n_freq=N, lower=a, upper=b)
"""
Explanation: We know from the article that a Matérn kernel $k$ can be decomposed as a sum of a periodic and an aperiodic kernel: $k=k_p+k_a$. In the code below, we consider 3 Matérn 3/2 kernels km1, km2, km3 with various lengthscales and we denote by kp[123] and ka[123] their associated periodic and aperiodic components.
End of explanation
"""
x = 5.
fig, ax = plt.subplots(figsize=(4,4))
km1.plot(x,plot_limits=[a,b], ax=ax)#, 'all', None,'b-')
km2.plot(x,plot_limits=[a,b], ax=ax)#, 'all', None,'b--')
km3.plot(x,plot_limits=[a,b], ax=ax)
plt.xlabel('')
plt.ylabel('')
plt.legend(["$\\theta\ =\ 0.5$","$\\theta \ = \ 2$","$\\theta \ = \ 5$"],prop={'size':12},borderaxespad=0.)
plt.ylim([-0.1,1.1])
"""
Explanation: Subfigure a: Matern 3/2 kernel $k$
End of explanation
"""
fig, ax = plt.subplots(figsize=(4,4))
kp1.plot(x,plot_limits=[a,b], ax=ax)
kp2.plot(x,plot_limits=[a,b], ax=ax)
kp3.plot(x,plot_limits=[a,b], ax=ax)
plt.xlabel('')
plt.ylabel('')
#plt.legend(["$\\theta\ =\ 0.5$","$\\theta \ = \ 2$","$\\theta \ = \ 5$"],prop={'size':12},borderaxespad=0.)
plt.ylim([-0.13,0.42])
"""
Explanation: Subfigure b: periodic sub-kernel $k_p$
End of explanation
"""
fig, ax = plt.subplots(figsize=(4,4))
ka1.plot(x,plot_limits=[a,b], ax=ax)
ka2.plot(x,plot_limits=[a,b], ax=ax)
ka3.plot(x,plot_limits=[a,b], ax=ax)
plt.xlabel('')
plt.ylabel('')
plt.title('aperiodic')
#plt.legend(["$\\theta\ =\ 0.5$","$\\theta \ = \ 2$","$\\theta \ = \ 5$"],prop={'size':12},borderaxespad=0.)
plt.ylim([-0.45,1.05])
"""
Explanation: Subfigure c: aperiodic sub-kernel $k_a$
End of explanation
"""
|
machinelearningnanodegree/stanford-cs231
|
solutions/levin/assignment2/BatchNormalization.ipynb
|
mit
|
# As usual, a bit of setup
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
# CIFAR-10 batches: '../../assignment1/cs231n/datasets/cifar-10-batches-py'
data = get_CIFAR10_data()
for k, v in data.iteritems():
print '%s: ' % k, v.shape
"""
Explanation: Batch Normalization
One way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].
The idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.
The authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.
It is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.
[3] Sergey Ioffe and Christian Szegedy, "Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift", ICML 2015.
End of explanation
"""
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print 'Before batch normalization:'
print ' means: ', a.mean(axis=0)
print ' stds: ', a.std(axis=0)
# Means should be close to zero and stds close to one
print 'After batch normalization (gamma=1, beta=0)'
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
print ' mean: ', a_norm.mean(axis=0)
print ' std: ', a_norm.std(axis=0)
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print 'After batch normalization (nontrivial gamma, beta)'
print ' means: ', a_norm.mean(axis=0)
print ' stds: ', a_norm.std(axis=0)
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
for t in xrange(50):
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print 'After batch normalization (test-time):'
print ' means: ', a_norm.mean(axis=0)
print ' stds: ', a_norm.std(axis=0)
"""
Explanation: Batch normalization: Forward
In the file cs231n/layers.py, implement the batch normalization forward pass in the function batchnorm_forward. Once you have done so, run the following to test your implementation.
End of explanation
"""
# Gradient check batchnorm backward pass
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
print 'dx error: ', rel_error(dx_num, dx)
print 'dgamma error: ', rel_error(da_num, dgamma)
print 'dbeta error: ', rel_error(db_num, dbeta)
"""
Explanation: Batch Normalization: backward
Now implement the backward pass for batch normalization in the function batchnorm_backward.
To derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.
Once you have finished, run the following to numerically check your backward pass.
End of explanation
"""
N, D = 100, 500
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
t1 = time.time()
dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
t2 = time.time()
dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
t3 = time.time()
print 'dx difference: ', rel_error(dx1, dx2)
print 'dgamma difference: ', rel_error(dgamma1, dgamma2)
print 'dbeta difference: ', rel_error(dbeta1, dbeta2)
print 'speedup: %.2fx' % ((t2 - t1) / (t3 - t2))
"""
Explanation: Batch Normalization: alternative backward
In class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.
Surprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function batchnorm_backward_alt and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.
NOTE: You can still complete the rest of the assignment if you don't figure this part out, so don't worry too much if you can't get it.
End of explanation
"""
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print 'Running check with reg = ', reg
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64,
use_batchnorm=True)
loss, grads = model.loss(X, y)
print 'Initial loss: ', loss
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
if reg == 0: print
"""
Explanation: Fully Connected Nets with Batch Normalization
Now that you have a working implementation for batch normalization, go back to your FullyConnectedNet in the file cs231n/classifiers/fc_net.py. Modify your implementation to add batch normalization.
Concretely, when the flag use_batchnorm is True in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.
HINT: You might find it useful to define an additional helper layer similar to those in the file cs231n/layer_utils.py. If you decide to do so, do it in the file cs231n/classifiers/fc_net.py.
End of explanation
"""
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
bn_solver.train()
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
solver.train()
"""
Explanation: Batchnorm for deep networks
Run the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.
End of explanation
"""
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
"""
Explanation: Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
End of explanation
"""
# Try training a very deep net with batchnorm
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print 'Running weight scale %d / %d' % (i + 1, len(weight_scales))
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers[weight_scale] = solver
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
best_train_accs.append(max(solvers[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
best_val_accs.append(max(solvers[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gcf().set_size_inches(10, 15)
plt.show()
"""
Explanation: Batch normalization and initialization
We will now run a small experiment to study the interaction of batch normalization and weight initialization.
The first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. The second cell will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.
End of explanation
"""
|
emredjan/emredjan.github.io
|
code/plot_normal.ipynb
|
mit
|
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn') # pretty matplotlib plots
plt.rcParams['figure.figsize'] = (12, 8)
"""
Explanation: Plotting Any Kind of Distribution with matplotlib and scipy
It's important to plot distributions of variables when doing exploratory analysis. However, there may be times when you want to see the theoretical distribution on a plot, i.e. when you want to see how much your variable deviates from it, or when you want to decide on a distribution function visually.
Let's take the normal (gaussian) distribution as an example. The probability density function (pdf) is:
$
f(x|\mu,\sigma^2)=\frac{1}{\sqrt{2\pi\sigma^2}}e^{-\frac{(x-\mu)^2}{2\sigma^2}}
$
Basically, if we have a range of $x$'s, a mean ($\mu$) and a standard deviation ($\sigma$), we can pass them onto this formula and get corresponding $y$ values, which we can then plot using the standard matplotlib plot() function:
Let's setup the scene first:
End of explanation
"""
x = np.linspace(-5, 5, 5000)
mu = 0
sigma = 1
y = (1 / (np.sqrt(2 * np.pi * np.power(sigma, 2)))) * \
(np.power(np.e, -(np.power((x - mu), 2) / (2 * np.power(sigma, 2)))))
"""
Explanation: Let's get our x values, determine a mean and a standard deviation, and setup the formula for the normal pdf:
End of explanation
"""
plt.plot(x, y);
"""
Explanation: Now we can plot these using:
End of explanation
"""
import scipy.stats as ss
x = np.linspace(-5, 5, 5000)
mu = 0
sigma = 1
y_pdf = ss.norm.pdf(x, mu, sigma) # the normal pdf
y_cdf = ss.norm.cdf(x, mu, sigma) # the normal cdf
plt.plot(x, y_pdf, label='pdf')
plt.plot(x, y_cdf, label='cdf')
plt.legend();
"""
Explanation: Which is fine and dandy, but it gets quite cumbersome to write those formulas from scratch using numpy and scipy functions for every distribution we want. Some are even really hard to implement; take for example the cumulative distribution function (cdf) of the standard normal distribution:
$
\Phi(x)=\frac{1}{\sqrt{2\pi}}\int_{-\infty }^{x}e^{-t^{2}/2}\,{\rm {d}}t
$
Fortunately for us, the people at scipy provided nearly every kind of distribution function in the scipy.stats package. Using that, we can achieve the same result as above in a cleaner, less error-prone code. We can even plot the cdf on top of that:
End of explanation
"""
import scipy.stats as ss
def plot_normal(x_range, mu=0, sigma=1, cdf=False, **kwargs):
'''
Plots the normal distribution function for a given x range
If mu and sigma are not provided, standard normal is plotted
If cdf=True cumulative distribution is plotted
Passes any keyword arguments to matplotlib plot function
'''
x = x_range
if cdf:
y = ss.norm.cdf(x, mu, sigma)
else:
y = ss.norm.pdf(x, mu, sigma)
plt.plot(x, y, **kwargs)
x = np.linspace(-5, 5, 5000)
plot_normal(x)
plot_normal(x, cdf=True)
plot_normal(x, -2, 1, color='red', lw=2, ls='-', alpha=0.5)
plot_normal(x, 2, 1.2, color='blue', lw=2, ls='-', alpha=0.5)
plot_normal(x, 0, 0.8, color='green', lw=2, ls='-', alpha=0.5);
"""
Explanation: For reuse, it may be a good idea to put these into a function:
End of explanation
"""
import scipy.stats as ss
def plot_exponential(x_range, mu=0, sigma=1, cdf=False, **kwargs):
'''
Plots the exponential distribution function for a given x range
If mu and sigma are not provided, standard exponential is plotted
If cdf=True cumulative distribution is plotted
Passes any keyword arguments to matplotlib plot function
'''
x = x_range
if cdf:
y = ss.expon.cdf(x, mu, sigma)
else:
y = ss.expon.pdf(x, mu, sigma)
plt.plot(x, y, **kwargs)
x = np.linspace(0, 5, 5000)
plot_exponential(x, 0, 1, color='red', lw=2, ls='-', alpha=0.5, label='pdf')
plot_exponential(x, 0, 1, cdf=True, color='blue', lw=2, ls='-', alpha=0.5, label='cdf')
plt.legend();
"""
Explanation: Given this knowledge, we can now define a function for plotting any kind of distribution. The important bit is to be careful about the parameters of the corresponding scipy.stats function (Some distributions require more than a mean and a standard deviation). You can check those parameters on the official docs for scipy.stats.
The exponential distribution:
End of explanation
"""
import scipy.stats as ss
def plot_f(x_range, dfn, dfd, mu=0, sigma=1, cdf=False, **kwargs):
'''
Plots the f distribution function for a given x range, dfn and dfd
If mu and sigma are not provided, standard f is plotted
If cdf=True cumulative distribution is plotted
Passes any keyword arguments to matplotlib plot function
'''
x = x_range
if cdf:
y = ss.f.cdf(x, dfn, dfd, mu, sigma)
else:
y = ss.f.pdf(x, dfn, dfd, mu, sigma)
plt.plot(x, y, **kwargs)
x = np.linspace(0.001, 5, 5000)
plot_f(x, 10, 10, 0, 1, color='red', lw=2, ls='-', alpha=0.5, label='pdf')
plot_f(x, 10, 10, 0, 1, cdf=True, color='blue', lw=2, ls='-', alpha=0.5, label='cdf')
plt.legend();
"""
Explanation: The F distribution:
End of explanation
"""
import scipy.stats as ss
def plot_beta(x_range, a, b, mu=0, sigma=1, cdf=False, **kwargs):
'''
Plots the f distribution function for a given x range, a and b
If mu and sigma are not provided, standard beta is plotted
If cdf=True cumulative distribution is plotted
Passes any keyword arguments to matplotlib plot function
'''
x = x_range
if cdf:
y = ss.beta.cdf(x, a, b, mu, sigma)
else:
y = ss.beta.pdf(x, a, b, mu, sigma)
plt.plot(x, y, **kwargs)
x = np.linspace(0, 1, 5000)
plot_beta(x, 5, 2, 0, 1, color='red', lw=2, ls='-', alpha=0.5, label='pdf')
plot_beta(x, 5, 2, 0, 1, cdf=True, color='blue', lw=2, ls='-', alpha=0.5, label='cdf')
plt.legend();
"""
Explanation: The beta distribution:
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.19/_downloads/963786e591fc03946ca0f3b819f12772/plot_xdawn_denoising.ipynb
|
bsd-3-clause
|
# Authors: Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
from mne import (io, compute_raw_covariance, read_events, pick_types, Epochs)
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.viz import plot_epochs_image
print(__doc__)
data_path = sample.data_path()
"""
Explanation: XDAWN Denoising
XDAWN filters are trained from epochs, signal is projected in the sources
space and then projected back in the sensor space using only the first two
XDAWN components. The process is similar to an ICA, but is
supervised in order to maximize the signal to signal + noise ratio of the
evoked response.
<div class="alert alert-danger"><h4>Warning</h4><p>As this denoising method exploits the known events to
maximize SNR of the contrast between conditions it can lead
to overfitting. To avoid a statistical analysis problem you
should split epochs used in fit with the ones used in
apply method.</p></div>
References
[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
algorithm to enhance evoked potentials: application to brain-computer
interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
August). Theoretical analysis of xDAWN algorithm: application to an
efficient sensor selection in a P300 BCI. In Signal Processing Conference,
2011 19th European (pp. 1382-1386). IEEE.
End of explanation
"""
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(vis_r=4)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin') # replace baselining with high-pass
events = read_events(event_fname)
raw.info['bads'] = ['MEG 2443'] # set bad channels
picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
exclude='bads')
# Epoching
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
verbose=False)
# Plot image epoch before xdawn
plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500)
# Estimates signal covariance
signal_cov = compute_raw_covariance(raw, picks=picks)
# Xdawn instance
xd = Xdawn(n_components=2, signal_cov=signal_cov)
# Fit xdawn
xd.fit(epochs)
# Denoise epochs
epochs_denoised = xd.apply(epochs)
# Plot image epoch after Xdawn
plot_epochs_image(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500)
"""
Explanation: Set parameters and read data
End of explanation
"""
|
kaphka/ml-software
|
create_data.ipynb
|
apache-2.0
|
import functools as ft
import numpy as np
import pickle

def xor(X):
    # label is 1 when the two coordinates do not share the same sign (the 2-D XOR pattern)
    if not ft.reduce(lambda old, new: old == new, X >= 0):
        return 1
    else:
        return 0
x_train = np.array([(np.random.random_sample(5000) - 0.5) * 2 for dim in range(2)]).transpose()
x_test = np.array([(np.random.random_sample(100) - 0.5) * 2 for dim in range(2)]).transpose()
y_train = np.apply_along_axis(xor, 1, x_train)
y_test = np.apply_along_axis(xor, 1, x_test)
with open('data/xor.tuple', 'wb') as xtuple:
pickle.dump((x_train, y_train, x_test, y_test), xtuple)
"""
Explanation: XOR
End of explanation
"""
!wget -P data/ https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data
housing = pd.read_csv('data/housing.data', delim_whitespace=True,
names=['CRIM',
'ZN',
'INDUS',
'CHAS',
'NOX',
'RM',
'AGE',
'DIS',
'RAD',
'TAX',
'PTRATIO',
'B',
'LSTAT',
'MEDV'])
housing.head()
with open('data/housing.dframe', 'wb') as dhousing:
pickle.dump(housing, dhousing)
"""
Explanation: Multivariante Regression - Housing Data Set
https://archive.ics.uci.edu/ml/datasets/Housing
1. CRIM: per capita crime rate by town
2. ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
3. INDUS: proportion of non-retail business acres per town
4. CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
5. NOX: nitric oxides concentration (parts per 10 million)
6. RM: average number of rooms per dwelling
7. AGE: proportion of owner-occupied units built prior to 1940
8. DIS: weighted distances to five Boston employment centres
9. RAD: index of accessibility to radial highways
10. TAX: full-value property-tax rate per \$10,000
11. PTRATIO: pupil-teacher ratio by town
12. B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
13. LSTAT: \% lower status of the population
14. MEDV: Median value of owner-occupied homes in $1000's
End of explanation
"""
!wget -P data/ https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data
data = pd.read_csv('data/pima-indians-diabetes.data',
names=['n_pregnant',
'glucose',
'mmHg',
'triceps',
'insulin',
'BMI',
'pedigree',
'age',
'class'])
data.head()
x = np.array(data)[:,:-1]
y = np.array(data)[:,-1]
n_train = int(len(x) * 0.70)
x_train = x[:n_train]
x_test = x[n_train:]
y_train = y[:n_train]
y_test = y[n_train:]
with open('data/pima-indians-diabetes.tuple', 'wb') as xtuple:
pickle.dump((x_train, y_train, x_test, y_test), xtuple)
"""
Explanation: Binary Classification - Pima Indians Diabetes Data Set
https://archive.ics.uci.edu/ml/datasets/Pima+Indians+Diabetes
1. Number of times pregnant
2. Plasma glucose concentration a 2 hours in an oral glucose tolerance test
3. Diastolic blood pressure (mm Hg)
4. Triceps skin fold thickness (mm)
5. 2-Hour serum insulin (mu U/ml)
6. Body mass index (weight in kg/(height in m)^2)
7. Diabetes pedigree function
8. Age (years)
9. Class variable (0 or 1)
End of explanation
"""
!wget -P data/ http://deeplearning.net/data/mnist/mnist.pkl.gz
import cPickle, gzip, numpy
# Load the dataset
f = gzip.open('data/mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
plt.imshow(train_set[0][0].reshape((28,28)),cmap='gray', interpolation=None)
!wget -P data/ http://data.dmlc.ml/mxnet/data/mnist.zip
!unzip -d data/ -u data/mnist.zip
"""
Explanation: Image Classification - MNIST dataset
http://deeplearning.net/data/mnist/mnist.pkl.gz
End of explanation
"""
!wget -P data/ https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
!tar -xzf data/cifar-10-python.tar.gz -C data/
with open('data/cifar-10-batches-py/data_batch_1', 'rb') as batch:
cifar1 = cPickle.load(batch)
cifar1.keys()
img = np.stack([cifar1['data'][0].reshape((3,32,32))[0,:,:],
cifar1['data'][0].reshape((3,32,32))[1,:,:],
cifar1['data'][0].reshape((3,32,32))[2,:,:]],axis=2)
plt.imshow(img, cmap='gray')
"""
Explanation: Image Classification - CIFAR-10 dataset
https://www.cs.toronto.edu/~kriz/cifar.html
End of explanation
"""
|
ebonnassieux/fundamentals_of_interferometry
|
3_Positional_Astronomy/3_3_horizontal_coordinates.ipynb
|
gpl-2.0
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
"""
Explanation: Outline
Glossary
Positional Astronomy
Previous: 3.2 Hour Angle (HA) and Local Sidereal Time (LST)
Next: 3.4 Direction Cosine Coordinates ($l,m,n$)
Import standard modules:
End of explanation
"""
from IPython.display import HTML
HTML('../style/code_toggle.html')
import ephem
import matplotlib
%pylab inline
pylab.rcParams['figure.figsize'] = (15, 10)
"""
Explanation: Import section specific modules:
End of explanation
"""
#Creating the observer: KAT-7
KAT7 = ephem.Observer()
KAT7.lat = '-30:43:17'
KAT7.lon = '21:25:40.08'
KAT7.elevation = 0.0
KAT7.date = '2016/5/30 00:00:00' #UTC
#Creating the celestial bodies
star_names = np.array(["Rigel","Thuban","Mimosa","Procyon","Sirius","Achernar","Menkar","Zaurak","Aldebaran","Betelgeuse"])
star_objects = np.empty((len(star_names),),dtype=object)
for k in xrange(len(star_names)):
star_objects[k] = ephem.star(star_names[k],KAT7)
#Creating the time-strings at which we observe
hours = np.empty((96,),dtype=object)
minutes = np.empty((96,),dtype=object)
alt_az_mat = np.zeros((len(star_names),len(hours)+1,2),dtype=float) #(sources,hours,horz_coord)
hours_c = 0
for k in xrange(len(hours)):
if k % 4 == 0:
if hours_c < 10:
hours[k] = '0'+str(hours_c)
else:
hours[k] = str(hours_c)
minutes[k] = "00"
elif k % 4 == 1:
if hours_c < 10:
hours[k] = '0'+str(hours_c)
else:
hours[k] = str(hours_c)
minutes[k] = "15"
elif k % 4 == 2:
if hours_c < 10:
hours[k] = '0'+str(hours_c)
else:
hours[k] = str(hours_c)
minutes[k] = "30"
elif k % 4 == 3:
if hours_c < 10:
hours[k] = '0'+str(hours_c)
else:
hours[k] = str(hours_c)
hours_c = hours_c + 1
minutes[k] = "45"
#Compute the alt/az for different stars observed by KAT-7 at different times on 2016/5/30
for k in xrange(len(hours)):
#Set new time
n_date = '2016/5/30 ' + hours[k] + ':' + minutes[k] + ':00'
KAT7.date = n_date
#Calculate new alt/az
for j in xrange(len(star_names)):
star_objects[j].compute(KAT7)
alt_az_mat[j,k,0] = float(star_objects[j].alt)
alt_az_mat[j,k,1] = float(star_objects[j].az)
#Copy first value to last value
alt_az_mat[:,-1,:] = alt_az_mat[:,0,:]
time_v = np.linspace(0,24,len(hours)+1,endpoint=True)
#Plot alt
matplotlib.rcParams.update({'font.size': 13.75})
fig, ax = plt.subplots()
c = ["r","b","g","y","m","c","k"]
l = ["-","--"]
l_ind = 0
c_ind = 0
for k in xrange(len(star_names)):
if c_ind == 7:
c_ind = 0
l_ind = 1
mask = np.logical_not(np.logical_and(alt_az_mat[k,:,0]*(180/np.pi)>-5,alt_az_mat[k,:,0]*(180/np.pi)<5))
new_curve_y = alt_az_mat[k,mask,0]*(180/np.pi)
new_curve_x = time_v[mask]
ax.plot(new_curve_x,new_curve_y,c[c_ind]+l[l_ind],label=star_names[k],lw=2,zorder=k)
c_ind = c_ind +1
ax.fill_between(time_v, -5, 5, facecolor='k',alpha=1,zorder=k+1)
ax.annotate("HORIZON", xy = (11.5,5), xytext=(11.5, 15),arrowprops=dict(facecolor="b", shrink=1))
ax.legend()
ax.set_xlim([0,24])
ax.set_ylim([-90,90])
ticks = np.array([-90,-80,-70,-60,-50,-40,-30,-20,-10,0,10,20,30,40,50,60,70,80,90])
plt.yticks(ticks)
ticks = np.array([0,2,4,6,8,10,12,14,16,18,20,22,24])
plt.xticks(ticks)
plt.xlabel("UTC [$h$]")
plt.ylabel("Altitude [$^{\circ}$]")
plt.title("KAT-7: 2016/5/30")
labels = [item.get_text() for item in ax.get_yticklabels()]
labels = np.array(["-90$^{\circ}$","-80$^{\circ}$","-70$^{\circ}$","-60$^{\circ}$","-50$^{\circ}$","-40$^{\circ}$","-30$^{\circ}$","-20$^{\circ}$","-10$^{\circ}$","0$^{\circ}$","10$^{\circ}$","20$^{\circ}$","30$^{\circ}$","40$^{\circ}$","50$^{\circ}$","60$^{\circ}$","70$^{\circ}$","80$^{\circ}$","90$^{\circ}$"])
ax.set_yticklabels(labels)
ax.grid('on')
"""
Explanation: 3.3 Horizontal Coordinates (ALT,AZ)
3.3.1 Coordinate Definitions
In $\S$ 3.2.1 ➞ we introduced the concept of an hour angle, which allows us to determine the time that still needs to elapse before a source crosses the local meridian. This however does not tell us where we should point a telescope on earth in order to observe a source with a specific hour angle. The horizontal coordinates azimuth $\mathcal{A}$ and altitude $\mathcal{E}$ (elevation) is used to enable an observer on earth to locate celestial objects in the observer's local sky. The observer's horizontal plane is the fundamental plane of this coordinate system and is known as the celestial horizon. The azimuth angle is measured in the celestial horizon from due north towards the east, while the altitude of a celestial object is the angle between it and the celestial horizon. Both azimuth and elevation are measured in degrees. The azimuth and elevation angle are depicted in Fig. 3.3.1 ⤵ <!--\ref{pos:fig:horizontal}-->
<a id='pos:fig:horizontal'></a> <!--\label{pos:fig:horizontal}--><img src='figures/horizontal.svg' width=40%>
Figure 3.3.1: The horizontal coordinates. <span style="background-color:cyan">KT:XX: Azimuth and elevation symbols seem to have been exchanged in the figure.</span>
The equations below allow us to convert between equatorial and horizontal coordinates
<p class=conclusion>
<font size=4><b> Converting between equatorial and horizontal </b></font>
<br>
<br>
\begin{eqnarray}
\cos\delta\cos H &=& \cos L_a\sin \mathcal{E} - \sin L_a\cos \mathcal{E}\cos \mathcal{A}\\
-\cos\delta\sin H&=& \cos \mathcal{E}\sin \mathcal{A}\\
\sin\delta &=& \sin L_a\sin \mathcal{E}+\cos L_a \cos \mathcal{E} \cos \mathcal{A}
\end{eqnarray}
</p>
<div class=advice>
<b>Note:</b> In the conversion equations above $L_a$ denotes latitude (see <a href='3_1_equatorial_coordinates.ipynb'>$\S$ 3.1 ➞</a>).
</div>
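These relations translate directly into code. The short numpy sketch below is only illustrative (the function name and the all-radians convention are assumptions; pyephem performs the actual computations later in this section):
import numpy as np

def horizontal_to_equatorial(E, A, L_a):
    # altitude E, azimuth A and latitude L_a are all given in radians
    sin_delta = np.sin(L_a) * np.sin(E) + np.cos(L_a) * np.cos(E) * np.cos(A)
    delta = np.arcsin(sin_delta)                                   # declination
    cos_delta_cos_H = np.cos(L_a) * np.sin(E) - np.sin(L_a) * np.cos(E) * np.cos(A)
    cos_delta_sin_H = -np.cos(E) * np.sin(A)
    H = np.arctan2(cos_delta_sin_H, cos_delta_cos_H)               # hour angle
    return H, delta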
The above equations were derived by applying the spherical trigonometric identities in <a href='../2_Mathematical_Groundwork/2_13_spherical_trigonometry.ipynb'>$\S$ 2.13 ➞</a> to
the triangle $\Delta PSZ$ which is depicted in Fig. 3.3.2 ⤵ <!--\ref{pos:fig:conversion_alaz_radec}--> (see Appendix ➞).
<a id='pos:fig:conversion_alaz_radec'></a> <!--\label{pos:fig:conversion_alaz_radec}--><img src='figures/conversion.svg' width=40%>
Figure 3.3.2: The source-celestial pole-zenith triangle; which enables us to derive the conversion equations between horizontal and equatorial coordinates. The red plane represents the fundamental plane of the horizontal coordinate system, while the blue plane represents the
fundamental plane of the celestial coordinate system. <span style="background-color:cyan">KT:XX: Where is the source located in the figure?</span>
<div class=advice>
<b>Note:</b> The parallactic angle <span style="background-color:cyan">KT:GM:The parallactic angle needs to be in italics since this is its first appearance?</span> $q$ associated with a specific location on the celestial sphere $S$ is the angle between two great circles; the hour circle of $S$ and the great circle that passes through zenith and $S$. The parallactic angle $q$ is depicted in <a href='#pos:fig:conversion_alaz_radec'>Fig. 3.3.2 ⤵</a>. <!--\ref{pos:fig:conversion_alaz_radec}-->
The parallactic angle, and how it pertains to radio interferometry is discussed in more detail in <span style="background-color:cyan">KT:LF:Link to dead notebook</span> <a href='../7_Observing_Systems/7_7_antenna_mounts_and_parallactic_angle.ipynb'>$\S$ 7.7 ➞</a>.
</div>
3.3.2 Examples
Let us cement the concepts we have learned in this section by once again making use of the pyephem package. In this section we will use it to compute the horizontal coordinates for two primary use cases. In the first use case we plot the horizontal coordinates of a few randomly selected stars under the assumption that they are "observed" with KAT7. We will compute the horizontal coordinates of the selected stars for one entire day (2016/5/30). As we have already mentioned, the horizontal coordinates of the stars change during the course of one day, since the earth is rotating around its own axis. To achieve this we first create a pyephem observer object acting as a proxy for the KAT-7 array. Next we create pyephem body objects for the randomly selected stars. Each of the body objects has a method called compute. This compute method can take in an observer object. The compute method of the body object uses the geographical location and the date attributes of the observer object to calculate the horizontal coordinates of the celestial body (in this case a star) the body object embodies. To track the change of the horizontal coordinates of stars (i.e. the stars we are interested in) we only need to iteratively call the compute methods of the body objects associated with them. Every time we call the compute method we just pass in an observer object with an appropriately altered date attribute. The code snippet below implements the above procedure. The altitude and azimuth angles of ten well known stars, calculated with pyephem, are depicted in Fig. 3.3.3 ⤵ <!--\ref{pos:fig:alt_stars}--> and Fig. 3.3.4 ⤵ <!--\ref{pos:fig:az_stars}-->.
End of explanation
"""
#Plot az
matplotlib.rcParams.update({'font.size': 13.75})
fig, ax = plt.subplots()
c = ["r","b","g","y","m","c","k"]
l = ["-","--"]
l_ind = 0
c_ind = 0
for i in xrange(10):
if c_ind == 7:
c_ind = 0
l_ind = 1
plt.plot(time_v,alt_az_mat[i,:,1]*(180/np.pi),c[c_ind]+l[l_ind],lw=2,label=star_names[i])
c_ind = c_ind +1
ax.legend()
ax.set_xlim([0,24])
ax.set_ylim([0,360])
ticks = np.array([0,60,120,180,240,300,360])
plt.yticks(ticks)
ticks = np.array([0,2,4,6,8,10,12,14,16,18,20,22,24])
plt.xticks(ticks)
plt.xlabel("UTC [$h$]")
plt.ylabel("Azimuth [$^{\circ}$]")
plt.title("KAT-7: 2016/5/30")
labels = [item.get_text() for item in ax.get_yticklabels()]
labels = np.array(["0$^{\circ}$","60$^{\circ}$","120$^{\circ}$","180$^{\circ}$","240$^{\circ}$","300$^{\circ}$","360$^{\circ}$"])
ax.set_yticklabels(labels)
ax.grid('on')
"""
Explanation: Figure 3.3.3: The altitude angle of ten well known stars during 2016/5/30 as observed by the KAT-7 array. The altitude angle was computed by employing pyephem. The peaks of the curves indicate the times at which the stars were at transit. The black rectangle represents the fundamental horizon<span style="background-color:cyan">KT:XX: What is the fundamental horizon? Do you mean the celestial horizon?</span> . Any star that stays below the horizon would not be observable at all (see the curve associated with Thuban for an example). Any star that stays above the horizon for the entire day is a circumpolar star. Mimosa can almost be classified as a circumpolar star. <a id='pos:fig:alt_stars'></a> <!--\label{pos:fig:alt_stars-->
We have not yet plotted the azimuth coordinates for the randomly selected stars. We do so by using the code snippet below.
End of explanation
"""
#Preliminaries
matplotlib.rcParams.update({'font.size': 13.75})
observatories = ["LOFAR","KAT7","MWA","VLA","ALMA","GMRT"]
lat_v = ["52:54:32","-30:43:17","-26:42:12","34:04:43","-23:01:09","19:05:47"]
lon_v = ["06:52:08","21:25:40.08","116:40:16","-107:37:05","-67:45:12","74:02:59"]
alt_az = np.zeros((len(observatories),2),dtype=float)
#Loading different observatories and calculating alt/az of Betelgeuse for each of them
for k in xrange(len(observatories)):
obs = ephem.Observer()
obs.lat = lat_v[k]
obs.lon = lon_v[k]
obs.elevation = 0.0
obs.date = '2016/5/30 00:00:00' #UTC
betelgeuse = ephem.star("Betelgeuse",obs)
alt_az[k,0] = float(betelgeuse.alt)
alt_az[k,1] = float(betelgeuse.az)
#Plotting
cluster = ['o','^','>','s','*','v']
col = ['b','r','g','k','c','m']
fig, ax = plt.subplots()
for xp, yp, m, n, col_v in zip(alt_az[:,0]*(180/np.pi), alt_az[:,1]*(180/np.pi), cluster, observatories,col):
ax.plot([xp],[yp], marker=m, c = col_v, label = n, markersize = 20, linestyle='None')
ax.legend(numpoints=1)
ax.set_xlim([-90,90])
ax.set_ylim([0,360])
ticks = np.array([0,60,120,180,240,300,360])
plt.yticks(ticks)
ticks = np.array([-90,-80,-70,-60,-50,-40,-30,-20,-10,0,10,20,30,40,50,60,70,80,90])
plt.xticks(ticks)
labels = [item.get_text() for item in ax.get_yticklabels()]
labels = np.array(["0$^{\circ}$","60$^{\circ}$","120$^{\circ}$","180$^{\circ}$","240$^{\circ}$","300$^{\circ}$","360$^{\circ}$"])
ax.set_yticklabels(labels)
labels = [item.get_text() for item in ax.get_xticklabels()]
labels = np.array(["-90$^{\circ}$","-80$^{\circ}$","-70$^{\circ}$","-60$^{\circ}$","-50$^{\circ}$","-40$^{\circ}$","-30$^{\circ}$","-20$^{\circ}$","-10$^{\circ}$","0$^{\circ}$","10$^{\circ}$","20$^{\circ}$","30$^{\circ}$","40$^{\circ}$","50$^{\circ}$","60$^{\circ}$","70$^{\circ}$","80$^{\circ}$","90$^{\circ}$"])
ax.set_xticklabels(labels)
plt.xlabel("Altitude [$^{\circ}$]")
plt.ylabel("Azimuth [$^{\circ}$]")
plt.title("Betelgeuse: 2016/5/30 - 00:00:00 UTC")
ax.grid('on')
"""
Explanation: Figure 3.3.4: The azimuth angle of ten well known stars during 2016/5/30 as observed by the KAT-7 array. The azimuth angle was computed by employing pyephem. <a id='pos:fig:az_stars'></a> <!--\label{pos:fig:az_stars}-->
In the second use case we determine the horizontal coordinates of Betelgeuse
for different arrays around the world at a specific moment in time (2016/5/30 00:00:00). We again use pyephem to accomplish this. See the code snippet below for the exact details of how this can be achieved. We plot the main result of the code snippet in Fig. 3.3.5 ⤵ <!--\ref{pos:fig:h_betelgeuse}-->.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/mohc/cmip6/models/ukesm1-0-mmh/land.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'ukesm1-0-mmh', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: MOHC
Source ID: UKESM1-0-MMH
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:15
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dynamic vegetation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Do the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies of the snow free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil hydrology in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, describe the dependencies of the snow albedo calculations*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
*Treatment of vegetation biomass*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to the atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled with rivers, which quantities are exchanged between the lakes and rivers?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Basins not flowing to ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/noaa-gfdl/cmip6/models/sandbox-1/aerosol.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'sandbox-1', 'aerosol')
"""
Explanation: ES-DOC CMIP6 Model Properties - Aerosol
MIP Era: CMIP6
Institute: NOAA-GFDL
Source ID: SANDBOX-1
Topic: Aerosol
Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model.
Properties: 70 (38 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-20 15:02:35
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
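# Hypothetical example with placeholder values (not real author details):
# DOC.set_author("Jane Doe", "jane.doe@example.com")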
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Meteorological Forcings
5. Key Properties --> Resolution
6. Key Properties --> Tuning Applied
7. Transport
8. Emissions
9. Concentrations
10. Optical Radiative Properties
11. Optical Radiative Properties --> Absorption
12. Optical Radiative Properties --> Mixtures
13. Optical Radiative Properties --> Impact Of H2o
14. Optical Radiative Properties --> Radiative Scheme
15. Optical Radiative Properties --> Cloud Interactions
16. Model
1. Key Properties
Key properties of the aerosol model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of aerosol model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of aerosol model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
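# Illustrative example only (the real scheme scope is a property of the documented model):
# DOC.set_value("whole atmosphere")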
"""
Explanation: 1.3. Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Prognostic variables in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of tracers in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are aerosol calculations generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping framework used by the aerosol model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the time evolution of the prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for aerosol advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for aerosol physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the aerosol model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.5. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Meteorological Forcings
**
4.1. Variables 3D
Is Required: FALSE Type: STRING Cardinality: 0.1
Three dimensional forcing variables, e.g. U, V, W, T, Q, P, convective mass flux
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Variables 2D
Is Required: FALSE Type: STRING Cardinality: 0.1
Two dimensional forcing variables, e.g. land-sea mask definition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Frequency
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Frequency with which meteorological forcings are applied (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Resolution
Resolution in the aerosol model grid
5.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, e.g. 50 km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 5.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 5.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for aerosol model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and the possible conflicts with parameterization level tuning. In particular, describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Transport
Aerosol transport
7.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of transport in the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
"""
Explanation: 7.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for aerosol transport modeling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.3. Mass Conservation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method used to ensure mass conservation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.4. Convention
Is Required: TRUE Type: ENUM Cardinality: 1.N
Transport by convention
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Emissions
Atmospheric aerosol emissions
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of emissions in the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method used to define aerosol species (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the aerosol species are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prescribed Climatology
Is Required: FALSE Type: ENUM Cardinality: 0.1
Specify the climatology type for aerosol emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and prescribed via a climatology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Other Method Characteristics
Is Required: FALSE Type: STRING Cardinality: 0.1
Characteristics of the "other method" used for aerosol emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Concentrations
Atmospheric aerosol concentrations
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of concentrations in the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Prescribed Fields Mmr
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed as mass mixing ratios.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod_plus_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Prescribed Fields Aod Plus Ccn
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed as AOD plus CCNs.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Optical Radiative Properties
Aerosol optical and radiative properties
10.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of optical and radiative properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11. Optical Radiative Properties --> Absorption
Absorption properties in the aerosol scheme
11.1. Black Carbon
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Dust
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.3. Organics
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Optical Radiative Properties --> Mixtures
**
12.1. External
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there external mixing with respect to chemical composition?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12.2. Internal
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there internal mixing with respect to chemical composition?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Mixing Rule
Is Required: FALSE Type: STRING Cardinality: 0.1
If there is internal mixing with respect to chemical composition then indicate the mixing rule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13. Optical Radiative Properties --> Impact Of H2o
**
13.1. Size
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does H2O impact size?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.2. Internal Mixture
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does H2O impact aerosol internal mixture?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.external_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.3. External Mixture
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does H2O impact aerosol external mixture?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Optical Radiative Properties --> Radiative Scheme
Radiative scheme for aerosol
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Shortwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of shortwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Longwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of longwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Optical Radiative Properties --> Cloud Interactions
Aerosol-cloud interactions
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of aerosol-cloud interactions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.2. Twomey
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the Twomey effect included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Twomey Minimum Ccn
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If the Twomey effect is included, then what is the minimum CCN number?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.4. Drizzle
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the scheme affect drizzle?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Cloud Lifetime
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the scheme affect cloud lifetime?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.6. Longwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of longwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Model
Aerosol model
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
"""
Explanation: 16.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the Aerosol model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other model components coupled to the Aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.4. Gas Phase Precursors
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of gas phase aerosol precursors.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.5. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.6. Bulk Scheme Species
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of species covered by the bulk scheme.
End of explanation
"""
|
jrieke/machine-intelligence-2
|
sheet06/sheet05.ipynb
|
mit
|
from __future__ import division, print_function
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.io.wavfile
sig = np.loadtxt("sound1.dat")
# sound1 = np.asarray((2**16)*sig/(max(sig)-min(sig)), np.int16)
sound1 = sig
scipy.io.wavfile.write("sound1_orig.wav", 8192, sound1)
sig = np.loadtxt("sound2.dat")
sound2 = sig
# sound2 = np.asarray((2**16)*sig/(max(sig)-min(sig)), np.int16)
scipy.io.wavfile.write("sound2_orig.wav", 8192, sound2)
sounds = np.array([sound1, sound2])
"""
Explanation: Machine Intelligence II (week 4) - Team MensaNord
Nikolai Zaki
Alexander Moore
Johannes Rieke
Georg Hoelger
Oliver Atanaszov
Exercise 1
A
End of explanation
"""
A = np.random.random((2, 2))
A_inv = np.linalg.inv(A)
xsounds = np.dot(A, sounds)
# xsounds = mixed sounds
scipy.io.wavfile.write("sound1_mixed.wav", 8192, xsounds[0])
scipy.io.wavfile.write("sound2_mixed.wav", 8192, xsounds[1])
"""
Explanation: B
End of explanation
"""
neworder = np.random.permutation(np.arange(18000))
pxsounds = xsounds[:, np.asarray(neworder)]
# pxsounds = permutated mixed sounds
scipy.io.wavfile.write("sound1_mixed_perm.wav", 8192, pxsounds[0])
scipy.io.wavfile.write("sound2_mixed_perm.wav", 8192, pxsounds[1])
"""
Explanation: C
End of explanation
"""
correlation = np.cov(sounds, pxsounds) / np.std(sounds) / np.std(pxsounds)
plt.imshow(correlation, interpolation='none')
plt.title('Correlations of original and mixed sounds',y=1.05)
plt.colorbar();
"""
Explanation: D
End of explanation
"""
# cpxsounds = centered permutated mixed sounds
cpxsounds = (pxsounds.T - np.mean(pxsounds, 1)).T
# cxsounds = centered mixed sounds
cxsounds = xsounds - np.mean(xsounds,1)[:,np.newaxis]
"""
Explanation: E
End of explanation
"""
def ddf_by_df(inp):
return 1-2.0/(1+np.exp(-inp))
W = np.random.random((2, 2))
print("Goal:")
print(A_inv)
print("W start")
print(W)
k = 0
W_store=np.zeros((18,2,2))
eta_0 = .8
for t in range(18000):
eta = eta_0/(t+1)
W_inv = np.linalg.inv(W)
x = cxsounds[:,t]
gradient = W_inv.T + np.dot(ddf_by_df(np.dot(W,x)).reshape(2,1),x.reshape(1,2))#ddf_by_df(np.dot(W,x))[:,np.newaxis]*x
W += eta*gradient
if t%1000==0:
W_store[k]=W
k+=1
print("W end")
print(W)
W_reg_store = W_store
W_regular = W
"""
Explanation: Exercise 2
A
End of explanation
"""
W = np.random.random((2, 2))
print("Goal:")
print(A_inv)
print("W start")
print(W)
eta_0 = .35
k = 0
W_store=np.zeros((18,2,2))
for t in range(18000):
x = cxsounds[:,t]
gradient = np.dot(np.eye(2) + np.dot(ddf_by_df(np.dot(W,x)).reshape(2,1),np.dot(W,x).reshape(1,2)),W)#np.dot(ddf_by_df(np.dot(W,x)),np.dot(W,x)[:,np.newaxis]),W)
eta = eta_0/(t+1)
W += eta*gradient
if t%1000==0:
W_store[k]=W
k+=1
print("W end")
print(W)
W_nat_store = W_store
W_natural = W
natural_unmixed = np.dot(W_natural, cxsounds)
regular_unmixed = np.dot(W_regular, cxsounds)
fig, axs = plt.subplots(5,2, figsize = (12,8))
for i in range(2):
axs[0][i].plot(sounds[i])
axs[1][i].plot(cxsounds[i])
axs[2][i].plot(cpxsounds[i])
axs[3][i].plot(natural_unmixed[i])
axs[4][i].plot(regular_unmixed[i])
axs[0][i].set_title('original{} '.format(i+1))
axs[1][i].set_title('mixed{} '.format(i+1))
axs[2][i].set_title('permutated{} '.format(i+1))
axs[3][i].set_title('natural{} '.format(i+1))
axs[4][i].set_title('regular{} '.format(i+1))
plt.setp(axs, xlabel = 'time', ylabel = 'amplitude');
fig.tight_layout()
plt.show()
correlation = np.cov(sounds, natural_unmixed) / np.std(sounds) / np.std(natural_unmixed)
plt.imshow(correlation, interpolation='none')
plt.title('Correlation of original and unmixed with natural gradient',y=1.05)
plt.colorbar();
correlation = np.cov(sounds, regular_unmixed) /(np.std(sounds)*np.std(regular_unmixed))
plt.imshow(correlation, interpolation='none')
plt.title('Correlation of original and unmixed with regular gradient',y=1.05)
plt.colorbar();
W_reg_norms = np.linalg.norm(W_reg_store,axis=(2,1))
W_nat_norms = np.linalg.norm(W_nat_store,axis=(2,1))
fig, axs = plt.subplots(1,2,figsize=(12,4))
axs[0].plot(W_reg_norms)
axs[0].set_title('Norms of $W$ in regular gradient descent')
axs[1].plot(W_nat_norms)
axs[1].set_title('Norms of $W$ in natural gradient descent')
white1 = (sound1-np.mean(sound1))/np.std(sound1)
white2 = (sound2-np.mean(sound2))/np.std(sound2)
white = np.vstack((white1,white2))
mixed_white = np.dot(A,white)
def ddf_by_df(inp):
return 1-2.0/(1+np.exp(-inp))
W = np.random.random((2, 2))
print("Goal:")
print(A_inv)
print("W start")
print(W)
k = 0
wW_store=np.zeros((18,2,2))
eta_0 = .8
for t in range(18000):
eta = eta_0/(t+1)
W_inv = np.linalg.inv(W)
x = mixed_white[:,t]
gradient = W_inv.T + np.dot(ddf_by_df(np.dot(W,x)).reshape(2,1),x.reshape(1,2))#ddf_by_df(np.dot(W,x))[:,np.newaxis]*x
W += eta*gradient
if t%1000==0:
wW_store[k]=W
k+=1
print("W end")
print(W)
wW_reg_store = wW_store
wW_regular = W
W = np.random.random((2, 2))
print("Goal:")
print(A_inv)
print("W start")
print(W)
eta_0 = .35
k = 0
wW_store=np.zeros((18,2,2))
for t in range(18000):
x = mixed_white[:,t]
gradient = np.dot(np.eye(2) + np.dot(ddf_by_df(np.dot(W,x)).reshape(2,1),np.dot(W,x).reshape(1,2)),W)#np.dot(ddf_by_df(np.dot(W,x)),np.dot(W,x)[:,np.newaxis]),W)
eta = eta_0/(t+1)
W += eta*gradient
if t%1000==0:
wW_store[k]=W
k+=1
print("W end")
print(W)
wW_nat_store = wW_store
wW_natural = W
wW_reg_norms = np.linalg.norm(wW_reg_store,axis=(2,1))
wW_nat_norms = np.linalg.norm(wW_nat_store,axis=(2,1))
fig, axs = plt.subplots(1,2,figsize=(12,4))
axs[0].plot(wW_reg_norms)
axs[0].set_title('Norms of $W$ in regular gradient descent on whitened data',y=1.05);
axs[1].plot(wW_nat_norms)
axs[1].set_title('Norms of $W$ in natural gradient descent on whitened data',y=1.05);
"""
Explanation: B
End of explanation
"""
|
GoogleCloudPlatform/vertex-ai-samples
|
notebooks/community/sdk/sdk_automl_tabular_forecasting_batch.ipynb
|
apache-2.0
|
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
"""
Explanation: Vertex SDK: AutoML training tabular forecasting model for batch prediction
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_tabular_forecasting_batch.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_tabular_forecasting_batch.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_tabular_forecasting_batch.ipynb">
Open in Google Cloud Notebooks
</a>
</td>
</table>
<br/><br/><br/>
Overview
This tutorial demonstrates how to use the Vertex SDK to create tabular forecasting models and do batch prediction using a Google Cloud AutoML model.
Dataset
The dataset used for this tutorial is the BigQuery public dataset, The New York Times US Coronavirus Database. This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.
Objective
In this tutorial, you create an AutoML tabular forecasting model from a Python script, and then do a batch prediction using the Vertex SDK. You can alternatively create and deploy models using the gcloud command-line tool or online using the Cloud Console.
The steps performed include:
Create a Vertex Dataset resource.
Train the model.
View the model evaluation.
Make a batch prediction.
There is one key difference between using batch prediction and using online prediction:
Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time.
Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready.
Costs
This tutorial uses billable components of Google Cloud:
Vertex AI
Cloud Storage
Learn about Vertex AI
pricing and Cloud Storage
pricing, and use the Pricing
Calculator
to generate a cost estimate based on your projected usage.
Set up your local development environment
If you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step.
Otherwise, make sure your environment meets this notebook's requirements. You need the following:
The Cloud Storage SDK
Git
Python 3
virtualenv
Jupyter notebook running in a virtual environment with Python 3
The Cloud Storage guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
Install and initialize the SDK.
Install Python 3.
Install virtualenv and create a virtual environment that uses Python 3. Activate the virtual environment.
To install Jupyter, run pip3 install jupyter on the command-line in a terminal shell.
To launch Jupyter, run jupyter notebook on the command-line in a terminal shell.
Open this notebook in the Jupyter Notebook Dashboard.
Installation
Install the latest version of Vertex SDK for Python.
End of explanation
"""
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
"""
Explanation: Install the latest GA version of google-cloud-storage library as well.
End of explanation
"""
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
"""
Explanation: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
End of explanation
"""
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
"""
Explanation: Before you begin
GPU runtime
This tutorial does not require a GPU runtime.
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.
If you are running this notebook locally, you will need to install the Cloud SDK.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $.
End of explanation
"""
REGION = "us-central1" # @param {type: "string"}
"""
Explanation: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas: us-central1
Europe: europe-west4
Asia Pacific: asia-east1
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about Vertex AI regions
End of explanation
"""
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
"""
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
End of explanation
"""
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
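# Hypothetical example of the expected path format (replace with your own key file):
# %env GOOGLE_APPLICATION_CREDENTIALS /home/user/keys/my-service-account.json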
"""
Explanation: Authenticate your Google Cloud account
If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps:
In the Cloud Console, go to the Create service account key page.
Click Create service account.
In the Service account name field, enter a name, and click Create.
In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
"""
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
"""
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
End of explanation
"""
! gsutil mb -l $REGION $BUCKET_NAME
"""
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
"""
! gsutil ls -al $BUCKET_NAME
"""
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
"""
import google.cloud.aiplatform as aip
"""
Explanation: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
End of explanation
"""
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
"""
Explanation: Initialize Vertex SDK for Python
Initialize the Vertex SDK for Python for your project and corresponding bucket.
End of explanation
"""
IMPORT_FILE = "gs://cloud-samples-data/ai-platform/covid/bigquery-public-covid-nyt-us-counties-train.csv"
"""
Explanation: Tutorial
Now you are ready to start creating your own AutoML tabular forecasting model.
Location of Cloud Storage training data.
Now set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage.
End of explanation
"""
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $IMPORT_FILE | head
heading = ! gsutil cat $IMPORT_FILE | head -n1
label_column = str(heading).split(",")[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
raise Exception("label column missing")
"""
Explanation: Quick peek at your data
This tutorial uses a version of the NY Times COVID Database dataset that is stored in a public Cloud Storage bucket, using a CSV index file.
Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (wc -l) and then peek at the first few rows.
For training, you also need to know the heading name of the label column, which is saved as label_column. For this dataset, it is the last column in the CSV file.
End of explanation
"""
dataset = aip.TimeSeriesDataset.create(
display_name="NY Times COVID Database" + "_" + TIMESTAMP,
gcs_source=[IMPORT_FILE],
)
print(dataset.resource_name)
TRANSFORMATIONS = [
{"auto": {"column_name": "date"}},
{"auto": {"column_name": "state_name"}},
{"auto": {"column_name": "county_fips_code"}},
{"auto": {"column_name": "confirmed_cases"}},
{"auto": {"column_name": "deaths"}},
]
label_column = "deaths"
time_column = "date"
time_series_identifier_column = "county"
"""
Explanation: Create the Dataset
Next, create the Dataset resource using the create method for the TimeSeriesDataset class, which takes the following parameters:
display_name: The human readable name for the Dataset resource.
gcs_source: A list of one or more dataset index files to import the data items into the Dataset resource.
bq_source: Alternatively, import data items from a BigQuery table into the Dataset resource.
This operation may take several minutes.
End of explanation
"""
dag = aip.AutoMLForecastingTrainingJob(
display_name="train-iowa-liquor-sales-automl_1",
optimization_objective="minimize-rmse",
column_transformations=TRANSFORMATIONS,
)
"""
Explanation: Create and run training pipeline
To train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.
Create training pipeline
An AutoML training pipeline is created with the AutoMLForecastingTrainingJob class, with the following parameters:
display_name: The human readable name for the TrainingJob resource.
column_transformations: (Optional): Transformations to apply to the input columns
optimization_objective: The optimization objective to minimize or maximize.
minimize-rmse
minimize-mae
minimize-rmsle
The instantiated object is the DAG (directed acyclic graph) for the training pipeline.
End of explanation
"""
model = dag.run(
dataset=dataset,
target_column=label_column,
time_column=time_column,
time_series_identifier_column=time_series_identifier_column,
available_at_forecast_columns=[time_column],
unavailable_at_forecast_columns=[label_column],
time_series_attribute_columns=["state_name", "county_fips_code", "confirmed_cases"],
forecast_horizon=30,
# context_window=30,
data_granularity_unit="day",
data_granularity_count=1,
weight_column=None,
budget_milli_node_hours=1000,
model_display_name="covid_" + TIMESTAMP,
predefined_split_column_name=None,
)
"""
Explanation: Run the training pipeline
Next, you run the DAG to start the training job by invoking the method run, with the following parameters:
dataset: The Dataset resource to train the model.
model_display_name: The human readable name for the trained model.
training_fraction_split: The percentage of the dataset to use for training.
test_fraction_split: The percentage of the dataset to use for test (holdout data).
target_column: The name of the column to train as the label.
budget_milli_node_hours: (optional) Maximum training time specified in unit of millihours (1000 = hour).
time_column: The name of the column that provides the time ordering of the observations.
time_series_identifier_column: The name of the column that identifies each individual time series.
The run method when completed returns the Model resource.
The execution of the training pipeline will take up to 20 minutes.
End of explanation
"""
# Get model resource ID
models = aip.Model.list(filter="display_name=covid_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
"""
Explanation: Review model evaluation scores
After your model has finished training, you can review the evaluation scores for it.
First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you trained the model or you can list all of the models in your project.
End of explanation
"""
HEADING = "date,county,state_name,county_fips_code,confirmed_cases,deaths"
INSTANCE_1 = "2020-10-13,Adair,Iowa,19001,103,null"
INSTANCE_2 = "2020-10-29,Adair,Iowa,19001,197,null"
"""
Explanation: Send a batch prediction request
Send a batch prediction to your deployed model.
Make test items
You will use synthetic data as test data items. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.
End of explanation
"""
import tensorflow as tf
gcs_input_uri = BUCKET_NAME + "/test.csv"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
f.write(HEADING + "\n")
f.write(str(INSTANCE_1) + "\n")
f.write(str(INSTANCE_2) + "\n")
print(gcs_input_uri)
! gsutil cat $gcs_input_uri
"""
Explanation: Make the batch input file
Now make a batch input file, which you will store in your Cloud Storage bucket. Unlike image, video, and text, the batch input file for tabular data must be in CSV format. In the CSV file, you make:
The first line is the heading with the feature (fields) heading names.
Each remaining line is a separate prediction request with the corresponding feature values.
For example:
"feature_1", "feature_2". ...
value_1, value_2, ...
End of explanation
"""
batch_predict_job = model.batch_predict(
job_display_name="covid_" + TIMESTAMP,
gcs_source=gcs_input_uri,
gcs_destination_prefix=BUCKET_NAME,
instances_format="csv",
predictions_format="csv",
sync=False,
)
print(batch_predict_job)
"""
Explanation: Make the batch prediction request
Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:
job_display_name: The human readable name for the batch prediction job.
gcs_source: A list of one or more batch request input files.
gcs_destination_prefix: The Cloud Storage location for storing the batch prediction results.
instances_format: The format for the input instances, either 'csv' or 'jsonl'. Defaults to 'jsonl'.
predictions_format: The format for the output predictions, either 'csv' or 'jsonl'. Defaults to 'jsonl'.
sync: If set to True, the call will block while waiting for the asynchronous batch job to complete.
End of explanation
"""
batch_predict_job.wait()
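# Equivalent alternative (sketch, not run here): passing sync=True to batch_predict()
# above makes the call itself block until the job completes, so this explicit wait()
# would not be needed.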
"""
Explanation: Wait for completion of batch prediction job
Next, wait for the batch job to complete. Alternatively, one can set the parameter sync to True in the batch_predict() method to block until the batch prediction job is completed.
End of explanation
"""
import tensorflow as tf
bp_iter_outputs = batch_predict_job.iter_outputs()
prediction_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("prediction"):
prediction_results.append(blob.name)
tags = list()
for prediction_result in prediction_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
print(line)
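# Optional follow-up sketch (assumptions: pandas and gcsfs are installed, and each
# output file is a plain CSV with a header row). It would collect all prediction
# rows into a single DataFrame instead of printing them:
# import pandas as pd
# frames = [pd.read_csv(f"gs://{bp_iter_outputs.bucket.name}/{name}") for name in prediction_results]
# predictions_df = pd.concat(frames, ignore_index=True)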
"""
Explanation: Get the predictions
Next, get the results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a CSV format:
CSV header + predicted_label
CSV row + prediction, per prediction request
End of explanation
"""
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
    # Delete the AutoML or Pipeline training job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
    # Delete the custom training job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
"""
Explanation: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
Dataset
Pipeline
Model
Endpoint
AutoML Training Job
Batch Job
Custom Job
Hyperparameter Tuning Job
Cloud Storage Bucket
End of explanation
"""
|
tensorflow/gan
|
tensorflow_gan/examples/esrgan/colab_notebooks/ESRGAN_TPU.ipynb
|
apache-2.0
|
import os
import tensorflow.compat.v1 as tf
import pprint
assert 'COLAB_TPU_ADDR' in os.environ, 'Did you forget to switch to TPU?'
tpu_address = 'grpc://' + os.environ['COLAB_TPU_ADDR']
with tf.Session(tpu_address) as sess:
devices = sess.list_devices()
pprint.pprint(devices)
device_is_tpu = [True if 'TPU' in str(x) else False for x in devices]
assert True in device_is_tpu, 'Did you forget to switch to TPU?'
"""
Explanation: ESRGAN with TF-GAN on TPU
Overview
This notebook demonstrates the E2E process of data loading, preprocessing, training and evaluation of the ESRGAN model using TF-GAN on TPUs. To understand the basics of TF-GAN and explore more features of the library, please visit TF-GAN tutorial notebook first. Please visit the Google Cloud Tutorial to learn how to create and make use of a cloud storage bucket.
Learning Objectives
Through this Colab notebook you will learn how to :
* Implement the ESRGAN model and train it
* Make use of various TF-GAN functions to visualize and evaluate the results.
Steps to run this notebook
Click on the following icon to open this notebook in Google Colaboratory.
Create a Cloud Storage bucket for storage : http://console.cloud.google.com/storage.
Navigate to Runtime > Change runtime type tab
Select TPU from hardware accelerator and save
Click Connect in the upper right corner and select Connect to hosted runtime.
Testing out the TPU connection
First, you'll need to enable TPUs for the notebook.
Navigate to Edit→Notebook Settings, and select TPU from the Hardware Accelerator drop-down (you can also access Notebook Settings via the command palette: cmd/ctrl-shift-P).
Next, we'll check that we can connect to the TPU.
End of explanation
"""
import json
import os
import pprint
import re
import time
import tensorflow.compat.v1 as tf
import tensorflow_gcs_config
# Google Cloud Storage bucket for storing the training dataset.
bucket = '' #@param {type:"string"}
assert bucket, 'Must specify an existing GCS bucket name'
print('Using bucket: {}'.format(bucket))
assert 'COLAB_TPU_ADDR' in os.environ, 'Missing TPU; did you request a TPU in Notebook Settings?'
tpu_address = 'grpc://{}'.format(os.environ['COLAB_TPU_ADDR'])
from google.colab import auth
auth.authenticate_user()
# Upload credentials to TPU.
tf.config.experimental_connect_to_host(tpu_address)
tensorflow_gcs_config.configure_gcs_from_colab_auth()
# Now credentials are set for all future sessions on this TPU.
"""
Explanation: Authentication
To run on Google's free Cloud TPUs, you must set up a Google Cloud Storage bucket to store dataset and model weights during training. New customers to Google Cloud Platform can get $300 in free credits which can come in handy while running this notebook. Please visit the Google Cloud Tutorial to learn how to create and make use of a cloud storage bucket.
Firstly enter the name of the cloud bucket you have created.
For authentication you will be redirected to give Google Cloud SDK access to your cloud bucket. Paste the authentication code in text box below this cell and proceed.
End of explanation
"""
# Check that imports for the rest of the file work.
import os
import tensorflow as tf
!pip install tensorflow-gan
import tensorflow_gan as tfgan
from tensorflow.keras import layers
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# Allow matplotlib images to render immediately.
%matplotlib inline
"""
Explanation: Check imports
End of explanation
"""
Params = {
'batch_size' : 32, # Number of image samples used in each training step
'hr_dimension' : 256, # Dimension of a High Resolution (HR) Image
'scale' : 4, # Factor by which Low Resolution (LR) Images will be downscaled.
'data_name': 'div2k/bicubic_x4', # Dataset name - loaded using tfds.
'trunk_size' : 11, # Number of Residual blocks used in Generator,
'init_lr' : 0.00005, # Initial Learning rate for networks.
'ph1_steps' : 10000, # Number of steps required for phase-1 training
'ph2_steps' : 100000, # Number of steps required for phase-2 training
'decay_ph1' : 0.2, # Factor by which learning rates are modified during phase-1 training
'decay_ph2' : 0.5, # Factor by which learning rates are modified during phase-2 training
'model_dir' : 'gs://{}/SavedModels' # Path to save the model after training. (inside the cloud bucket)
.format(bucket),
'ckpt_dir' : '/content/ckpts/', # Path to save the training checkpoints. (outside the cloud bucket)
'lambda' : 0.005, # To balance adversarial loss during phase-2 training.
'eta' : 0.01, # To balance L1 loss during phase-2 training.
'val_steps' : 100 # Number of steps required for validation.
}
"""
Explanation: Training ESRGAN
The ESRGAN model proposed in the paper ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks (Wang Xintao et al.) performs image super-resolution, the process of reconstructing a high resolution (HR) image from a given low resolution (LR) image. Such a task has numerous applications in today's world. The Super-Resolution GAN (SRGAN) model was a major breakthrough in this field and was capable of generating photorealistic images; however, it also produced artifacts that reduced the overall visual quality. To overcome this, the ESRGAN model was proposed with three major changes made to the SRGAN model:
1. Using Residual-in-Residual Dense Block (RRDB) without batch normalization as basic network building unit
2. Using an improved method to calculate adversarial loss used in RelativisticGAN
3. Improving perceptual loss function by using features before activation.
Go to the visualize results cell to see some of the results obtained.
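As a rough illustration of the second point, the relativistic average losses (computed later through TF-GAN's relativistic_generator_loss and relativistic_discriminator_loss) compare each logit against the average logit of the opposite class. A simplified sketch of the idea, not the exact TF-GAN implementation:

```python
import tensorflow as tf

def relativistic_avg_losses(real_logits, fake_logits):
    # Each real logit is compared with the mean fake logit, and vice versa.
    real_vs_fake = real_logits - tf.reduce_mean(fake_logits)
    fake_vs_real = fake_logits - tf.reduce_mean(real_logits)
    bce = tf.nn.sigmoid_cross_entropy_with_logits
    d_loss = tf.reduce_mean(bce(labels=tf.ones_like(real_vs_fake), logits=real_vs_fake) +
                            bce(labels=tf.zeros_like(fake_vs_real), logits=fake_vs_real))
    g_loss = tf.reduce_mean(bce(labels=tf.zeros_like(real_vs_fake), logits=real_vs_fake) +
                            bce(labels=tf.ones_like(fake_vs_real), logits=fake_vs_real))
    return d_loss, g_loss
```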
Define Parameters
End of explanation
"""
dataset_dir = 'gs://{}/{}'.format(bucket, 'datasets')
def input_fn(mode, params):
assert 'batch_size' in params
bs = params['batch_size']
split = 'train' if mode == 'train' else 'validation'
shuffle = True
def scale(image, *args):
hr_size = params['hr_dimension']
scale = params['scale']
hr_image = image
hr_image = tf.image.resize(hr_image, [hr_size, hr_size])
lr_image = tf.image.resize(hr_image, [hr_size//scale, hr_size//scale], method='bicubic')
hr_image = tf.clip_by_value(hr_image, 0, 255)
lr_image = tf.clip_by_value(lr_image, 0, 255)
return lr_image, hr_image
dataset = (tfds.load(params['data_name'], split=split, data_dir=dataset_dir, as_supervised=True)
.map(scale, num_parallel_calls=4)
.cache()
.repeat())
if shuffle:
dataset = dataset.shuffle(
buffer_size=10000, reshuffle_each_iteration=True)
dataset = (dataset.batch(bs, drop_remainder=True)
.prefetch(tf.data.experimental.AUTOTUNE))
return dataset
train_ds = input_fn(mode='train', params=Params)
"""
Explanation: Load Training Dataset
We use the DIV2K dataset, which is commonly used for benchmarking super-resolution models. Here the bicubic_x4 configuration is loaded through tfds. In the input pipeline, HR images are resized to hr_dimension x hr_dimension (256 x 256 with the parameters above) and the corresponding LR images are produced by bicubic downscaling by a factor of scale (64 x 64 here).
End of explanation
"""
img_lr, img_hr = next(iter(train_ds))
lr = Image.fromarray(np.array(img_lr)[0].astype(np.uint8))
lr = lr.resize([256, 256])
display(lr)
hr = Image.fromarray(np.array(img_hr)[0].astype(np.uint8))
hr = hr.resize([256, 256])
display(hr)
"""
Explanation: Visualize the dataset
End of explanation
"""
def _conv_block(input, filters, activation=True):
h = layers.Conv2D(filters, kernel_size=[3,3],
kernel_initializer="he_normal", bias_initializer="zeros",
strides=[1,1], padding='same', use_bias=True)(input)
if activation:
h = layers.LeakyReLU(0.2)(h)
return h
def dense_block(input):
h1 = _conv_block(input, 32)
h1 = layers.Concatenate()([input, h1])
h2 = _conv_block(h1, 32)
h2 = layers.Concatenate()([input, h1, h2])
h3 = _conv_block(h2, 32)
h3 = layers.Concatenate()([input, h1, h2, h3])
h4 = _conv_block(h3, 32)
h4 = layers.Concatenate()([input, h1, h2, h3, h4])
h5 = _conv_block(h4, 32, activation=False)
h5 = layers.Lambda(lambda x: x * 0.2)(h5)
h = layers.Add()([h5, input])
return h
def rrdb(input):
h = dense_block(input)
h = dense_block(h)
h = dense_block(h)
h = layers.Lambda(lambda x:x * 0.2)(h)
out = layers.Add()([h, input])
return out
def upsample(x, filters):
x = layers.Conv2DTranspose(filters, kernel_size=3,
strides=2, padding='same',
use_bias = True)(x)
x = layers.LeakyReLU(alpha=0.2)(x)
return x
def generator_network(filter=32,
trunk_size=Params['trunk_size'],
out_channels=3):
lr_input = layers.Input(shape=(None, None, 3))
x = layers.Conv2D(filter, kernel_size=[3,3], strides=[1,1],
padding='same', use_bias=True)(lr_input)
x = layers.LeakyReLU(0.2)(x)
ref = x
for i in range(trunk_size):
x = rrdb(x)
x = layers.Conv2D(filter, kernel_size=[3,3], strides=[1,1],
padding='same', use_bias = True)(x)
x = layers.Add()([x, ref])
x = upsample(x, filter)
x = upsample(x, filter)
x = layers.Conv2D(filter, kernel_size=3, strides=1,
padding='same', use_bias=True)(x)
x = layers.LeakyReLU(0.2)(x)
hr_output = layers.Conv2D(out_channels, kernel_size=3, strides=1,
padding='same', use_bias=True)(x)
model = tf.keras.models.Model(inputs=lr_input, outputs=hr_output)
return model
"""
Explanation: Network Architecture
The basic network building unit of ESRGAN is the Residual-in-Residual Dense Block (RRDB) without batch normalization. The network implemented here follows the architecture proposed in the paper.
Generator
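Since the generator uses two stride-2 upsampling stages, it should enlarge an LR input by a factor of 4 in each spatial dimension. A quick sanity check (just a sketch, not required for training; sanity_gen is a throwaway name):

```python
# Build a throwaway generator and verify the 4x upscaling on a dummy LR batch.
sanity_gen = generator_network()
dummy_lr = tf.zeros([1, 64, 64, 3])
print(sanity_gen(dummy_lr).shape)  # expected: (1, 256, 256, 3)
```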
End of explanation
"""
def _conv_block_d(x, out_channel):
x = layers.Conv2D(out_channel, 3,1, padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.8)(x)
x = layers.LeakyReLU(alpha=0.2)(x)
x = layers.Conv2D(out_channel, 4,2, padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.8)(x)
x = layers.LeakyReLU(alpha=0.2)(x)
return x
def discriminator_network(filters = 64, training=True):
img = layers.Input(shape = (Params['hr_dimension'], Params['hr_dimension'], 3))
x = layers.Conv2D(filters, [3,3], 1, padding='same', use_bias=False)(img)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(alpha=0.2)(x)
x = layers.Conv2D(filters, [3,3], 2, padding='same', use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(alpha=0.2)(x)
x = _conv_block_d(x, filters *2)
x = _conv_block_d(x, filters *4)
x = _conv_block_d(x, filters *8)
x = layers.Flatten()(x)
x = layers.Dense(100)(x)
x = layers.LeakyReLU(alpha=0.2)(x)
x = layers.Dense(1)(x)
model = tf.keras.models.Model(inputs = img, outputs = x)
return model
"""
Explanation: Discriminator
End of explanation
"""
def pixel_loss(y_true, y_pred):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.reduce_mean(tf.abs(y_true - y_pred), axis = 0))
# Function for calculating perceptual loss
def vgg_loss(weight=None, input_shape=None):
vgg_model = tf.keras.applications.vgg19.VGG19(
input_shape=input_shape, weights=weight, include_top=False
)
for layer in vgg_model.layers:
layer.trainable = False
vgg_model.get_layer("block5_conv4").activation = lambda x: x
vgg = tf.keras.Model(
inputs=[vgg_model.input],
outputs=[vgg_model.get_layer("block5_conv4").output])
def loss(y_true, y_pred):
return tf.compat.v1.losses.absolute_difference(vgg(y_true), vgg(y_pred))
return loss
"""
Explanation: Loss Functions
The ESRGAN model makes use of three loss functions - pixel loss, perceptual loss (vgg_loss) and adversarial loss. Perceptual loss is calculated using the pre-trained VGG-19 network. Adversarial loss is calculated using the relativistic average loss discussed in the paper; the relativistic_generator_loss and relativistic_discriminator_loss functions pre-defined in TF-GAN losses are used for the generator and discriminator losses respectively.
These loss functions ensure a balance between visual quality and metrics such as PSNR, and encourage the generator to produce more realistic images with natural textures.
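During phase-2 training these terms are combined into a single generator objective weighted by the lambda and eta entries of Params (see the training step later in this notebook). A sketch of the combination:

```python
def total_generator_loss(percep_loss, ragan_loss, l1_loss, params=Params):
    # Weighted sum used in phase-2 training: perceptual + lambda * RaGAN + eta * L1.
    return percep_loss + params['lambda'] * ragan_loss + params['eta'] * l1_loss
```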
End of explanation
"""
# To display images in the order : LR Image -> Generated Image -> HR Image
def visualize_results(image_lr, generated, image_hr):
size = 128
resized_lr = tf.image.resize(image_lr, [size, size], method=tf.image.ResizeMethod.BILINEAR)
resized_gen = tf.image.resize(generated, [size, size], method=tf.image.ResizeMethod.BILINEAR)
resized_hr = tf.image.resize(image_hr, [size, size], method=tf.image.ResizeMethod.BILINEAR)
stack = tf.stack([resized_lr[0], resized_gen[0], resized_hr[0]])
image_grid = tfgan.eval.python_image_grid(stack, grid_shape=(1, 3))
result = Image.fromarray(image_grid.astype(np.uint8))
return result
# Define the TPU strategy
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
train_ds = iter(strategy.experimental_distribute_dataset(train_ds))
"""
Explanation: Training
The ESRGAN model is trained in two phases. The first phase trains the generator network on its own and aims at improving the PSNR of generated images by reducing the L1 loss.
If starting from scratch, phase-1 training can be completed within an hour on a free Colab TPU, whereas phase-2 can take around 2-3 hours to get good results. Saving the weights/checkpoints is therefore an important step during training.
Training of the same generator continues in the second phase together with the discriminator network. In this phase the generator minimizes the L1 loss, the Relativistic average GAN (RaGAN) loss, which indicates how realistic the generated image looks, and the improved perceptual loss proposed in the paper.
End of explanation
"""
with strategy.scope():
metric = tf.keras.metrics.Mean()
psnr_metric = tf.keras.metrics.Mean()
generator = generator_network()
g_optimizer = tf.optimizers.Adam(
learning_rate = 0.0002,
beta_1 = 0.9,
beta_2 = 0.99
)
@tf.function
def train_step(image_lr, image_hr):
with tf.GradientTape() as tape:
fake = generator(image_lr)
loss = pixel_loss(image_hr, fake) * (1.0 / Params['batch_size'])
psnr_value = tf.image.psnr(fake, image_hr,max_val = 256.0)
metric(loss)
gradient = tape.gradient(loss, generator.trainable_variables)
g_optimizer.apply_gradients(zip(gradient, generator.trainable_variables))
return psnr_value
def val_steps(image_lr, image_hr):
fake = generator(image_lr)
result = visualize_results(image_lr, fake, image_hr)
display(result)
step_count = 0
while step_count < Params['ph1_steps']:
lr, hr = next(train_ds)
psnr_loss = strategy.run(train_step, args = (lr, hr))
loss = strategy.reduce(tf.distribute.ReduceOp.MEAN, psnr_loss, axis=None)
psnr_metric(loss)
if step_count%1000 == 0:
lr = np.array(lr.values)[0]
hr = np.array(hr.values)[0]
print("step {} PNSR = {}".format(step_count, psnr_metric.result()))
val_steps(lr, hr)
if step_count%5000 == 0:
g_optimizer.learning_rate.assign(
g_optimizer.learning_rate * Params['decay_ph1'])
step_count+=1
# Save the generator network which is then used for phase-2 training
os.makedirs(Params['model_dir'] + '/Phase_1/generator', exist_ok = True)
generator.save(Params['model_dir'] + '/Phase_1/generator')
"""
Explanation: Phase - 1 Training
Steps Involved:
Define the generator and its optimizer.
Take LR, HR image pairs from the training dataset
Input the LR image to the generator network
Calculate the L1 loss using the generated image and HR image
Calculate gradient value and apply it to the optimizer
Update the learning rate of the optimizer at every decay step for better performance
End of explanation
"""
with strategy.scope():
    # Use separate Adam optimizers for the generator and the discriminator;
    # sharing a single optimizer object would also share its learning rate and slots.
    g_optimizer = tf.optimizers.Adam(
        learning_rate = Params['init_lr'],
        beta_1 = 0.9,
        beta_2 = 0.99
    )
    d_optimizer = tf.optimizers.Adam(
        learning_rate = Params['init_lr'],
        beta_1 = 0.9,
        beta_2 = 0.99
    )
    generator = tf.keras.models.load_model(Params['model_dir'] + '/Phase_1/generator/')
    discriminator = discriminator_network()
checkpoint = tf.train.Checkpoint(G=generator,
D = discriminator,
G_optimizer=g_optimizer,
D_optimizer=d_optimizer)
local_device_option = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
"""
Explanation: Phase - 2
Define optimizers and load networks
Generator network trained in Phase 1 is loaded.
Checkpoints are also defined which can be useful during training.
End of explanation
"""
with strategy.scope():
perceptual_loss = vgg_loss(
weight = "imagenet",
input_shape = [Params['hr_dimension'], Params['hr_dimension'], 3])
with strategy.scope():
gen_metric = tf.keras.metrics.Mean()
disc_metric = tf.keras.metrics.Mean()
psnr_metric = tf.keras.metrics.Mean()
"""
Explanation: Load VGG weights
The VGG-19 network pretrained on imagenet is loaded for calculating perceptual loss.
End of explanation
"""
@tf.function
def train_step(image_lr, image_hr):
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
fake = generator(image_lr)
percep_loss = tf.reduce_mean(perceptual_loss(image_hr, fake))
l1_loss = pixel_loss(image_hr, fake)
real_logits = discriminator(image_hr)
fake_logits = discriminator(fake)
loss_RaG = tfgan.losses.losses_impl.relativistic_generator_loss(real_logits,
fake_logits)
disc_loss = tfgan.losses.losses_impl.relativistic_discriminator_loss(real_logits,
fake_logits)
gen_loss = percep_loss + Params['lambda'] * loss_RaG + Params['eta'] * l1_loss
gen_loss = gen_loss / Params['batch_size']
disc_loss = disc_loss / Params['batch_size']
psnr_loss = tf.image.psnr(fake, image_hr, max_val = 256.0)
disc_metric(disc_loss)
gen_metric(gen_loss)
psnr_metric(psnr_loss)
disc_grad = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
d_optimizer.apply_gradients(zip(disc_grad, discriminator.trainable_variables))
gen_grad = gen_tape.gradient(gen_loss, generator.trainable_variables)
g_optimizer.apply_gradients(zip(gen_grad, generator.trainable_variables))
return [disc_loss, gen_loss, psnr_loss]
def val_step(image_lr, image_hr):
fake = generator(image_lr)
result = visualize_results(image_lr, fake, image_hr)
display(result)
step_count = 0
decay_step = [9000, 30000, 50000]
while step_count < Params['ph2_steps']:
lr, hr = next(train_ds)
if tf.train.latest_checkpoint(Params['ckpt_dir']):
checkpoint.restore(tf.train.latest_checkpoint(Params['ckpt_dir']))
disc_loss, gen_loss, psnr_loss = strategy.run(train_step, args = (lr, hr))
if step_count % 1000 == 0:
print("step {}".format(step_count) + " Generator Loss = {} ".format(gen_metric.result()) +
"Disc Loss = {}".format(disc_metric.result()) + " PSNR : {}".format(psnr_metric.result()))
lr = np.array(lr.values)[0]
hr = np.array(hr.values)[0]
val_step(lr, hr)
checkpoint.write(Params['ckpt_dir'], options=local_device_option)
    if decay_step and step_count >= decay_step[0]:
decay_step.pop(0)
g_optimizer.learning_rate.assign(
g_optimizer.learning_rate * Params['decay_ph2'])
d_optimizer.learning_rate.assign(
d_optimizer.learning_rate * Params['decay_ph2'])
step_count+=1
os.makedirs(Params['model_dir'] + '/Phase_2/generator', exist_ok = True)
os.makedirs(Params['model_dir'] + '/Phase_2/discriminator', exist_ok = True)
generator.save(Params['model_dir'] + '/Phase_2/generator')
discriminator.save(Params['model_dir'] + '/Phase_2/discriminator')
"""
Explanation: Training step
Input the LR image to the generator network
Calculate L1 loss, perceptual loss and adversarial loss for both generator and discriminator.
Update the optimizers for both networks using the obtained gradient values
Update the learning rates of the optimizers at every decay step for better performance
TF-GAN's image grid function is used to display the generated images in the validation steps.
End of explanation
"""
def network_interpolation(alpha=0.2,
phase_1_path=None,
phase_2_path=None):
    psnr_gen = tf.keras.models.load_model(phase_1_path)
gan_gen = tf.keras.models.load_model(phase_2_path)
for var_1, var_2 in zip(gan_gen.trainable_variables,
psnr_gen.trainable_variables):
var_1.assign((1 - alpha) * var_2 + alpha * var_1)
return gan_gen
generator = network_interpolation(phase_1_path = Params['model_dir'] + '/Phase_1/generator',
phase_2_path = Params['model_dir'] + '/Phase_2/generator')
generator.save(Params['model_dir'] + '/InterpolatedGenerator/')
"""
Explanation: Network Interpolation
Rather than using the GAN-trained generator directly, ESRGAN blends the weights of the PSNR-oriented generator from phase 1 with the GAN-trained generator from phase 2: $\theta_G^{interp} = (1-\alpha)\,\theta_G^{PSNR} + \alpha\,\theta_G^{GAN}$, which is exactly what the assignment in the function above does. The parameter alpha trades off pixel fidelity against perceptual quality without retraining.
End of explanation
"""
val_ds = input_fn(mode='validation', params=Params)
"""
Explanation: Evaluation
End of explanation
"""
def val_steps(image_lr, image_hr):
fake = generator(image_lr)
result = visualize_results(image_lr, fake, image_hr)
display(result)
for i in range(3):
lr, hr = next(iter(val_ds))
val_steps(lr, hr)
"""
Explanation: Visualize Generated Images
End of explanation
"""
@tf.function
def get_fid_score(real_image, gen_image):
size = tfgan.eval.INCEPTION_DEFAULT_IMAGE_SIZE
resized_real_images = tf.image.resize(real_image, [size, size], method=tf.image.ResizeMethod.BILINEAR)
resized_generated_images = tf.image.resize(gen_image, [size, size], method=tf.image.ResizeMethod.BILINEAR)
num_inception_images = 1
num_batches = Params['batch_size'] // num_inception_images
fid = tfgan.eval.frechet_inception_distance(resized_real_images, resized_generated_images, num_batches=num_batches)
return fid
@tf.function
def get_inception_score(images, gen, num_inception_images = 8):
size = tfgan.eval.INCEPTION_DEFAULT_IMAGE_SIZE
resized_images = tf.image.resize(images, [size, size], method=tf.image.ResizeMethod.BILINEAR)
num_batches = Params['batch_size'] // num_inception_images
inc_score = tfgan.eval.inception_score(resized_images, num_batches=num_batches)
return inc_score
with strategy.scope():
generator = tf.keras.models.load_model(Params['model_dir'] + '/InterpolatedGenerator')
fid_metric = tf.keras.metrics.Mean()
inc_metric = tf.keras.metrics.Mean()
psnr_metric = tf.keras.metrics.Mean()
count = 0
i = 0
while i < Params['val_steps']:
lr, hr = next(iter(val_ds))
gen = generator(lr)
fid = strategy.run(get_fid_score, args = (hr, gen))
real_is = strategy.run(get_inception_score, args=(hr, gen))
gen_is = strategy.run(get_inception_score, args=(gen, hr))
val_steps(lr, hr)
fid_metric(fid)
inc_metric(gen_is)
    psnr_metric(tf.reduce_mean(tf.image.psnr(gen, hr, max_val = 256.0)))
    i += 1  # advance the step counter so the validation loop terminates
"""
Explanation: FID and Inception Score are two common metrics used to evaluate the performance of a GAN model, while the PSNR value quantifies the similarity between two images and is commonly used for benchmarking super-resolution models.
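PSNR itself is a simple function of the mean squared error. A small sketch of the relation (tf.image.psnr, used above, should give the same value for the same max_val):

```python
def psnr_from_mse(hr, sr, max_val=256.0):
    # PSNR = 10 * log10(MAX^2 / MSE)
    mse = tf.reduce_mean(tf.square(tf.cast(hr, tf.float32) - tf.cast(sr, tf.float32)))
    return 10.0 * tf.math.log(max_val ** 2 / mse) / tf.math.log(10.0)
```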
End of explanation
"""
|
dsevilla/bdge
|
hbase/sesion6.ipynb
|
mit
|
from pprint import pprint as pp
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
matplotlib.style.use('ggplot')
"""
Explanation: NoSQL (HBase) (session 6)
This notebook shows how to access HBase databases and how to connect the output to Jupyter.
You can also use HBase's own shell inside the container.
With HBase we will simulate a cluster of several machines through several connected containers. The hbase directory of the git repository contains a script to run the installation with docker-compose.
To connect to the cluster with an HBase shell, run the following docker command from a terminal:
```bash
$ docker exec -ti hbase-regionserver hbase shell
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 1.2.7, rac57c51f7ad25e312b4275665d62b34a5945422f, Fri Sep 7 16:11:05 CDT 2018
hbase(main):001:0>
```
End of explanation
"""
import os
import os.path as path
from urllib.request import urlretrieve
def download_file_upper_dir(baseurl, filename):
file = path.abspath(path.join(os.getcwd(),os.pardir,filename))
if not os.path.isfile(file):
urlretrieve(baseurl + '/' + filename, file)
baseurl = 'http://neuromancer.inf.um.es:8080/es.stackoverflow/'
download_file_upper_dir(baseurl, 'Posts.csv')
download_file_upper_dir(baseurl, 'Users.csv')
download_file_upper_dir(baseurl, 'Tags.csv')
download_file_upper_dir(baseurl, 'Comments.csv')
download_file_upper_dir(baseurl, 'Votes.csv')
!pip install happybase
import happybase
from contextlib import contextmanager
HBASEHOST = 'hbase-thriftserver'
class Connection():
def __init__(self, host):
self.host = host
self._genpool()
def _genpool(self):
self.pool = happybase.ConnectionPool(size=5, host=self.host)
@contextmanager
def connection(self):
for _ in range(5): # Probar 5 veces a regenerar el pool
for _ in range(5): # Probar 5 veces a conectar
with self.pool.connection() as connection:
try:
connection.tables()
yield connection
return
except Exception as e:
pass
self._genpool()
raise Exception("HBase Connection Error")
hbasecon = Connection(HBASEHOST)
with hbasecon.connection() as connection:
print(connection.tables())
"""
Explanation: We will use the happybase library for Python. We load it below and open the connection.
End of explanation
"""
# Create tables
tables = ['posts', 'votes', 'users', 'tags', 'comments']
for t in tables:
try:
with hbasecon.connection() as connection:
connection.create_table(
t,
{
'rawdata': dict(max_versions=1,compression='GZ')
})
except Exception as e:
print("Database already exists: {0}. {1}".format(t, e))
pass
with hbasecon.connection() as connection:
print(connection.tables())
"""
Explanation: For the initial load, we will create all the tables with a single column family, rawdata, where we will put all the raw information, compressed. Later we can reorganize the data to make access more efficient. This is one of the many advantages of not having a schema.
End of explanation
"""
import csv
def csv_to_hbase(file, tablename, cf):
with hbasecon.connection() as connection, open(file) as f:
table = connection.table(tablename)
# La llamada csv.reader() crea un iterador sobre un fichero CSV
reader = csv.reader(f, dialect='excel')
# Se leen las columnas. Sus nombres se usarán para crear las diferentes columnas en la familia
columns = next(reader)
columns = [cf + ':' + c for c in columns]
with table.batch(batch_size=500) as b:
for row in reader:
# La primera columna se usará como Row Key
b.put(row[0], dict(zip(columns[1:], row[1:])))
for t in tables:
print("Importando tabla {0}...".format(t))
%time csv_to_hbase('../'+t.capitalize() + '.csv', t, 'rawdata')
"""
Explanation: The import code is always the same: the first row of the CSV, which contains the column names, is used to generate column names within the column family given as a parameter. The csv_to_hbase() function accepts a CSV file to open, a table name and a column family to which the CSV columns will be added. In our case it will always be rawdata.
End of explanation
"""
with hbasecon.connection() as connection:
posts = connection.table('posts')
"""
Explanation: Simple queries from Python
Below are some simple queries from Python using the happybase API.
End of explanation
"""
posts.row(b'5',columns=[b'rawdata:Body'])
"""
Explanation: Get the Post with Id 5. The simplest and most immediate HBase operation is fetching a row, optionally restricting the columns to show:
End of explanation
"""
# http://stackoverflow.com/a/30525061/62365
class DictTable(dict):
# Overridden dict class which takes a dict in the form {'a': 2, 'b': 3},
# and renders an HTML Table in IPython Notebook.
def _repr_html_(self):
htmltext = ["<table width=100%>"]
for key, value in self.items():
htmltext.append("<tr>")
htmltext.append("<td>{0}</td>".format(key.decode('utf-8')))
htmltext.append("<td>{0}</td>".format(value.decode('utf-8')))
htmltext.append("</tr>")
htmltext.append("</table>")
return ''.join(htmltext)
# Show what the row of the post with Id 5 looks like
DictTable(posts.row(b'5'))
DictTable(posts.row(b'5',columns=[b'rawdata:AnswerCount',b'rawdata:AcceptedAnswerId']))
"""
Explanation: The following code displays the rows extracted from the database in a friendly way, as a dictionary:
End of explanation
"""
row = posts.row(b'5')
for key, value in row.items():
print("Key = '%s', Value = '%s'" %(key,value.decode('utf-8')[:40]))
"""
Explanation: It can also be traversed like a regular dictionary (decode is used to convert the binary values from the database to UTF-8):
End of explanation
"""
max_len = 0
for key, data in posts.scan():
cur_len = len(data[b'rawdata:Body'].decode('utf-8'))
if cur_len > max_len:
max_len = cur_len
print("Máxima longitud: %s caracteres." % (max_len))
"""
Explanation: Finally, the whole table can also be traversed, optionally setting filters (which will be studied later). The scan function is used, and you can iterate over key and data. For example, computing the maximum length of the body text of the posts:
(WARNING: this is just an example, it should not be done this way)
End of explanation
"""
with hbasecon.connection() as connection:
comments = connection.table('comments')
posts = connection.table('posts')
with posts.batch(batch_size=500) as bp:
# Hacer un scan de la tabla
for key, data in comments.scan():
comment = {'comments:' +
d.decode('utf-8').split(':')[1] + "_" +
key.decode('utf-8') :
data[d].decode('utf-8') for d in data.keys()}
bp.put(data[b'rawdata:PostId'], comment)
download_file_upper_dir('http://neuromancer.inf.um.es:8080/wikipedia/','eswiki.xml.gz')
"""
Explanation: Building nested structures
As with MongoDB, NoSQL databases such as HBase allow storing complex data structures. In our case we are going to attach the comments of each question or answer (post) as columns of the post itself. To do this, we create a new column family comments.
HBase is good at adding simple columns, for example ones containing a single value. However, if we want to add complex objects, we have to play with the encoding of the column family and column names.
We will use the shell because happybase does not allow altering tables that have already been created.
In the HBase shell we run the following:
disable 'posts'
alter 'posts', {NAME => 'comments', VERSIONS => 1}
enable 'posts'
Each comment we add contains, at least:
a unique id
a text
an author
etc.
How do we fit all of that into a single column family?
There are several ways. In the one used here, we append the id of each comment to the column name. For example, the comment with Id 2000 will generate the columns:
Id_2000 (value 2000)
UserId_2000
PostId_2000
Text_2000
with their corresponding values. This way, all the data for the comment with original Id 2000 is stored in the columns ending in "_2000". The database provides filters that make searching for this very easy; we will see them later.
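For example, once the comment columns have been copied into the posts table (the batch copy in this section), all the comments of a given post can be fetched in a single read and regrouped by comment id on the client side. A minimal sketch:

```python
from collections import defaultdict

# Fetch every 'comments' column of post 5 and regroup the values by comment id.
row = posts.row(b'5', columns=[b'comments'])
by_comment = defaultdict(dict)
for col, value in row.items():
    field, _, comment_id = col.decode('utf-8').split(':')[1].rpartition('_')
    by_comment[comment_id][field] = value.decode('utf-8')
```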
End of explanation
"""
import xml.sax
import re
class WikiHandler(xml.sax.handler.ContentHandler):
def __init__(self):
self._charBuffer = ''
self.document = {}
def _getCharacterData(self):
data = self._charBuffer
self._charBuffer = ''
return data
def parse(self, f, callback):
self.callback = callback
xml.sax.parse(f, self)
def characters(self, data):
self._charBuffer = self._charBuffer + data
def startElement(self, name, attrs):
if name == 'page':
# print 'Start of page'
self.document = {}
if re.match(r'title|timestamp|username|comment|text', name):
self._charBuffer = ''
def endElement(self, name):
if re.match(r'title|timestamp|username|comment|text', name):
self.document[name] = self._getCharacterData()
# print(name, ': ', self.document[name][:20])
if 'revision' == name:
self.callback(self.document)
"""
Explanation: We create the table that will hold the wikipedia. It is the same as the one seen in the lectures, but here it is called wikipedia instead of wiki so that the full version and the reduced one do not collide.
Again in the HBase shell:
create 'wikipedia' , 'text', 'revision'
disable 'wikipedia' # Para evitar su uso temporal
alter 'wikipedia' , { NAME => 'text', VERSIONS => org.apache.hadoop.hbase.HConstants::ALL_VERSIONS }
alter 'wikipedia' , { NAME => 'revision', VERSIONS => org.apache.hadoop.hbase.HConstants::ALL_VERSIONS }
alter 'wikipedia' , { NAME => 'text', COMPRESSION => 'GZ', BLOOMFILTER => 'ROW'}
enable 'wikipedia'
This code, seen in the lectures, walks the XML tree building documents and calling the callback function with each one. The documents are dictionaries whose keys are the tags found inside <page>...</page>.
End of explanation
"""
import time
class FillWikiTable():
"""Llena la tabla Wiki"""
def __init__(self,connection):
# Conectar a la base de datos a través de Thrift
self.table = connection.table('wikipedia')
def run(_s):
def processdoc(d):
print("Callback called with {0}".format(d['title']))
tuple_time = time.strptime(d['timestamp'], "%Y-%m-%dT%H:%M:%SZ")
timestamp = int(time.mktime(tuple_time))
_s.table.put(d['title'],
{'text:': d.get('text',''),
'revision:author': d.get('username',''),
'revision:comment': d.get('comment','')},
timestamp=timestamp)
with gzip.open(os.path.join(os.pardir,'eswiki.xml.gz'),'r') as f:
start = time.time()
WikiHandler().parse(f, processdoc)
end = time.time()
print ("End adding documents. Time: %.5f" % (end - start))
with hbasecon.connection() as connection:
FillWikiTable(connection).run()
"""
Explanation: In the following code, every time the parser defined earlier calls the processdoc() function, a document is added to the database.
End of explanation
"""
with hbasecon.connection() as connection:
wikipedia = connection.table('wikipedia')
for key,data in wikipedia.scan(columns=['revision'], row_start='A', row_stop='B', limit=10):
print(key,'->',data)
"""
Explanation: The following code lets you see the different versions of a revision. Since the reduced dataset is very small it does not actually produce multiple revisions, but this is how they would be seen. It uses the HBase shell:
get 'wikipedia', 'Commodore Amiga', {COLUMN => 'revision',VERSIONS=>10}
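The same information can be read from Python with happybase, which exposes stored versions through Table.cells() (a sketch; it assumes several timestamped versions of the row exist):

```python
with hbasecon.connection() as connection:
    wikipedia = connection.table('wikipedia')
    # Up to 10 versions of the article text, with their timestamps.
    versions = wikipedia.cells(b'Commodore Amiga', b'text:', versions=10, include_timestamp=True)
    for value, ts in versions:
        print(ts, len(value))
```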
End of explanation
"""
with hbasecon.connection() as connection:
wikipedia = connection.table('wikipedia')
for key,data in wikipedia.scan(columns=['revision'], row_start='A', row_stop='B',
filter="PrefixFilter('B')",
limit=10):
print (key,'->',data)
"""
Explanation: The following query produces no results. Why?
End of explanation
"""
|
statsmodels/statsmodels.github.io
|
v0.13.0/examples/notebooks/generated/statespace_sarimax_internet.ipynb
|
bsd-3-clause
|
%matplotlib inline
import numpy as np
import pandas as pd
from scipy.stats import norm
import statsmodels.api as sm
import matplotlib.pyplot as plt
import requests
from io import BytesIO
from zipfile import ZipFile
# Download the dataset
dk = requests.get('http://www.ssfpack.com/files/DK-data.zip').content
f = BytesIO(dk)
zipped = ZipFile(f)
df = pd.read_table(
BytesIO(zipped.read('internet.dat')),
skiprows=1, header=None, sep='\s+', engine='python',
names=['internet','dinternet']
)
"""
Explanation: SARIMAX: Model selection, missing data
The example mirrors Durbin and Koopman (2012), Chapter 8.4, in applying the Box-Jenkins methodology to fit ARMA models. The novel feature is the ability of the model to work on datasets with missing values.
End of explanation
"""
# Get the basic series
dta_full = df.dinternet[1:].values
dta_miss = dta_full.copy()
# Remove datapoints
missing = np.r_[6,16,26,36,46,56,66,72,73,74,75,76,86,96]-1
dta_miss[missing] = np.nan
"""
Explanation: Model Selection
As in Durbin and Koopman, we force a number of the values to be missing.
End of explanation
"""
import warnings
aic_full = pd.DataFrame(np.zeros((6,6), dtype=float))
aic_miss = pd.DataFrame(np.zeros((6,6), dtype=float))
warnings.simplefilter('ignore')
# Iterate over all ARMA(p,q) models with p,q in [0,6]
for p in range(6):
for q in range(6):
if p == 0 and q == 0:
continue
# Estimate the model with no missing datapoints
mod = sm.tsa.statespace.SARIMAX(dta_full, order=(p,0,q), enforce_invertibility=False)
try:
res = mod.fit(disp=False)
aic_full.iloc[p,q] = res.aic
except:
aic_full.iloc[p,q] = np.nan
# Estimate the model with missing datapoints
mod = sm.tsa.statespace.SARIMAX(dta_miss, order=(p,0,q), enforce_invertibility=False)
try:
res = mod.fit(disp=False)
aic_miss.iloc[p,q] = res.aic
except:
aic_miss.iloc[p,q] = np.nan
"""
Explanation: Then we can consider model selection using the Akaike information criteria (AIC), but running the model for each variant and selecting the model with the lowest AIC value.
There are a couple of things to note here:
When running such a large batch of models, particularly when the autoregressive and moving average orders become large, there is the possibility of poor maximum likelihood convergence. Below we ignore the warnings since this example is illustrative.
We use the option enforce_invertibility=False, which allows the moving average polynomial to be non-invertible, so that more of the models are estimable.
Several of the models do not produce good results, and their AIC value is set to NaN. This is not surprising, as Durbin and Koopman note numerical problems with the high order models.
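Once the grids are filled, the order with the lowest AIC can also be read off programmatically; a small sketch (the unused (0,0) entries were left at zero and are excluded, and failed fits are NaN):

```python
aic = aic_miss.replace(0.0, np.nan)
p_best, q_best = aic.stack().idxmin()   # stack() drops NaN cells
print('Lowest-AIC ARMA order: ({}, {})'.format(p_best, q_best))
```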
End of explanation
"""
# Statespace
mod = sm.tsa.statespace.SARIMAX(dta_miss, order=(1,0,1))
res = mod.fit(disp=False)
print(res.summary())
# In-sample one-step-ahead predictions, and out-of-sample forecasts
nforecast = 20
predict = res.get_prediction(end=mod.nobs + nforecast)
idx = np.arange(len(predict.predicted_mean))
predict_ci = predict.conf_int(alpha=0.5)
# Graph
fig, ax = plt.subplots(figsize=(12,6))
ax.xaxis.grid()
ax.plot(dta_miss, 'k.')
# Plot
ax.plot(idx[:-nforecast], predict.predicted_mean[:-nforecast], 'gray')
ax.plot(idx[-nforecast:], predict.predicted_mean[-nforecast:], 'k--', linestyle='--', linewidth=2)
ax.fill_between(idx, predict_ci[:, 0], predict_ci[:, 1], alpha=0.15)
ax.set(title='Figure 8.9 - Internet series');
"""
Explanation: For the models estimated over the full (non-missing) dataset, the AIC chooses ARMA(1,1) or ARMA(3,0). Durbin and Koopman suggest the ARMA(1,1) specification is better due to parsimony.
$$
\text{Replication of:}\
\textbf{Table 8.1} ~~ \text{AIC for different ARMA models.}\
\newcommand{\r}[1]{{\color{red}{#1}}}
\begin{array}{lrrrrrr}
\hline
q & 0 & 1 & 2 & 3 & 4 & 5 \
\hline
p & {} & {} & {} & {} & {} & {} \
0 & 0.00 & 549.81 & 519.87 & 520.27 & 519.38 & 518.86 \
1 & 529.24 & \r{514.30} & 516.25 & 514.58 & 515.10 & 516.28 \
2 & 522.18 & 516.29 & 517.16 & 515.77 & 513.24 & 514.73 \
3 & \r{511.99} & 513.94 & 515.92 & 512.06 & 513.72 & 514.50 \
4 & 513.93 & 512.89 & nan & nan & 514.81 & 516.08 \
5 & 515.86 & 517.64 & nan & nan & nan & nan \
\hline
\end{array}
$$
For the models estimated over missing dataset, the AIC chooses ARMA(1,1)
$$
\text{Replication of:}\
\textbf{Table 8.2} ~~ \text{AIC for different ARMA models with missing observations.}\
\begin{array}{lrrrrrr}
\hline
q & 0 & 1 & 2 & 3 & 4 & 5 \
\hline
p & {} & {} & {} & {} & {} & {} \
0 & 0.00 & 488.93 & 464.01 & 463.86 & 462.63 & 463.62 \
1 & 468.01 & \r{457.54} & 459.35 & 458.66 & 459.15 & 461.01 \
2 & 469.68 & nan & 460.48 & 459.43 & 459.23 & 460.47 \
3 & 467.10 & 458.44 & 459.64 & 456.66 & 459.54 & 460.05 \
4 & 469.00 & 459.52 & nan & 463.04 & 459.35 & 460.96 \
5 & 471.32 & 461.26 & nan & nan & 461.00 & 462.97 \
\hline
\end{array}
$$
Note: the AIC values are calculated differently than in Durbin and Koopman, but show overall similar trends.
Postestimation
Using the ARMA(1,1) specification selected above, we perform in-sample prediction and out-of-sample forecasting.
End of explanation
"""
|
poppy-project/pypot
|
samples/notebooks/Benchmark your Poppy robot.ipynb
|
gpl-3.0
|
from ipywidgets import interact
%pylab inline
"""
Explanation: Benchmark your Poppy robot
The goal of this notebook is to help you identify the performance of your robot and where the bottlenecks are. We will measure:
* the time to read/write the position of one motor (for each of your dynamixel buses)
* the time to read/write the positions of all motors (for each of your dynamixel buses)
* the regularity of the synchronization loop of pos/speed/load when
* only this loop is running
* all other synchronization loops are running
* everything else is running
End of explanation
"""
results = {}
"""
Explanation: All bench info will be stored in this dictionary so it's easy to compare with other platforms.
End of explanation
"""
import platform
p = platform.platform()
print(p)
results['platform'] = p
import sys
v = sys.version
print(v)
results['python'] = v
import pypot
results['pypot'] = pypot.__version__
print('Pypot version: {}'.format(results['pypot']))
from pypot.creatures import installed_poppy_creatures
RobotCls = None
def robot_selector(robot):
global RobotCls
RobotCls = robot
interact(robot_selector, robot=installed_poppy_creatures);
RobotCls
robot = RobotCls()
results['robot'] = RobotCls
"""
Explanation: What's the platform
End of explanation
"""
for m in robot.motors:
m.compliant = True
"""
Explanation: Make sure all motors are turned off to avoid breaking anything:
End of explanation
"""
import time
from pypot.dynamixel.syncloop import MetaDxlController
from pypot.dynamixel.controller import PosSpeedLoadDxlController
meta_controllers = [c for c in robot._controllers if isinstance(c, MetaDxlController)]
#controllers = [cc for cc in c.controllers for c in meta_controllers if isinstance(cc, PosSpeedLoadDxlController)]
controllers = []
for c in meta_controllers:
controllers.extend([cc for cc in c.controllers if isinstance(cc, PosSpeedLoadDxlController)])
for c in controllers:
c.stop()
for c in controllers:
def wrapped_update():
if not hasattr(c, 't'):
c.t = []
c.t.append(time.time())
c.update()
c._update = wrapped_update
for c in controllers:
c.start()
"""
Explanation: We find the synchronization loop for pos/speed/load and monkey patch them for monitoring.
End of explanation
"""
import psutil
def monitor(controllers, duration):
for c in controllers:
c.stop()
c.t = []
c.start()
cpu = []
start = time.time()
while time.time() - start < duration:
time.sleep(1.0)
cpu.append(psutil.cpu_percent())
print('Avg CPU usage: {}%'.format(mean(cpu)))
return {c: array(c.t) for c in controllers}
def freq_plot(logs):
for c, t in logs.items():
dt = diff(t)
freq = 1.0 / dt
        print('Avg freq for controller {}: {:.3f} Hz STD={:.3f} Hz'.format(c.ids, freq.mean(), freq.std()))
hist(freq)
xlim(0, 100)
"""
Explanation: Now, we define our monitor and plotting functions.
End of explanation
"""
def follow_trajectory(motor, duration=5, freq=50):
t = linspace(0, duration, duration * freq)
a1, f1 = 10.0, 1.0
a2, f2 = 5.0, 0.5
traj = a1 * sin(2 * pi * f1 * t) + a2 * sin(2 * pi * f2 * t)
rec = []
motor.compliant = False
motor.moving_speed = 0
motor.goal_position = 0
time.sleep(1.)
for p in traj:
motor.goal_position = p
rec.append(motor.present_position)
time.sleep(1.0 / freq)
motor.compliant = True
plot(traj)
plot(rec)
"""
Explanation: We also define this follow trajectory function, which applies a sinusoid to one motor (chosen below) and plots how close its real position is to the target one:
End of explanation
"""
motor = None
def motor_selector(m):
global motor
motor = getattr(robot, m)
interact(motor_selector, m=[m.name for m in robot.motors]);
"""
Explanation: Now choose which motor you want to use for the follow trajectory test. It should be able to move freely from -20 to +20 degrees.
End of explanation
"""
duration = 30
"""
Explanation: Benchmark
Our benchmark duration in seconds:
End of explanation
"""
d = monitor(controllers, duration)
freq_plot(d)
results['normal'] = d
follow_trajectory(motor)
"""
Explanation: Normal usage
End of explanation
"""
for p in robot.primitives:
p.stop()
robot._primitive_manager.stop()
d = monitor(controllers, duration)
freq_plot(d)
results['without primitive'] = d
follow_trajectory(motor)
"""
Explanation: Without primitives
End of explanation
"""
for s in robot.sensors:
s.close()
d = monitor(controllers, duration)
freq_plot(d)
results['without sensor'] = d
follow_trajectory(motor)
"""
Explanation: Without all sensors
End of explanation
"""
|
lyoung13/deep-learning-nanodegree
|
p3-tv-script-generation/dlnd_tv_script_generation.ipynb
|
mit
|
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
# TODO: Implement Function
text_set = set(text)
vocab_to_int = dict((word, index) for index, word in enumerate(text_set))
int_to_vocab = dict((index, word) for index, word in enumerate(text_set))
return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
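A tiny usage example (a sketch on a toy word list) shows the round trip between the two dictionaries:

```python
toy_words = ['moe', 'homer', 'bart', 'moe']
v2i, i2v = create_lookup_tables(toy_words)
ids = [v2i[w] for w in toy_words]
assert [i2v[i] for i in ids] == toy_words
```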
End of explanation
"""
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
"""
# TODO: Implement Function
punct_tokens = {"." : "||period||",
"," : "||comma||",
"\"" : "||quotation_mark||",
";" : "||semicolon||",
"!" : "||exclamation_mark||",
"?" : "||question_mark||",
"(" : "||left_parentheses||",
")" : "||right_parentheses||",
"--" : "||dash||",
"\n" : "||return||"}
return punct_tokens
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused with a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
# TODO: Implement Function
input = tf.placeholder(tf.int32, [None, None], name="input")
targets = tf.placeholder(tf.int32, [None, None], name="targets")
learning_rate = tf.placeholder(tf.float32, name="learning_rate")
return input, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
"""
# TODO: Implement Function
    n_layers = 2
    keep_prob = 0.6
    # Build an independent LSTM cell (wrapped with dropout at keep_prob) for each layer;
    # reusing a single cell object across layers can cause variable-reuse issues in TF 1.x.
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(rnn_size),
                                       output_keep_prob=keep_prob)
         for _ in range(n_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
initial_state = tf.identity(initial_state, name='initial_state')
return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The RNN size should be set using rnn_size
- Initialize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
# TODO: Implement Function
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embedded_input = tf.nn.embedding_lookup(embedding, input_data)
return embedded_input
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
"""
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
# TODO: Implement Function
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name="final_state")
return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:return: Tuple (Logits, FinalState)
"""
# TODO: Implement Function
embed_dim = 200
embed_input = get_embed(input_data, vocab_size, embed_dim)
outputs, final_state = build_rnn(cell, embed_input)
logits = tf.contrib.layers.fully_connected(outputs,
vocab_size,
activation_fn=None,
weights_initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
biases_initializer=tf.zeros_initializer())
return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
# TODO: Implement Function
n_elements = len(int_text)
n_batches = n_elements // (batch_size * seq_length)
x_data = np.array(int_text[: n_batches * batch_size * seq_length])
y_data = np.array(int_text[1: n_batches * batch_size * seq_length + 1])
x_batches = np.split(x_data.reshape(batch_size, -1), n_batches, 1)
y_batches = np.split(y_data.reshape(batch_size, -1), n_batches, 1)
batches = np.array(list(zip(x_batches, y_batches)))
return batches
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2 3], [ 7 8 9]],
# Batch of targets
[[ 2 3 4], [ 8 9 10]]
],
# Second Batch
[
# Batch of Input
[[ 4 5 6], [10 11 12]],
# Batch of targets
[[ 5 6 7], [11 12 13]]
]
]
```
End of explanation
"""
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 1024
# Sequence Length
seq_length = 15
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 34
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
# TODO: Implement Function
input_tensor = loaded_graph.get_tensor_by_name("input:0")
initial_state_tensor = loaded_graph.get_tensor_by_name("initial_state:0")
final_state_tensor = loaded_graph.get_tensor_by_name("final_state:0")
probs_tensor = loaded_graph.get_tensor_by_name("probs:0")
return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
# TODO: Implement Function
predicted_word = np.random.choice(list(int_to_vocab.values()),p=probabilities)
return predicted_word
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/fraud_detection_with_tensorflow_bigquery.ipynb
|
apache-2.0
|
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
from tensorflow_io.bigquery import BigQueryClient
import functools
"""
Explanation: Building a Fraud Detection model on Vertex AI with TensorFlow Enterprise and BigQuery
Learning objectives
Analyze the data in BigQuery.
Ingest records from BigQuery.
Preprocess the data.
Build the model.
Train the model.
Evaluate the model.
Introduction
In this notebook, you'll directly ingest a BigQuery dataset and train a fraud detection model with TensorFlow Enterprise on Vertex AI.
You'll also walk through all the steps of building a model and learn a bit about how to handle imbalanced classification problems.
Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook.
Ingest records from BigQuery
Step 1: Import Python packages
Run the below cell to import the python packages.
End of explanation
"""
GCP_PROJECT_ID = 'qwiklabs-gcp-00-b1e00ce17168' # Replace with your Project-ID
DATASET_GCP_PROJECT_ID = GCP_PROJECT_ID # A copy of the data is saved in the user project
DATASET_ID = 'tfe_codelab'
TRAIN_TABLE_ID = 'ulb_fraud_detection_train'
VAL_TABLE_ID = 'ulb_fraud_detection_val'
TEST_TABLE_ID = 'ulb_fraud_detection_test'
FEATURES = ['Time','V1','V2','V3','V4','V5','V6','V7','V8','V9','V10','V11','V12','V13','V14','V15','V16','V17','V18','V19','V20','V21','V22','V23','V24','V25','V26','V27','V28','Amount']
LABEL='Class'
DTYPES=[tf.float64] * len(FEATURES) + [tf.int64]
"""
Explanation: Step 2: Define constants
Let's next define some constants for use in the project. Change GCP_PROJECT_ID to the actual project ID you are using. Go ahead and run new cells as you create them.
End of explanation
"""
client = BigQueryClient()
def read_session(TABLE_ID):
return client.read_session(
"projects/" + GCP_PROJECT_ID, DATASET_GCP_PROJECT_ID, TABLE_ID, DATASET_ID,
FEATURES + [LABEL], DTYPES, requested_streams=2
)
def extract_labels(input_dict):
features = dict(input_dict)
label = tf.cast(features.pop(LABEL), tf.float64)
return (features, label)
"""
Explanation: Step 3: Define helper functions
Now, let's define a couple functions. read_session() reads data from a BigQuery table. extract_labels() is a helper function to separate the label column from the rest, so that the dataset is in the format expected by keras.model_fit() later on.
End of explanation
"""
BATCH_SIZE = 32
# TODO 1
# Create the datasets
raw_train_data = # Your code goes here
raw_val_data = # Your code goes here
raw_test_data = # Your code goes here
next(iter(raw_train_data)) # Print first batch
"""
Explanation: Step 4: Ingest data
Finally, let's create each dataset and then print the first batch from the training dataset. Note that we have defined a BATCH_SIZE of 32. This is an important parameter that will impact the speed and accuracy of training.
End of explanation
"""
MEANS = [94816.7387536405, 0.0011219465482001268, -0.0021445914636999603, -0.002317402958335562,
-0.002525792169927835, -0.002136576923287782, -3.7586818983702984, 8.135919975738768E-4,
-0.0015535579268265718, 0.001436137140461279, -0.0012193712736681508, -4.5364970422902533E-4,
-4.6175444671576083E-4, 9.92177789685366E-4, 0.002366229151475428, 6.710217226762278E-4,
0.0010325807119864225, 2.557260815835395E-4, -2.0804190062322664E-4, -5.057391100818653E-4,
-3.452114767842334E-6, 1.0145936326270006E-4, 3.839214074518535E-4, 2.2061197469126577E-4,
-1.5601580596677608E-4, -8.235017846415852E-4, -7.298316615408554E-4, -6.898459943652376E-5,
4.724125688297753E-5, 88.73235686453587]
def norm_data(mean, data):
data = tf.cast(data, tf.float32) * 1/(2*mean)
return tf.reshape(data, [-1, 1])
numeric_columns = []
for i, feature in enumerate(FEATURES):
# TODO 2: Your code goes here
numeric_columns
"""
Explanation: Build the model
Step 1: Preprocess data
Let's create feature columns for each feature in the dataset. In this particular dataset, all of the columns are of type numeric_column, but there are a number of other column types (e.g. categorical_column).
You will also normalize the data to center it around zero so that the network converges faster. The means of each feature have been precalculated for use in this calculation.
End of explanation
"""
model = keras.Sequential([
tf.keras.layers.DenseFeatures(numeric_columns),
layers.Dense(64, activation='relu'),
layers.Dense(64, activation='relu'),
layers.Dense(1, activation='sigmoid')
])
# Compile the model
model.compile(# TODO 3: Your code goes here)
"""
Explanation: Step 2: Build the model
Now we are ready to create a model. We will feed the columns we just created into the network. Then we will compile the model. We are including the Precision/Recall AUC metric, which is useful for imbalanced datasets.
End of explanation
"""
CLASS_WEIGHT = {
0: 1,
1: 100
}
EPOCHS = 3
train_data = raw_train_data.shuffle(10000)
val_data = raw_val_data
test_data = raw_test_data
# Train the model using model.fit()
# TODO 4: Your code goes here
"""
Explanation: Step 3: Train the model
There are a number of techniques to handle imbalanced data, including oversampling (generating new data in the minority class) and undersampling (reducing the data in the majority class).
For the purposes of this codelab, let's use a technique that overweights the loss when misclassifying the minority class. You'll specify a class_weight parameter when training and weight "1" (fraud) higher, since it is much less prevalent.
You will use 3 epochs (passes through the data) in this lab so training is quicker. In a real-world scenario, you'd want to run it until you stop seeing increases in accuracy on the validation set.
End of explanation
"""
# Evaluate the model
# TODO 5: Your code goes here
"""
Explanation: Step 4: Evaluate the model
The evaluate() function can be applied to test data that the model has never seen to provide an objective assessment. Fortunately, we've set aside test data just for that!
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/image_classification/labs/5_fashion_mnist_class.ipynb
|
apache-2.0
|
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
"""
Explanation: Train a Neural Network Model to Classify Images
Learning Objectives
Pre-process image data
Build, compile, and train a neural network model
Make and verify predictions
Introduction
This lab trains a neural network model to classify images of clothing, such as sneakers and shirts. You will learn how to read and display image data, pre-process image data, build, compile, and train a neural network model, and make and verify predictions.
Each learning objective will correspond to a #TODO in the notebook, where you will complete the notebook cell's code before running the cell. Refer to the solution notebook for reference.
End of explanation
"""
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
"""
Explanation: Import the Fashion MNIST dataset
This lab uses the Fashion MNIST dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here:
<table>
<tr><td>
<img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
alt="Fashion MNIST sprite" width="600">
</td></tr>
<tr><td align="center">
<b>Figure 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>
</td></tr>
</table>
Fashion MNIST is intended as a drop-in replacement for the classic MNIST dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc.) in a format identical to that of the articles of clothing you'll use here.
This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code.
Here, 60,000 images are used to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow. Import and load the Fashion MNIST data directly from TensorFlow:
End of explanation
"""
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
"""
Explanation: Loading the dataset returns four NumPy arrays:
The train_images and train_labels arrays are the training set—the data the model uses to learn.
The model is tested against the test set, the test_images, and test_labels arrays.
The images are 28x28 NumPy arrays, with pixel values ranging from 0 to 255. The labels are an array of integers, ranging from 0 to 9. These correspond to the class of clothing the image represents:
<table>
<tr>
<th>Label</th>
<th>Class</th>
</tr>
<tr>
<td>0</td>
<td>T-shirt/top</td>
</tr>
<tr>
<td>1</td>
<td>Trouser</td>
</tr>
<tr>
<td>2</td>
<td>Pullover</td>
</tr>
<tr>
<td>3</td>
<td>Dress</td>
</tr>
<tr>
<td>4</td>
<td>Coat</td>
</tr>
<tr>
<td>5</td>
<td>Sandal</td>
</tr>
<tr>
<td>6</td>
<td>Shirt</td>
</tr>
<tr>
<td>7</td>
<td>Sneaker</td>
</tr>
<tr>
<td>8</td>
<td>Bag</td>
</tr>
<tr>
<td>9</td>
<td>Ankle boot</td>
</tr>
</table>
Each image is mapped to a single label. Since the class names are not included with the dataset, store them here to use later when plotting the images:
End of explanation
"""
train_images.shape
"""
Explanation: Explore the data
Let's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, with each image represented as 28 x 28 pixels:
End of explanation
"""
len(train_labels)
"""
Explanation: Likewise, there are 60,000 labels in the training set:
End of explanation
"""
train_labels
"""
Explanation: Each label is an integer between 0 and 9:
End of explanation
"""
test_images.shape
"""
Explanation: There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels:
End of explanation
"""
len(test_labels)
"""
Explanation: And the test set contains 10,000 image labels:
End of explanation
"""
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
"""
Explanation: Preprocess the data
The data must be preprocessed before training the network. If you inspect the first image in the training set, you will see that the pixel values fall in the range of 0 to 255:
End of explanation
"""
# Scale the values
# TODO 1
"""
Explanation: Scale these values to a range of 0 to 1 before feeding them to the neural network model. To do so, divide the values by 255. It's important that the training set and the testing set be preprocessed in the same way:
End of explanation
"""
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
"""
Explanation: To verify that the data is in the correct format and that you're ready to build and train the network, let's display the first 25 images from the training set and display the class name below each image.
End of explanation
"""
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
"""
Explanation: Build the model
Building the neural network requires configuring the layers of the model, then compiling the model.
Set up the layers
The basic building block of a neural network is the layer. Layers extract representations from the data fed into them. Hopefully, these representations are meaningful for the problem at hand.
Most of deep learning consists of chaining together simple layers. Most layers, such as tf.keras.layers.Dense, have parameters that are learned during training.
End of explanation
"""
# Compile the model
# TODO 2
"""
Explanation: The first layer in this network, tf.keras.layers.Flatten, transforms the format of the images from a two-dimensional array (of 28 by 28 pixels) to a one-dimensional array (of 28 * 28 = 784 pixels). Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.
After the pixels are flattened, the network consists of a sequence of two tf.keras.layers.Dense layers. These are densely connected, or fully connected, neural layers. The first Dense layer has 128 nodes (or neurons). The second (and last) layer returns a logits array with length of 10. Each node contains a score that indicates the current image belongs to one of the 10 classes.
Compile the model
Before the model is ready for training, it needs a few more settings. These are added during the model's compile step:
Loss function —This measures how accurate the model is during training. You want to minimize this function to "steer" the model in the right direction.
Optimizer —This is how the model is updated based on the data it sees and its loss function.
Metrics —Used to monitor the training and testing steps. The following example uses accuracy, the fraction of the images that are correctly classified.
End of explanation
"""
# "Fit" the model to the training data
# TODO 2
"""
Explanation: Train the model
Training the neural network model requires the following steps:
Feed the training data to the model. In this example, the training data is in the train_images and train_labels arrays.
The model learns to associate images and labels.
You ask the model to make predictions about a test set—in this example, the test_images array.
Verify that the predictions match the labels from the test_labels array.
Feed the model
To start training, call the model.fit method—so called because it "fits" the model to the training data:
End of explanation
"""
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
"""
Explanation: As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.91 (or 91%) on the training data.
Evaluate accuracy
Next, compare how the model performs on the test dataset:
End of explanation
"""
# Attach a softmax layer to convert the logits to probabilities - call the vaiable "probability_model"
# TODO 3
predictions = probability_model.predict(test_images)
"""
Explanation: It turns out that the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy represents overfitting. Overfitting happens when a machine learning model performs worse on new, previously unseen inputs than it does on the training data. An overfitted model "memorizes" the noise and details in the training dataset to a point where it negatively impacts the performance of the model on the new data. For more information, see the following:
* Demonstrate overfitting
* Strategies to prevent overfitting
Make predictions
With the model trained, you can use it to make predictions about some images.
The model outputs linear values, or logits. Attach a softmax layer to convert the logits to probabilities, which are easier to interpret.
End of explanation
"""
predictions[0]
"""
Explanation: Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
End of explanation
"""
np.argmax(predictions[0])
"""
Explanation: A prediction is an array of 10 numbers. They represent the model's "confidence" that the image corresponds to each of the 10 different articles of clothing. You can see which label has the highest confidence value:
End of explanation
"""
test_labels[0]
"""
Explanation: So, the model is most confident that this image is an ankle boot, or class_names[9]. Examining the test label shows that this classification is correct:
End of explanation
"""
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
"""
Explanation: Graph this to look at the full set of 10 class predictions.
End of explanation
"""
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
"""
Explanation: Verify predictions
With the model trained, you can use it to make predictions about some images.
Let's look at the 0th image, predictions, and prediction array. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percentage (out of 100) for the predicted label.
End of explanation
"""
# TODO 3
# Fill in the Code to Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
# YOUR CODE HERE:
plt.subplot(num_rows, 2*num_cols, 2*i+1)
# YOUR CODE HERE
plt.subplot(num_rows, 2*num_cols, 2*i+2)
# YOUR CODE HERE
plt.show()
"""
Explanation: Let's plot several images with their predictions. Note that the model can be wrong even when very confident.
End of explanation
"""
# Grab an image from the test dataset.
img = test_images[1]
print(img.shape)
"""
Explanation: Use the trained model
Finally, use the trained model to make a prediction about a single image.
End of explanation
"""
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
"""
Explanation: tf.keras models are optimized to make predictions on a batch, or collection, of examples at once. Accordingly, even though you're using a single image, you need to add it to a list:
End of explanation
"""
predictions_single = probability_model.predict(img)
print(predictions_single)
plot_value_array(1, predictions_single[0], test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
"""
Explanation: Now predict the correct label for this image:
End of explanation
"""
np.argmax(predictions_single[0])
"""
Explanation: keras.Model.predict returns a list of lists—one list for each image in the batch of data. Grab the predictions for our (only) image in the batch:
End of explanation
"""
|
fonnesbeck/ngcm_pandas_2016
|
notebooks/1.3 Data Manipulation with Pandas.ipynb
|
cc0-1.0
|
import pandas as pd
pd.set_option('max_rows', 10)
"""
Explanation: Data Manipulation with Pandas
End of explanation
"""
c = pd.Categorical(['a', 'b', 'b', 'c', 'a', 'b', 'a', 'a', 'a', 'a'])
c
c.describe()
c.codes
c.categories
"""
Explanation: Categorical Types
Pandas provides a convenient dtype for representing categorical, or factor, data
End of explanation
"""
c.as_ordered()
"""
Explanation: By default the Categorical type represents an unordered categorical
You can provide information about the order of categories
End of explanation
"""
dta = pd.DataFrame.from_dict({'factor': c,
'x': np.random.randn(10)})
dta.head()
dta.dtypes
dta.factor.cat
dta.factor.cat.categories
dta.factor.describe()
"""
Explanation: Support in DataFrames
When a Categorical is in a DataFrame, there is a special cat accessor
This gives access to all of the features of the Categorical type
End of explanation
"""
# [Solution Here]
%load solutions/load_nfs_categorical.py
"""
Explanation: Exercise
Load NFS data again. Convert fditemno to a Categorical Type. Use describe.
End of explanation
"""
dates = pd.date_range("1/1/2015", periods=75, freq="D")
dates
y = pd.Series(np.random.randn(75), index=dates)
y.head()
y.reset_index().dtypes
"""
Explanation: Date and Time Types
Pandas provides conveniences for working with dates
Creating a Range of Dates
End of explanation
"""
dta = (y.reset_index(name='t').
rename(columns={'index': 'y'}))
dta.head()
dta.dtypes
dta.y.dt.freq
dta.y.dt.day
"""
Explanation: Support in DataFrames
When a datetime type is in a DataFrame, there is a special dt accessor
This gives access to all of the features of the datetime type
End of explanation
"""
y.ix["2015-01-01":"2015-01-15"]
"""
Explanation: Indexing with Dates
You can use strings
Note: the ending index is inclusive here. This is different than most of the rest of Python
End of explanation
"""
y["2015-01"]
"""
Explanation: DatetimeIndex supports partial string indexing
End of explanation
"""
resample = y.resample("M")
resample.mean()
"""
Explanation: You can resample to a lower frequency, specifying how to aggregate
Uses the DatetimeIndexResampler object
End of explanation
"""
y.asfreq('H', method='ffill')
"""
Explanation: Or go to a higher frequency, optionally specifying how to fill in the missing values
End of explanation
"""
y
y.shift(1)
y.shift(-1)
"""
Explanation: There are convenience methods to lag and lead time series
End of explanation
"""
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000',
periods=1000))
ts = ts.cumsum()
rolling = ts.rolling(window=60)
rolling
rolling.mean()
"""
Explanation: Rolling and Window Functions
Pandas also provides a number of convenience functions for working on rolling or moving windows of time series through a common interface
This interface is the new Rolling object
End of explanation
"""
# [Solution here]
%load solutions/load_nfs_datetime.py
"""
Explanation: Exercise
Create a datetime column named 'date' for the NFS_1974.csv NFS diary data
styr: Survey year
stmth: Survey month
logday: Day in the log (assume logdays are actual days)
Hint: You could do this in two ways
Look at the parse_dates keyword of read_csv
Create the date after reading in the DataFrame
End of explanation
"""
# this is a bit slow because of the date parsing
transit = pd.read_csv("../data/AIS/transit_segments.csv",
parse_dates=['st_time', 'end_time'],
infer_datetime_format=True)
vessels = pd.read_csv("../data/AIS/vessel_information.csv")
"""
Explanation: Merging and Joining DataFrames
End of explanation
"""
vessels.head()
transit.head()
"""
Explanation: A lot of the time data that comes from relational databases will be normalized
I.e., redundant information will be put in separate tables
Users are expected to merge or join tables to work with them
End of explanation
"""
vessels.columns.intersection(transit.columns)
"""
Explanation: Several ships in the vessels data have traveled multiple segments as we would expect
Matching the names in the transit data to the vessels data is thus a many-to-one match
Aside: pandas Indices (of which Columns are one) are set-like
End of explanation
"""
transit.merge(vessels).head()
"""
Explanation: Merging
We can combine these two datasets for a many-to-one match
merge will use the common columns if we do not explicitly specify the columns
End of explanation
"""
A = pd.DataFrame(np.random.randn(25, 2),
index=pd.date_range('1/1/2015', periods=25))
A[2] = np.repeat(list('abcde'), 5)
A
B = pd.DataFrame(np.random.randn(5, 2))
B[2] = list('abcde')
B
A.merge(B, on=2)
"""
Explanation: Watch out, when merging on columns, indices are discarded
End of explanation
"""
transit.set_index('mmsi', inplace=True)
vessels.set_index('mmsi', inplace=True)
transit.join(vessels).head()
"""
Explanation: Joins
Join is like merge, but it works on the indices
The same could be achieved with merge and the left_index and right_index keywords
End of explanation
"""
%load solutions/join_nfs.py
"""
Explanation: Exercise
Join the 1974 Household NFS data with the Diary data
The data is in ../data/NationalFoodSurvey/NFS_1974/
End of explanation
"""
df1 = pd.read_csv('../data/ebola/guinea_data/2014-08-04.csv',
index_col=['Date', 'Description'])
df2 = pd.read_csv('../data/ebola/guinea_data/2014-08-26.csv',
index_col=['Date', 'Description'])
df1.shape
df2.shape
df1.head()
df2.head()
df1.index.is_unique
df2.index.is_unique
"""
Explanation: Concatenation
Another common operation is appending data row-wise or column-wise to an existing dataset
We can use the concat function for this
Let's import two Ebola case-report datasets from Guinea, recorded on different dates.
We will use the Date and Description columns of each dataset as the index.
End of explanation
"""
df = pd.concat((df1, df2), axis=0)
df.shape
"""
Explanation: We can concatenate on the rows
End of explanation
"""
# [Solution here]
%load solutions/concat_nfs.py
"""
Explanation: Exercise
Join all of the diary data together in a single DataFrame
Hint: you might find glob.glob useful
You will need to add a unique field identifying the survey year to each DataFrame
Hint: you might find a regular expression using re.search useful
End of explanation
"""
vessels.type
"""
Explanation: Text Data Manipulation
Much like the cat and dt accessors we've already seen
String types have a str accessor that provides fast string operations on columns
End of explanation
"""
vessels.type.str.count('/').max()
"""
Explanation: Count the vessel separators
End of explanation
"""
vessels.type.str.split('/', expand=True)
"""
Explanation: Split on these accessors and expand to return a DataFrame with nan-padding
End of explanation
"""
# [Solution here]
%load solutions/nfs_dairy.py
"""
Explanation: Exercise
Load the file "Ref_ food groups.txt"
Get all of the food groups that contain the word milk
End of explanation
"""
|
softctrl/nd101-tv-script-generation
|
dlnd_tv_script_generation.ipynb
|
agpl-3.0
|
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
from collections import Counter
import problem_unittests as tests
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
# TODO: Implement Function
word_ctr = Counter(text)
sorted_vocab = sorted(word_ctr, key=word_ctr.get, reverse=True)
int_to_vocab = {i: word for i, word in enumerate(sorted_vocab)}
vocab_to_int = {word: i for i, word in int_to_vocab.items()}
return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
"""
# TODO: Implement Function
dict_punctuation_token = {
'.' : '||Period||',
',' : '||Comma||',
'"' : '||Quotation_Mark||',
';' : '||Semicolon||',
'!' : '||Exclamation_Mark||',
'?' : '||Question_mark||',
'(' : '||Left_Parentheses||',
')' : '||Right_Parentheses||',
'--' : '||Dash||',
'\n' : '||Return||'
}
return dict_punctuation_token
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
# TODO: Implement Function
inputs = tf.placeholder(tf.int32, shape=[None, None], name='input')
targets = tf.placeholder(tf.int32, shape=[None, None], name='targets')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
"""
# TODO: Implement Function
lstm_layers = 2
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=0.5)
cell = tf.contrib.rnn.MultiRNNCell([lstm] * lstm_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
initial_state = tf.identity(initial_state, name='initial_state')
return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The Rnn size should be set using rnn_size
- Initalize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
# TODO: Implement Function
embedding = tf.Variable(tf.random_uniform(shape=[vocab_size, embed_dim], minval=-1,maxval=1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
"""
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
# TODO: Implement Function
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name='final_state')
return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
"""
# TODO: Implement Function
embed = get_embed(input_data, vocab_size, rnn_size)
outputs, FinalState = build_rnn(cell, embed)
Logits = tf.contrib.layers.fully_connected(outputs,
vocab_size,
weights_initializer=tf.random_uniform_initializer(-1, 1),
biases_initializer=tf.zeros_initializer(),
activation_fn=None
)
return Logits, FinalState
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
# TODO: Implement Function
n_batches = len(int_text) // (batch_size * seq_length)
x_data = np.array(int_text[:n_batches * batch_size * seq_length])
y_data = np.roll(x_data, -1)
x_batches = np.split(x_data.reshape(batch_size, -1), n_batches, 1)
y_batches = np.split(y_data.reshape(batch_size, -1), n_batches, 1)
batches = np.array(list(zip(x_batches, y_batches)))
return batches
# print('## I am using the example on this question:')
# print('get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2):')
# print(get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2))
# print('##')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2], [ 7 8], [13 14]]
# Batch of targets
[[ 2 3], [ 8 9], [14 15]]
]
# Second Batch
[
# Batch of Input
[[ 3 4], [ 9 10], [15 16]]
# Batch of targets
[[ 4 5], [10 11], [16 17]]
]
# Third Batch
[
# Batch of Input
[[ 5 6], [11 12], [17 18]]
# Batch of targets
[[ 6 7], [12 13], [18 1]]
]
]
```
Notice that the last target value in the last batch is the first input value of the first batch. In this case, 1. This is a common technique used when creating sequence batches, although it is rather unintuitive.
End of explanation
"""
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 256
# Embedding Dimension Size
embed_dim = None
# Sequence Length
seq_length = 25
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 50
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
# TODO: Implement Function
InputTensor = loaded_graph.get_tensor_by_name("input:0")
InitialStateTensor = loaded_graph.get_tensor_by_name("initial_state:0")
FinalStateTensor = loaded_graph.get_tensor_by_name("final_state:0")
ProbsTensor = loaded_graph.get_tensor_by_name("probs:0")
return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
# TODO: Implement Function
predicted_word = int_to_vocab[int(np.searchsorted(np.cumsum(probabilities),
np.sum(probabilities) * np.random.rand(1)))]
return predicted_word
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
dilipbobby/DataScience
|
Numpy/numpyclass.ipynb
|
apache-2.0
|
import numpy as np
import numpy.matlib
"""
Explanation: cs-1
Numpy
Topics:
Intro to numpy,
Ndarray Object,
Eg Array creation,
Array Attributes
Numpy:
NumPy is the fundamental package needed for scientific computing with Python. It contains:
a powerful N-dimensional array object
basic linear algebra functions
basic Fourier transforms
sophisticated random number capabilities
Extra features:
–fast, multidimensional arrays
–libraries of reliable, tested scientific functions
–plotting tools
NumPy is at the core of nearly every scientific Python application or module since it provides a fast N-d array datatype that can be manipulated in a vectorized form.
Why do we need numpy?
Lists ok for storing small amounts of one-dimensional data.
But, can’t use directly with arithmetical operators (+, -, *, /, …)
Need efficient arrays with arithmetic and better multidimensional tools
How Numpy is useful
Similar to lists, but much more capable, except fixed size
NumPy is a hybrid of the older NumArray and Numeric packages, and is meant to replace them both.
NumPy adds a new data structure to Python – the ndarray.
An N-dimensional array is a homogeneous collection of “items” indexed using N integers
Defined by:
The shape of the array, and the kind of item the array is composed of.
End of explanation
"""
# Eg : one dimensional
a = np.array([1,2,3,4])
print("One dim ")
print(a)
print(type(a))
#more than one dimension
b = np.array([[1, 2], [3, 4]])
print("Two dims")
print(b)
#using ndim
c=np.array([1,2,3,4,5], ndmin = 2)
print("Two dimensional")
print(c.ndim)
print(c.shape)
print(c)
#dtype:
np.array([1, 2, 3], dtype = complex)
"""
Explanation: Every ndarray is a homogeneous collection of exactly the same data-type
every item takes up the same size block of memory
each block of memory in the array is interpreted in exactly the same way.
Array Creation :
There are a number of ways to initialize new numpy arrays, for example from
– a Python list or tuples
– using functions that are dedicated to generating numpy arrays, such as arange, linspace, etc.
– reading data from files
The basic ndarray is created using an array function in NumPy: numpy.array
syntax : numpy.array(object, dtype = None, copy = True, order = None, subok = False, ndmin = 0)
returns a array object
End of explanation
"""
arrey=np.array([[1,2,3],[4,5,6]])
arrey.ndim
print(arrey)
"""
Explanation: Numpy Attributes :
NumPy’s array class is called ndarray. It is also known by the alias array. Note that numpy.array is not the same as the Standard Python Library class array.array, which only handles one-dimensional arrays and offers less functionality.
ndarray.ndim
the number of axes (dimensions) of the array. In the Python world, the number of dimensions is referred to as rank. This array attribute returns the number of dimensions of the array.
End of explanation
"""
arrey = np.array([[1,2,3],[4,5,6]])
print(arrey)
print(arrey.shape)
#reshape ndarray in place by assigning to .shape
arrey = np.array([[1,2,3],[4,5,6]])
arrey.shape = (3,2)
print(arrey)
#Reshape: NumPy also provides a reshape function that returns an array with a new shape.
barray = arrey.reshape(2,3)
print(barray)
"""
Explanation: ndarray.shape
the dimensions of the array. This is a tuple of integers indicating the size of the array in each dimension. For a matrix with n rows and m columns, shape will be (n,m). The length of the shape tuple is therefore the rank, or number of dimensions, ndim.
End of explanation
"""
arrey.size
"""
Explanation: ndarray.size :
Total number of elements of the array. This is equal to the product of the elements of shape.
End of explanation
"""
arrey.dtype
"""
Explanation: ndarray.dtype :
an object describing the type of the elements in the array. One can create or specify dtype’s using standard Python types. Additionally NumPy provides types of its own. numpy.int32, numpy.int16, and numpy.float64 are some examples.
End of explanation
"""
#ax = np.array([1,2,3,4,5], dtype = np.int16)
ax = np.array([1,2,3,4,5], dtype = np.float32)
ax.itemsize
"""
Explanation: ndarray.itemsize:
This array attribute returns the length of each element of array in bytes.
End of explanation
"""
ax.data
"""
Explanation: ndarray.data
the buffer containing the actual elements of the array. Normally, we won’t need to use this attribute because we will access the elements in an array using indexing facilities.
End of explanation
"""
dt = np.dtype(np.int32)
dt
"""
Explanation: CS-2
Topics
Data types,Array creation,
Numeric Ranges,Indexing and slicing.
dtype:
A dtype object is constructed using the following
syntax −
numpy.dtype(object, align, copy)
Object − To be converted to data type object
Align − If true, adds padding to the field to make it similar to C-struct
Copy − Makes a new copy of dtype object. If false, the result is reference to builtin data type object.
End of explanation
"""
np.empty([3,3], dtype = int)
"""
Explanation: Array creation:
NumPy offers several functions to create arrays with initial placeholder content.
numpy.empty
Syntax: numpy.empty(shape, dtype = float, order = 'C')
Shape : Shape of an empty array in int or tuple of int
Dtype : Desired output data type. Optional
Order :'C' for C-style row-major array, 'F' for FORTRAN style column-major array
End of explanation
"""
print(np.zeros(5))
np.zeros((3,3))
"""
Explanation: numpy.zeros
Returns a new array of specified size, filled with zeros.
Syntax : numpy.zeros(shape, dtype = float, order = 'C')
End of explanation
"""
np.ones(5)
np.ones([2,2], dtype = int)
"""
Explanation: numpy.ones
Returns a new array of specified size and type, filled with ones.
Syntax : numpy.ones(shape, dtype = None, order = 'C')
End of explanation
"""
x = [1,2,3]
a = np.asarray(x)
print(a)
print(type(a))
a.shape
"""
Explanation: Note: zeros_like, ones_like, empty_like, arange, fromfunction, fromfile
numpy.asarray
This function is similar to numpy.array except for the fact that it has fewer parameters.
syntax : numpy.asarray(a, dtype = None, order = None)
End of explanation
"""
np.arange(5,9,2)
"""
Explanation: Numeric ranges
This function returns an ndarray object containing evenly spaced values within a given range.
syntax: numpy.arange(start, stop, step, dtype)
End of explanation
"""
np.linspace(10,20,num=5,endpoint=False,retstep=False)
"""
Explanation: numpy.linspace
This function is similar to the arange() function. Instead of a step size, the number of evenly spaced values within the interval is specified.
syntax: numpy.linspace(start, stop, num, endpoint, retstep, dtype)
retstep : If true, returns samples and step between the consecutive numbers.
endpoint : True by default, hence the stop value is included in the sequence. If false, it is not included
End of explanation
"""
np.logspace(1.0, 2.0, num = 5)
np.logspace(1.0, 2.0, num = 5,base=2)
"""
Explanation: numpy.logspace
This function returns an ndarray object that contains the numbers that are evenly spaced on a log scale
syntax : numpy.logscale(start, stop, num, endpoint, base, dtype)
End of explanation
"""
o = np.linspace(0, 4, 9)
print(o)
o.resize(3, 3)
o
"""
Explanation: resize changes the shape and size of array in-place.
End of explanation
"""
np.eye(2)
#import numpy.matlib
#np.matlib.eye(n = 3, M = 4, k = 0, dtype = float)
"""
Explanation: eye returns a 2-D array with ones on the diagonal and zeros elsewhere.
End of explanation
"""
y=[1,2,3]
np.diag(y)
"""
Explanation: diag extracts a diagonal or constructs a diagonal array.
End of explanation
"""
#using numpy
np.repeat([1, 2, 3], 3)
p = np.ones([2, 3], int)
p
#vstack to stack arrays in sequence vertically (row wise).
np.vstack([p, 2*p])
#hstack to stack arrays in sequence horizontally (column wise)
np.hstack([p, 2*p])
"""
Explanation: Create an array using repeating list (pythonic way)
End of explanation
"""
s = np.arange(13)*2
s
#indexing
s[0], s[4], s[-1]
"""
Explanation: Indexing / Slicing
Three types of indexing methods are available − field access, basic slicing and advanced indexing.
End of explanation
"""
s[1:5]
#Use negatives to count from the back.
s[-4:]
#can be used to indicate step-size. array[start:stop:stepsize]
#Here we are starting 5th element from the end, and counting backwards by 2 until the beginning of the array is reached.
s[5::2]
#Let's look at a multidimensional array.
m = np.arange(36)
m.resize((6, 6))
m
#Use bracket notation to slice: array[row, column]
m[2, 2]
#to select a range of rows or columns
m[3, 3:]
#We can also perform conditional indexing. Here we are selecting values from the array that are greater than 30.
m[m > 30]
#Here we are assigning all values in the array that are greater than 30 to the value of 30
m[m > 30] = 30
m
x = np.arange(10)
print(x)
s=slice(2,7,2)
print("Done",x[s])
"""
Explanation: To indicate a range. array[start:stop]
Leaving start or stop empty will default to the beginning/end of the array.
End of explanation
"""
print(np.sin(0))
a = np.array([0,30,45,60,90])
print ('Sine of different angles:')
# Convert to radians by multiplying with pi/180
print (np.sin(a*np.pi/180))
print ('Cosine values for angles in array:')
print (np.cos(a*np.pi/180) )
print ('Tangent values for given angles:')
print (np.tan(a*np.pi/180))
#inverse tri
a = np.array([0,30,45,60,90])
#print 'Array containing sine values:'
sin = np.sin(a*np.pi/180)
print( sin )
print ('\n')
print ('Compute sine inverse of angles. Returned values are in radians.')
inv = np.arcsin(sin)
print (inv )
print ('\n')
print( 'Check result by converting to degrees:' )
print (np.degrees(inv))
print ('\n')
print ('arccos and arctan functions behave similarly:' )
cos = np.cos(a*np.pi/180)
print (cos)
print ('\n')
print ('Inverse of cos:')
inv = np.arccos(cos)
print (inv)
print ('\n')
print ('In degrees:')
print (np.degrees(inv))
print ('\n')
print ('Tan function:' )
tan = np.tan(a*np.pi/180)
print (tan)
print ('Inverse of tan:')
inv = np.arctan(tan)
print (inv)
print ('\n')
print ('In degrees:' )
print (np.degrees(inv))
"""
Explanation: cs-3
Topics :
Math functions,
Basic operations,
Statistical Functions,
Copies & Views,
Broadcasting,
Iterating Over Array,
ix() function
Math functions :NumPy contains a large number of various mathematical operations. NumPy provides standard trigonometric functions, functions for arithmetic operations, handling complex numbers, etc.
Trigonometric Functions:
NumPy has standard trigonometric functions which return trigonometric ratios for a given angle in radians.
np.sin()
np.cos()
np.tan()
arcsin, arcos, and arctan functions return the trigonometric inverse of sin, cos, and tan of the given angle.
The result of these functions can be verified by numpy.degrees() function by converting radians to degrees.
End of explanation
"""
#round off
a = np.array([1.0,5.55, 123, 0.567, 25.532])
print ('Original array:')
print (a )
print ('\n')
print ('After rounding:')
print (np.around(a))
print (np.around(a, decimals = 1))
"""
Explanation: numpy.around()
This is a function that returns the value rounded to the desired precision. The function takes the following parameters.
syntax : numpy.around(a,decimals)
End of explanation
"""
a = np.array([-1.7, 1.5, -0.2, 0.6, 10])
print ('array:')
print (a)
print ('\n')
print ('The modified array:')
#floor: returns the largest integer <= each element
print (np.floor(a))
#ceil: returns the smallest integer >= each element
print (np.ceil(a))
"""
Explanation: numpy.floor()
This function returns the largest integer not greater than the input parameter. The floor of the scalar x is the largest integer i, such that i <= x. Note that NumPy's floor rounds toward negative infinity (so floor(-1.7) is -2.0), which is different from truncation toward zero.
End of explanation
"""
x=np.array([1,2,3])
y=np.array([4,5,6])
print(x + y) # elementwise addition [1 2 3] + [4 5 6] = [5 7 9]
print('\n')
print(x - y) # elementwise subtraction [1 2 3] - [4 5 6] = [-3 -3 -3]
print(x * y) # elementwise multiplication [1 2 3] * [4 5 6] = [4 10 18]
print(x / y) # elementwise division [1 2 3] / [4 5 6] = [0.25 0.4 0.5]
print(x**2) # elementwise power [1 2 3] ^2 = [1 4 9]
a = np.arange(9, dtype = np.float).reshape(3,3)
print ('First array:')
print (a )
print ('Second array:' )
b = np.array([10,10,10])
print (b )
print ('\n')
print ('Add the two arrays:')
print (np.add(a,b))
print ('\n')
print ('Subtract the two arrays:')
print (np.subtract(a,b))
print ('\n')
print ('Multiply the two arrays:')
print (np.multiply(a,b))
print ('Divide the two arrays:')
print (np.divide(a,b))
"""
Explanation: Basic operations:
Input arrays for performing arithmetic operations such as add(), subtract(), multiply(), and divide() must be either of the same shape or should conform to array broadcasting rules.
Use +, -, *, / and ** to perform element-wise addition, subtraction, multiplication, division and power.
End of explanation
"""
a = np.array([-4, -2, 1, 3, 5])
a.sum()
a.max()
a.min()
np.average(a)
a.mean()
a.std() #Standard deviation is the square root of the average of squared deviations from mean
"""
Explanation: Statistical Functions:
NumPy has quite a few useful statistical functions for finding the minimum, maximum, percentile, standard deviation and variance, etc. from the given elements in the array.
End of explanation
"""
np.var([1,2,3,4])
a.argmax()
a.argmin()
"""
Explanation: Variance is the average of squared deviations, i.e., mean(abs(x - x.mean())**2).
In other words, the standard deviation is the square root of variance.
End of explanation
"""
#no copy
a = np.arange(6)
print ('Our array is:' )
print (a )
print ('Applying id() function:')
print (id(a))
print ('a is assigned to b:' )
b = a
print (b)
print ('b has same id():')
print (id(b))
print ('Change shape of b:')
b.shape = 3,2
print (b)
print ('Shape of a also gets changed:')
print (a)
#view
a = np.array([1,2,3,4])
#print 'Array a:'
print (a )
print(id(a))
#Create view of a:
b = a.view()
print( b )
b.shape=(2,2)
print(id(b))
print (b is a)
print(b.shape)
print(a.shape)
#copy
a = np.array([[10,10], [2,3], [4,5]])
print ('Array a is:')
print( a)
# 'Create a deep copy of a:'
b = a.copy()
print ('Array b is:')
print (b)
#b does not share any memory of a
print ('Can we write b is a')
print (b is a)
"""
Explanation: Copies & Views :
No Copy:
Simple assignments do not make a copy of the array object. Instead, the new name uses the same id() as the original array to access it. The id() function returns a universal identifier of a Python object, similar to a pointer in C.
View or Shallow Copy:
NumPy has an ndarray.view() method which returns a new array object that looks at the same data as the original array.
Deep copy:
The ndarray.copy() function creates a deep copy. It is a complete copy of the array and its data, and doesn’t share with the original array.
End of explanation
"""
#normal example
a = np.array([1,2,3,4])
b = np.array([10,20,30,40])
print(a.shape)
print(b.shape)
c = a * b
print (c)
#Broadcasting
x = np.arange(4)
y = np.ones(5)
xb=x.reshape(4,1)
print(xb)
#bd
print(xb + y)
(xb + y).shape
"""
Explanation: Broadcasting :
The term broadcasting refers to the ability of NumPy to treat arrays of different shapes during arithmetic operations.
End of explanation
"""
#Matrix operations
z = np.array([y, y**2])
print(len(z)) # number of rows of array
"""
Explanation: Note : If the dimensions of two arrays are dissimilar, element-to-element operations are not possible. However, operations on arrays of non-similar shapes are still possible in NumPy, because of the broadcasting capability.
End of explanation
"""
y=np.arange(5)
z = np.array([y, y ** 2])
z
#The shape of array z is (2,3) before transposing.
z.shape
z.T
"""
Explanation: Let's look at transposing arrays. Transposing permutes the dimensions of the array.
End of explanation
"""
x=np.array([1,2,3])
y=np.array([4,5,6])
x.dot(y) # dot product 1*4 + 2*5 + 3*6
"""
Explanation: Dot Product:
[x1, x2, x3] · [y1, y2, y3] = x1*y1 + x2*y2 + x3*y3
End of explanation
"""
tp = np.random.randint(0, 10, (4,3))
tp
#Iterate by row:
for row in tp:
print(row)
#Iterate by index:
for i, row in enumerate(tp):
print('row', i, 'is', row)
#Use zip to iterate over multiple iterables.
tp2=tp*2
tp2
for i, j in zip(tp, tp2):
print(i,'+',j,'=',i+j)
"""
Explanation: Iterating Over Arrays
create a new 4 by 3 array of random numbers 0-9.
End of explanation
"""
a = np.arange(0,60,5)
a = a.reshape(3,4)
print ('Original array is:')
print (a)
print ('\n')
print ('Modified array is:')
for x in np.nditer(a):
print (x)
"""
Explanation: NumPy package contains an iterator object numpy.nditer. It is an efficient multidimensional iterator object using which it is possible to iterate over an array.
End of explanation
"""
a = np.array([2,3,4,5])
b = np.array([8,5,4])
c = np.array([5,4,6,8,3])
ax,bx,cx = np.ix_(a,b,c)
result = ax+bx*cx
result
result[3,2,4]
a[3]+b[2]*c[4]
"""
Explanation: ix_() function:
The ix_ function can be used to combine different vectors so as to obtain the result for each n-tuple (one element taken from each vector).
End of explanation
"""
#NumPy package contains a Matrix library numpy.matlib.
import numpy.matlib
#matlib.empty()
#numpy.matlib.empty(shape, dtype, order)
print (np.matlib.empty((2,2)))
print('\n')
#ones
print (np.matlib.ones((2,2)))
print('\n')
#random
print (np.matlib.rand(3,3))
print('\n')
#This function returns the matrix filled with zeros.
#numpy.matlib.zeros()
print (np.matlib.zeros((2,2)))
print('\n')
#numpy.matlib.eye()
#This function returns a matrix with 1 along the diagonal elements and the zeros elsewhere. The function takes the following parameters.
#numpy.matlib.eye(n, M,k, dtype)
print (np.matlib.eye(n = 3, M = 3, k = 1, dtype = float))
print('\n')
#numpy.matlib.identity()
#The numpy.matlib.identity() function returns the Identity matrix of the given size.
#An identity matrix is a square matrix with all diagonal elements as 1.
np.matlib.identity(3)
#creation matrix
i = np.matrix('1,2,3,4')
print(i)
#list to matrix
lst = [1,2,3,4]
k = np.asmatrix(lst)
print(k)
print(type(k))
"""
Explanation: cs-4
Topics :
Matlib subpackage,
matrix,
linear algebra method,
matplotlib using numpy.
End of explanation
"""
#det
b = np.array([[6,1,1], [4, -2, 5], [2,8,7]])
print (b)
print('\n')
print (np.linalg.det(b))
print('\n')
print (6*(-2*7 - 5*8) - 1*(4*7 - 5*2) + 1*(4*8 - -2*2))
#dot
#Dot product of the two arrays
#vdot
#Dot product of the two vectors
#linear
dou = np.array([[1,2],[3,4]])
bou = np.array([[11,12],[13,14]])
print(np.dot(dou,bou)) #[[1*11+2*13, 1*12+2*14],[3*11+4*13, 3*12+4*14]]
print('\n')
print(np.vdot(dou,bou)) #1*11 + 2*12 + 3*13 + 4*14
#Solve the system of equations 3 * x0 + x1 = 9 and x0 + 2 * x1 = 8:
al = np.array([[3,1], [1,2]])
bl = np.array([9,8])
x = np.linalg.solve(al, bl)
print(x)
a = np.array([[1,1,1],[0,2,5],[2,5,-1]])
#'Array a
print (a)
print('\n')
ainv = np.linalg.inv(a)
print(ainv)
"""
Explanation: NumPy package contains numpy.linalg module that provides all the functionality required for linear algebra
End of explanation
"""
from matplotlib import pyplot as plt
x = np.arange(1,11)
y = 2 * x + 5
plt.title("Matplotlib demo")
plt.xlabel("x axis caption")
plt.ylabel("y axis caption")
plt.plot(x,y)
plt.show()
N = 8
y = np.zeros(N)
y
x1 = np.linspace(0, 10, N, endpoint=True)
x2 = np.linspace(0, 10, N, endpoint=False)
plt.plot(x1, y, 'o')
plt.plot(x2, y + 0.5, 'o')
plt.ylim([-0.5, 1])
plt.show()
"""
Explanation: Using Matplotlib with numpy
End of explanation
"""
import time
import numpy as np
size_of_vec = 100000
def pure_python_version():
t1 = time.time()
X = range(size_of_vec)
Y = range(size_of_vec)
Z = []
for i in range(len(X)):
Z.append(X[i] + Y[i])
return time.time() - t1
def numpy_version():
t1 = time.time()
X = np.arange(size_of_vec)
Y = np.arange(size_of_vec)
Z = X + Y
return time.time() - t1
t1 = pure_python_version()
t2 = numpy_version()
print(t1, t2)
#print("this example Numpy is " + str(t1/t2) + " faster!")
"""
Explanation: Overview
End of explanation
"""
|
magenta/ddsp
|
ddsp/colab/demos/train_autoencoder.ipynb
|
apache-2.0
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Explanation: <a href="https://colab.research.google.com/github/magenta/ddsp/blob/main/ddsp/colab/demos/train_autoencoder.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright 2020 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
!pip install -qU ddsp[data_preparation]==1.6.3
# Initialize global path for using google drive.
DRIVE_DIR = ''
"""
Explanation: Train a DDSP Autoencoder on GPU
This notebook demonstrates how to install the DDSP library and train it for synthesis based on your own data using our command-line scripts. If run inside of Colab, it will automatically use a free Google Cloud GPU.
At the end, you'll have a custom-trained checkpoint that you can download to use with the DDSP Timbre Transfer Colab.
<img src="https://storage.googleapis.com/ddsp/additive_diagram/ddsp_autoencoder.png" alt="DDSP Autoencoder figure" width="700">
Note that we prefix bash commands with a ! inside of Colab, but you would leave them out if running directly in a terminal.
Install Dependencies
First we install the required dependencies with pip.
End of explanation
"""
from google.colab import drive
drive.mount('/content/drive')
"""
Explanation: Setup Google Drive (Optional, Recommeded)
This notebook requires uploading audio and saving checkpoints. While you can do this with direct uploads / downloads, it is recommended to connect to your google drive account. This will enable faster file transfer, and regular saving of checkpoints so that you do not lose your work if the colab kernel restarts (common for training more than 12 hours).
Login and mount your drive
This will require an authentication code. You should then be able to see your drive in the file browser on the left panel.
End of explanation
"""
#@markdown (ex. `/content/drive/My Drive/...`) Leave blank to skip loading from Drive.
DRIVE_DIR = '' #@param {type: "string"}
import os
assert os.path.exists(DRIVE_DIR)
print('Drive Folder Exists:', DRIVE_DIR)
"""
Explanation: Set your base directory
In drive, put all of the audio (.wav, .mp3) files with which you would like to train in a single folder.
Typically works well with 10-20 minutes of audio from a single monophonic source (also, one acoustic environment).
Use the file browser in the left panel to find a folder with your audio, right-click "Copy Path", paste below, and run the cell.
End of explanation
"""
AUDIO_DIR = 'data/audio'
AUDIO_FILEPATTERN = AUDIO_DIR + '/*'
!mkdir -p $AUDIO_DIR
if DRIVE_DIR:
SAVE_DIR = os.path.join(DRIVE_DIR, 'ddsp-solo-instrument')
else:
SAVE_DIR = '/content/models/ddsp-solo-instrument'
!mkdir -p "$SAVE_DIR"
"""
Explanation: Make directories to save model and data
End of explanation
"""
import glob
import os
from ddsp.colab import colab_utils
if DRIVE_DIR:
mp3_files = glob.glob(os.path.join(DRIVE_DIR, '*.mp3'))
wav_files = glob.glob(os.path.join(DRIVE_DIR, '*.wav'))
audio_files = mp3_files + wav_files
else:
audio_files, _ = colab_utils.upload()
for fname in audio_files:
target_name = os.path.join(AUDIO_DIR,
os.path.basename(fname).replace(' ', '_'))
print('Copying {} to {}'.format(fname, target_name))
!cp "$fname" $target_name
"""
Explanation: Prepare Dataset
Upload training audio
Upload audio files to use for training your model. Uses DRIVE_DIR if connected to drive, otherwise prompts local upload.
End of explanation
"""
import glob
import os
TRAIN_TFRECORD = 'data/train.tfrecord'
TRAIN_TFRECORD_FILEPATTERN = TRAIN_TFRECORD + '*'
# Copy dataset from drive if dataset has already been created.
drive_data_dir = os.path.join(DRIVE_DIR, 'data')
drive_dataset_files = glob.glob(drive_data_dir + '/*')
if DRIVE_DIR and len(drive_dataset_files) > 0:
!cp "$drive_data_dir"/* data/
else:
# Make a new dataset.
if not glob.glob(AUDIO_FILEPATTERN):
raise ValueError('No audio files found. Please use the previous cell to '
'upload.')
!ddsp_prepare_tfrecord \
--input_audio_filepatterns=$AUDIO_FILEPATTERN \
--output_tfrecord_path=$TRAIN_TFRECORD \
--num_shards=10 \
--alsologtostderr
# Copy dataset to drive for safe-keeping.
if DRIVE_DIR:
!mkdir "$drive_data_dir"/
print('Saving to {}'.format(drive_data_dir))
!cp $TRAIN_TFRECORD_FILEPATTERN "$drive_data_dir"/
"""
Explanation: Preprocess raw audio into TFRecord dataset
We need to do some preprocessing on the raw audio you uploaded to get it into the correct format for training. This involves turning the full audio into short (4-second) examples, inferring the fundamental frequency (or "pitch") with CREPE, and computing the loudness. These features will then be stored in a sharded TFRecord file for easier loading. Depending on the amount of input audio, this process usually takes a few minutes.
(Optional) Transfer dataset from drive. If you've already created a dataset, from a previous run, this cell will skip the dataset creation step and copy the dataset from $DRIVE_DIR/data
End of explanation
"""
from ddsp.colab import colab_utils
import ddsp.training
data_provider = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN)
dataset = data_provider.get_dataset(shuffle=False)
PICKLE_FILE_PATH = os.path.join(SAVE_DIR, 'dataset_statistics.pkl')
_ = colab_utils.save_dataset_statistics(data_provider, PICKLE_FILE_PATH, batch_size=1)
"""
Explanation: Save dataset statistics for timbre transfer
Quantile normalization helps match loudness of timbre transfer inputs to the
loudness of the dataset, so let's calculate it here and save in a pickle file.
End of explanation
"""
from ddsp.colab import colab_utils
import ddsp.training
from matplotlib import pyplot as plt
import numpy as np
data_provider = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN)
dataset = data_provider.get_dataset(shuffle=False)
try:
ex = next(iter(dataset))
except StopIteration:
raise ValueError(
'TFRecord contains no examples. Please try re-running the pipeline with '
'different audio file(s).')
colab_utils.specplot(ex['audio'])
colab_utils.play(ex['audio'])
f, ax = plt.subplots(3, 1, figsize=(14, 4))
x = np.linspace(0, 4.0, 1000)
ax[0].set_ylabel('loudness_db')
ax[0].plot(x, ex['loudness_db'])
ax[1].set_ylabel('F0_Hz')
ax[1].set_xlabel('seconds')
ax[1].plot(x, ex['f0_hz'])
ax[2].set_ylabel('F0_confidence')
ax[2].set_xlabel('seconds')
ax[2].plot(x, ex['f0_confidence'])
"""
Explanation: Let's load the dataset in the ddsp library and have a look at one of the examples.
End of explanation
"""
%reload_ext tensorboard
import tensorboard as tb
tb.notebook.start('--logdir "{}"'.format(SAVE_DIR))
"""
Explanation: Train Model
We will now train a "solo instrument" model. This means the model is conditioned only on the fundamental frequency (f0) and loudness with no instrument ID or latent timbre feature. If you uploaded audio of multiple instruments, the neural network you train will attempt to model all timbres, but will likely associate certain timbres with different f0 and loudness conditions.
First, let's start up a TensorBoard to monitor our loss as training proceeds.
Initially, TensorBoard will report No dashboards are active for the current data set., but once training begins, the dashboards should appear.
End of explanation
"""
!ddsp_run \
--mode=train \
--alsologtostderr \
--save_dir="$SAVE_DIR" \
--gin_file=models/solo_instrument.gin \
--gin_file=datasets/tfrecord.gin \
--gin_param="TFRecordProvider.file_pattern='$TRAIN_TFRECORD_FILEPATTERN'" \
--gin_param="batch_size=16" \
--gin_param="train_util.train.num_steps=30000" \
--gin_param="train_util.train.steps_per_save=300" \
--gin_param="trainers.Trainer.checkpoints_to_keep=10"
"""
Explanation: We will now begin training.
Note that we specify gin configuration files for both the model architecture (solo_instrument.gin) and the dataset (tfrecord.gin), which are both predefined in the library. You could also create your own. We then override some of the specific params for batch_size (which is defined in the model gin file) and the tfrecord path (which is defined in the dataset file).
Training Notes:
Models typically perform well when the loss drops to the range of ~4.5-5.0.
Depending on the dataset this can take anywhere from 5k-30k training steps usually.
The default is set to 30k, but you can stop training at any time, and for timbre transfer, it's best to stop before the loss drops too far below ~5.0 to avoid overfitting.
On the colab GPU, this can take from around 3-20 hours.
We highly recommend saving checkpoints directly to your drive account as colab will restart naturally after about 12 hours and you may lose all of your checkpoints.
By default, checkpoints will be saved every 300 steps with a maximum of 10 checkpoints (at ~60MB/checkpoint this is ~600MB). Feel free to adjust these numbers depending on the frequency of saves you would like and space on your drive.
If you're restarting a session and DRIVE_DIR points to a directory that was previously used for training, training should resume at the last checkpoint.
End of explanation
"""
from ddsp.colab.colab_utils import play, specplot
import ddsp.training
import gin
from matplotlib import pyplot as plt
import numpy as np
data_provider = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN)
dataset = data_provider.get_batch(batch_size=1, shuffle=False)
try:
batch = next(iter(dataset))
except StopIteration:
raise ValueError(
'TFRecord contains no examples. Please try re-running the pipeline with '
'different audio file(s).')
# Parse the gin config.
gin_file = os.path.join(SAVE_DIR, 'operative_config-0.gin')
gin.parse_config_file(gin_file)
# Load model
model = ddsp.training.models.Autoencoder()
model.restore(SAVE_DIR)
# Resynthesize audio.
outputs = model(batch, training=False)
audio_gen = model.get_audio_from_outputs(outputs)
audio = batch['audio']
print('Original Audio')
specplot(audio)
play(audio)
print('Resynthesis')
specplot(audio_gen)
play(audio_gen)
"""
Explanation: Resynthesis
Check how well the model reconstructs the training data
End of explanation
"""
from ddsp.colab import colab_utils
import tensorflow as tf
import os
CHECKPOINT_ZIP = 'my_solo_instrument.zip'
latest_checkpoint_fname = os.path.basename(tf.train.latest_checkpoint(SAVE_DIR))
!cd "$SAVE_DIR" && zip $CHECKPOINT_ZIP $latest_checkpoint_fname* operative_config-0.gin dataset_statistics.pkl
!cp "$SAVE_DIR/$CHECKPOINT_ZIP" ./
colab_utils.download(CHECKPOINT_ZIP)
"""
Explanation: Download Checkpoint
Below you can download the final checkpoint. You are now ready to use it in the DDSP Timbre Transfer Colab.
End of explanation
"""
|
vzg100/Post-Translational-Modification-Prediction
|
.ipynb_checkpoints/Phosphorylation Sequence Tests -Bagging -dbptm+ELM-checkpoint.ipynb
|
mit
|
from pred import Predictor
from pred import sequence_vector
from pred import chemical_vector
"""
Explanation: Template for test
End of explanation
"""
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
for i in par:
print("y", i)
y = Predictor()
y.load_data(file="Data/Training/clean_s_filtered.csv")
y.process_data(vector_function="sequence", amino_acid="S", imbalance_function=i, random_data=0)
y.supervised_training("xgb")
y.benchmark("Data/Benchmarks/phos.csv", "S")
del y
print("x", i)
x = Predictor()
x.load_data(file="Data/Training/clean_s_filtered.csv")
x.process_data(vector_function="sequence", amino_acid="S", imbalance_function=i, random_data=1)
x.supervised_training("xgb")
x.benchmark("Data/Benchmarks/phos.csv", "S")
del x
"""
Explanation: Controlling for Random Negative vs Sans Random in Imbalanced Techniques using S, T, and Y Phosphorylation.
N phosphorylation is also included; however, no benchmarks are available for it yet.
Training data is from phospho.elm and benchmarks are from dbptm.
Note: SMOTEENN seems to perform best
End of explanation
"""
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
for i in par:
print("y", i)
y = Predictor()
y.load_data(file="Data/Training/clean_Y_filtered.csv")
y.process_data(vector_function="sequence", amino_acid="Y", imbalance_function=i, random_data=0)
y.supervised_training("xgb")
y.benchmark("Data/Benchmarks/phos.csv", "Y")
del y
print("x", i)
x = Predictor()
x.load_data(file="Data/Training/clean_Y_filtered.csv")
x.process_data(vector_function="sequence", amino_acid="Y", imbalance_function=i, random_data=1)
x.supervised_training("xgb")
x.benchmark("Data/Benchmarks/phos.csv", "Y")
del x
"""
Explanation: Y Phosphorylation
End of explanation
"""
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
for i in par:
print("y", i)
y = Predictor()
y.load_data(file="Data/Training/clean_t_filtered.csv")
y.process_data(vector_function="sequence", amino_acid="T", imbalance_function=i, random_data=0)
y.supervised_training("xgb")
y.benchmark("Data/Benchmarks/phos.csv", "T")
del y
print("x", i)
x = Predictor()
x.load_data(file="Data/Training/clean_t_filtered.csv")
x.process_data(vector_function="sequence", amino_acid="T", imbalance_function=i, random_data=1)
x.supervised_training("xgb")
x.benchmark("Data/Benchmarks/phos.csv", "T")
del x
"""
Explanation: T Phosphorylation
End of explanation
"""
|
ComputationalModeling/spring-2017-danielak
|
past-semesters/fall_2016/day-by-day/day20-monte-carlo-integration/MonteCarlo_Integration.ipynb
|
agpl-3.0
|
# Put your code here!
"""
Explanation: A New Hope (for integrating functions)
Names of group members
// put your names here!
Goals of this assignment
The main goal of this assignment is to use Monte Carlo integration (https://en.wikipedia.org/wiki/Monte_Carlo_integration) - a technique for numerical integration that uses random numbers to compute the value of a definite integral. Monte Carlo integration works well for one-dimensional functions, but is especially helpful for higher-dimensional integrals or complicated functions.
Part 1
Write a function that uses Monte Carlo integration to integrate $f(x) = 2 x^2 + 3$ from $x_{beg}= -2$ to $x_{end} = +4$. The analytic answer is:
$\int_{-2}^{4} (2x^2 + 3) dx = \left. \frac{2}{3}x^3 + 3x \right|_{-2}^4 = 66$
As you increase the number of samples ($N_{sample}$) from 10 to $10^6$, how does your calculated solution approach the true answer? In other words, calculate the fractional error defined as $\epsilon = |\frac{I - T}{T}|$, where I is the integrated answer, T is the true (i.e., analytic) answer, and the vertical bars denote that you take the absolute value. This gives you the fractional difference between your integrated answer and the true answer.
End of explanation
"""
# Put your code here!
"""
Explanation: Part 2
A torus that is radially symmetric about the z-axis (think of a donut pierced by the x-y plane) can be described by the equation:
$\large( R - \sqrt{x^2 + y^2} \large)^2 + z^2 = r^2$
where R is the distance from the center of the tube to the center of the torus, and r is the radius of the tube (with the 'tube' meaning the tasty baked part of the donut). Assuming that $R = 12$ cm, $r = 8$ cm, and $\rho_{donut} = 0.8$ g cm$^{-3}$, use Monte Carlo integration to calculate the mass of this excessively large donut. Note that for the situation described here, a point (x,y,z) is inside of the tasty cake part of the donut when:
$\large( R - \sqrt{x^2 + y^2} \large)^2 + z^2 < r^2$
(Try testing this relation in the x-y plane to see that it is true.) Assume that the donut is of uniform density and that the mass of the icing can be neglected. You can use the formulae shown in the Wikipedia page linked above to get the analytic answer. Run the test several times, both repeatedly with the same number of samples and with different numbers of samples. How many points do you have to use to get an answer that converges to within 1%? What about 0.1%?
Hint: does the box that encompasses the donut have to be a cube? I.e., when calculating this problem, what is the minimum practical bounding box that can be described simply and which fully encompasses the donut?
End of explanation
"""
from IPython.display import HTML
HTML(
"""
<iframe
src="https://goo.gl/forms/NOKKHPQ0oKn1B7e23?embedded=true"
width="80%"
height="1200px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
"""
)
"""
Explanation: Assignment wrapup
Please fill out the form that appears when you run the code below. You must completely fill this out in order to receive credit for the assignment!
End of explanation
"""
|
jmhsi/justin_tinker
|
data_science/lendingclub_bak/dataprep_and_modeling/0.2.1_investigate_min_score_to_use_for_selection.ipynb
|
apache-2.0
|
import modeling_utils.data_prep as data_prep
from sklearn.externals import joblib
import time
platform = 'lendingclub'
store = pd.HDFStore(
'/Users/justinhsi/justin_tinkering/data_science/lendingclub/{0}_store.h5'.
format(platform),
append=True)
"""
Explanation: If I plan to run the scorer every batch to select loans, I should have a minimum score that a loan must receive to even be considered for investment, and the remaining loans can then be selected in descending score order
End of explanation
"""
store.open()
train = store['train_filtered_columns']
test = store['test_filtered_columns']
loan_npv_rois = store['loan_npv_rois']
default_series = test['target_strict']
results = store['results']
store.close()
train_ids = set(train.index.values)
test_ids = set(test.index.values)
assert len(train_ids.intersection(test_ids)) == 0
"""
Explanation: Make sure no loan in test set was in train set
End of explanation
"""
train_X, train_y = data_prep.process_data_test(train)
test_X, test_y = data_prep.process_data_test(test)
train_y = train_y['npv_roi_10'].values
test_y = test_y['npv_roi_10'].values
regr = joblib.load('model_dump/model_0.2.1.pkl')
regr_version = '0.2.1'
train_yhat = regr.predict(train_X)
test_yhat = regr.predict(test_X)
test['0.2.1_scores'] = test_yhat
train['0.2.1_scores'] = train_yhat
test['npv_roi_5'] = loan_npv_rois[.05]
"""
Explanation: Add scores and npv_roi_5 to test set
End of explanation
"""
test['0.2.1_scores'].hist(bins=100)
train['0.2.1_scores'].hist(bins=100)
"""
Explanation: See what the range of predictions is, to tell if we predict outliers later
End of explanation
"""
good_percentiles = np.arange(71,101,1)
good_percentiles = good_percentiles[::-1]
def find_min_score_models(trials, available_loans, test, percentiles):
# looks at loans that scored in top 30%, computes avg npv_roi_5 in each of those
# top percentiles
results = {}
results_scores = {}
pct_default = {}
test_copy = test.copy()
for trial in tqdm_notebook(np.arange(trials)):
loan_ids = np.random.choice(
test_copy.index.values, available_loans, replace=False)
loans_to_pick_from = test_copy.loc[loan_ids, :]
loans_to_pick_from.sort_values('0.2.1_scores', ascending=False, inplace = True)
chunksize = int(len(loans_to_pick_from)/100)
results_dict = {}
results_scores_dict = {}
for k,perc in enumerate(percentiles):
subset = loans_to_pick_from[k*chunksize:(k+1)*chunksize]
results_dict[perc] = subset['npv_roi_5'].mean()
results_scores_dict[perc] = subset['0.2.1_scores'].mean()
results[trial] = pd.Series(results_dict)
results_scores[trial] = pd.Series(results_scores_dict)
return pd.DataFrame.from_dict(results).T, pd.DataFrame.from_dict(results_scores).T
# assume there's 200 loans per batch
trials = 20000
available_loans = 200
results, results_scores = find_min_score_models(trials, available_loans, test, good_percentiles)
summaries = results.describe()
summaries_scores = results_scores.describe()
plt.figure(figsize=(12,9))
plt.plot(summaries.columns.values, summaries.loc['mean',:], 'o', label='mean')
plt.plot(summaries.columns.values, summaries.loc['25%',:], 'ro', label='25%')
# plt.plot(summaries.columns.values, summaries.loc['50%',:], '-.')
plt.plot(summaries.columns.values, summaries.loc['75%',:], 'ko', label='75%')
plt.title('return per percentile over batches')
plt.legend(loc='best')
plt.xlabel('percentile of 0.2.1_score')
plt.ylabel('npv_roi_5')
plt.show()
plt.figure(figsize=(12,9))
plt.plot(summaries_scores.columns.values, summaries_scores.loc['mean',:], 'o', label='mean')
plt.plot(summaries_scores.columns.values, summaries_scores.loc['25%',:], 'ro', label='25%')
# plt.plot(summaries_scores.columns.values, summaries_scores.loc['50%',:], '-.')
plt.plot(summaries_scores.columns.values, summaries_scores.loc['75%',:], 'ko', label='75%')
plt.title('scores per percentile over batches')
plt.legend(loc='best')
plt.xlabel('percentile of 0.2.1_score')
plt.ylabel('npv_roi_5')
plt.show()
summaries
summaries_scores.loc['mean', 75]
# let's take a one-sided 99% confidence interval: score must be greater than (mean - 3 std dev) at the 90th percentile
cutoff = summaries_scores.loc['mean', 90] - 3*summaries_scores.loc['std', 90]
"""
Explanation: Find a good percentile to cut off at, and what the distribution of scores is at that percentile
End of explanation
"""
picks = test[test['0.2.1_scores'] >= cutoff]
# grade distribution of picks
picks['grade'].value_counts(dropna=False)/len(picks)
# compared to grade distribution of all test loans
test['grade'].value_counts(dropna=False)/len(test)
cutoff
"""
Explanation: Say I wanted the 75th percentile of the 80th score percentile (-0.36289): what is the grade distribution of those loans?
End of explanation
"""
|
AtmaMani/pyChakras
|
faas/sam-try/try-sam-ml/training.ipynb
|
mit
|
# Install required dependencies
! pip install -q torch==1.8.0 torchvision==0.9.0
# Torchvision provides an easy way to import MNIST dataset into DataLoaders
import torch
import torchvision
from torchvision.transforms import ToTensor
# mini-batch size when training and testing
mini_batch_size = 64
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./mnist_data/', train=True, download=True, transform=ToTensor()),
batch_size=mini_batch_size)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./mnist_data/', train=False, download=True, transform=ToTensor()),
batch_size=mini_batch_size)
"""
Explanation: Training Notebook
This notebook illustrates training of a simple model to classify digits using the MNIST dataset. This code is used to train the model included with the templates. This is meant to be a starter model to show you how to set up Serverless applications to do inferences. For deeper understanding of how to train a good model for MNIST, we recommend literature from the MNIST website. The dataset is made available under a Creative Commons Attribution-Share Alike 3.0 license.
End of explanation
"""
import torch.nn as nn
import torch.nn.functional as F
# Use a GPU if set up on this machine
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
# We'll start with building a model
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.convbn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 32, kernel_size=3)
self.convbn2 = nn.BatchNorm2d(32)
layer_size = 100
self.layer1 = nn.Linear(800, layer_size)
self.bn1 = nn.BatchNorm1d(layer_size)
self.layer2 = nn.Linear(layer_size, layer_size)
self.bn2 = nn.BatchNorm1d(layer_size)
self.layer3 = nn.Linear(layer_size, layer_size)
self.bn3 = nn.BatchNorm1d(layer_size)
self.layer4 = nn.Linear(layer_size, layer_size)
self.bn4 = nn.BatchNorm1d(layer_size)
self.layer5 = nn.Linear(layer_size, layer_size)
self.bn5 = nn.BatchNorm1d(layer_size)
self.smax = nn.Linear(layer_size, 10)
def forward(self, x):
x = self.convbn1(F.relu(F.max_pool2d(self.conv1(x), 2)))
x = F.dropout2d(x, training=self.training)
x = self.convbn2(F.relu(F.max_pool2d(self.conv2(x), 2)))
x = F.dropout2d(x, training=self.training)
x = x.view(-1, 800)
x = F.dropout(self.bn1(F.relu(self.layer1(x))), training=self.training)
x = F.dropout(self.bn2(F.relu(self.layer2(x))), training=self.training)
x = F.dropout(self.bn3(F.relu(self.layer3(x))), training=self.training)
x = F.dropout(self.bn4(F.relu(self.layer4(x))), training=self.training)
x = F.dropout(self.bn5(F.relu(self.layer5(x))), training=self.training)
return self.smax(x)
model = Model().to(device)
print(model)
# Define some hand tuned parameters
# (we already defined the batch size above)
epochs = 10
learning_rate = 10**-4
log_step = 200
# Define our loss function and optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Single training epoch loop
def train(train_loader, model, loss_fn, optimizer):
size = len(train_loader.dataset)
for batch, (X, y) in enumerate(train_loader):
X, y = X.to(device), y.to(device)
# Forward pass and compute loss
pred = model(X)
loss = loss_fn(pred, y)
# Backpropagate loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % log_step == 0:
loss, current = loss.item(), batch * len(X)
print(f'loss: {loss} [{current}/{size}]')
def test(test_loader, model):
size = len(test_loader.dataset)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in test_loader:
X, y = X.to(device), y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= size
correct /= size
print(f'Test accuracy: {100*correct}%, avg loss: {test_loss}')
# Driver loop to start training
for epoch_no in range(epochs):
print(f'\nEpoch {epoch_no}\n---------------------------------------------')
train(train_loader, model, loss_fn, optimizer)
test(test_loader, model)
print('Done!')
"""
Explanation: PyTorch Model Training
For this example, we will train a simple CNN classifier using PyTorch to classify the MNIST digits. We will then freeze the model in the TorchScript format. This is the same as the starter model file included with the SAM templates.
End of explanation
"""
# Convert to a TorchScript model optimized for running on CPU
scripted_model = torch.jit.script(model.cpu())
# Let's sanity check the models give same results using random input
model.eval()
scripted_model.eval()
for i in range(1000):
X = torch.randn(1, 1, 28, 28)
pt_ans = torch.argmax(model(X)).item()
ts_ans = torch.argmax(scripted_model(X)).item()
assert pt_ans == ts_ans
# Freeze the scripted model to include with the template
scripted_model.save('digit_classifier.pt')
"""
Explanation: We will save the model as a TorchScript file to export it for inferencing. Note that PyTorch offers more ways for saving models depending on your use case and execution environment.
End of explanation
"""
|
ProfessorKazarinoff/staticsite
|
content/code/sympy/sympy_solving_equations.ipynb
|
gpl-3.0
|
from sympy import symbols, nonlinsolve
"""
Explanation: Sympy is a Python package used for solving equations using symbolic math.
Let's solve the following problem with SymPy.
Given:
The densities of two different polymer samples, $\rho_1$ and $\rho_2$, are measured.
$$ \rho_1 = 1.408 \ g/cm^3 $$
$$ \rho_2 = 1.343 \ g/cm^3 $$
The percent crystallinity of the two samples ($\%c_1 $ and $\%c_2$) is known.
$$ \%c_1 = 74.3 \% $$
$$ \%c_2 = 31.2 \% $$
The percent crystallinity of a polymer sample is related to the density of 100% amorphous regions ($\rho_a$) and 100% crystalline regions ($\rho_c$) according to:
$$ \%crystallinity = \frac{ \rho_c(\rho_s - \rho_a) }{\rho_s(\rho_c - \rho_a) } \times 100 \% $$
Find:
Find the density of 100% amorphous regions ($\rho_a$) and the density of 100% crystalline regions ($\rho_c$) for this polymer.
Solution:
There are a couple functions we need from Sympy. We'll need the symbols function to create our symbolic math variables and we need the nonlinsolve function to solve a system of non-linear equations.
End of explanation
"""
pc, pa, p1, p2, c1, c2 = symbols('pc pa p1 p2 c1 c2')
"""
Explanation: We need to define six different symbols: $$\rho_c, \rho_a, \rho_1, \rho_2, c_1, c_2$$
End of explanation
"""
expr1 = ( (pc*(p1-pa) ) / (p1*(pc-pa)) - c1)
expr2 = ( (pc*(p2-pa) ) / (p2*(pc-pa)) - c2)
"""
Explanation: Next we'll create two expressions for our two equations. We can subtract the %crystallinity from both sides of the equation to set the equation to zero.
$$ \%crystallinity = \frac{ \rho_c(\rho_s - \rho_a) }{\rho_s(\rho_c - \rho_a) } \times 100 \% $$
$$ \frac{ \rho_c(\rho_s - \rho_a) }{\rho_s(\rho_c - \rho_a) } \times 100 \% - \%crystallinity = 0 $$
Sub in $\rho_s = \rho_1$ and $\rho_s = \rho_2$ to each of the expressions.
End of explanation
"""
expr1 = expr1.subs(p1, 1.408)
expr1 = expr1.subs(c1, 0.743)
expr1
"""
Explanation: Now we'll substitute the values of $\rho_1 = 1.408$ and $c_1 = 0.743$ into our first expression.
End of explanation
"""
expr2 = expr2.subs(p2, 1.343)
expr2 = expr2.subs(c2, 0.312)
expr2
"""
Explanation: Now we'll substitute the values of $\rho_2 = 1.343$ and $c_2 = 0.312$ into our second expression.
End of explanation
"""
nonlinsolve([expr1,expr2],[pa,pc])
"""
Explanation: To solve the two equations for the two unknowns $\rho_a$ and $\rho_c$, use SymPy's nonlinsolve() function. Pass in a list of the two expressions followed by a list of the two variables to solve for.
End of explanation
"""
sol = nonlinsolve([expr1,expr2],[pa,pc])
type(sol)
sol.args
sol.args[0]
sol.args[0][0]
pa = sol.args[0][0]
pc = sol.args[0][1]
print(f' Density of 100% amorphous polymer, pa = {round(pa,2)} g/cm3')
print(f' Density of 100% crystalline polymer, pc = {round(pc,2)} g/cm3')
"""
Explanation: We see that the value of $\rho_a = 1.29957$ and $\rho_c = 1.44984$.
The solution is a SymPy FiniteSet object. To pull the values of $\rho_a$ and $\rho_c$ out of the FiniteSet, use the syntax sol.args[0][<var num>].
End of explanation
"""
|
RTHMaK/RPGOne
|
scipy-2017-sklearn-master/notebooks/10 Case Study - Titanic Survival.ipynb
|
apache-2.0
|
from sklearn.datasets import load_iris
iris = load_iris()
print(iris.data.shape)
"""
Explanation: SciPy 2016 Scikit-learn Tutorial
Case Study - Titanic Survival
Feature Extraction
Here we will talk about an important piece of machine learning: the extraction of
quantitative features from data. By the end of this section you will
Know how features are extracted from real-world data.
See an example of extracting numerical features from textual data
In addition, we will go over several basic tools within scikit-learn which can be used to accomplish the above tasks.
What Are Features?
Numerical Features
Recall that data in scikit-learn is expected to be in two-dimensional arrays, of size
n_samples $\times$ n_features.
Previously, we looked at the iris dataset, which has 150 samples and 4 features
End of explanation
"""
measurements = [
{'city': 'Dubai', 'temperature': 33.},
{'city': 'London', 'temperature': 12.},
{'city': 'San Francisco', 'temperature': 18.},
]
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
vec
vec.fit_transform(measurements).toarray()
vec.get_feature_names()
"""
Explanation: These features are:
sepal length in cm
sepal width in cm
petal length in cm
petal width in cm
Numerical features such as these are pretty straightforward: each sample contains a list
of floating-point numbers corresponding to the features
Categorical Features
What if you have categorical features? For example, imagine there is data on the color of each
iris:
color in [red, blue, purple]
You might be tempted to assign numbers to these features, i.e. red=1, blue=2, purple=3
but in general this is a bad idea. Estimators tend to operate under the assumption that
numerical features lie on some continuous scale, so, for example, 1 and 2 are more alike
than 1 and 3, and this is often not the case for categorical features.
In fact, the example above is a subcategory of "categorical" features, namely, "nominal" features. Nominal features don't imply an order, whereas "ordinal" features are categorical features that do imply an order. An example of ordinal features would be T-shirt sizes, e.g., XL > L > M > S.
One work-around for parsing nominal features into a format that prevents the classification algorithm from asserting an order is the so-called one-hot encoding representation. Here, we give each category its own dimension.
The enriched iris feature set would hence be in this case:
sepal length in cm
sepal width in cm
petal length in cm
petal width in cm
color=purple (1.0 or 0.0)
color=blue (1.0 or 0.0)
color=red (1.0 or 0.0)
Note that using many of these categorical features may result in data which is better
represented as a sparse matrix, as we'll see with the text classification example
below.
Using the DictVectorizer to encode categorical features
When the source data is encoded as a list of dicts where the values are either string names for categories or numerical values, you can use the DictVectorizer class to compute the boolean expansion of the categorical features while leaving the numerical features unimpacted:
End of explanation
"""
import os
import pandas as pd
titanic = pd.read_csv(os.path.join('datasets', 'titanic3.csv'))
print(titanic.columns)
"""
Explanation: Derived Features
Another common feature type are derived features, where some pre-processing step is
applied to the data to generate features that are somehow more informative. Derived
features may be based on feature extraction and dimensionality reduction (such as PCA or manifold learning),
may be linear or nonlinear combinations of features (such as in polynomial regression),
or may be some more sophisticated transform of the features.
Combining Numerical and Categorical Features
As an example of how to work with both categorical and numerical data, we will perform survival prediction for the passengers of the HMS Titanic.
We will use a version of the Titanic dataset (titanic3.xls) from here. We converted the .xls to .csv for easier manipulation, but the data is otherwise unchanged.
We need to read in all the lines from the (titanic3.csv) file, set aside the keys from the first line, and find our labels (who survived or died) and data (attributes of that person). Let's look at the keys and some corresponding example lines.
End of explanation
"""
titanic.head()
"""
Explanation: Here is a broad description of the keys and what they mean:
pclass Passenger Class
(1 = 1st; 2 = 2nd; 3 = 3rd)
survival Survival
(0 = No; 1 = Yes)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare
cabin Cabin
embarked Port of Embarkation
(C = Cherbourg; Q = Queenstown; S = Southampton)
boat Lifeboat
body Body Identification Number
home.dest Home/Destination
In general, it looks like name, sex, cabin, embarked, boat, body, and home.dest may be candidates for categorical features, while the rest appear to be numerical features. We can also look at the first couple of rows in the dataset to get a better understanding:
End of explanation
"""
labels = titanic.survived.values
features = titanic[['pclass', 'sex', 'age', 'sibsp', 'parch', 'fare', 'embarked']]
features.head()
"""
Explanation: We clearly want to discard the "boat" and "body" columns for any classification into survived vs not survived as they already contain this information. The name is unique to each person (probably) and also non-informative. For a first try, we will use "pclass", "sibsp", "parch", "fare" and "embarked" as our features:
End of explanation
"""
pd.get_dummies(features).head()
"""
Explanation: The data now contains only useful features, but they are not in a format that the machine learning algorithms can understand. We need to transform the strings "male" and "female" into binary variables that indicate the gender, and similarly for "embarked".
We can do that using the pandas get_dummies function:
End of explanation
"""
features_dummies = pd.get_dummies(features, columns=['pclass', 'sex', 'embarked'])
features_dummies.head(n=16)
data = features_dummies.values
import numpy as np
np.isnan(data).any()
"""
Explanation: This transformation successfully encoded the string columns. However, one might argue that the class is also a categorical variable. We can explicitly list the columns to encode using the columns parameter, and include pclass:
End of explanation
"""
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
train_data, test_data, train_labels, test_labels = train_test_split(data, labels, random_state=0)
imp = Imputer()
imp.fit(train_data)
train_data_finite = imp.transform(train_data)
test_data_finite = imp.transform(test_data)
from sklearn.dummy import DummyClassifier
clf = DummyClassifier('most_frequent')
clf.fit(train_data_finite, train_labels)
print("Prediction accuracy: %f" % clf.score(test_data_finite, test_labels))
"""
Explanation: With all of the hard data loading work out of the way, evaluating a classifier on this data becomes straightforward. Setting up the simplest possible model, we want to see what baseline score we can achieve with DummyClassifier.
End of explanation
"""
# %load solutions/10_titanic.py
"""
Explanation: Exercise
Try executing the above classification, using LogisticRegression and RandomForestClassifier instead of DummyClassifier
Does selecting a different subset of features help?
End of explanation
"""
|
transcranial/keras-js
|
notebooks/layers/convolutional/Cropping2D.ipynb
|
mit
|
data_in_shape = (3, 5, 4)
L = Cropping2D(cropping=((1,1),(1, 1)), data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(250)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Cropping2D.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
"""
Explanation: Cropping2D
[convolutional.Cropping2D.0] cropping ((1,1),(1,1)) on 3x5x4 input, data_format='channels_last'
End of explanation
"""
data_in_shape = (3, 5, 4)
L = Cropping2D(cropping=((1,1),(1,1)), data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(251)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Cropping2D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
"""
Explanation: [convolutional.Cropping2D.1] cropping ((1,1),(1,1)) on 3x5x4 input, data_format='channels_first'
End of explanation
"""
data_in_shape = (8, 7, 6)
L = Cropping2D(cropping=((4,2),(3,1)), data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(252)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Cropping2D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
"""
Explanation: [convolutional.Cropping2D.2] cropping ((4,2),(3,1)) on 8x7x6 input, data_format='channels_last'
End of explanation
"""
data_in_shape = (8, 7, 6)
L = Cropping2D(cropping=((4,2),(3,1)), data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(253)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Cropping2D.3'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
"""
Explanation: [convolutional.Cropping2D.3] cropping ((4,2),(3,1)) on 8x7x6 input, data_format='channels_first'
End of explanation
"""
data_in_shape = (8, 7, 6)
L = Cropping2D(cropping=(2,3), data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(254)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Cropping2D.4'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
"""
Explanation: [convolutional.Cropping2D.4] cropping (2,3) on 8x7x6 input, data_format='channels_last'
End of explanation
"""
data_in_shape = (8, 7, 6)
L = Cropping2D(cropping=1, data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(255)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Cropping2D.5'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
"""
Explanation: [convolutional.Cropping2D.5] cropping 1 on 8x7x6 input, data_format='channels_last'
End of explanation
"""
import os
filename = '../../../test/data/layers/convolutional/Cropping2D.json'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
json.dump(DATA, f)
print(json.dumps(DATA))
"""
Explanation: export for Keras.js tests
End of explanation
"""
|
gfabieno/SeisCL
|
docs/notebooks/examples/2004_BP_velocity_model.ipynb
|
gpl-3.0
|
from urllib.request import urlretrieve
import gzip
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate as intp
from mpl_toolkits.axes_grid1 import make_axes_locatable
import math
from SeisCL import SeisCL
%matplotlib inline
from IPython.core.pylabtools import figsize
figsize(8, 5)
"""
Explanation: The 2004 BP velocity model
The 2004 BP velocity estimation benchmark model looks like this:
End of explanation
"""
models_url={
'vp':'http://s3.amazonaws.com/open.source.geoscience/open_data/bpvelanal2004/vel_z6.25m_x12.5m_exact.segy.gz',
'rho':'http://s3.amazonaws.com/open.source.geoscience/open_data/bpvelanal2004/density_z6.25m_x12.5m.segy.gz',
'Salt':'http://s3.amazonaws.com/open.source.geoscience/open_data/bpvelanal2004/vel_z6.25m_x12.5m_saltindex.segy.gz',
'water':'http://s3.amazonaws.com/open.source.geoscience/open_data/bpvelanal2004/vel_z6.25m_x12.5m_wbindex.segy.gz'}
models_gz={
'vp':'vel_z6.25m_x12.5m_exact.segy.gz',
'rho':'density_z6.25m_x12.5m.segy.gz',
'Salt':'vel_z6.25m_x12.5m_saltindex.segy.gz',
'water':'vel_z6.25m_x12.5m_wbindex.segy.gz'}
models_segy={
'vp':'vel_z6.25m_x12.5m_exact.segy',
'rho':'density_z6.25m_x12.5m.segy',
'Salt':'vel_z6.25m_x12.5m_saltindex.segy',
'water':'vel_z6.25m_x12.5m_wbindex.segy'}
for par in models_url:
if not os.path.isfile(models_segy[par]):
urlretrieve(models_url[par], models_gz[par])
with gzip.open(models_gz[par], 'rb') as infile:
with open(models_segy[par], 'wb') as outfile:
for line in infile:
outfile.write(line)
os.remove(models_gz[par])
"""
Explanation: Download the model
We can download the data directly from the web. In the following, we download the compressed model files and uncompress them to the SEGY format. Files are only downloaded if they are not already present.
End of explanation
"""
models={
'vp':None,
'rho':None,
'Salt':None,
'water':None}
for par in models:
models[par]=SeisCL.read_segy(None,models_segy[par])
models[par]=models[par][:,:1800]
gz, gx = np.mgrid[:models[par].shape[0], :models[par].shape[1]]
x=np.arange(0,models[par].shape[1],1)
z=np.arange(0,models[par].shape[0],1)
interpolator=intp.interp2d(x,z,models[par])
xi=np.arange(0,models[par].shape[1],1)
zi=np.arange(0,models[par].shape[0],2)
models[par]=interpolator(xi,zi)
"""
Explanation: We can now load the model into a numpy array. We only load the left part of the model here
End of explanation
"""
models['rho']*= 1000
#Fundamentals of seismic rock physics by Wang 2001
models['vs']= (models['vp'])/1.8
#For salts, we take values from Elastic properties of rock salt:
#Lab measurements and well log analysis in the Gulf of Mexico by Zong et al
#we take Vs/vp to be 2.92/4.75 (results at max pressure)
models['vs'][models['Salt']==0]=models['vp'][models['Salt']==0]/4.75*2.92;
models['vs'][models['water']==1]=0;
"""
Explanation: The BP model does not provide a shear wave velocity model, so we build one from the Vp model, using constant Vp/Vs ratios for sediments and salts
End of explanation
"""
b=np.argmax(models['water'][:,0]==0)
models['rho'][0:b,:]= models['rho'][0]
models['vp'][0:b,:]= models['vp'][0]
models['vs'][0:b,:]= models['vs'][0]
models['water'][0:b,:]= models['water'][0]
"""
Explanation: For demonstration purposes, it is easier to have a flat water bottom, so we modify the model a little bit here
End of explanation
"""
for par in ['vp','vs','rho']:
fig, ax = plt.subplots()
fig.suptitle(par, fontsize=20)
plt.xlabel('x (km)', fontsize=16)
plt.ylabel('Depth (km)', fontsize=14)
im = ax.imshow(models[par],
interpolation='bilinear',
extent=[0,models[par].shape[1]*0.0125,-models[par].shape[0]*0.0125,0])
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
plt.show()
"""
Explanation: Let's look at what the models look like:
End of explanation
"""
seis = SeisCL()
seis.N = models['vp'].shape
seis.ND = 2
seis.dh = 12.5
seis.dt = 6*seis.dh/(7*np.sqrt(2)*np.max(models['vp']))*0.8
seis.NT = int(15/seis.dt)
seis.freesurf = 1
seis.f0 = 1.5
seis.seisout = 2
seis.surface_acquisition_2d(ds=1000, dg=1, dsx=50)
seis.set_forward([0], models, withgrad=False)
seis.execute()
datafd = seis.read_data()
"""
Explanation: We can then compute one shot with SeisCL.
End of explanation
"""
p = datafd[0][:,::20]
xmin = np.min(seis.rec_pos_all[0, :])
xmax = np.max(seis.rec_pos_all[0, :])
clip=0.2;
vmin=np.min(p)*clip;
vmax=np.max(p)*clip;
fig, ax = plt.subplots()
im = ax.imshow(p,
interpolation='bilinear',
vmin=vmin,
vmax=vmax,
cmap=plt.get_cmap('Greys'),
aspect='auto',
origin='upper',
extent=[xmin,xmax, p.shape[0]*seis.dt*20,0]
)
fig.suptitle('Pressure', fontsize=20)
plt.xlabel('x (km)', fontsize=16)
plt.ylabel('Time (ms)', fontsize=14)
plt.show()
"""
Explanation: Let's finally plot the modelled shot.
End of explanation
"""
|
dacostaortiz/Modelado-Matematico
|
Homework01/01 - First approach.ipynb
|
mit
|
import os
import numpy as np
path = "/data/"
def read_dir(path, ext):
l = []
for f in os.listdir(os.getcwd()+path):
if f.endswith(ext):
r = open(os.getcwd()+path+f).read()
r = np.array(r[:-1].split())
l.append({f:r})
return l
"""
Explanation: Chapter 1 - Modelling & programming environments
A mathematical model is a description of a system which uses abstractions and mathematical language. The development of one of these is known as mathematical modelling.
Mathematical models are usually made of relations and variables; such relations can be described by operators (algebraic and/or differential), functions, etc. The variables are abstractions of the studied system's parameters that can be quantified.
Homework 01
First approach - Deal with data and model implementations
Nowadays, in almost every field of human knowledge it is necessary to deal with data. Using a programming language becomes an essential tool when we want to automate the implementation of our models.
1) Implement a function to read data from a directory, taking into account the file extension.
End of explanation
"""
input_data = read_dir(path,'.in')
ans_data = read_dir(path,'.ans')
"""
Explanation: 2) Use the function to load the data into memory
End of explanation
"""
def price(l):
p = int(l[0])
a = int(l[1])
b = int(l[2])
c = int(l[3])
d = int(l[4])
n = int(l[5])
P = []
for k in range(int(n)):
P.append(p*(np.sin(a*(k+1)+b)+np.cos(c*(k+1)+d)+2))
return P
def max_decline(prices):
max_dif = 0.0
dif = max(prices)-min(prices)
if dif > 0.0:
max_dif = dif
return max_dif
def max_dec_list(data):
declines = []
for d in data:
key = d.keys()[0]
value = d.values()[0]
stock_prices = price(value)
declines.append({key:[max_decline(stock_prices)]})
return declines
comp_data = max_dec_list(input_data)
"""
Explanation: 3) Create a set of functions that implement a model to process our data.
End of explanation
"""
for l1 in ans_data:
for l2 in comp_data:
if l1.keys()[0][0:2] == l2.keys()[0][0:2]:
print l1.keys()[0], l2.keys()[0]
print 'error in file', l1.keys()[0][0:2], 'is ', '%.6f' % abs(float(l1.get(l1.keys()[0])[0])- float(l2.get(l2.keys()[0])[0]))
"""
Explanation: 4) Compute the error between the computed data and the theoretical values.
End of explanation
"""
|
keras-team/autokeras
|
docs/ipynb/timeseries_forecaster.ipynb
|
apache-2.0
|
dataset = tf.keras.utils.get_file(
fname="AirQualityUCI.csv",
origin="https://archive.ics.uci.edu/ml/machine-learning-databases/00360/"
"AirQualityUCI.zip",
extract=True,
)
dataset = pd.read_csv(dataset, sep=";")
dataset = dataset[dataset.columns[:-2]]
dataset = dataset.dropna()
dataset = dataset.replace(",", ".", regex=True)
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]
data_x = data_train[
[
"CO(GT)",
"PT08.S1(CO)",
"NMHC(GT)",
"C6H6(GT)",
"PT08.S2(NMHC)",
"NOx(GT)",
"PT08.S3(NOx)",
"NO2(GT)",
"PT08.S4(NO2)",
"PT08.S5(O3)",
"T",
"RH",
]
].astype("float64")
data_x_val = validation_data[
[
"CO(GT)",
"PT08.S1(CO)",
"NMHC(GT)",
"C6H6(GT)",
"PT08.S2(NMHC)",
"NOx(GT)",
"PT08.S3(NOx)",
"NO2(GT)",
"PT08.S4(NO2)",
"PT08.S5(O3)",
"T",
"RH",
]
].astype("float64")
# Data with train data and the unseen data from subsequent time steps.
data_x_test = dataset[
[
"CO(GT)",
"PT08.S1(CO)",
"NMHC(GT)",
"C6H6(GT)",
"PT08.S2(NMHC)",
"NOx(GT)",
"PT08.S3(NOx)",
"NO2(GT)",
"PT08.S4(NO2)",
"PT08.S5(O3)",
"T",
"RH",
]
].astype("float64")
data_y = data_train["AH"].astype("float64")
data_y_val = validation_data["AH"].astype("float64")
print(data_x.shape) # (6549, 12)
print(data_y.shape) # (6549,)
"""
Explanation: To make this tutorial easy to follow, we use the UCI Airquality dataset, and try to
forecast the AH value at the different timesteps. Some basic preprocessing has also
been performed on the dataset as it required cleanup.
A Simple Example
The first step is to prepare your data. Here we use the UCI Airquality
dataset as an example.
End of explanation
"""
predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(
lookback=lookback,
predict_from=predict_from,
predict_until=predict_until,
max_trials=1,
objective="val_loss",
)
# Train the TimeSeriesForecaster with train data
clf.fit(
x=data_x,
y=data_y,
validation_data=(data_x_val, data_y_val),
batch_size=32,
epochs=10,
)
# Predict with the best model(includes original training data).
predictions = clf.predict(data_x_test)
print(predictions.shape)
# Evaluate the best model with testing data.
print(clf.evaluate(data_x_val, data_y_val))
"""
Explanation: The second step is to run the TimeSeriesForecaster.
As a quick demo, we set epochs to 10.
You can also leave the epochs unspecified for an adaptive number of epochs.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/mri/cmip6/models/mri-esm2-0/seaice.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'mri-esm2-0', 'seaice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Seaice
MIP Era: CMIP6
Institute: MRI
Source ID: MRI-ESM2-0
Topic: Seaice
Sub-Topics: Dynamics, Thermodynamics, Radiative Processes.
Properties: 80 (63 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:19
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of sea ice model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Variables
List of prognostic variable in the sea ice model.
2.1. Prognostic
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the sea ice component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Ocean Freezing Point Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant seawater freezing point, specify this value.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process-oriented metrics, and the possible conflicts with parameterization-level tuning. In particular, describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Target
Is Required: TRUE Type: STRING Cardinality: 1.1
What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Simulations
Is Required: TRUE Type: STRING Cardinality: 1.1
Which simulations had tuning applied, e.g. all, not historical, only pi-control?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Metrics Used
Is Required: TRUE Type: STRING Cardinality: 1.1
List any observed metrics used in tuning model/parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.5. Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Which variables were changed during the tuning process?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required: FALSE Type: ENUM Cardinality: 0.N
What values were specified for the following parameters if used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Additional Parameters
Is Required: FALSE Type: STRING Cardinality: 0.N
If you have any additional parameterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma-separated list
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.N
General overview description of any key assumptions made in this model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. On Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Missing Processes
Is Required: TRUE Type: STRING Cardinality: 1.N
List any key processes missing in this model configuration. Provide full details where this affects the CMIP6 diagnostic sea ice variables.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Provide a general description of conservation methodology.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Properties
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in sea ice by the numerical schemes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
For each conserved property, specify the output variables which close the related budgets, as a comma-separated list. For example: conserved property, variable1, variable2, variable3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.4. Was Flux Correction Used
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does conservation involve flux correction?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Corrected Conserved Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List any variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required: TRUE Type: ENUM Cardinality: 1.1
Grid on which sea ice is horizontally discretised?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the type of sea ice grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the advection scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Thermodynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model thermodynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.5. Dynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model dynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional horizontal discretisation details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required: TRUE Type: ENUM Cardinality: 1.N
What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.2. Number Of Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using multi-layers specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional vertical grid details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories ?
11.1. Has Mulitple Categories
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Set to true if the sea ice model has multiple sea ice categories.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Number Of Categories
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using sea ice categories specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Category Limits
Is Required: TRUE Type: STRING Cardinality: 1.1
If using sea ice categories specify each of the category limits.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Ice Thickness Distribution Scheme
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the sea ice thickness distribution scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Other
Is Required: FALSE Type: STRING Cardinality: 0.1
If the sea ice model does not use sea ice categories, specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD, but assume a distribution and compute fluxes accordingly.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow on ice represented in this model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12.2. Number Of Snow Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels of snow on ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Snow Fraction
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how the snow fraction on sea ice is determined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.4. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional details related to snow on ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of horizontal advection of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Transport In Thickness Space
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice transport in thickness space (i.e. in thickness categories)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Ice Strength Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Which method of sea ice strength formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Redistribution
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which processes can redistribute sea ice (including thickness)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Rheology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Rheology, what is the ice deformation formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the energy formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Thermal Conductivity
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of thermal conductivity is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.3. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of heat diffusion?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.4. Basal Heat Flux
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method by which basal ocean heat flux is handled?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.5. Fixed Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.6. Heat Content Of Precipitation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which the heat content of precipitation is handled.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.7. Precipitation Effects On Salinity
Is Required: FALSE Type: STRING Cardinality: 0.1
If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which new sea ice is formed in open water.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Ice Vertical Growth And Melt
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs the vertical growth and melt of sea ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Ice Lateral Melting
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice lateral melting?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.4. Ice Surface Sublimation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs sea ice surface sublimation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.5. Frazil Ice
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of frazil ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16.2. Sea Ice Salinity Thermal Impacts
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does sea ice salinity impact the thermal properties of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the mass transport of salt calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value, specify this value in PSU.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the thermodynamic calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value, specify this value in PSU.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice thickness distribution represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice floe-size represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Please provide further details on any parameterisation of floe-size.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are melt ponds included in the sea ice model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.2. Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What method of melt pond formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.3. Impacts
Is Required: TRUE Type: ENUM Cardinality: 1.N
What do melt ponds have an impact on?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has a snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Snow Aging Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.3. Has Snow Ice Formation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has snow ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.4. Snow Ice Formation Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow ice formation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.5. Redistribution
Is Required: TRUE Type: STRING Cardinality: 1.1
What is the impact of ridging on snow cover?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.6. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the heat diffusion through snow methodology in sea ice thermodynamics?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used to handle surface albedo.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Ice Radiation Transmission
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method by which solar radiation through sea ice is handled.
End of explanation
"""
|
jkeung/yellowbrick
|
examples/rank2d.ipynb
|
apache-2.0
|
# Imports
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error as mse
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# Data Loading
columns = OrderedDict([
("DAY", "the day of data collection"),
("Q-E", "input flow to plant"),
("ZN-E", "input Zinc to plant"),
("PH-E", "input pH to plant"),
("DBO-E", "input Biological demand of oxygen to plant"),
("DQO-E", "input chemical demand of oxygen to plant"),
("SS-E", "input suspended solids to plant"),
("SSV-E", "input volatile supended solids to plant"),
("SED-E", "input sediments to plant"),
("COND-E", "input conductivity to plant"),
("PH-P", "input pH to primary settler"),
("DBO-P", "input Biological demand of oxygen to primary settler"),
("SS-P", "input suspended solids to primary settler"),
("SSV-P", "input volatile supended solids to primary settler"),
("SED-P", "input sediments to primary settler"),
("COND-P", "input conductivity to primary settler"),
("PH-D", "input pH to secondary settler"),
("DBO-D", "input Biological demand of oxygen to secondary settler"),
("DQO-D", "input chemical demand of oxygen to secondary settler"),
("SS-D", "input suspended solids to secondary settler"),
("SSV-D", "input volatile supended solids to secondary settler"),
("SED-D", "input sediments to secondary settler"),
("COND-S", "input conductivity to secondary settler"),
("PH-S", "output pH"),
("DBO-S", "output Biological demand of oxygen"),
("DQO-S", "output chemical demand of oxygen"),
("SS-S", "output suspended solids"),
("SSV-S", "output volatile supended solids"),
("SED-S", "output sediments"),
("COND-", "output conductivity"),
("RD-DB-P", "performance input Biological demand of oxygen in primary settler"),
("RD-SSP", "performance input suspended solids to primary settler"),
("RD-SE-P", "performance input sediments to primary settler"),
("RD-DB-S", "performance input Biological demand of oxygen to secondary settler"),
("RD-DQ-S", "performance input chemical demand of oxygen to secondary settler"),
("RD-DB-G", "global performance input Biological demand of oxygen"),
("RD-DQ-G", "global performance input chemical demand of oxygen"),
("RD-SSG", "global performance input suspended solids"),
("RD-SED-G", "global performance input sediments"),
])
data = pd.read_csv("data/water-treatment.data", names=columns.keys())
data = data.replace('?', np.nan)
# Capture only the numeric columns in the data set.
numeric_columns = list(columns.keys())
numeric_columns.remove("DAY")
data = data[numeric_columns].apply(pd.to_numeric)
"""
Explanation: Sliders Example
This is an example of an interactive IPython notebook that uses widgets to interact meaningfully with a visualization.
End of explanation
"""
def apply_column_pairs(func):
"""
Applies a function to a pair of columns and returns a new
dataframe that contains the result of the function as a matrix
of each pair of columns.
"""
def inner(df):
cols = pd.DataFrame([
[
func(df[acol], df[bcol]) for bcol in df.columns
] for acol in df.columns
])
cols.columns = df.columns
cols.index = df.columns
return cols
return inner
@apply_column_pairs
def least_square_error(cola, colb):
"""
    Computes the mean squared error of a linear regression
    between two columns of data.
"""
x = cola.fillna(np.nanmean(cola))
y = colb.fillna(np.nanmean(colb))
m, b = np.polyfit(x, y, 1)
yh = (x * m) + b
return ((y-yh) ** 2).mean()
labeled_metrics = {
'Pearson': 'pearson',
'Kendall Tao': 'kendall',
'Spearman': 'spearman',
'Pairwise Covariance': 'covariance',
'Least Squares Error': 'lse',
}
@interact(metric=labeled_metrics, data=fixed(data))
def rank2d(data, metric='pearson'):
"""
Creates a visualization of pairwise ranking by column in the data.
"""
# The different rank by 2d metrics.
metrics = {
"pearson": lambda df: df.corr('pearson'),
"kendall": lambda df: df.corr('kendall'),
"spearman": lambda df: df.corr('spearman'),
"covariance": lambda df: df.cov(),
"lse": least_square_error,
}
# Quick check to make sure a valid metric is passed in.
if metric not in metrics:
raise ValueError(
"'{}' not a valid metric, specify one of {}".format(
metric, ", ".join(metrics.keys())
)
)
# Compute the correlation matrix
corr = metrics[metric](data)
# Generate a mask for the upper triangle
    mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
ax.set_title("{} metric across {} features".format(metric.title(), len(data.columns)))
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
"""
Explanation: 2D Rank Features
End of explanation
"""
|
sujitpal/intro-dl-talk-code
|
src/06-redrum-mt-lstm.ipynb
|
unlicense
|
from __future__ import division, print_function
from keras.layers.core import Activation, Dense, RepeatVector
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import TimeDistributed
from keras.models import Sequential
from sklearn.cross_validation import train_test_split
import nltk
import numpy as np
"""
Explanation: Machine Translation with LSTM (seq2seq)
This problem is from the last assignment of the Udacity Deep Learning course. The idea is to build a sequence to sequence model using LSTMs that will convert sequences of words of the form:
the quick brown fox
to this form:
eht kciuq nworb xof
i.e., the characters of each word are reversed. This is a similar (although much simplified) scenario to machine translation where input words are in one language and output words are in another. However, the creation of training data has been simplified with this approach.
One caveat with this approach is that we cannot make it a word-based seq2seq model, since there is a 1-to-1 correspondence between the two "languages". Instead, we will create a character-based seq2seq model so the model cannot simply rely on that word-level regularity.
The file name for the notebook is a reference to The Shining in case you were wondering.
Setup Imports
End of explanation
"""
char_vocab = set(" ")
sentences = []
fin = open("../data/alice_in_wonderland.txt", "rb")
for line in fin:
line = line.strip()
if len(line) == 0:
continue
for sentence in nltk.sent_tokenize(line):
words = []
for word in nltk.word_tokenize(sentence):
word = word.lower()
words.append(word)
for c in word:
char_vocab.add(c)
sentences.append(words)
fin.close()
vocab_size = len(char_vocab)
print("vocab size: %d" % (vocab_size))
"""
Explanation: Extract Text from file
We extract the list of words for use later. We also capture the vocabulary as we read it.
End of explanation
"""
def reverse_words(words):
reversed_words = []
for w in words:
reversed_words.append("".join(reversed([c for c in w])))
return reversed_words
nb_words_in_seq = 4
input_texts = []
output_texts = []
for sentence in sentences:
ngrams = nltk.ngrams(sentence, nb_words_in_seq)
for ngram in ngrams:
input_texts.append(" ".join(ngram))
output_texts.append(" ".join(reverse_words(ngram)))
maxlen = max([len(x) for x in input_texts])
print("maximum length of sequence: %d chars" % (maxlen))
"""
Explanation: Create text sequences
Our input sequences are 4 words long. Here we construct the input and output training sequences from the text, and compute the maximum size of the sequence in characters.
End of explanation
"""
char2idx = dict((c, i) for i, c in enumerate(char_vocab))
idx2char = {v:k for k, v in char2idx.items()}
"""
Explanation: Create Lookup tables
As mentioned earlier, we are going to build a character based seq2seq model. We use the vocabulary generated earlier to construct lookup tables for each character.
End of explanation
"""
X = np.zeros((len(input_texts), maxlen, vocab_size), dtype=np.bool)
Y = np.zeros((len(output_texts), maxlen, vocab_size), dtype=np.bool)
for i, input_text in enumerate(input_texts):
input_text = input_text.ljust(maxlen)
for j, ch in enumerate([c for c in input_text]):
X[i, j, char2idx[ch]] = 1
for i, output_text in enumerate(output_texts):
output_text = output_text.ljust(maxlen)
for j, ch in enumerate([c for c in output_text]):
Y[i, j, char2idx[ch]] = 1
"""
Explanation: Vectorize sequences
End of explanation
"""
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.3, random_state=0)
print(Xtrain.shape, Xtest.shape, Ytrain.shape, Ytest.shape)
"""
Explanation: Split data into training and test
End of explanation
"""
model = Sequential()
model.add(LSTM(512, input_shape=(maxlen, vocab_size), return_sequences=False))
model.add(RepeatVector(maxlen))
model.add(LSTM(512, return_sequences=True))
model.add(TimeDistributed(Dense(vocab_size)))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
"""
Explanation: Build Model
End of explanation
"""
def decode_text(probas):
text_seq = []
for i in range(probas.shape[0]):
idx = np.argmax(probas[i])
text_seq.append(idx2char[idx])
return "".join(text_seq).strip()
def cosine_sim(y_test, y_pred):
ytest_flat = np.ravel(y_test)
ypred_flat = np.ravel(y_pred)
cosim = np.dot(ytest_flat, ypred_flat) / (np.linalg.norm(ytest_flat, 2) *
np.linalg.norm(ypred_flat, 2))
return cosim
for iteration in range(51):
print("=" * 50)
print("Iteration-#: %d" % (iteration))
model.fit(Xtrain, Ytrain, batch_size=128, nb_epoch=1,
verbose=0, validation_data=(Xtest, Ytest))
avg_cosim = 0
for i in range(10):
test_idx = np.random.randint(Xtest.shape[0])
x_test = np.array([Xtest[test_idx, :, :]])
y_test = np.array([Ytest[test_idx, :, :]])
y_pred = model.predict([x_test], verbose=0)
cosim = cosine_sim(y_test, y_pred)
xtest_text = decode_text(x_test[0])
ytest_text = decode_text(y_test[0])
ypred_text = decode_text(y_pred[0])
print("input: [%s], expected: [%s], got: [%s], similarity: %.3f" %
(xtest_text, ytest_text, ypred_text, cosim))
avg_cosim += cosim
avg_cosim /= 10
print("Average cosine similarity between label and prediction: %.3f" % (avg_cosim))
"""
Explanation: Evaluate Model
End of explanation
"""
|
materialsvirtuallab/ceng114
|
lectures/Lecture 12 - Statistics.ipynb
|
bsd-2-clause
|
from __future__ import division
import matplotlib.pyplot as plt
import matplotlib as mpl
import palettable
import numpy as np
import math
import seaborn as sns
from collections import defaultdict
%matplotlib inline
# Here, we customize the various matplotlib parameters for font sizes and define a color scheme.
# As mentioned in the lecture, the typical defaults in most software are not optimal from a
# data presentation point of view. You need to work hard at selecting these parameters to ensure
# an effective data presentation.
colors = palettable.colorbrewer.qualitative.Set1_4.mpl_colors
mpl.rcParams['lines.linewidth'] = 2
mpl.rcParams['lines.color'] = 'r'
mpl.rcParams['axes.titlesize'] = 32
mpl.rcParams['axes.labelsize'] = 30
mpl.rcParams['axes.labelsize'] = 30
mpl.rcParams['xtick.labelsize'] = 24
mpl.rcParams['ytick.labelsize'] = 24
data = """105 221 183 186 121 181 180 143
97 154 153 174 120 168 167 141
245 228 174 199 181 158 176 110
163 131 154 115 160 208 158 133
207 180 190 193 194 133 156 123
134 178 76 167 184 135 229 146
218 157 101 171 165 172 158 169
199 151 142 163 145 171 148 158
160 175 149 87 160 237 150 135
196 201 200 176 150 170 118 149"""
data = [[int(x) for x in d.split()] for d in data.split("\n")]
d = np.array(data).flatten()
min_val = d.min()
max_val = d.max()
start_val = math.floor(min_val / 10) * 10
mean = np.average(d)
median = np.median(d)
print("Min value = %d, Max value = %d." % (min_val, max_val))
print("Mean = %.1f" % mean)
print("Median = %.1f" % median)
print("Standard deviation = %.1f" % np.sqrt(np.var(d)))
freq, bins = np.histogram(d, bins=np.arange(70, 260, 20))
plt.figure(figsize=(12,8))
bins = np.arange(70, 260, 20)
plt.hist(np.array(d), bins=bins)
plt.xticks(bins + 10, ["%d-%d" % (bins[i], bins[i+1]) for i in range(len(bins) - 1)], rotation=-45)
ylabel = plt.ylabel("f")
xlabel = plt.xlabel("Compressive strength (psi)")
"""
Explanation: Introduction
This notebook generates the various data representations in lecture 12. It is easy to generalize this to other applications.
End of explanation
"""
def generate_stem_and_leaf(data):
stem_and_leaf = defaultdict(list)
for i in data:
k = int(math.floor(i / 10))
v = int(i % 10)
stem_and_leaf[k].append(v)
for k in sorted(stem_and_leaf.keys()):
print("%02d | %s" % (k, " ".join(["%d" % i for i in stem_and_leaf[k]])))
generate_stem_and_leaf(d)
"""
Explanation: Stem and leaf
The code below shows how to generate a stem and leaf display.
End of explanation
"""
plt.figure(figsize=(12,8))
ax = sns.boxplot(y=d, color="c")
ax = sns.swarmplot(y=d, color=".25") # We add the swarm plot as well to show all data points.
ylabel = plt.ylabel("Compressive Strength (psi)")
"""
Explanation: Boxplot
End of explanation
"""
|
testedminds/sand
|
docs/Loading network data.ipynb
|
apache-2.0
|
import sand
"""
Explanation: Loading network data
CSV -> List of Dictionaries -> igraph
sand's underlying graph implementation is igraph. igraph offers several ways to load data, but sand provides a few convenience functions that simplify the workflow:
End of explanation
"""
edgelist_file = './data/lein-topology-57af741.csv'
edgelist_data = sand.csv_to_dicts(edgelist_file,header=['source', 'target', 'weight'])
edgelist_data[:5]
"""
Explanation: Read network data from csv with csv_to_dicts
csv_to_dicts reads a CSV into a list of Python dictionaries. Each column in the CSV becomes a corresponding key in each dictionary.
Let's load a CSV with function dependencies in a Clojure library from lein-topology into a list of Dictionaries:
End of explanation
"""
functions = sand.from_edges(edgelist_data)
functions.summary()
"""
Explanation: Use from_edges with an adjacency list consisting of two vertex names and an edge weight
End of explanation
"""
people_file = './data/people.csv'
%%writefile $people_file
uuid,name,cohort
6aacd73c-0be5-412d-95a3-ca54149c9952,Mark Taylor,Day 1 - Period 6
5205741f-3ea9-4c30-9c50-4bab229a51ce,Aidin Aslani,Day 1 - Period 6
14a36491-5a3d-42c9-b012-6a53654d9bac,Charlie Brown,Day 1 - Period 2
9dc7633a-e493-4ec0-a252-8616f2148705,Armin Norton,Day 1 - Period 2
review_file = './data/reviews.csv'
%%writefile $review_file
reviewer_uuid,student_uuid,feedback,date,weight
6aacd73c-0be5-412d-95a3-ca54149c9952,14a36491-5a3d-42c9-b012-6a53654d9bac,Awesome work!,2015-02-12,1
5205741f-3ea9-4c30-9c50-4bab229a51ce,9dc7633a-e493-4ec0-a252-8616f2148705,WOW!,2014-02-12,1
"""
Explanation: ... or use from_vertices_and_edges with two lists of dictionaries
A richer network model includes attributes on the vertex and edge collections, including unique identifiers for each vertex.
We can use Jupyter's cell magic to generate some sample data. Here we'll represent a network of students reviewing one another's work. Students (vertices) will be in people.csv and reviews (edges) will be in reviews.csv:
End of explanation
"""
people_data = sand.csv_to_dicts(people_file)
people_data
review_data = sand.csv_to_dicts(review_file)
review_data
reviews = sand.from_vertices_and_edges(
vertices=people_data,
edges=review_data,
vertex_name_key='name',
vertex_id_key='uuid',
edge_foreign_keys=('reviewer_uuid', 'student_uuid'))
reviews.summary()
"""
Explanation: We again load this data into Lists of Dictionaries with csv_to_dicts:
End of explanation
"""
reviews.vs['indegree']
reviews.vs['outdegree']
reviews.vs['label']
reviews.vs['name']
"""
Explanation: Several vertex attributes are automatically computed when the graph is loaded:
End of explanation
"""
reviews.vs['group']
"""
Explanation: Groups
Groups represent modules or communities in the network. Groups are based on the labels by default.
End of explanation
"""
len(set(functions.vs['group']))
len(functions.vs)
"""
Explanation: The vertices in the lein topology data set contain fully-qualified namespaces for functions. Grouping by name isn't particularly useful here:
End of explanation
"""
functions.vs['group'] = sand.fqn_to_groups(functions.vs['label'])
len(set(functions.vs['group']))
"""
Explanation: Because sand was built specifically for analyzing software and system networks, an fqn_to_groups grouping function is built in:
End of explanation
"""
|
rachellevanger/tda-persistence-explorer
|
doc/superlevel_filtration_stitch_with_rips.ipynb
|
mit
|
import PersistenceExplorer as PE
import os
from scipy import misc
from skimage import morphology as morph
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
"""
Explanation: Superlevel set filtration with stitching to Vietoris-Rips-type filtration
This notebook takes in an image and generates a collection of output images, one for each threshold value of the original filtration. Each output image is the original image down to some threshold 0 <= k <= 255, and then below that, concentric rings of values less than k act as balls that grow outward from the superlevel set at threshold k.
End of explanation
"""
idx = 340
sbmp = '../data/granular_media/%06d.bmp' % idx
stmp = '../data/tmp/'
# Read in the image, take its negative, and dilate to 'blur' the force chains.
bmp = misc.imread(sbmp)
bmp = 255 - bmp
bmp = morph.dilation(bmp, morph.disk(2.5))*(255./np.max(bmp))
bmp = bmp.astype(np.int)
plt.imshow(bmp);
plt.colorbar();
plt.show()
print np.max(bmp)
os.mkdir(stmp)
os.mkdir(stmp + 'bmp')
misc.imsave(stmp + ('bmp/%06d_0.bmp' % idx), bmp)
"""
Explanation: Set up test directory with an initial image to process
End of explanation
"""
os.mkdir(stmp + 'pd_sup')
PE.ProcessImageListWithPHAT([stmp + ('bmp/%06d_0.bmp' % idx)], [stmp + ('pd_sup/%06d_0.csv' % idx)], 'super')
"""
Explanation: Process the superlevel set persistence diagram for this image
End of explanation
"""
imagefiles = [stmp + ('bmp/%06d_0.bmp' % idx)]
pdfiles = [stmp + ('pd_sup/%06d_0.csv' % idx)]
frames = [0]
imagesize = [512, 528]
max_image_display_size = 400
persistence_diagram_display_size = 400
dimension_of_interest = 1
PE.PersistenceExplorer(imagefiles, pdfiles, frames, dimension_of_interest,
imagesize, max_image_display_size, persistence_diagram_display_size)
"""
Explanation: Display original image and superlevel set persistence diagram.
End of explanation
"""
# Compute filtration step range by taking min/max of birth times of H1 generators
pd_sup = pd.read_csv(stmp + ('pd_sup/%06d_0.csv' % idx))
min_birth = min(pd_sup.loc[pd_sup['dim']==1]['birth'])
max_birth = max(pd_sup.loc[pd_sup['dim']==1]['birth'])
num_steps = 20 # Number of steps from first to last birth value (number of images in time series)
height_skip = 2 # Value of each level in Vietoris-Rips-type filtration
radius = 3 # Radius of expansion for Vietoris-Rips-type filtration
for step in range(0,num_steps):
# Compute the initial threshold for this step
threshold = max_birth - int(((max_birth - min_birth)/float(num_steps-1))*float(step))
# Initialize the output image
bmp_out = bmp.copy()
bmp_out[bmp < threshold] = 0
# Initialize union of prior binary matrices
sum_of_binaries = np.zeros(bmp.shape)
# Initialize initial superlevel set binary image at threshold
dilated_superlevel = {}
dilated_superlevel[-1] = (bmp >= threshold).astype(np.int)
# Starting from (threshold-height_skip) going down by height_skip to zero
rge_thresholds = range(threshold - height_skip, 0, -1*height_skip)
for k in range(len(rge_thresholds)):
# Inflate previous binary matrix minus sum of prior by radius
dilated_superlevel[k] = np.multiply(morph.dilation(dilated_superlevel[k - 1], morph.disk(radius)), 1-sum_of_binaries)
dilated_superlevel[k] = np.multiply(dilated_superlevel[k], 1-dilated_superlevel[k - 1])
# Add new height value to bmp_out for this new inflated set
bmp_out = bmp_out + dilated_superlevel[k]*rge_thresholds[k]
# Add current binary matrix to prior sum
sum_of_binaries = (sum_of_binaries.astype(np.bool) | dilated_superlevel[k - 1].astype(np.bool)).astype(np.int)
misc.imsave(stmp + ('bmp/%06d_%d.bmp' % (idx, step+1)), bmp_out)
"""
Explanation: Generate Vietoris-Rips-like variants of the original image
End of explanation
"""
image_list = [stmp + ('bmp/%06d_%d.bmp' % (idx, step+1)) for step in range(0,num_steps)]
pd_list = [stmp + ('pd_sup/%06d_%d.csv' % (idx, step+1)) for step in range(0,num_steps)]
PE.ProcessImageListWithPHAT(image_list, pd_list, 'super')
frames = range(0,num_steps)
imagesize = [512, 528]
max_image_display_size = 400
persistence_diagram_display_size = 400
dimension_of_interest = 1
PE.PersistenceExplorer(image_list, pd_list, frames, dimension_of_interest,
imagesize, max_image_display_size, persistence_diagram_display_size)
"""
Explanation: Generate their persistence diagrams and explore them!
End of explanation
"""
|
machine-learning-colombia/examples
|
notebooks/deep-learning-udacity/1_notmnist.ipynb
|
mit
|
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# Config the matplotlib backend as plotting inline in IPython
%matplotlib inline
"""
Explanation: Deep Learning
Assignment 1
The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later.
This notebook uses the notMNIST dataset to be used with python experiments. This dataset is designed to look like the classic MNIST dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST.
End of explanation
"""
url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 1% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
"""
Explanation: First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k labelled examples and the test set about 19,000. Given these sizes, it should be possible to train models quickly on any machine.
End of explanation
"""
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall()
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
"""
Explanation: Extract the dataset from the compressed .tar.gz file.
This should give you a set of directories, labelled A through J.
End of explanation
"""
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
"""
Explanation: Problem 1
Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.
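As a rough sketch of how you might do this (assuming the train_folders list produced by maybe_extract above; display and Image are already imported):
python
import random
for folder in train_folders:
    # pick one random image file from each letter folder and render it inline
    fname = random.choice(os.listdir(folder))
    display(Image(filename=os.path.join(folder, fname)))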
Now let's load the data in a more manageable format. Since, depending on your computer setup, you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk, and curate them independently. Later we'll merge them into a single dataset of manageable size.
We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road.
A few images might not be readable; we'll just skip them.
End of explanation
"""
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
"""
Explanation: Problem 2
Let's verify that the data still looks good. Display a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot.
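One possible sketch (assuming the pickle file names returned by maybe_pickle above) is to load one letter's ndarray and show its first image with matplotlib:
python
with open(train_datasets[0], 'rb') as f:
    letter_set = pickle.load(f)
plt.imshow(letter_set[0], cmap='gray')  # each slice of the tensor is a 28x28 normalized image
plt.show()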
Problem 3
Another check: we expect the data to be balanced across classes. Verify that.
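A minimal balance check (again assuming the train_datasets file names from above) is to print the number of images stored per letter:
python
for pickle_file in train_datasets:
    with open(pickle_file, 'rb') as f:
        print(pickle_file, pickle.load(f).shape[0])  # counts per class should be roughly equal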
Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9.
Also create a validation dataset for hyperparameter tuning.
End of explanation
"""
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
"""
Explanation: Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
End of explanation
"""
pickle_file = 'notMNIST.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
"""
Explanation: Problem 4
Convince yourself that the data is still good after shuffling!
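One way to sanity-check this (a sketch, not part of the saved pipeline) is to plot a random training image together with its label:
python
i = np.random.randint(train_dataset.shape[0])
plt.imshow(train_dataset[i], cmap='gray')
plt.title(chr(ord('A') + int(train_labels[i])))  # labels 0-9 map to letters A-J
plt.show()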
Finally, let's save the data for later reuse:
End of explanation
"""
|
navoj/ecell4
|
ipynb/Tutorials/Spatiocyte.ipynb
|
gpl-2.0
|
from ecell4 import *
with species_attributes():
A | B | C | {'D': '1'}
with reaction_rules():
A + B == C | (0.01, 0.3)
m = get_model()
w = lattice.LatticeWorld(Real3(1, 1, 1), 0.005) # The second argument is 'voxel_radius'.
w.bind_to(m)
w.add_molecules(Species('C'), 60)
sim = lattice.LatticeSimulator(w)
obs = FixedIntervalNumberObserver(0.1, ('A', 'B', 'C'))
sim.run(10, obs)
"""
Explanation: Spatiocyte simulations with single-molecule resolution
In the previous section we showed an example of E-Cell4's spatial representation.
Next let's simulate models with a more detailed spatial representation, called single-molecule resolution.
Spatiocyte lattice-based method
In the spatial Gillespie method, we divided the Space into smaller Subvolumes and diffused the molecules between them.
However, the molecules in each Subvolume were represented only by their number; the locations of the molecules were NOT determined.
In other words, the spatial resolution of the spatial Gillespie method is equal to the side length of the Subvolume, $l$.
To improve this resolution we need to make $l$ smaller, but in that method $l$ must be at least about 10 times larger than the diameter of the molecule, $R$.
How can we improve the spatial resolution down to the size of a molecule?
The answer is simulation with single-molecule resolution.
This method simulates not the number of molecules, but the spatial reaction and diffusion of each individual molecule.
E-Cell4 has multiple single-molecule resolution methods; here we explain the Spatiocyte lattice-based method.
Spatiocyte treats each molecule as a hard sphere and diffuses the molecules on a hexagonal close-packed lattice.
Spatiocyte assigns an ID to each molecule and tracks its position with single-molecule resolution.
To resolve this finer length scale, Spatiocyte uses a time-step about 100 times smaller than the spatial Gillespie method, because the time scale of diffusion increases with the square of the distance.
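As a rough illustration of why (assuming simple Brownian diffusion), the time needed to diffuse over a length $l$ scales as
$
\Delta t \sim \frac{l^2}{D},
$
so reducing the lattice spacing by a factor of 10 shrinks the usable time-step by a factor of about 100.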
Next, let's use Spatiocyte.
End of explanation
"""
from ecell4 import *
with species_attributes():
A | {'D': '1'}
m = get_model()
w = lattice.LatticeWorld(Real3(1, 1, 1), 0.005)
w.bind_to(m)
(pid, p), suc = w.new_particle(Species('A'), Real3(0.5, 0.5, 0.5))
"""
Explanation: There is a distinct difference in the second argument of LatticeWorld. This is called the voxel radius.
Spatiocyte defines the locations of molecules by dividing the Space into units of the molecule size, and calls this minimum unit of Space a Voxel.
In most cases the radius of the molecule is a good choice for the voxel radius.
In this example, we set 5 $\mathrm{nm}$ as the radius of the molecule in a Space with sides of 1 $\mathrm{\mu m}$.
It takes more time to simulate, but the result is the same as with ODE or Gillespie.
The diffusion movement of a single molecule
Next let's simulate single-molecule diffusion to check the resolution.
End of explanation
"""
pid, p = w.get_particle(pid)
print(p.species().serial()) # will print: A
print(p.radius(), p.D()) # will print: (0.005, 1.0)
print(tuple(p.position())) # will print: (0.49806291436591293, 0.49652123150307814, 0.5)
"""
Explanation: The new_particle method places a particle at a coordinate in LatticeWorld and returns the particle's ID pid, the information about the particle p, and a flag suc indicating whether the particle was placed successfully.
If a particle already occupies the coordinate, you can NOT place another particle over it; the call fails and suc will be False.
p contains the particle's position, species type, radius, and diffusion coefficient.
You can inspect p with the particle's ID pid.
Let's check p.
End of explanation
"""
viz.plot_world(w, save_image=True)
"""
Explanation: The get_particle method receives a particle ID and returns the ID and the particle (of course, the IDs are the same).
You can inspect the coordinate of the particle as a Real3 with the position method.
Since a Real3 is hard to read directly, here we printed it after converting it to a tuple.
As you can see, the tuple coordinate is slightly different from the original Real3. This is because Spatiocyte can place a molecule only on the lattice.
LatticeWorld places the molecule at the lattice point nearest to the given Real3.
You can visualize the coordinate of the molecule with the viz.plot_world method and check that the molecule is at the center of the World.
End of explanation
"""
sim = lattice.LatticeSimulator(w)
obs = FixedIntervalTrajectoryObserver(0.002, (pid,))
sim.run(1, obs)
viz.plot_trajectory(obs)
"""
Explanation: You can also use an Observer to track the trajectory of the molecular diffusion process.
End of explanation
"""
print(len(obs.data()))
print(len(obs.data()[0]))
"""
Explanation: Here we visualized the trajectory with the viz.plot_trajectory method; you can also obtain it as a list of Real3 with the data method.
End of explanation
"""
w.add_molecules(Species('A'), 5)
particles = w.list_particles(Species('A'))
for pid, p in particles:
print(p.species().serial(), tuple(p.position()))
"""
Explanation: The data method returns a nested list.
The first index is the index of the particle.
The second index is the index of the Real3 along that particle's trajectory.
In this case we threw in just one particle, so the first length is 1, and the second length, 501, is the number of time-series coordinates of that single particle (the initial coordinate plus the coordinates at 1/0.002 = 500 time points).
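For example, a small sketch of how you might unpack that nested list (assuming each entry is a Real3, as described above, and using the same tuple() conversion as before):
python
x_coords = [tuple(pos)[0] for pos in obs.data()[0]]  # x-coordinates of the single particle over time
print(len(x_coords))  # 501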
You can also obtain particles in bulk with the list_particles method and a species type.
End of explanation
"""
from ecell4 import *
with species_attributes():
A | B | C | {'D': '1'}
with reaction_rules():
A + B > C | 1.0
m = get_model()
w = lattice.LatticeWorld(Real3(2, 1, 1), 0.005)
w.bind_to(m)
w.add_molecules(Species('A'), 120)
w.add_molecules(Species('B'), 120)
obs = FixedIntervalNumberObserver(0.005, ('A', 'B', 'C'))
sim = lattice.LatticeSimulator(w)
sim.run(1.0, obs)
%matplotlib inline
odew = ode.ODEWorld(Real3(2, 1, 1))
odew.bind_to(m)
odew.add_molecules(Species('A'), 120)
odew.add_molecules(Species('B'), 120)
odeobs = FixedIntervalNumberObserver(0.005, ('A', 'B', 'C'))
odesim = ode.ODESimulator(odew)
odesim.run(1.0, odeobs)
viz.plot_number_observer(obs, "-", odeobs, "--")
"""
Explanation: Please remember the list_particles method; like add_molecules, it can be used with the other Worlds as well.
On a different note, in Spatiocyte the proper method to inspect single molecules is list_voxels, where the coordinate is described by a voxel index (not a Real3).
The diffusion coefficient and the second-order reaction
The models we have addressed so far contain a second-order reaction.
Let's look at the relationship between this second-order reaction and the diffusion coefficient in Spatiocyte.
End of explanation
"""
from ecell4 import *
with species_attributes():
A | {'D': '1', 'location': 'C'}
m = get_model()
w = lattice.LatticeWorld(Real3(1, 1, 1), 0.005)
w.bind_to(m)
sph = Sphere(Real3(0.5, 0.5, 0.5), 0.45)
print(w.add_structure(Species('C'), sph)) # will print 539805
viz.plot_world(w, save_image=True)
"""
Explanation: Although we used a faster kinetic constant than before, the result is much the same.
In contrast with the ODE simulation, however, you can now see a difference between them (the solid line is Spatiocyte, the dashed line is ODE).
Is this a fault of Spatiocyte? (No.)
The Spatiocyte reaction rate simply cannot get arbitrarily fast, while the ODE reaction rate can.
This is caused by the difference between the definition of the reaction rate constant in an ODE solver and in a single-molecule simulation method.
The former is called the macroscopic or effective reaction rate constant; the latter is called the microscopic or intrinsic reaction rate constant.
The macroscopic rate describes the reaction rate in a well-mixed molecular state, while the microscopic rate describes the reactivity upon molecular collision.
So from the microscopic perspective, the first thing molecules need in order to react is a collision.
In Spatiocyte, however fast you make this microscopic rate, you can NOT make the overall reaction rate faster than the rate of diffusion.
This is called the diffusion-limited regime.
It is similar to the situation where unevenly distributed molecules need time to encounter each other before they can react.
It is known that in 3D space the macroscopic rate constant $k_\mathrm{on}$ and the microscopic rate constant $k_a$ are related by
$
\frac{1}{k_\mathrm{on}}=\frac{1}{k_a}+\frac{1}{4\pi RD_\mathrm{tot}}
$
Here, $R$ is the sum of the radii of the two colliding molecules, and $D_\mathrm{tot}$ is the sum of their diffusion coefficients.
In the case of the IPython Notebook cell above, $k_D=4\pi RD_\mathrm{tot}$ is almost 0.25 and the microscopic rate constant is 1.0.
So the macroscopic rate constant is almost 0.2.
(However, unless you change the configuration of Spatiocyte, the second-order reaction rate must be slower than $3\sqrt{2} RD$, and the dissociation constant $k_D$ is also $3\sqrt{2} RD$.)
The single-molecule simulation method can separate molecular diffusion and reaction accurately, in contrast to the ODE or Gillespie methods, which assume a well-mixed system (that is, an effectively infinite diffusion coefficient).
However, if the microscopic rate constant $k_a$ is small enough compared to $k_D$, the macroscopic rate constant is almost equal to the microscopic one (the reaction rate-limited regime).
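As a quick numerical check of this relation (using the values assumed from the cell above: radii 0.005 + 0.005, diffusion coefficients 1 + 1, and $k_a = 1.0$):
python
import math
R, D_tot, ka = 0.005 + 0.005, 1.0 + 1.0, 1.0
kD = 4 * math.pi * R * D_tot       # ~0.25
kon = 1.0 / (1.0 / ka + 1.0 / kD)  # ~0.2
print(kD, kon)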
The structure in the Spatiocyte method
Next we explain a way to create a structure, such as a cell membrane.
Although the structure feature in E-Cell4 is still in development, Spatiocyte supports structures to some extent.
Let's look at a sphere structure as an example.
To restrict molecular diffusion to the inside of a sphere, first we create the sphere.
End of explanation
"""
w.add_molecules(Species('A'), 120)
viz.plot_world(w, species_list=('A',), save_image=True) # visualize A-molecules only
"""
Explanation: The first argument of the Sphere class is the center of the sphere, and the second argument is the radius.
Then we created and added a Species named C.
A structure in the Spatiocyte method is described by filling the corresponding space with Voxels.
In the example above, the Voxels inside the sphere are occupied by the Species named C.
You can see that distribution with viz.plot_world.
(However, the number of particles is too large to visualize, so only a part of them is plotted; the sphere is actually fully occupied by the species.)
Next we create a Species that moves inside this sphere. To that end we give the location attribute to the Species
and then add_molecules to the World.
End of explanation
"""
pid_list = [pid for pid, p in w.list_particles(Species('A'))[: 10]]
obs = FixedIntervalTrajectoryObserver(1e-3, pid_list)
sim = lattice.LatticeSimulator(w)
sim.run(1, obs)
viz.plot_trajectory(obs, save_image=True)
"""
Explanation: Now we have restricted the trajectories of Species A to the structure made of Species C, and add_molecules respects that restriction.
As a note, you need to create the structure before calling add_molecules.
We can use a FixedIntervalTrajectoryObserver to check that the diffusion area is restricted.
End of explanation
"""
from ecell4 import *
with species_attributes():
A | {'D': '0.1', 'location': 'M'}
B | {'D': '1'}
m = get_model()
w = lattice.LatticeWorld(Real3(1, 1, 1))
w.bind_to(m)
origin = Real3(0, 0, 0.5)
unit0 = Real3(1, 0, 0)
unit1 = Real3(0, 1, 0)
w.add_structure(
Species('M'), PlanarSurface(origin, unit0, unit1)) # Create a structure first
w.add_molecules(Species('B'), 480) # Throw-in B-molecules
viz.plot_world(w, species_list=('A', 'B'))
"""
Explanation: pid_list is a list of the IDs of 10 of the A molecules.
The trajectories of these 10 molecules are plotted in different colors.
As expected, the trajectories are confined to the sphere.
The structure and the reaction
Finally, we explain molecular translocation between structures.
A species without the location attribute is not a member of any structure.
In the example above, if you do NOT give the location attribute to Species A, A is placed outside of the sphere.
Next let's create a surface structure.
To create a surface we need three Real3 values: an origin point (origin) and two axis vectors (unit0, unit1).
python
ps = PlanarSurface(origin, unit0, unit1)
Use this ps to place Species A on the surface, together with an ordinary Species B.
End of explanation
"""
%matplotlib inline
with reaction_rules():
B + M == A | (1e-3, 1.5)
sim = lattice.LatticeSimulator(w)
obs = NumberObserver(('A', 'B'))
sim.run(2, obs)
viz.plot_number_observer(obs)
viz.plot_world(w, species_list=('A', 'B'))
"""
Explanation: It might be hard to see in the plot, but the B molecules are placed throughout the volume, not on the surface.
Then how can we make this species B become absorbed onto the surface M and synthesize a species A?
python
with reaction_rules():
B + M == A | (1e-3, 1.5)
This means that a species B becomes A when B collides with the structure M.
In the reverse direction, a species A dissociates back into M and B.
Now you can simulate a model with a structure.
End of explanation
"""
|