Dataset schema (column: type, value range):
  repo_name: string, lengths 6 to 77
  path: string, lengths 8 to 215
  license: string, 15 classes
  content: string, lengths 335 to 154k
sgkang/simpegAIP
notebook/StoltzaAndMacnae1998_simple_stepon.ipynb
mit
def AofT(time,T, ai, taui): return ai*np.exp(-time/taui)/(1.+np.exp(-T/(2*taui))) from SimPEG import * import sys sys.path.append("./DoubleLog/") from plotting import mapDat class LinearSurvey(Survey.BaseSurvey): nD = None def __init__(self, time, **kwargs): self.time = time self.nD = time.size def projectFields(self, u): return u class LinearProblem(Problem.BaseProblem): surveyPair = LinearSurvey def __init__(self, mesh, G, **kwargs): Problem.BaseProblem.__init__(self, mesh, **kwargs) self.G = G def fields(self, m, u=None): return self.G.dot(m) def Jvec(self, m, v, u=None): return self.G.dot(v) def Jtvec(self, m, v, u=None): return self.G.T.dot(v) """ Explanation: $A(t,T) = \Sigma_i A_i e^{-t/\tau_i} / (1 + e^{-T/2\tau_i})$ End of explanation """ # time = np.cumsum(np.r_[0., 1e-5*np.ones(10), 5e-5*np.ones(10), 1e-4*np.ones(10), 5e-4*np.ones(10), 1e-3*np.ones(10)]) time = np.cumsum(np.r_[0., 1e-5*np.ones(10), 5e-5*np.ones(10),1e-4*np.ones(5)]) M = 41 tau = np.logspace(-4.1, -1, M) N = time.size A = np.zeros((N, M)) for j in range(M): A[:,j] = np.exp(-time/tau[j]) mtrue = np.zeros(M) np.random.seed(1) inds = np.random.random_integers(0, 41, size=5) mtrue[inds] = np.r_[-0.2, 2, 1, 4, 5] out = np.dot(A,mtrue) fig = plt.figure(figsize=(6,4.5)) ax = plt.subplot(111) for i, ind in enumerate(inds): temp, dum, dum = mapDat(mtrue[inds][i]*np.exp(-time/tau[ind]), 1e-5, stretch=2) plt.semilogx(time, temp, 'k', alpha = 0.5) outmap, ticks, tickLabels = mapDat(out, 1e-5, stretch=2) ax.semilogx(time, outmap, 'k', lw=2) ax.set_yticks(ticks) ax.set_yticklabels(tickLabels) # ax.set_ylim(ticks.min(), ticks.max()) ax.set_ylim(ticks.min(), ticks.max()) ax.set_xlim(time.min(), time.max()) ax.grid(True) # from pymatsolver import MumpsSolver mesh = Mesh.TensorMesh([M]) prob = LinearProblem(mesh, A) survey = LinearSurvey(time) survey.pair(prob) survey.makeSyntheticData(mtrue, std=0.01) # survey.dobs = out reg = Regularization.BaseRegularization(mesh) dmis = DataMisfit.l2_DataMisfit(survey) dmis.Wd = 1./(0.05*abs(survey.dobs)+0.05*1e-2) opt = Optimization.ProjectedGNCG(maxIter=20) # opt = Optimization.InexactGaussNewton(maxIter=20) opt.lower = -1e-10 invProb = InvProblem.BaseInvProblem(dmis, reg, opt) invProb.beta = 1e-4 beta = Directives.BetaSchedule() beta.coolingFactor = 2 target = Directives.TargetMisfit() inv = Inversion.BaseInversion(invProb, directiveList=[beta, target]) m0 = np.zeros_like(survey.mtrue) mrec = inv.run(m0) plt.semilogx(tau, mtrue, '.') plt.semilogx(tau, mrec, '.') fig = plt.figure(figsize=(6,4.5)) ax = plt.subplot(111) obsmap, ticks, tickLabels = mapDat(survey.dobs, 1e0, stretch=2) predmap, dum, dum = mapDat(invProb.dpred, 1e0, stretch=2) ax.loglog(time, survey.dobs, 'k', lw=2) ax.loglog(time, invProb.dpred, 'k.', lw=2) # ax.set_yticks(ticks) # ax.set_yticklabels(tickLabels) # ax.set_ylim(ticks.min(), ticks.max()) # ax.set_ylim(ticks.min(), ticks.max()) ax.set_xlim(time.min(), time.max()) ax.grid(True) time = np.cumsum(np.r_[0., 1e-5*np.ones(10), 5e-5*np.ones(10), 1e-4*np.ones(10), 5e-4*np.ones(10), 1e-3*np.ones(10)]) N = time.size A = np.zeros((N, M)) for j in range(M): A[:,j] = np.exp(-time/tau[j]) mfund = mtrue.copy() mfund[mfund<0.] = 0. 
obs = np.dot(A, mtrue) fund = np.dot(A, mfund) pred = np.dot(A, mrec) ip = obs-fund ipobs = obs-pred plt.loglog(time, obs, 'k.-', lw=2) plt.loglog(time, -obs, 'k--', lw=2) plt.loglog(time, fund, 'b.', lw=2) plt.loglog(time, pred, 'b-', lw=2) plt.loglog(time, -ip, 'r--', lw=2) plt.loglog(time, abs(ipobs), 'r.', lw=2) plt.ylim(abs(obs).min(), abs(obs).max()) plt.xlim(time.min(), time.max()) """ Explanation: Simple exponential basis $$ \mathbf{A}\boldsymbol{\alpha} = \mathbf{d}$$ End of explanation """
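The notebook above fits a linear combination of exponential decays, solving A α = d with SimPEG's inversion machinery. A minimal, self-contained sketch of that forward model and a plain regularized least-squares fit using only NumPy follows; the time channels and tau grid mirror the notebook, but the spectrum alpha_true and the regularization value beta below are illustrative assumptions, not the notebook's values.

import numpy as np

# exponential basis: one column per time constant tau_j
time = np.cumsum(np.r_[0., 1e-5 * np.ones(10), 5e-5 * np.ones(10), 1e-4 * np.ones(5)])
tau = np.logspace(-4.1, -1, 41)
A = np.exp(-time[:, None] / tau[None, :])   # shape (n_times, n_taus)

# synthetic decay-amplitude spectrum and the data it predicts
alpha_true = np.zeros(tau.size)
alpha_true[[5, 15, 30]] = [1.0, 4.0, 2.0]
d = A @ alpha_true

# Tikhonov-regularized least squares: (A'A + beta*I) alpha = A'd
beta = 1e-6
alpha_hat = np.linalg.solve(A.T @ A + beta * np.eye(tau.size), A.T @ d)
print(np.linalg.norm(A @ alpha_hat - d))    # data misfit of the recovered spectrum

An unconstrained least-squares fit like this tends to smear the spiky spectrum across neighboring time constants, which is why the notebook reaches for SimPEG's regularization and projected Gauss-Newton optimization instead.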
albahnsen/PracticalMachineLearningClass
exercises/P2-MovieGenrePrediction.ipynb
mit
import pandas as pd import os import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.preprocessing import MultiLabelBinarizer from sklearn.multiclass import OneVsRestClassifier from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier from sklearn.metrics import r2_score, roc_auc_score from sklearn.model_selection import train_test_split dataTraining = pd.read_csv('https://github.com/albahnsen/PracticalMachineLearningClass/raw/master/datasets/dataTraining.zip', encoding='UTF-8', index_col=0) dataTesting = pd.read_csv('https://github.com/albahnsen/PracticalMachineLearningClass/raw/master/datasets/dataTesting.zip', encoding='UTF-8', index_col=0) dataTraining.head() dataTesting.head() """ Explanation: Project 2 Movie Genre Classification Classify a movie genre based on its plot. <img src="moviegenre.png" style="float: left; margin-right: 10px;" /> https://www.kaggle.com/c/miia4200-20191-p2-moviegenreclassification/overview Data Input: - movie plot Output: Probability of the movie belong to each genre Evaluation 20% API 30% Create a solution using with a Machine Learning algorithm - Presentation (5 slides) 50% Performance in the Kaggle competition (Normalized acording to class performance in the private leaderboard) Acknowledgements We thank Professor Fabio Gonzalez, Ph.D. and his student John Arevalo for providing this dataset. See https://arxiv.org/abs/1702.01992 Sample Submission End of explanation """ vect = CountVectorizer(max_features=1000) X_dtm = vect.fit_transform(dataTraining['plot']) X_dtm.shape print(vect.get_feature_names()[:50]) """ Explanation: Create count vectorizer End of explanation """ dataTraining['genres'] = dataTraining['genres'].map(lambda x: eval(x)) le = MultiLabelBinarizer() y_genres = le.fit_transform(dataTraining['genres']) y_genres X_train, X_test, y_train_genres, y_test_genres = train_test_split(X_dtm, y_genres, test_size=0.33, random_state=42) """ Explanation: Create y End of explanation """ clf = OneVsRestClassifier(RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=10, random_state=42)) clf.fit(X_train, y_train_genres) y_pred_genres = clf.predict_proba(X_test) roc_auc_score(y_test_genres, y_pred_genres, average='macro') """ Explanation: Train multi-class multi-label model End of explanation """ X_test_dtm = vect.transform(dataTesting['plot']) cols = ['p_Action', 'p_Adventure', 'p_Animation', 'p_Biography', 'p_Comedy', 'p_Crime', 'p_Documentary', 'p_Drama', 'p_Family', 'p_Fantasy', 'p_Film-Noir', 'p_History', 'p_Horror', 'p_Music', 'p_Musical', 'p_Mystery', 'p_News', 'p_Romance', 'p_Sci-Fi', 'p_Short', 'p_Sport', 'p_Thriller', 'p_War', 'p_Western'] y_pred_test_genres = clf.predict_proba(X_test_dtm) res = pd.DataFrame(y_pred_test_genres, index=dataTesting.index, columns=cols) res.head() res.to_csv('pred_genres_text_RF.csv', index_label='ID') """ Explanation: Predict the testing dataset End of explanation """
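The notebook above reports only a single macro-averaged AUC for the multi-label genre model. A short hedged sketch of scoring each genre separately can make the evaluation easier to interpret; it assumes the variables clf, X_test, y_test_genres and le defined in the notebook, and a reasonably recent scikit-learn.

import pandas as pd
from sklearn.metrics import roc_auc_score

# probability predictions for the held-out split, one column per genre
y_pred_genres = clf.predict_proba(X_test)

# per-genre AUC (may raise if a genre has no positive samples in the test split)
per_genre_auc = pd.Series(
    {genre: roc_auc_score(y_test_genres[:, i], y_pred_genres[:, i])
     for i, genre in enumerate(le.classes_)}
).sort_values()
print(per_genre_auc)

Genres with few training examples will typically sit at the bottom of this ranking, which is useful to know before tuning the vectorizer or the classifier.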
mne-tools/mne-tools.github.io
0.15/_downloads/plot_source_label_time_frequency.ipynb
bsd-3-clause
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt import mne from mne import io from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, source_induced_power print(__doc__) """ Explanation: Compute power and phase lock in label of the source space Compute time-frequency maps of power and phase lock in the source space. The inverse method is linear based on dSPM inverse operator. The example also shows the difference in the time-frequency maps when they are computed with and without subtracting the evoked response from each epoch. The former results in induced activity only while the latter also includes evoked (stimulus-locked) activity. End of explanation """ data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' label_name = 'Aud-rh' fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name tmin, tmax, event_id = -0.2, 0.5, 2 # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) events = mne.find_events(raw, stim_channel='STI 014') inverse_operator = read_inverse_operator(fname_inv) include = [] raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more # Picks MEG channels picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, stim=False, include=include, exclude='bads') reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6) # Load epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=reject, preload=True) # Compute a source estimate per frequency band including and excluding the # evoked response freqs = np.arange(7, 30, 2) # define frequencies of interest label = mne.read_label(fname_label) n_cycles = freqs / 3. # different number of cycle per frequency # subtract the evoked response in order to exclude evoked activity epochs_induced = epochs.copy().subtract_evoked() plt.close('all') for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced], ['evoked + induced', 'induced only'])): # compute the source space power and the inter-trial coherence power, itc = source_induced_power( this_epochs, inverse_operator, freqs, label, baseline=(-0.1, 0), baseline_mode='percent', n_cycles=n_cycles, n_jobs=1) power = np.mean(power, axis=0) # average over sources itc = np.mean(itc, axis=0) # average over sources times = epochs.times ########################################################################## # View time-frequency plots plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43) plt.subplot(2, 2, 2 * ii + 1) plt.imshow(20 * power, extent=[times[0], times[-1], freqs[0], freqs[-1]], aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r') plt.xlabel('Time (s)') plt.ylabel('Frequency (Hz)') plt.title('Power (%s)' % title) plt.colorbar() plt.subplot(2, 2, 2 * ii + 2) plt.imshow(itc, extent=[times[0], times[-1], freqs[0], freqs[-1]], aspect='auto', origin='lower', vmin=0, vmax=0.7, cmap='RdBu_r') plt.xlabel('Time (s)') plt.ylabel('Frequency (Hz)') plt.title('ITC (%s)' % title) plt.colorbar() plt.show() """ Explanation: Set parameters End of explanation """
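The explanation above hinges on subtracting the evoked response to isolate induced activity. A toy NumPy sketch (synthetic trials, no MNE objects, all numbers illustrative) shows why removing the across-trial average suppresses the phase-locked component while leaving non-phase-locked power intact:

import numpy as np

rng = np.random.default_rng(0)
n_trials, n_times = 50, 1000
t = np.linspace(0., 1., n_times)

# phase-locked (evoked) 10 Hz response plus induced 20 Hz activity with a random phase per trial
evoked = np.sin(2 * np.pi * 10 * t)
induced = np.array([np.sin(2 * np.pi * 20 * t + rng.uniform(0, 2 * np.pi))
                    for _ in range(n_trials)])
epochs = evoked + induced + 0.1 * rng.standard_normal((n_trials, n_times))

# subtracting the across-trial average removes the phase-locked part of each trial
epochs_induced = epochs - epochs.mean(axis=0)

for label, data in (('evoked + induced', epochs), ('induced only', epochs_induced)):
    power = np.abs(np.fft.rfft(data, axis=1)) ** 2
    print(f"{label}: mean power near 10 Hz = {power[:, 10].mean():.0f}, near 20 Hz = {power[:, 20].mean():.0f}")

The 10 Hz (evoked) power collapses after subtraction while the 20 Hz (induced) power barely changes, which is the same contrast the source_induced_power maps display.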
robertoalotufo/ia898
master/Revisao_NumerosComplexos.ipynb
mit
import numpy as np C = np.complex(3,4) print('C=', C) print(type(C)) """ Explanation: Table of Contents <p><div class="lev1 toc-item"><a href="#IA898-Revisão-em-números-complexos" data-toc-modified-id="IA898-Revisão-em-números-complexos-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>IA898 Revisão em números complexos</a></div><div class="lev2 toc-item"><a href="#Representação-cartesiana" data-toc-modified-id="Representação-cartesiana-11"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Representação cartesiana</a></div><div class="lev2 toc-item"><a href="#Representação-no-plano-complexo" data-toc-modified-id="Representação-no-plano-complexo-12"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Representação no plano complexo</a></div><div class="lev2 toc-item"><a href="#Representação-polar" data-toc-modified-id="Representação-polar-13"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Representação polar</a></div><div class="lev2 toc-item"><a href="#Relação-entre-representação-cartesiana-e-representação-polar" data-toc-modified-id="Relação-entre-representação-cartesiana-e-representação-polar-14"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Relação entre representação cartesiana e representação polar</a></div><div class="lev2 toc-item"><a href="#Array-de-números-complexos" data-toc-modified-id="Array-de-números-complexos-15"><span class="toc-item-num">1.5&nbsp;&nbsp;</span>Array de números complexos</a></div> # IA898 Revisão em números complexos Antes de começarmos a discutir a Transformada de Fourier, vamos fazer a revisão de alguns conceitos importantes, como por exemplo, os números complexos. O conceito de números complexos surgiu no século XIV basicamente para possibilitar a resolução raiz quadrada. No início, os números complexos não eram vistos como números, mas sim como um artifício algébrico útil para se resolver equações. Descartes, no século XVII, os chamou de números imaginários. A representação de números complexos se dá na forma cartesiana (parte real e parte imaginária) e a na forma polar (módulo e fase). A equação de Euler dá a uma representação da forma polar em cosseno (parte real) e seno (parte imaginária). A operação de soma de números complexos é mais fácil de ser realizada na forma cartesiana enquanto que as operações de produto e potenciação são mais facilmente realizadas na forma polar. A visualização de um sinal complexo também é mais fácil na forma polar (magnitude e fase). ## Representação cartesiana A representação cartesiana do número complexo $c$ é dada por: $$ c = x + yj $$ onde $x$ é denominada parte Real e $y$ é denominada parte Imaginária. A maneira mais imediata de criar um número complexo usando NumPy e através da função *complex*, bastando para isso passar os valores das partes *real* e *imaginária* do número complexo, como mostra o exemplo abaixo. Veja que a variável *C* é do tipo *complex*. End of explanation """ c = np.array([3+4j]) #c = 3+4j print('c=', c) print(type(c)) print('Parte real:', c.real) print('Parte imaginária:', c.imag) print(c.shape) """ Explanation: Porém, como iremos trabalhar com números complexos em imagens, utilizaremos não apenas um número complexo, mas um array de números complexos. Podemos ver no exemplo a seguir, que a forma de se criar um array de números complexos é a mesma de se criar um array de números inteiros ou números reais (em ponto flutuante). Basta então utilizarmos a função array do NumPy e inserir cada elemento complexo, escrevendo-o da forma :eq:x+yj. Veja que agora a variável c é do tipo numpy.ndarray. 
End of explanation """ cc = np.conjugate(c) print('c=', c) print('Complexo conjugado:', cc) print('Parte real:', cc.real) print('Parte imaginária:', cc.imag) """ Explanation: O conjugado deste mesmo número complexo $c$ é dado por: $$ \bar{c} = x - yj $$ Percebam que a parte real de um número complexo e de seu conjugado é a mesma ($x$), enquanto que a parte imaginária deles são opostas ($y$ e $-y$) End of explanation """ c3 = np.array(2+2j) print('c3=', c3) print('Módulo:', np.abs(c3)) print('Argumento:', np.rad2deg(np.angle(c3))) np.angle(c3)*180/np.pi """ Explanation: Representação no plano complexo Os números complexos são representados geometricamente no plano complexo. Nele, representa-se a parte real, :eq:x, no eixo horizontal e a parte imaginária, :eq:y, no eixo vertical. Se interpretarmos este número complexo no plano cartesiano como um vetor, podemos calcular sua magnitude (módulo) e sua fase (argumento). End of explanation """ C_array = np.array([[3+4j, 2+2j],[1-2j, -2+2j]]) print('C_array=\n', C_array) print('Módulo:\n', np.abs(C_array)) print('Argumento:\n', np.rad2deg(np.angle(C_array))) print('Shape:',C_array.shape) """ Explanation: Representação polar A representação polar de um número complexo, dada pela fórmula de Euler, é da forma: $$ c = \rho (\cos \phi + j \sin \phi) $$ ou ainda $$ c = \rho {e}^{j\phi } $$ onde $\rho$ é o módulo (magnitude) do número complexo e $\phi$ é o argumento (fase) do número complexo. Também é fácil de ver que $rho \cos \phi$ é a parte real do número complexo e $\rho \sin \phi$ é a parte imaginária do número complexo. Relação entre representação cartesiana e representação polar Todo número complexo pode ser representado por sua parte real e sua parte imaginária, ou por seu módulo e argumento. A partir de uma destas representações é possível obter a outra. Para converter um número complexo de sua representação cartesiana, para sua representação polar, basta fazermos: $$ \rho = \left|z \right| = \sqrt{{x}^{2}+{y}^{2}} $$ $$ \phi = \arctan \left(\frac{y}{x} \right) $$ Já a conversão do número complexo em sua forma polar para a forma cartesiana é dada por: $$ x = \rho\cos(\phi) $$ $$ y = \rho\sin(\phi) $$ Para conversão de um número complexo de suas coordenadas cartesianas para polares, basta usar as funções abs e angle oferecidas pelo NumPy. Já para a conversão inversa (de polar para cartesiana), não função específica do NumPy e é preciso, portanto, realizar o cálculo como descrito acima. É importante também ficar atento ao uso do ângulo em radianos ou em graus. Array de números complexos Até agora, vimos os conceitos relacionados aos números complexos e as principais funções do NumPy para manipular estes números. Percebam que desde o começo operamos em uma array de um único elemento. Mas, no caso das imagens e da transformada de Fourier, ao invés de um único número complexo estaremos manipulando um array de números complexos. End of explanation """
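The notebook above points out (in Portuguese) that NumPy provides np.abs and np.angle for the cartesian-to-polar direction but no single helper for the reverse conversion. A minimal sketch of both directions for an array of complex numbers; the sample values are illustrative.

import numpy as np

z = np.array([3 + 4j, 2 + 2j, 1 - 2j, -2 + 2j])

# cartesian -> polar
rho, phi = np.abs(z), np.angle(z)       # phi in radians; use np.rad2deg(phi) for degrees

# polar -> cartesian (no dedicated NumPy function; use Euler's formula)
z_back = rho * np.exp(1j * phi)         # equivalently rho*np.cos(phi) + 1j*rho*np.sin(phi)
print(np.allclose(z, z_back))           # True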
vitojph/2016progpln
notebooks/2-Python-control-flujo.ipynb
mit
# asignamos unos cuantos valores a variables numero1 = 2 numero2 = 34 print(numero1 == numero2) print(numero1 != numero2) print(numero1 == numero1) print(numero2 <= 10) print(19 >= (10 * numero1)) print("------------------") print(10 == (5*2)) print(MiVariable != 10) """ Explanation: Control de flujo Hasta ahora hemos programado en Python intrucciones sencillas que solo tenían en cuenta una posibilidad. No evaluábamos nada, simplemente dábamos órdenes y Python obedecía. Como mucho, cuando cometíamos algún error de sintaxis, Python se quejaba, sin más. A partir de ahora vamos a aprender a manejar el control de flujo. Vamor a tener en cuenta resultados múltiples y vamos a seleccionar uno concreto dependiendo del valor que tengan las variables o de lo que esté ocurriendo en el programa en un momento concreto. Comparadores Los elementos más sencillos del control de flujo son los comparadores: Igual a ==. No es igual a !=. Menor que &lt;. Menor o igual que &lt;=. Mayor que &gt;. Mayor o igual que &gt;=. ¡OJO! No confundas el comparador de igualdad == con el símbolo = que utilizamos para asignar valores a variables. Cuando utilizamos un comparador para comparar dos expresiones, el resultado que nos devuelve la comparación es un valor booleano: True o False. End of explanation """ nombre = 'Víctor' if nombre == 'Víctor': print('¡Hey! Te llamas igual que yo.') print("Pues muy bien") diadelasemana = 'jueves' if diadelasemana != 'domingo': # hoy no es domingo print("No vas a misa") if diadelasemana == 'viernes': print("Empieza el fin de semana") print("Esta es la última línea.") if 10 == 5*2: print('10 es igual a 5 veces 2') """ Explanation: Condicionales En Python podemos evaluar condiciones con la intrucción if. if forma lo que se denomina un bloque de código, que tiene una sintaxis particular: if COMPARACIÓN: # si la comparación es True, ejecuta lo siguiente INSTRUCCIONES_1 Fíjate bien en dos cosas: los bloques de código comienzan cuando una instrucción termina en dos puntos : el código dentro del bloque aparece indentado o sangrado. Esta indentación se introduce pulsando el tabulador o tecleando espacios (habitualmente, cuatro espacios). Es muy importante mantener correctamente alineadas las instrucciones que aparecen dentro del mismo bloque de código. De lo contrario, podemos encontrar un error de sintaxis. End of explanation """ # prueba cambiando el valor asignado a la variable edad edad = 44 if edad >= 65: print('¡Enhorabuena, estás jubilado!') else: print('Deberías estar trabajando, si te dejan.') """ Explanation: Podemos evaluar condiciones más complejas definiciendo por ejemplo una alternativa cuando el resultado de la comparaciónn era False. 
Para ello, usamos instrucciones del tipo if-else, con la siguiente sintaxis: if COMPARACIÓN: # si la comparación es True, ejecuta lo siguiente INSTRUCCIONES_1 else: # por el contrario, si la comparación es False, ejecuta INSTRUCCIONES_2 End of explanation """ # prueba varias ejecuciones de esta celda cambiando el valor asignado a la variable temperatura temperatura = 22 if temperatura <= 0: print('¡Está helando!') elif 1 <= temperatura <= 10: print('¡Hace frescuni!') elif 11 <= temperatura <= 25: print('¡Ya es primavera!') else: print('¡Buff, qué calor!') """ Explanation: Por último, podemos evaluar distintas condiciones con instrucciones del tipo if-elif-else, con la siguiente sintaxis: if COMPARACIÓN1: # si la comparación es True, ejecuta lo siguiente INSTRUCCIONES_1 elif COMPARACIÓN2: # si esta comparación es True, ejecuta lo siguiente INSTRUCCIONES_2 else: # por el contrario, si ninguna comparación es True, ejecuta INSTRUCCIONES_3 Piensa en elif como en una abreviatura de else + if. End of explanation """ nombre = 'Víctor' edad = 37 if nombre == 'Víctor' and edad == 38: print('¡Hey! ¡Eres yo! ¿Quién te envía?') elif nombre == 'Víctor' or edad == 38: print('Te pareces a mí en algo.') if not nombre == 'Víctor' and not edad == 38: # esto es equivalente a if nombre != "Víctor" and edad != 34: print('No tienes nada que ver conmigo') alumnos = ["Pepito", "Raul", "Ana", "Antonio", "Maria"] print(alumnos) if 'Paco' in alumnos and 'Ana' in alumnos: print('Paco y Ana están en clase.') else: print('No es cierto que Paco y Ana estén en clase') if "Paco" in alumnos or "Ana" in alumnos: print('Paco o Ana, uno de los dos o ambos, está en clase.') else: print('No ha venido ninguno') """ Explanation: Operadores lógicos (o booleanos) Los operadores lógicos son palabras usadas para conectar oraciones de Python de forma gramaticalmente correcta, casi igual que si lo hiciésamos en lenguaje natural. Existen tres operadores lógicos: la conjunción and: la disyunción or. la negación not. Al igual que con las comparaciones, los operadores lógicos generan valores booleanos: True o False. End of explanation """ alumnos = ["Pepito", "Raul", "Ana", "Antonio", "Maria"] hoy = "viernes" if "Pepito" in alumnos: print("Pepito está en clase") if "Ana" in alumnos: print("Ana está en clase") if "Pepito" in alumnos and "Ana" not in alumnos and hoy == "viernes": print("Pepito ha venido, pero Ana no. Menos mal que es viernes") if "Pepito" in alumnos and "Ana" not in alumnos: print("Pepito ha venido, pero Ana no") if hoy == "viernes": print("Menos mal que es viernes") alumnos = 'Pepito Raul Ana Antonio Maria'.split() print(alumnos) numeros = "1 2 3 4 5 6 7 8 9 10".split() print(numeros) print(int(numeros[0]) + int(numeros[-1])) # algunos ejemplos para distinguir el funcionamiento de == y de in palabras_minusculas = "hola casa amigo wertyj" palabras_mayusculas = "AMIGO VIERNES CAFÉ" print("casa" in palabras_minusculas) print("O V" in palabras_mayusculas) print("amigo" in "amigo") print("amigo" == "amigo") print("amigo" in "amigos") print("amigo" == "amigos") """ Explanation: A continuación, hay más ejemplos de evaluación de condiciones con bloques de código if con dudas que surgen en clase. Pactica para entender cómo funcionan. End of explanation """
aaschroeder/Titanic_example
Final_ensemble.ipynb
gpl-3.0
import numpy as np import pandas as pd titanic=pd.read_csv('./titanic_clean_data.csv') """ Explanation: In this section, we're going to Setup the titanic data for ensembling Call the five individual models Ensemble the models (We use another Gradient Booster Output for submission End of explanation """ cols_to_norm=['Age','Fare'] col_norms=['Age_z','Fare_z'] titanic[col_norms]=titanic[cols_to_norm].apply(lambda x: (x-x.mean())/x.std()) titanic['cabin_clean']=(pd.notnull(titanic.Cabin)) from sklearn import svm, datasets from sklearn.pipeline import Pipeline from sklearn.feature_selection import SelectKBest, f_classif from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score, train_test_split from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier from sklearn.linear_model import LogisticRegression titanic_target=titanic.Survived.values features=['Sex','SibSp','Parch','Pclass_1','Pclass_2','Pclass_3','Emb_C','Emb_Q','Emb_S',\ 'Emb_nan','Age_ct_C','Age_ct_A','Age_ct_S', 'Sp_ct','Age_z','Fare_z',\ 'Ti_Dr', 'Ti_Master', 'Ti_Mil', 'Ti_Miss', 'Ti_Mr', 'Ti_Mrs', 'Ti_Other', 'Ti_Rev',\ 'Fl_AB', 'Fl_CD', 'Fl_EFG', 'Fl_nan'] titanic_features=titanic[features].values titanic_features, ensemble_features, titanic_target, ensemble_target= \ train_test_split(titanic_features, titanic_target, test_size=.1, random_state=7132016) """ Explanation: OK, let's normalize our continous variables End of explanation """ titanic_rf=pd.read_csv('./ensemble_rf.csv') titanic_gboost=pd.read_csv('./ensemble_gboost.csv') titanic_svm=pd.read_csv('./ensemble_svm.csv') titanic_nb=pd.read_csv('./ensemble_nb.csv') titanic_logit=pd.read_csv('./ensemble_logit.csv') titanic_ensemble=pd.merge(titanic_rf, titanic_gboost, left_index=True, right_index=True) titanic_ensemble=pd.merge(titanic_ensemble, titanic_svm, left_index=True, right_index=True) titanic_ensemble=pd.merge(titanic_ensemble, titanic_nb, left_index=True, right_index=True) titanic_ensemble=pd.merge(titanic_ensemble, titanic_logit, left_index=True, right_index=True) print titanic_ensemble.head() """ Explanation: In here will be the section where we import the relevant python code from each project Now, we'll import the three csv datasets, merge them on PassengerID, then ensemble End of explanation """ print titanic_ensemble.corr() """ Explanation: Let's see what the correlations are across the learners, to find how much similarity in information they share End of explanation """ titanic_ensemble=titanic_ensemble.values #clf=GradientBoostingClassifier().fit(titanic_ensemble,ensemble_target) feat_param=['deviance','exponential'] score=0 for feature in feat_param: clf = GradientBoostingClassifier(loss=feature, random_state=7112016) score_test= cross_val_score(clf,titanic_ensemble,ensemble_target,cv=5 ) if score_test.mean()>score: loss_out=feature score_diff=score_test.mean()-score score=score_test.mean() score=0 for feature in np.linspace(.05,.45,11): clf = GradientBoostingClassifier(loss=loss_out, learning_rate=feature, random_state=7112016) score_test= cross_val_score(clf,titanic_ensemble,ensemble_target,cv=5 ) if score_test.mean()>score: rate_out=feature score_diff=score_test.mean()-score score=score_test.mean() score=0 for feature in range(100,1001,100): clf = GradientBoostingClassifier(loss=loss_out, learning_rate=rate_out, n_estimators=feature, random_state=7112016) score_test= cross_val_score(clf,titanic_ensemble,ensemble_target,cv=5) if score_test.mean()>score: feat_n_out=feature score_diff=score_test.mean()-score 
score=score_test.mean() score=0 for feature in range(1,21): clf = GradientBoostingClassifier(loss=loss_out, learning_rate=rate_out, n_estimators=feat_n_out,\ max_depth=feature, random_state=7112016) score_test= cross_val_score(clf,titanic_ensemble,ensemble_target,cv=5) if score_test.mean()>score: depth_out=feature score_diff=score_test.mean()-score score=score_test.mean() score=0 for feature in range(1,21): clf = GradientBoostingClassifier(loss=loss_out, learning_rate=rate_out, n_estimators=feat_n_out,\ max_depth=depth_out, min_samples_split=feature, random_state=7112016) score_test= cross_val_score(clf,titanic_ensemble,ensemble_target,cv=5) if score_test.mean()>score: sample_out=feature score_diff=score_test.mean()-score score=score_test.mean() score=0 for feature in range(1,21): clf = GradientBoostingClassifier(loss=loss_out, learning_rate=rate_out, n_estimators=feat_n_out,\ max_depth=depth_out, min_samples_split=sample_out,\ min_samples_leaf=feature, random_state=7112016) score_test= cross_val_score(clf,titanic_ensemble,ensemble_target,cv=5) if score_test.mean()>score: sample_leaf_out=feature score_diff=score_test.mean()-score score=score_test.mean() score=0 for feature in np.linspace(0.0,0.5,10): clf = GradientBoostingClassifier(loss=loss_out, learning_rate=rate_out, n_estimators=feat_n_out,\ max_depth=depth_out, min_samples_split=sample_out,\ min_samples_leaf=sample_leaf_out, min_weight_fraction_leaf=feature, random_state=7112016) score_test= cross_val_score(clf,titanic_ensemble,ensemble_target,cv=5 ) if score_test.mean()>score: frac_out=feature score_diff=score_test.mean()-score score=score_test.mean() score=0 for feature in np.linspace(0.1,1,10): clf = GradientBoostingClassifier(loss=loss_out, learning_rate=rate_out, n_estimators=feat_n_out,\ max_depth=depth_out, min_samples_split=sample_out,\ min_samples_leaf=sample_leaf_out, min_weight_fraction_leaf=frac_out,\ subsample=feature, random_state=7112016) score_test= cross_val_score(clf,titanic_ensemble,ensemble_target,cv=5) if score_test.mean()>score: subsamp_out=feature score_diff=score_test.mean()-score score=score_test.mean() node_out=None for feature in range(2,11): clf = GradientBoostingClassifier(loss=loss_out, learning_rate=rate_out, n_estimators=feat_n_out,\ max_depth=depth_out, min_samples_split=sample_out,\ min_samples_leaf=sample_leaf_out, min_weight_fraction_leaf=frac_out,\ subsample=subsamp_out, max_leaf_nodes=feature, random_state=7112016) score_test= cross_val_score(clf,titanic_ensemble,ensemble_target,cv=5) if score_test.mean()>score: node_out=feature score_diff=score_test.mean()-score score=score_test.mean() clf=GradientBoostingClassifier(loss=loss_out, learning_rate=rate_out, n_estimators=feat_n_out,\ max_depth=depth_out, min_samples_split=sample_out,\ min_samples_leaf=sample_leaf_out, min_weight_fraction_leaf=frac_out,\ subsample=subsamp_out, max_leaf_nodes=node_out,\ random_state=7112016).fit(titanic_ensemble,ensemble_target) titanic_rf=pd.read_csv('./kaggle_titanic_submission_rf.csv') titanic_gboost=pd.read_csv('./kaggle_titanic_submission_gboost.csv') titanic_svm=pd.read_csv('./kaggle_titanic_submission_svm.csv') titanic_nb=pd.read_csv('./kaggle_titanic_submission_nb.csv') titanic_logit=pd.read_csv('./kaggle_titanic_submission_logit.csv') titanic_ensemble=pd.merge(titanic_rf, titanic_gboost, on='PassengerId') titanic_ensemble.rename(columns={'Survived_x':'rf_pred','Survived_y':'gboost_pred'}, inplace=True) titanic_ensemble=pd.merge(titanic_ensemble, titanic_svm, on='PassengerId') 
titanic_ensemble.rename(columns={'Survived':'svm_pred'}, inplace=True) titanic_ensemble=pd.merge(titanic_ensemble, titanic_nb, on='PassengerId') titanic_ensemble.rename(columns={'Survived':'nb_pred'}, inplace=True) titanic_ensemble=pd.merge(titanic_ensemble, titanic_logit, on='PassengerId') titanic_ensemble.rename(columns={'Survived':'log_pred'}, inplace=True) titanic_ensemble=titanic_ensemble[['rf_pred','gboost_pred','svm_pred', 'nb_pred','log_pred']] titanic_ensemble=titanic_ensemble.values predictions=clf.predict(titanic_ensemble) kaggle=pd.DataFrame({'PassengerId':titanic_rf['PassengerId']}) kaggle['Survived']=predictions kaggle.to_csv('./kaggle_titanic_submission_ensemble.csv', index=False) """ Explanation: Now let's perform the ensembling, using the same method for Gradient Boosting we used earlier End of explanation """
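The notebook above tunes the Gradient Boosting meta-learner one hyperparameter at a time in hand-written loops. With a recent scikit-learn (the notebook itself imports from the older sklearn.cross_validation module), the same idea can be expressed with GridSearchCV, which cross-validates parameter combinations jointly; the grid below is an illustrative assumption, not the notebook's exact search space.

from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV

param_grid = {
    'learning_rate': [0.05, 0.1, 0.2],
    'n_estimators': [100, 300, 500],
    'max_depth': [2, 3, 5],
}
search = GridSearchCV(GradientBoostingClassifier(random_state=7112016),
                      param_grid, cv=5, n_jobs=-1)
# fit on the held-out ensemble features/target built earlier, before those names are reused for the Kaggle test set
search.fit(titanic_ensemble, ensemble_target)
print(search.best_params_, search.best_score_)

Tuning parameters one at a time can miss interactions between them (for example, learning rate and number of estimators trade off against each other), which a joint grid or randomized search handles naturally.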
gboeing/urban-data-science
modules/08-urban-networks-ii/lecture.ipynb
mit
import geopandas as gpd import matplotlib.pyplot as plt import networkx as nx import numpy as np import osmnx as ox import pandana import pandas as pd from shapely.geometry import Point # consistent randomization np.random.seed(0) # configure OSMnx cache_folder = '../../data/cache2' ox.config(log_console=True, use_cache=True, cache_folder=cache_folder) """ Explanation: Urban Networks II Overview of today's topics: - Network modeling and analysis in a study site - Simulating commutes - Network efficiency - Network perturbation - Comparative network analysis - Urban accessibility End of explanation """ # create a study site: geocode city hall, convert coords to shapely geometry, # project geometry to UTM, buffer by 5km, project back to lat-lng latlng_coords = ox.geocode('Los Angeles City Hall') latlng_point = Point(latlng_coords[1], latlng_coords[0]) latlng_point_proj, crs = ox.projection.project_geometry(latlng_point) polygon_proj = latlng_point_proj.buffer(5000) polygon, crs = ox.projection.project_geometry(polygon_proj, crs=crs, to_latlong=True) polygon # model the street network within study site # your parameterization makes assumptions about your interests here G = ox.graph_from_polygon(polygon, network_type='drive', truncate_by_edge=True) fig, ax = ox.plot_graph(G, node_size=0, edge_color='w', edge_linewidth=0.3) # add speeds and travel times G = ox.add_edge_speeds(G) G = ox.add_edge_travel_times(G) # study site area in km^2 polygon_proj.area / 1e6 # how many intersections does it contain? street_counts = pd.Series(dict(G.nodes(data='street_count'))) intersect_count = len(street_counts[street_counts > 2]) intersect_count # what's the intersection density? intersect_count / (polygon_proj.area / 1e6) # now clean up the intersections and re-calculate clean_intersects = ox.consolidate_intersections(ox.project_graph(G), rebuild_graph=False, tolerance=10) clean_intersect_count = len(clean_intersects) clean_intersect_count # what's the cleaned intersection density? clean_intersect_count / (polygon_proj.area / 1e6) """ Explanation: 1. Model a study site First, we will identify a study site, model its street network, and calculate some simple indicators. End of explanation """ od = pd.read_csv('../../data/od.csv').sample(1000) od.shape od # get home/work network nodes home_nodes = ox.get_nearest_nodes(G, X=od['home_lng'], Y=od['home_lat'], method='balltree') work_nodes = ox.get_nearest_nodes(G, X=od['work_lng'], Y=od['work_lat'], method='balltree') def calc_path(G, orig, dest, weight='travel_time'): try: return ox.shortest_path(G, orig, dest, weight) except nx.exception.NetworkXNoPath: # if path cannot be solved return None %%time paths = [calc_path(G, orig, dest) for orig, dest in zip(home_nodes, work_nodes)] len(paths) # filter out any nulls (ie, not successfully solved) paths = [path for path in paths if path is not None] len(paths) # plot 100 routes fig, ax = ox.plot_graph_routes(G, routes=paths[0:100], node_size=0, edge_linewidth=0.2, orig_dest_size=0, route_colors='c', route_linewidth=2, route_alpha=0.2) # now it's your turn # how do these routes change if we minimize distance traveled instead? # what kinds of streets get more/fewer trips assigned to them? """ Explanation: 2. Simulate commutes We'll use a random sample of LEHD LODES data to get home/work coordinates. This is an imperfect proxy for "true" work locations from a payroll enumeration. You can read more about LODES and its limitations here. 
These data are processed in a separate notebook to keep the data easy on your CPU and memory for this lecture. Our trip simulation will use naive assumptions about travel time (e.g., free flow, no congestion, rough imputation of speed limits) for simplicity, but these can be enriched with effort. End of explanation """ def calc_efficiency(G, route, attr='length'): # sum the edge lengths in the route trip_distance = sum(ox.utils_graph.get_route_edge_attributes(G, route=route, attribute=attr)) # fast vectorized great-circle distance calculator gc_distance = ox.distance.great_circle_vec(lat1=G.nodes[route[0]]['y'], lng1=G.nodes[route[0]]['x'], lat2=G.nodes[route[-1]]['y'], lng2=G.nodes[route[-1]]['x']) return gc_distance / trip_distance # calculate each trip's efficiency and make a pandas series trip_efficiency = pd.Series([calc_efficiency(G, path) for path in paths]) # the straight-line distance is what % of each network distance traveled? trip_efficiency trip_efficiency.describe() # now it's your turn # what if i were instead interested in how much longer trips are than straight-line would be? """ Explanation: 3. Network efficiency How "efficient" are our commuter's routes? That is, how does their distance traveled compare to straight-line distances from home to work? End of explanation """ # randomly knock-out 10% of the network's nodes frac = 0.10 n = int(len(G.nodes) * frac) nodes_to_remove = pd.Series(G.nodes).sample(n).index G_per = G.copy() G_per.remove_nodes_from(nodes_to_remove) # get home/work network nodes again, calculate routes, drop nulls home_nodes_per = ox.get_nearest_nodes(G_per, X=od['home_lng'], Y=od['home_lat'], method='balltree') work_nodes_per = ox.get_nearest_nodes(G_per, X=od['work_lng'], Y=od['work_lat'], method='balltree') paths_per = [calc_path(G_per, orig, dest) for orig, dest in zip(home_nodes_per, work_nodes_per)] paths_per = [path for path in paths_per if path is not None] len(paths_per) # calculate each trip's efficiency and make a pandas series trip_efficiency_per = pd.Series([calc_efficiency(G_per, path) for path in paths_per]) trip_efficiency_per.describe() """ Explanation: 4. Network perturbation Oh no! There's been an earthquake! The earthquake has knocked out 10% of the street network. Let's simulate that perturbation and see how routes have to change. End of explanation """ # what % of formerly solvable routes are now unsolvable? 1 - (len(paths_per) / len(paths)) # knocking out x% of the network made (solvable) trips what % less efficient? 1 - (trip_efficiency_per.mean() / trip_efficiency.mean()) # plot n routes apiece, before (cyan) and after (yellow) perturbation n = 100 all_paths = paths[:n] + paths_per[:n] colors = ['c'] * n + ['y'] * n # shuffle the order, so you don't just plot new atop old paths_colors = pd.DataFrame({'path': all_paths, 'color': colors}).sample(frac=1) fig, ax = ox.plot_graph_routes(G, routes=paths_colors['path'], node_size=0, edge_linewidth=0.2, orig_dest_size=0, route_colors=paths_colors['color'], route_linewidth=2, route_alpha=0.3) """ Explanation: How many routes are now disconnected? How did trip efficiency change? End of explanation """ # now it's your turn # use the prompts above to conduct a self-directed analysis of network perturbation # either using elevation/flooding or any of the 3 prompts above """ Explanation: Central LA performs relatively well because it has a relatively dense and gridlike network that offers multiple redundancy options. 
What if you conduct this analysis in a disconnected, dendritic suburb on the urban fringe? What if you model a walkable network rather than a drivable one? What if the network perturbation isn't a spatially random process? Take these questions as prompts for self-paced exercise. For example, let's say the LA river has flooded. Use OSMnx to attach elevations to all the nodes in our street network, then knock-out the 10% at the lowest elevation (ie, around the river). How does that change network characteristics like connectivity and efficiency? Or, model a coastal town Miami Beach, then knock-out the network nodes below some sea-level rise threshold. What happens? What neighborhoods are most affected? What communities live in those vulnerable places? End of explanation """ # study area within 1/2 mile of SF Civic Center latlng_coords = ox.geocode('Civic Center, San Francisco, CA, USA') latlng_point = Point(latlng_coords[1], latlng_coords[0]) latlng_point_proj, crs = ox.projection.project_geometry(latlng_point) polygon_proj = latlng_point_proj.buffer(800) sf_polygon, crs = ox.projection.project_geometry(polygon_proj, crs=crs, to_latlong=True) # get the tracts that intersect the study area polygon tracts = gpd.read_file('../../data/tl_2020_06_tract/').set_index('GEOID') mask = tracts.intersects(sf_polygon) cols = ['ALAND', 'geometry'] sf_tracts = tracts.loc[mask, cols] sf_tracts.head() """ Explanation: 5. Compare places to each other Here we'll model and analyze a set of sub-sites within a study area to compare their characteristics. End of explanation """ # build a custom filter cf1 = '["highway"~"residential|living_street|tertiary|secondary|primary"]' cf2 = '["service"!~"alley|driveway|emergency_access|parking|parking_aisle|private"]' cf3 = '["area"!~"yes"]' custom_filter = cf1 + cf2 + cf3 custom_filter # model the street network across all the study sub-sites G_all = ox.graph_from_polygon(sf_tracts.unary_union, custom_filter=custom_filter) len(G_all.nodes) %%time # calculate clean intersection counts per tract intersect_counts = {} for label, geom in zip(sf_tracts.index, sf_tracts['geometry']): G_tmp = ox.graph_from_polygon(geom, custom_filter=custom_filter) clean_intersects = ox.consolidate_intersections(ox.project_graph(G_tmp), rebuild_graph=False) intersect_counts[label] = len(clean_intersects) # calculate intersection density per km^2 sf_tracts['intersect_count'] = pd.Series(intersect_counts) sf_tracts['intersect_density'] = sf_tracts['intersect_count'] / (sf_tracts['ALAND'] / 1e6) sf_tracts['intersect_density'].describe() # plot the tracts and the network plt.style.use('dark_background') fig, ax = plt.subplots(figsize=(6, 6)) ax.axis('off') ax.set_title('Intersection density (per km2)') ax = sf_tracts.plot(ax=ax, column='intersect_density', cmap='Reds_r', legend=True, legend_kwds={'shrink': 0.8}) fig, ax = ox.plot_graph(G_all, ax=ax, node_size=0, edge_color='#111111') fig.savefig('map.png', dpi=300, facecolor='#111111', bbox_inches='tight') """ Explanation: Let's use a custom filter to model "surface streets." You get to pick what to include and exclude, using the Overpass Query Language. End of explanation """ # now it's your turn # how would you improve this analysis to make it more meaningful and interpretable? """ Explanation: Our simplified, naive assumptions in this analysis have some shortcomings that resulting in analytical problems. How would you improve it? 1. Periphery effects? 2. Incorrect study site sizes? 3. What are we counting and not counting here? 
End of explanation """ # specify some parameters for the analysis walk_time = 20 # max walking horizon in minutes walk_speed = 4.5 # km per hour # model the walkable network within our original study site G_walk = ox.graph_from_polygon(polygon, network_type='walk') fig, ax = ox.plot_graph(G_walk, node_size=0, edge_color='w', edge_linewidth=0.3) # set a uniform walking speed on every edge for u, v, data in G_walk.edges(data=True): data['speed_kph'] = walk_speed G_walk = ox.add_edge_travel_times(G_walk) # extract node/edge GeoDataFrames, retaining only necessary columns (for pandana) nodes = ox.graph_to_gdfs(G_walk, edges=False)[['x', 'y']] edges = ox.graph_to_gdfs(G_walk, nodes=False).reset_index()[['u', 'v', 'travel_time']] # get all the "fresh food" stores on OSM within the study site # you could load any amenities DataFrame, but we'll get ours from OSM tags = {'shop': ['grocery', 'greengrocer', 'supermarket']} amenities = ox.geometries_from_bbox(north=nodes['y'].max(), south=nodes['y'].min(), east=nodes['x'].min(), west=nodes['x'].max(), tags=tags) amenities.shape # construct the pandana network model network = pandana.Network(node_x=nodes['x'], node_y=nodes['y'], edge_from=edges['u'], edge_to=edges['v'], edge_weights=edges[['travel_time']]) # extract (approximate, unprojected) centroids from the amenities' geometries centroids = amenities.centroid # specify a max travel distance for this analysis # then set the amenities' locations on the network maxdist = walk_time * 60 # minutes -> seconds, to match travel_time units network.set_pois(category='grocery', maxdist=maxdist, maxitems=3, x_col=centroids.x, y_col=centroids.y) # calculate travel time to nearest amenity from each node in network distances = network.nearest_pois(distance=maxdist, category='grocery', num_pois=3) distances.astype(int).head() # plot distance to nearest amenity fig, ax = ox.plot_graph(G_walk, node_size=0, edge_linewidth=0.1, edge_color='gray', show=False, close=False) sc = ax.scatter(x=nodes['x'], y=nodes['y'], c=distances[1], s=1, cmap='inferno_r') ax.set_title(f'Walking time to nearest grocery store') plt.colorbar(sc, shrink=0.7).outline.set_edgecolor('none') """ Explanation: 6. Urban accessibility If you're interested in isochrone mapping, see the OSMnx examples for a demonstration. Here, we'll analyze food deserts in central LA using OSMnx and Pandana. Pandana uses contraction hierarchies for imprecise but very fast shortest path calculation. End of explanation """ # set a variable on the network, using the amenities' nodes node_ids = network.get_node_ids(centroids.x, centroids.y) network.set(node_ids, name='grocery') # aggregate the variable to all the nodes in the network # when counting, the decay doesn't matter (but would for summing) access = network.aggregate(distance=maxdist, type='count', decay='linear', name='grocery') # let's cap it at 5, assuming no further utility from a larger choice set access = access.clip(upper=5) access.describe() # plot amenity count within your walking horizon fig, ax = ox.plot_graph(G_walk, node_size=0, edge_linewidth=0.1, edge_color='gray', show=False, close=False) sc = ax.scatter(x=nodes['x'], y=nodes['y'], c=access, s=1, cmap='inferno') ax.set_title(f'Grocery stores within a {walk_time} minute walk') plt.colorbar(sc, shrink=0.7).outline.set_edgecolor('none') # now it's your turn # map walking time to nearest school in our study site, capped at 30 minutes # what kinds of communities have better/worse walking access to schools? 
# see documentation at https://wiki.openstreetmap.org/wiki/Tag:amenity=school """ Explanation: This tells us about the travel time to the nearest amenities, from each node in the network. What if we're instead interested in how many amenities we can reach within our time horizon? End of explanation """
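The closing exercise in the notebook above asks for walking time to the nearest school. A hedged sketch of how that could look with the same pandana pattern already used for grocery stores; it reuses network, nodes, and the OSMnx import from earlier cells, queries OSM's amenity=school tag, and caps the horizon at 30 minutes as the exercise suggests.

# assumes `network`, `nodes` and `ox` from the cells above
tags = {'amenity': 'school'}
schools = ox.geometries_from_bbox(north=nodes['y'].max(), south=nodes['y'].min(),
                                  east=nodes['x'].max(), west=nodes['x'].min(), tags=tags)
school_centroids = schools.centroid   # approximate, unprojected centroids, as in the grocery example

maxdist_school = 30 * 60   # 30-minute cap, in seconds to match the travel_time weights
network.set_pois(category='school', maxdist=maxdist_school, maxitems=1,
                 x_col=school_centroids.x, y_col=school_centroids.y)
school_times = network.nearest_pois(distance=maxdist_school, category='school', num_pois=1)
school_times.describe()

From there, the same scatter-over-the-graph plotting used for groceries (colored by school_times[1]) maps the access surface, and joining it to census tracts would let you ask which communities are underserved.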
khrapovs/metrix
notebooks/computing_ols.ipynb
mit
import numpy as np import matplotlib.pylab as plt import seaborn as sns from numpy.linalg import inv, lstsq sns.set_context('notebook') """ Explanation: Different ways to compute OLS Import all necessary modules End of explanation """ %matplotlib inline """ Explanation: If you want to embed plots inside IPython notebook, you need to turn on this option. End of explanation """ N, S = 100, 1000 """ Explanation: Generate the data Define the number of observations in the sample, $N$, and the number of simulations, $S$. End of explanation """ mean = [0,0] rho = .1 cov = [[1, rho], [rho, 1]] """ Explanation: Parameters of the joint distribution of $X$ and $e$. End of explanation """ alpha, beta = 2, 3 """ Explanation: Regression level and slope. End of explanation """ def simulate_data(mean, cov, alpha, beta, size): X, e = np.random.multivariate_normal(mean, cov, size).T Y = alpha + beta * X + e return Y, X Y, X = simulate_data(mean, cov, alpha, beta, (N, S)) """ Explanation: Simulate $X$ and $e$ from multivariate normal distribution. End of explanation """ def ols_matrix(Y,X): Y = np.matrix(Y).T X = np.matrix(np.vstack((np.ones_like(X), X))).T beta = np.array(inv(X.T * X) * (X.T * Y)) return float(beta[1]) def ols_array(Y,X): X = np.vstack((np.ones_like(X), X)).T Qxx = np.dot(X.T, X) Qxy = np.dot(X.T, Y) beta = np.dot(inv(Qxx), Qxy) return float(beta[1]) def ols_lstsq(Y,X): X = np.vstack((np.ones_like(X), X)).T beta = lstsq(X,Y)[0] return float(beta[1]) """ Explanation: OLS Estimator Define two functions which should return the same slope parameter. The difference is in the implementation. The first one is a bit more natural since it uses NumPy matrices. This allows to use * symbol to multiply matrices. the second uses NumPy arrays instead. This implementation requires the use of dot function. End of explanation """ beta_hat1, beta_hat2, beta_hat3 = [], [], [] """ Explanation: Compute the estimates Now we need to initialize two containers for our estimates in each simulated sample. End of explanation """ %%time for y, x in zip(Y, X): beta_hat1.append(ols_matrix(y, x)) """ Explanation: For each simulated sample we compute the estimate and append it to the list. IPython "magic" %%timeit will show how much time was spent in the current block. End of explanation """ %%time for y, x in zip(Y, X): beta_hat2.append(ols_array(y, x)) %%time for y, x in zip(Y, X): beta_hat3.append(ols_lstsq(y, x)) """ Explanation: Same for the second implimentation through arrays instead of matrices. End of explanation """ print(np.array_equal(beta_hat1, beta_hat2)) print(np.array_equal(beta_hat1, beta_hat3)) print(np.allclose(beta_hat1, beta_hat3, atol=1e-20)) """ Explanation: Analyze the results Are the results equal? End of explanation """ plt.figure(figsize = (12, 6)) plt.hist(beta_hat1, 50, histtype='stepfilled', normed=True, lw=0, alpha=.5, label='Density') plt.axvline(beta, color='red', lw=5, label='True') plt.axvline(np.mean(beta_hat1), color='black', lw=5, label='Mean Estimate') plt.xlabel(r'$\hat{\beta}$') plt.ylabel('%') plt.legend() plt.show() """ Explanation: Plot the histogram. End of explanation """ nobs = [250, 500, 1000, 2000] beta_hat = [] for N in nobs: Y, X = simulate_data(mean, cov, alpha, beta, (N, S)) temp = [] for y, x in zip(Y, X): temp.append(ols_array(y, x)) beta_hat.append(temp) beta_hat = np.array(beta_hat) """ Explanation: Monte Carlo experiment Run estimation for all sample sizes and all simulated data. 
End of explanation """ plt.figure(figsize=(8, 5)) for i in range(len(nobs)): sns.kdeplot(beta_hat[i], alpha=.4, lw=3, shade=True, label=nobs[i]) plt.axvline(beta, color='red', lw=3, label='True') plt.xlabel(r'$\hat{\beta}$') plt.ylabel('%') plt.legend(title='N') plt.show() """ Explanation: Plot densities of the estimates. End of explanation """
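All three implementations above either form inv(X'X) explicitly or call lstsq. As a side note, a fourth variant that solves the normal equations directly with np.linalg.solve is usually preferred over explicit inversion for numerical stability; this sketch follows the same signature as ols_array and should agree with ols_lstsq up to floating-point error.

import numpy as np

def ols_solve(Y, X):
    X = np.vstack((np.ones_like(X), X)).T
    # solve (X'X) beta = X'Y without forming the inverse
    beta = np.linalg.solve(X.T @ X, X.T @ Y)
    return float(beta[1])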
dennys-bd/Coursera-Machine-Learning-Specialization
Course 2 - ML, Regression/week-2-multiple-regression-assignment-1-blank.ipynb
mit
import graphlab """ Explanation: Regression Week 2: Multiple Regression (Interpretation) The goal of this first notebook is to explore multiple regression and feature engineering with existing graphlab functions. In this notebook you will use data on house sales in King County to predict prices using multiple regression. You will: * Use SFrames to do some feature engineering * Use built-in graphlab functions to compute the regression weights (coefficients/parameters) * Given the regression weights, predictors and outcome write a function to compute the Residual Sum of Squares * Look at coefficients and interpret their meanings * Evaluate multiple models via RSS Fire up graphlab create End of explanation """ sales = graphlab.SFrame('kc_house_data.gl/') """ Explanation: Load in house sales data Dataset is from house sales in King County, the region where the city of Seattle, WA is located. End of explanation """ train_data,test_data = sales.random_split(.8,seed=0) """ Explanation: Split data into training and testing. We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you). End of explanation """ example_features = ['sqft_living', 'bedrooms', 'bathrooms'] example_model = graphlab.linear_regression.create(train_data, target = 'price', features = example_features, validation_set = None) """ Explanation: Learning a multiple regression model Recall we can use the following code to learn a multiple regression model predicting 'price' based on the following features: example_features = ['sqft_living', 'bedrooms', 'bathrooms'] on training data with the following code: (Aside: We set validation_set = None to ensure that the results are always the same) End of explanation """ example_weight_summary = example_model.get("coefficients") print example_weight_summary """ Explanation: Now that we have fitted the model we can extract the regression weights (coefficients) as an SFrame as follows: End of explanation """ example_predictions = example_model.predict(train_data) print example_predictions[0] # should be 271789.505878 """ Explanation: Making Predictions In the gradient descent notebook we use numpy to do our regression. In this book we will use existing graphlab create functions to analyze multiple regressions. Recall that once a model is built we can use the .predict() function to find the predicted values for data we pass. For example using the example model above: End of explanation """ def get_residual_sum_of_squares(model, data, outcome): # First get the predictions predictions = model.predict(data) # Then compute the residuals/errors residuals = outcome - predictions # Then square and add them up RSS = sum(residuals*residuals) return(RSS) """ Explanation: Compute RSS Now that we can make predictions given the model, let's write a function to compute the RSS of the model. Complete the function below to calculate RSS given the model, data, and the outcome. End of explanation """ rss_example_train = get_residual_sum_of_squares(example_model, test_data, test_data['price']) print rss_example_train # should be 2.7376153833e+14 """ Explanation: Test your function by computing the RSS on TEST data for the example model: End of explanation """ from math import log """ Explanation: Create some new features Although we often think of multiple regression as including multiple different features (e.g. 
# of bedrooms, squarefeet, and # of bathrooms) but we can also consider transformations of existing features e.g. the log of the squarefeet or even "interaction" features such as the product of bedrooms and bathrooms. You will use the logarithm function to create a new feature. so first you should import it from the math library. End of explanation """ train_data['bedrooms_squared'] = train_data['bedrooms'].apply(lambda x: x**2) test_data['bedrooms_squared'] = test_data['bedrooms'].apply(lambda x: x**2) # create the remaining 3 features in both TEST and TRAIN data train_data['bed_bath_rooms'] = train_data['bedrooms']*train_data['bathrooms'] test_data['bed_bath_rooms'] = test_data['bedrooms']*test_data['bathrooms'] train_data['log_sqft_living'] = train_data['sqft_living'].apply(lambda x: log(x)) test_data['log_sqft_living'] = test_data['sqft_living'].apply(lambda x: log(x)) train_data['lat_plus_long'] = train_data['lat'] + train_data['long'] test_data['lat_plus_long'] = test_data['lat'] + test_data['long'] """ Explanation: Next create the following 4 new features as column in both TEST and TRAIN data: * bedrooms_squared = bedrooms*bedrooms * bed_bath_rooms = bedrooms*bathrooms * log_sqft_living = log(sqft_living) * lat_plus_long = lat + long As an example here's the first one: End of explanation """ print 'bedrooms_squared %f' % round(sum(test_data['bedrooms_squared'])/len(test_data['bedrooms_squared']),2) print 'bed_bath_rooms %f' % round(sum(test_data['bed_bath_rooms'])/len(test_data['bed_bath_rooms']),2) print 'log_sqft_living %f' % round(sum(test_data['log_sqft_living'])/len(test_data['log_sqft_living']),2) print 'lat_plus_long %f' % round(sum(test_data['lat_plus_long'])/len(test_data['lat_plus_long']),2) """ Explanation: Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this feature will mostly affect houses with many bedrooms. bedrooms times bathrooms gives what's called an "interaction" feature. It is large when both of them are large. Taking the log of squarefeet has the effect of bringing large values closer together and spreading out small values. Adding latitude to longitude is totally non-sensical but we will do it anyway (you'll see why) Quiz Question: What is the mean (arithmetic average) value of your 4 new features on TEST data? (round to 2 digits) End of explanation """ model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long'] model_2_features = model_1_features + ['bed_bath_rooms'] model_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long'] """ Explanation: Learning Multiple Models Now we will learn the weights for three (nested) models for predicting house prices. 
The first model will have the fewest features the second model will add one more feature and the third will add a few more: * Model 1: squarefeet, # bedrooms, # bathrooms, latitude & longitude * Model 2: add bedrooms*bathrooms * Model 3: Add log squarefeet, bedrooms squared, and the (nonsensical) latitude + longitude End of explanation """ # Learn the three models: (don't forget to set validation_set = None) model_1 = graphlab.linear_regression.create(train_data, target = 'price', features = model_1_features, validation_set = None) model_2 = graphlab.linear_regression.create(train_data, target = 'price', features = model_2_features, validation_set = None) model_3 = graphlab.linear_regression.create(train_data, target = 'price', features = model_3_features, validation_set = None) # Examine/extract each model's coefficients: print 'model 1' model_1.get("coefficients") print 'model 2' model_2.get("coefficients") print 'model 3' model_3.get("coefficients") """ Explanation: Now that you have the features, learn the weights for the three different models for predicting target = 'price' using graphlab.linear_regression.create() and look at the value of the weights/coefficients: End of explanation """ # Compute the RSS on TRAINING data for each of the three models and record the values: print 'model 1: %.9f model 2: %.9f model 3: %.9f' % (get_residual_sum_of_squares(model_1, train_data, test_data['price']), get_residual_sum_of_squares(model_2, train_data, test_data['price']), get_residual_sum_of_squares(model_3, train_data, test_data['price'])) """ Explanation: Quiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 1? Quiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 2? Think about what this means. Comparing multiple models Now that you've learned three models and extracted the model weights we want to evaluate which model is best. First use your functions from earlier to compute the RSS on TRAINING Data for each of the three models. End of explanation """ # Compute the RSS on TESTING data for each of the three models and record the values: print 'model 1: %.9f model 2: %.9f model 3: %.9f' % (get_residual_sum_of_squares(model_1, test_data, test_data['price']), get_residual_sum_of_squares(model_2, test_data, test_data['price']), get_residual_sum_of_squares(model_3, test_data, test_data['price'])) """ Explanation: Quiz Question: Which model (1, 2 or 3) has lowest RSS on TRAINING Data? Is this what you expected? Now compute the RSS on on TEST data for each of the three models. End of explanation """
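The notebook above depends on GraphLab Create, which is proprietary and has since been discontinued (its successor is Turi Create). For readers without it, a hedged sketch of the same nested-model RSS comparison with scikit-learn; it assumes train_data and test_data have been converted to pandas DataFrames (e.g. via .to_dataframe()) and already contain the engineered columns created above.

from sklearn.linear_model import LinearRegression

def rss(model, X, y):
    residuals = y - model.predict(X)
    return float((residuals ** 2).sum())

model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long']
model_2_features = model_1_features + ['bed_bath_rooms']
model_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long']

for feats in (model_1_features, model_2_features, model_3_features):
    m = LinearRegression().fit(train_data[feats], train_data['price'])
    print(len(feats), 'features, test RSS:', rss(m, test_data[feats], test_data['price']))

The coefficients will not match GraphLab's exactly (its linear_regression.create applies a small L2 penalty by default), but the ranking of the three models by test RSS should tell the same story.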
turbomanage/training-data-analyst
courses/machine_learning/deepdive2/image_classification/solutions/4_tpu_training.ipynb
apache-2.0
import os os.environ["BUCKET"] = "your-bucket-here" """ Explanation: Transfer Learning on TPUs In the <a href="3_tf_hub_transfer_learning.ipynb">previous notebook</a>, we learned how to do transfer learning with TensorFlow Hub. In this notebook, we're going to kick up our training speed with TPUs. Learning Objectives Know how to set up a TPU strategy for training Know how to use a TensorFlow Hub Module when training on a TPU Know how to create and specify a TPU for training First things first. Configure the parameters below to match your own Google Cloud project details. End of explanation """ %%writefile tpu_models/trainer/task.py import argparse import json import os import sys import tensorflow as tf from . import model from . import util def _parse_arguments(argv): """Parses command-line arguments.""" parser = argparse.ArgumentParser() parser.add_argument( '--epochs', help='The number of epochs to train', type=int, default=5) parser.add_argument( '--steps_per_epoch', help='The number of steps per epoch to train', type=int, default=500) parser.add_argument( '--train_path', help='The path to the training data', type=str, default="gs://cloud-ml-data/img/flower_photos/train_set.csv") parser.add_argument( '--eval_path', help='The path to the evaluation data', type=str, default="gs://cloud-ml-data/img/flower_photos/eval_set.csv") parser.add_argument( '--tpu_address', help='The path to the evaluation data', type=str, required=True) parser.add_argument( '--hub_path', help='The path to TF Hub module to use in GCS', type=str, required=True) parser.add_argument( '--job-dir', help='Directory where to save the given model', type=str, required=True) return parser.parse_known_args(argv) def main(): """Parses command line arguments and kicks off model training.""" args = _parse_arguments(sys.argv[1:])[0] # TODO: define a TPU strategy resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=args.tpu_address) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.experimental.TPUStrategy(resolver) with strategy.scope(): train_data = util.load_dataset(args.train_path) eval_data = util.load_dataset(args.eval_path, training=False) image_model = model.build_model(args.job_dir, args.hub_path) model_history = model.train_and_evaluate( image_model, args.epochs, args.steps_per_epoch, train_data, eval_data, args.job_dir) if __name__ == '__main__': main() """ Explanation: Packaging the Model In order to train on a TPU, we'll need to set up a python module for training. The skeleton for this has already been built out in tpu_models with the data processing functions from the pevious lab copied into <a href="tpu_models/trainer/util.py">util.py</a>. Similarly, the model building and training functions are pulled into <a href="tpu_models/trainer/model.py">model.py</a>. This is almost entirely the same as before, except the hub module path is now a variable to be provided by the user. We'll get into why in a bit, but first, let's take a look at the new task.py file. We've added five command line arguments which are standard for cloud training of a TensorFlow model: epochs, steps_per_epoch, train_path, eval_path, and job-dir. There are two new arguments for TPU training: tpu_address and hub_path tpu_address is going to be our TPU name as it appears in Compute Engine Instances. We can specify this name with the ctpu up command. hub_path is going to be a Google Cloud Storage path to a downloaded TensorFlow Hub module. 
The other big difference is some code to deploy our model on a TPU. To begin, we'll set up a TPU Cluster Resolver, which will help tensorflow communicate with the hardware to set up workers for training (more on TensorFlow Cluster Resolvers). Once the resolver connects to and initializes the TPU system, our Tensorflow Graphs can be initialized within a TPU distribution strategy, allowing our TensorFlow code to take full advantage of the TPU hardware capabilities. TODO #1: Set up a TPU strategy End of explanation """ !wget https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4?tf-hub-format=compressed """ Explanation: The TPU server Before we can start training with this code, we need a way to pull in MobileNet. When working with TPUs in the cloud, the TPU will not have access to the VM's local file directory since the TPU worker acts as a server. Because of this all data used by our model must be hosted on an outside storage system such as Google Cloud Storage. This makes caching our dataset especially critical in order to speed up training time. To access MobileNet with these restrictions, we can download a compressed saved version of the model by using the wget command. Adding ?tf-hub-format=compressed at the end of our module handle gives us a download URL. End of explanation """ %%bash rm -r tpu_models/hub mkdir tpu_models/hub tar xvzf 4?tf-hub-format=compressed -C tpu_models/hub/ """ Explanation: This model is still compressed, so lets uncompress it with the tar command below and place it in our tpu_models directory. End of explanation """ !gsutil rm -r gs://$BUCKET/tpu_models !gsutil cp -r tpu_models gs://$BUCKET/tpu_models """ Explanation: Finally, we need to transfer our materials to the TPU. We'll use GCS as a go-between, using gsutil cp to copy everything. End of explanation """ !echo "gsutil cp -r gs://$BUCKET/tpu_models ." """ Explanation: Spinning up a TPU Time to wake up a TPU! Open the Google Cloud Shell and copy the ctpu up command below. Say 'Yes' to the prompts to spin up the TPU. ctpu up --zone=us-central1-b --tf-version=2.1 --name=my-tpu It will take about five minutes to wake up. Then, it should automatically SSH into the TPU, but alternatively Compute Engine Interface can be used to SSH in. You'll know you're running on a TPU when the command line starts with your-username@your-tpu-name. This is a fresh TPU and still needs our code. Run the below cell and copy the output into your TPU terminal to copy your model from your GCS bucket. Don't forget to include the . at the end as it tells gsutil to copy data into the currect directory. End of explanation """ !echo "python3 -m tpu_models.trainer.task \ --tpu_address=my-tpu \ --hub_path=gs://$BUCKET/tpu_models/hub/ \ --job-dir=gs://$BUCKET/flowers_tpu_$(date -u +%y%m%d_%H%M%S)" """ Explanation: Time to shine, TPU! Run the below cell and copy the output into your TPU terminal. Training will be slow at first, but it will pick up speed after a few minutes once the Tensorflow graph has been built out. TODO #2 and #3: Specify the tpu_address and hub_path End of explanation """
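The model.py module referenced above is not reproduced in this notebook. As a rough sketch of how a downloaded hub module path can be consumed for transfer learning (the argument names and the 5-class default below are illustrative assumptions, not taken from the actual module), one could wrap it in a hub.KerasLayer with a small classification head:

import tensorflow as tf
import tensorflow_hub as hub

def build_model(output_dir, hub_handle, num_classes=5):
    # Wrap the downloaded TF Hub feature-vector module as a frozen Keras layer
    # and attach a softmax classification head for the flower classes.
    model = tf.keras.Sequential([
        hub.KerasLayer(hub_handle, trainable=False),
        tf.keras.layers.Dense(num_classes, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

Because the TPU workers cannot read the VM's local disk, hub_handle here would point at the GCS copy of the module (the gs://.../tpu_models/hub/ path passed as --hub_path), not a local directory.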
Kyubyong/numpy_exercises
2_Array_manipulation_routines_Solutions.ipynb
mit
import numpy as np np.__version__ """ Explanation: Array manipulation routines End of explanation """ x = np.ones([10, 10, 3]) out = np.reshape(x, [-1, 150]) print out assert np.allclose(out, np.ones([10, 10, 3]).reshape([-1, 150])) """ Explanation: Q1. Let x be a ndarray [10, 10, 3] with all elements set to one. Reshape x so that the size of the second dimension equals 150. End of explanation """ x = np.array([[1, 2, 3], [4, 5, 6]]) out1 = np.ravel(x, order='F') out2 = x.flatten(order="F") assert np.allclose(out1, out2) print out1 """ Explanation: Q2. Let x be array [[1, 2, 3], [4, 5, 6]]. Convert it to [1 4 2 5 3 6]. End of explanation """ x = np.array([[1, 2, 3], [4, 5, 6]]) out1 = x.flat[4] out2 = np.ravel(x)[4] assert np.allclose(out1, out2) print out1 """ Explanation: Q3. Let x be array [[1, 2, 3], [4, 5, 6]]. Get the 5th element. End of explanation """ x = np.zeros((3, 4, 5)) out1 = np.swapaxes(x, 1, 0) out2 = x.transpose([1, 0, 2]) assert out1.shape == out2.shape print out1.shape """ Explanation: Q4. Let x be an arbitrary 3-D array of shape (3, 4, 5). Permute the dimensions of x such that the new shape will be (4,3,5). End of explanation """ x = np.zeros((3, 4)) out1 = np.swapaxes(x, 1, 0) out2 = x.transpose() out3 = x.T assert out1.shape == out2.shape == out3.shape print out1.shape """ Explanation: Q5. Let x be an arbitrary 2-D array of shape (3, 4). Permute the dimensions of x such that the new shape will be (4,3). End of explanation """ x = np.zeros((3, 4)) print np.expand_dims(x, axis=1).shape """ Explanation: Q5. Let x be an arbitrary 2-D array of shape (3, 4). Insert a nex axis such that the new shape will be (3, 1, 4). End of explanation """ x = np.zeros((3, 4, 1)) print np.squeeze(x).shape """ Explanation: Q6. Let x be an arbitrary 3-D array of shape (3, 4, 1). Remove a single-dimensional entries such that the new shape will be (3, 4). End of explanation """ x = np.array([[1, 2, 3], [4, 5, 6]]) y = np.array([[7, 8, 9], [10, 11, 12]]) out1 = np.concatenate((x, y), 1) out2 = np.hstack((x, y)) assert np.allclose(out1, out2) print out2 """ Explanation: Q7. Lex x be an array <br/> [[ 1 2 3]<br/> [ 4 5 6].<br/><br/> and y be an array <br/> [[ 7 8 9]<br/> [10 11 12]].<br/> Concatenate x and y so that a new array looks like <br/>[[1, 2, 3, 7, 8, 9], <br/>[4, 5, 6, 10, 11, 12]]. End of explanation """ x = np.array([[1, 2, 3], [4, 5, 6]]) y = np.array([[7, 8, 9], [10, 11, 12]]) out1 = np.concatenate((x, y), 0) out2 = np.vstack((x, y)) assert np.allclose(out1, out2) print out2 """ Explanation: Q8. Lex x be an array <br/> [[ 1 2 3]<br/> [ 4 5 6].<br/><br/> and y be an array <br/> [[ 7 8 9]<br/> [10 11 12]].<br/> Concatenate x and y so that a new array looks like <br/>[[ 1 2 3]<br/> [ 4 5 6]<br/> [ 7 8 9]<br/> [10 11 12]] End of explanation """ x = np.array((1,2,3)) y = np.array((4,5,6)) out1 = np.column_stack((x, y)) out2 = np.squeeze(np.dstack((x, y))) out3 = np.vstack((x, y)).T assert np.allclose(out1, out2) assert np.allclose(out2, out3) print out1 """ Explanation: Q8. Let x be an array [1 2 3] and y be [4 5 6]. Convert it to [[1, 4], [2, 5], [3, 6]]. End of explanation """ x = np.array([[1],[2],[3]]) y = np.array([[4],[5],[6]]) out = np.dstack((x, y)) print out """ Explanation: Q9. Let x be an array [[1],[2],[3]] and y be [[4], [5], [6]]. Convert x to [[[1, 4]], [[2, 5]], [[3, 6]]]. End of explanation """ x = np.arange(1, 10) print np.split(x, [4, 6]) """ Explanation: Q10. Let x be an array [1, 2, 3, ..., 9]. 
Split x into 3 arrays, each of which has 4, 2, and 3 elements in the original order. End of explanation """ x = np.arange(16).reshape(2, 2, 4) out1 = np.split(x, [3],axis=2) out2 = np.dsplit(x, [3]) assert np.allclose(out1[0], out2[0]) assert np.allclose(out1[1], out2[1]) print out1 """ Explanation: Q11. Let x be an array<br/> [[[ 0., 1., 2., 3.],<br/> [ 4., 5., 6., 7.]],<br/> [[ 8., 9., 10., 11.],<br/> [ 12., 13., 14., 15.]]].<br/> Split it into two such that the first array looks like<br/> [[[ 0., 1., 2.],<br/> [ 4., 5., 6.]],<br/> [[ 8., 9., 10.],<br/> [ 12., 13., 14.]]].<br/> and the second one look like:<br/> [[[ 3.],<br/> [ 7.]],<br/> [[ 11.],<br/> [ 15.]]].<br/> End of explanation """ x = np.arange(16).reshape((4, 4)) out1 = np.hsplit(x, 2) out2 = np.split(x, 2, 1) assert np.allclose(out1[0], out2[0]) assert np.allclose(out1[1], out2[1]) print out1 """ Explanation: Q12. Let x be an array <br /> [[ 0., 1., 2., 3.],<br> [ 4., 5., 6., 7.],<br> [ 8., 9., 10., 11.],<br> [ 12., 13., 14., 15.]].<br> Split it into two arrays along the second axis. End of explanation """ x = np.arange(16).reshape((4, 4)) out1 = np.vsplit(x, 2) out2 = np.split(x, 2, 0) assert np.allclose(out1[0], out2[0]) assert np.allclose(out1[1], out2[1]) print out1 """ Explanation: Q13. Let x be an array <br /> [[ 0., 1., 2., 3.],<br> [ 4., 5., 6., 7.],<br> [ 8., 9., 10., 11.],<br> [ 12., 13., 14., 15.]].<br> Split it into two arrays along the first axis. End of explanation """ x = np.array([0, 1, 2]) out1 = np.tile(x, [2, 2]) out2 = np.resize(x, [2, 6]) assert np.allclose(out1, out2) print out1 """ Explanation: Q14. Let x be an array [0, 1, 2]. Convert it to <br/> [[0, 1, 2, 0, 1, 2],<br/> [0, 1, 2, 0, 1, 2]]. End of explanation """ x = np.array([0, 1, 2]) print np.repeat(x, 2) """ Explanation: Q15. Let x be an array [0, 1, 2]. Convert it to <br/> [0, 0, 1, 1, 2, 2]. End of explanation """ x = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) out = np.trim_zeros(x) print out """ Explanation: Q16. Let x be an array [0, 0, 0, 1, 2, 3, 0, 2, 1, 0].<br/> remove the leading the trailing zeros. End of explanation """ x = np.array([2, 2, 1, 5, 4, 5, 1, 2, 3]) u, indices = np.unique(x, return_counts=True) print u, indices """ Explanation: Q17. Let x be an array [2, 2, 1, 5, 4, 5, 1, 2, 3]. Get two arrays of unique elements and their counts. End of explanation """ x = np.array([[1,2], [3,4]]) out1 = np.fliplr(x) out2 = x[:, ::-1] assert np.allclose(out1, out2) print out1 """ Explanation: Q18. Lex x be an array <br/> [[ 1 2]<br/> [ 3 4].<br/> Flip x along the second axis. End of explanation """ x = np.array([[1,2], [3,4]]) out1 = np.flipud(x) out2 = x[::-1, :] assert np.allclose(out1, out2) print out1 """ Explanation: Q19. Lex x be an array <br/> [[ 1 2]<br/> [ 3 4].<br/> Flip x along the first axis. End of explanation """ x = np.array([[1,2], [3,4]]) out = np.rot90(x) print out """ Explanation: Q20. Lex x be an array <br/> [[ 1 2]<br/> [ 3 4].<br/> Rotate x 90 degrees counter-clockwise. End of explanation """ x = np.arange(1, 9).reshape([2, 4]) print np.roll(x, 1, axis=1) """ Explanation: Q21 Lex x be an array <br/> [[ 1 2 3 4]<br/> [ 5 6 7 8].<br/> Shift elements one step to right along the second axis. End of explanation """
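As a quick sanity check on the convention used in the Q10 solution above (the second argument to np.split gives split positions, not section sizes), the resulting section lengths can be verified directly:

x = np.arange(1, 10)
parts = np.split(x, [4, 6])
assert [len(p) for p in parts] == [4, 2, 3]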
tpin3694/tpin3694.github.io
machine-learning/replace_characters.ipynb
mit
# Import library import re """ Explanation: Title: Replace Characters Slug: replace_characters Summary: How to remove characters to clean unstructured text data for machine learning in Python. Date: 2016-09-06 12:00 Category: Machine Learning Tags: Preprocessing Text Authors: Chris Albon Preliminaries End of explanation """ # Create text text_data = ['Interrobang. By Aishwarya Henriette', 'Parking And Going. By Karl Gautier', 'Today Is The night. By Jarek Prakash'] """ Explanation: Create Text End of explanation """ # Remove periods remove_periods = [string.replace('.', '') for string in text_data] # Show text remove_periods """ Explanation: Replace Character (Method 1) End of explanation """ # Create function def replace_letters_with_X(string: str) -> str: return re.sub(r'[a-zA-Z]', 'X', string) # Apply function [replace_letters_with_X(string) for string in remove_periods] """ Explanation: Replace Character (Method 2) End of explanation """
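The same periods can also be stripped with the re module that was imported above; this regex variant is a small alternative to Method 1, not part of the original recipe:

# Remove periods using a regular expression instead of str.replace
remove_periods_regex = [re.sub(r'\.', '', string) for string in text_data]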
pglauner/misc
src/cs730/1_notmnist.ipynb
gpl-2.0
# These are all the modules we'll be using later. Make sure you can import them # before proceeding further. from __future__ import print_function import matplotlib.pyplot as plt import numpy as np import os import sys import tarfile from IPython.display import display, Image from scipy import ndimage from sklearn.linear_model import LogisticRegression from six.moves.urllib.request import urlretrieve from six.moves import cPickle as pickle """ Explanation: Deep Learning Assignment 1 The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later. This notebook uses the notMNIST dataset to be used with python experiments. This dataset is designed to look like the classic MNIST dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST. End of explanation """ url = 'http://yaroslavvb.com/upload/notMNIST/' def maybe_download(filename, expected_bytes, force=False): """Download a file if not present, and make sure it's the right size.""" if force or not os.path.exists(filename): filename, _ = urlretrieve(url + filename, filename) statinfo = os.stat(filename) if statinfo.st_size == expected_bytes: print('Found and verified', filename) else: raise Exception( 'Failed to verify' + filename + '. Can you get to it with a browser?') return filename train_filename = maybe_download('notMNIST_large.tar.gz', 247336696) test_filename = maybe_download('notMNIST_small.tar.gz', 8458043) """ Explanation: First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the testset 19000 labelled examples. Given these sizes, it should be possible to train models quickly on any machine. End of explanation """ num_classes = 10 np.random.seed(133) def maybe_extract(filename, force=False): root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz if os.path.isdir(root) and not force: # You may override by setting force=True. print('%s already present - Skipping extraction of %s.' % (root, filename)) else: print('Extracting data for %s. This may take a while. Please wait.' % root) tar = tarfile.open(filename) sys.stdout.flush() tar.extractall() tar.close() data_folders = [ os.path.join(root, d) for d in sorted(os.listdir(root)) if os.path.isdir(os.path.join(root, d))] if len(data_folders) != num_classes: raise Exception( 'Expected %d folders, one per class. Found %d instead.' % ( num_classes, len(data_folders))) print(data_folders) return data_folders train_folders = maybe_extract(train_filename) test_folders = maybe_extract(test_filename) """ Explanation: Extract the dataset from the compressed .tar.gz file. This should give you a set of directories, labelled A through J. End of explanation """ image_size = 28 # Pixel width and height. pixel_depth = 255.0 # Number of levels per pixel. 
def load_letter(folder, min_num_images): """Load the data for a single letter label.""" image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size), dtype=np.float32) image_index = 0 print(folder) for image in os.listdir(folder): image_file = os.path.join(folder, image) try: image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth if image_data.shape != (image_size, image_size): raise Exception('Unexpected image shape: %s' % str(image_data.shape)) dataset[image_index, :, :] = image_data image_index += 1 except IOError as e: print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') num_images = image_index dataset = dataset[0:num_images, :, :] if num_images < min_num_images: raise Exception('Many fewer images than expected: %d < %d' % (num_images, min_num_images)) print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] for folder in data_folders: set_filename = folder + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: # You may override by setting force=True. print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' % set_filename) dataset = load_letter(folder, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names train_datasets = maybe_pickle(train_folders, 45000) test_datasets = maybe_pickle(test_folders, 1800) """ Explanation: Problem 1 Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display. Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size. We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road. A few images might not be readable, we'll just skip them. End of explanation """ import matplotlib.pyplot as plt J = pickle.load( open( 'notMNIST_small/J.pickle', "rb" ) ) plt.imshow(J[200]) plt.show() """ Explanation: Problem 2 Let's verify that the data still looks good. Displaying a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot. End of explanation """ for l in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']: letters = pickle.load( open( 'notMNIST_small/{0}.pickle'.format(l), "rb" ) ) print('{0}: {1}'.format(l, len(letters))) """ Explanation: Problem 3 Another check: we expect the data to be balanced across classes. Verify that. 
End of explanation """ def make_arrays(nb_rows, img_size): if nb_rows: dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32) labels = np.ndarray(nb_rows, dtype=np.int32) else: dataset, labels = None, None return dataset, labels def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class+tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) # let's shuffle the letters to have random validation and training set np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, :] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels train_size = 200000 valid_size = 10000 test_size = 10000 valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets( train_datasets, train_size, valid_size) _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size) print('Training:', train_dataset.shape, train_labels.shape) print('Validation:', valid_dataset.shape, valid_labels.shape) print('Testing:', test_dataset.shape, test_labels.shape) """ Explanation: Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9. Also create a validation dataset for hyperparameter tuning. End of explanation """ def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation,:,:] shuffled_labels = labels[permutation] return shuffled_dataset, shuffled_labels train_dataset, train_labels = randomize(train_dataset, train_labels) test_dataset, test_labels = randomize(test_dataset, test_labels) valid_dataset, valid_labels = randomize(valid_dataset, valid_labels) """ Explanation: Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match. End of explanation """ pickle_file = 'notMNIST.pickle' try: f = open(pickle_file, 'wb') save = { 'train_dataset': train_dataset, 'train_labels': train_labels, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels, 'test_dataset': test_dataset, 'test_labels': test_labels, } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise statinfo = os.stat(pickle_file) print('Compressed pickle size:', statinfo.st_size) """ Explanation: Problem 4 Convince yourself that the data is still good after shuffling! 
Finally, let's save the data for later reuse: End of explanation """ for n in [50, 100, 1000, 5000, len(train_dataset)]: clf = LogisticRegression(solver='newton-cg') clf.fit([train_dataset[i].flatten() for i in xrange(n)], train_labels[:n]) res = clf.predict([x.flatten() for x in valid_dataset]) print(sum(res != valid_labels)) len(train_dataset) """ Explanation: Problem 5 By construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but are actually ok if you expect to see training samples recur when you use it. Measure how much overlap there is between training, validation and test samples. Optional questions: - What about near duplicates between datasets? (images that are almost identical) - Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments. Problem 6 Let's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that it's a problem that is not so trivial that a canned solution solves it. Train a simple model on this data using 50, 100, 1000 and 5000 training samples. Hint: you can use the LogisticRegression model from sklearn.linear_model. Optional question: train an off-the-shelf model on all the data! End of explanation """
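For Problem 5, one possible sketch of the overlap measurement is to hash the raw bytes of each image and intersect the resulting sets. This only catches exact duplicates (near-duplicates would need a fuzzier comparison), and it is a suggestion rather than the assignment's prescribed method:

import hashlib

def image_hashes(dataset):
    # Hash each 28x28 image so that identical images map to the same key.
    return set(hashlib.sha1(img.tobytes()).hexdigest() for img in dataset)

train_hashes = image_hashes(train_dataset)
valid_hashes = image_hashes(valid_dataset)
test_hashes = image_hashes(test_dataset)
print('train/validation overlap:', len(train_hashes & valid_hashes))
print('train/test overlap:', len(train_hashes & test_hashes))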
tpin3694/tpin3694.github.io
sql/select_rows_that_contain_an_item_in_a_list.ipynb
mit
# Ignore %load_ext sql %sql sqlite:// %config SqlMagic.feedback = False """ Explanation: Title: Select Rows That Contain An Item In A List Slug: select_rows_that_contain_an_item_in_a_list Summary: Select rows that contain an item in a list. Date: 2016-05-01 12:00 Category: SQL Tags: Basics Authors: Chris Albon Note: This tutorial was written using Catherine Devlin's SQL in Jupyter Notebooks library. If you are not using a Jupyter Notebook, you can ignore the two lines of code below and any line containing %%sql. Furthermore, this tutorial uses SQLite's flavor of SQL; your version might have some differences in syntax. For more, check out Learning SQL by Alan Beaulieu. End of explanation """ %%sql -- Create a table of criminals CREATE TABLE criminals (pid, name, age, sex, city, minor); INSERT INTO criminals VALUES (412, 'James Smith', 15, 'M', 'Santa Rosa', 1); INSERT INTO criminals VALUES (234, 'Bill James', 22, 'M', 'Santa Rosa', 0); INSERT INTO criminals VALUES (632, 'Stacy Miller', 23, 'F', 'Santa Rosa', 0); INSERT INTO criminals VALUES (621, 'Betty Bob', NULL, 'F', 'Petaluma', 1); INSERT INTO criminals VALUES (162, 'Jaden Ado', 49, 'M', NULL, 0); INSERT INTO criminals VALUES (901, 'Gordon Ado', 32, 'F', 'Santa Rosa', 0); INSERT INTO criminals VALUES (512, 'Bill Byson', 21, 'M', 'Santa Rosa', 0); INSERT INTO criminals VALUES (411, 'Bob Iton', NULL, 'M', 'San Francisco', 0); """ Explanation: Create Data End of explanation """ %%sql -- Select everything SELECT * -- From the table 'criminals' FROM criminals -- Where the city is any of these cities WHERE city IN ('Santa Rosa', 'Petaluma'); """ Explanation: Select Rows That Contain An Item In A List End of explanation """
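A complementary query (not part of the original recipe) selects the rows whose city is not in the list; note that rows with a NULL city are returned by neither IN nor NOT IN, because comparisons with NULL evaluate to unknown:

%%sql
-- Select rows whose city is not in the list
SELECT *
FROM criminals
WHERE city NOT IN ('Santa Rosa', 'Petaluma');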
destrys/euler
python/notebooks/Problem1.ipynb
mit
%psource p1.natural_3and5_brute from destryseuler import p1 p1.natural_3and5_brute?? %pfile ../destryseuler/p1.py def natural3and5Brute(upper=1000): output = 0 for i in range(1,upper): if i % 3 == 0: output += i continue if i % 5 == 0: output += i continue return output natural3and5Brute() """ Explanation: Problem 1 If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below 1000. Brute Force End of explanation """ import math def natural3and5(upper=1000): floor3 = math.floor((upper-1)/3) floor5 = math.floor((upper-1)/5) floor15 = math.floor((upper-1)/15) output = 3 * (floor3 * (floor3+1))/2 + 5 * (floor5 * (floor5 + 1))/2 - 15 * (floor15 * (floor15 + 1))/2 return output """ Explanation: I believe this has O(n) complexity. More Elegant If $n$ is the maximum value, the sum of the multiples can be written as: $$(3 + 6 + 9 + ... + \left \lfloor{\frac{n}{3}}\right \rfloor \times 3 ) + (5 + 10 + 15 + ... + \left \lfloor{\frac{n}{5}}\right \rfloor \times 5) - (15 + 30 + 45 + ... + \left \lfloor{\frac{n}{15}}\right \rfloor \times 15)$$ where $\left \lfloor{x}\right \rfloor$ is the floor function, returning the largest integer not greater than $x$. The first term is the sum of the natural numbers divisible by 3, the second term is the sum of the numbers divisible by 5, and the last term removes the multiples of 15 that are counted in both of the first two terms. We can rewrite this as $$3 \times (1 + 2 + 3 + ... + \left \lfloor{\frac{n}{3}}\right \rfloor) + 5 \times (1 + 2 + 3 + ... + \left \lfloor{\frac{n}{5}}\right \rfloor) - 15 \times (1 + 2 + 3 + ... + \left \lfloor{\frac{n}{15}}\right \rfloor)$$ Now each term contains a sum of sequential integers, which is easily calculated: $$ 3 \times \frac{\left \lfloor{\frac{n}{3}}\right \rfloor (\left \lfloor{\frac{n}{3}}\right \rfloor +1)}{2} + 5 \times \frac{\left \lfloor{\frac{n}{5}}\right \rfloor (\left \lfloor{\frac{n}{5}}\right \rfloor +1)}{2} - 15 \times \frac{\left \lfloor{\frac{n}{15}}\right \rfloor (\left \lfloor{\frac{n}{15}}\right \rfloor +1)}{2} $$ End of explanation """
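A quick consistency check between the two implementations (not part of the original notebook; it uses only the functions defined above) confirms they agree, including the well-known value 233168 for the default limit of 1000:

for n in (10, 100, 1000, 10000):
    assert natural3and5Brute(n) == natural3and5(n)
assert natural3and5Brute() == 233168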
quantopian/research_public
notebooks/lectures/Introduction_to_Research/notebook.ipynb
apache-2.0
2 + 2 """ Explanation: Introduction to the Research Environment The research environment is powered by IPython notebooks, which allow one to perform a great deal of data analysis and statistical validation. We'll demonstrate a few simple techniques here. Code Cells vs. Text Cells As you can see, each cell can be either code or text. To select between them, choose from the 'Cell Type' dropdown menu on the top left. Executing a Command A code cell will be evaluated when you press play, or when you press the shortcut, shift-enter. Evaluating a cell evaluates each line of code in sequence, and prints the results of the last line below the cell. End of explanation """ X = 2 """ Explanation: Sometimes there is no result to be printed, as is the case with assignment. End of explanation """ 2 + 2 3 + 3 """ Explanation: Remember that only the result from the last line is printed. End of explanation """ print 2 + 2 3 + 3 """ Explanation: However, you can print whichever lines you want using the print statement. End of explanation """ #Take some time to run something c = 0 for i in range(10000000): c = c + i c """ Explanation: Knowing When a Cell is Running While a cell is running, a [*] will display on the left. When a cell has yet to be executed, [ ] will display. When it has been run, a number will display indicating the order in which it was run during the execution of the notebook [5]. Try on this cell and note it happening. End of explanation """ import numpy as np import pandas as pd # This is a plotting library for pretty pictures. import matplotlib.pyplot as plt """ Explanation: Importing Libraries The vast majority of the time, you'll want to use functions from pre-built libraries. You can't import every library on Quantopian due to security issues, but you can import most of the common scientific ones. Here I import numpy and pandas, the two most common and useful libraries in quant finance. I recommend copying this import statement to every new notebook. Notice that you can rename libraries to whatever you want after importing. The as statement allows this. Here we use np and pd as aliases for numpy and pandas. This is a very common aliasing and will be found in most code snippets around the web. The point behind this is to allow you to type fewer characters when you are frequently accessing these libraries. End of explanation """ np.random. """ Explanation: Tab Autocomplete Pressing tab will give you a list of IPython's best guesses for what you might want to type next. This is incredibly valuable and will save you a lot of time. If there is only one possible option for what you could type next, IPython will fill that in for you. Try pressing tab very frequently, it will seldom fill in anything you don't want, as if there is ambiguity a list will be shown. This is a great way to see what functions are available in a library. Try placing your cursor after the . and pressing tab. End of explanation """ np.random.normal? """ Explanation: Getting Documentation Help Placing a question mark after a function and executing that line of code will give you the documentation IPython has for that function. It's often best to do this in a new cell, as you avoid re-executing other code and running into bugs. End of explanation """ # Sample 100 points with a mean of 0 and an std of 1. This is a standard normal distribution. X = np.random.normal(0, 1, 100) """ Explanation: Sampling We'll sample some random data using a function from numpy. 
End of explanation """ plt.plot(X) """ Explanation: Plotting We can use the plotting library we imported as follows. End of explanation """ plt.plot(X); """ Explanation: Squelching Line Output You might have noticed the annoying line of the form [&lt;matplotlib.lines.Line2D at 0x7f72fdbc1710&gt;] before the plots. This is because the .plot function actually produces output. Sometimes we wish not to display output, we can accomplish this with the semi-colon as follows. End of explanation """ X = np.random.normal(0, 1, 100) X2 = np.random.normal(0, 1, 100) plt.plot(X); plt.plot(X2); plt.xlabel('Time') # The data we generated is unitless, but don't forget units in general. plt.ylabel('Returns') plt.legend(['X', 'X2']); """ Explanation: Adding Axis Labels No self-respecting quant leaves a graph without labeled axes. Here are some commands to help with that. End of explanation """ np.mean(X) np.std(X) """ Explanation: Generating Statistics Let's use numpy to take some simple statistics. End of explanation """ data = get_pricing('MSFT', start_date='2012-1-1', end_date='2015-6-1') """ Explanation: Getting Real Pricing Data Randomly sampled data can be great for testing ideas, but let's get some real data. We can use get_pricing to do that. You can use the ? syntax as discussed above to get more information on get_pricing's arguments. End of explanation """ data """ Explanation: Our data is now a dataframe. You can see the datetime index and the colums with different pricing data. End of explanation """ X = data['price'] """ Explanation: This is a pandas dataframe, so we can index in to just get price like this. For more info on pandas, please click here. End of explanation """ plt.plot(X.index, X.values) plt.ylabel('Price') plt.legend(['MSFT']); """ Explanation: Because there is now also date information in our data, we provide two series to .plot. X.index gives us the datetime index, and X.values gives us the pricing values. These are used as the X and Y coordinates to make a graph. End of explanation """ np.mean(X) np.std(X) """ Explanation: We can get statistics again on real data. End of explanation """ R = X.pct_change()[1:] """ Explanation: Getting Returns from Prices We can use the pct_change function to get returns. Notice how we drop the first element after doing this, as it will be NaN (nothing -> something results in a NaN percent change). End of explanation """ plt.hist(R, bins=20) plt.xlabel('Return') plt.ylabel('Frequency') plt.legend(['MSFT Returns']); """ Explanation: We can plot the returns distribution as a histogram. End of explanation """ np.mean(R) np.std(R) """ Explanation: Get statistics again. End of explanation """ plt.hist(np.random.normal(np.mean(R), np.std(R), 10000), bins=20) plt.xlabel('Return') plt.ylabel('Frequency') plt.legend(['Normally Distributed Returns']); """ Explanation: Now let's go backwards and generate data out of a normal distribution using the statistics we estimated from Microsoft's returns. We'll see that we have good reason to suspect Microsoft's returns may not be normal, as the resulting normal distribution looks far different. End of explanation """ # Take the average of the last 60 days at each timepoint. MAVG = pd.rolling_mean(X, window=60) plt.plot(X.index, X.values) plt.plot(MAVG.index, MAVG.values) plt.ylabel('Price') plt.legend(['MSFT', '60-day MAVG']); """ Explanation: Generating a Moving Average pandas has some nice tools to allow us to generate rolling statistics. Here's an example. 
Notice how there's no moving average for the first 60 days, as we don't have 60 days of data on which to generate the statistic. End of explanation """
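One caveat for readers running this lecture on a current Python stack: pd.rolling_mean has since been removed from pandas, so on recent versions the same 60-day moving average would be computed through the Series rolling method instead:

# Equivalent of pd.rolling_mean(X, window=60) in modern pandas
MAVG = X.rolling(window=60).mean()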
adrianstaniec/deep-learning
03_intro-to-tflearn/TFLearn_Sentiment_Analysis.ipynb
mit
import pandas as pd import numpy as np import tensorflow as tf import tflearn from tflearn.data_utils import to_categorical """ Explanation: Sentiment analysis with TFLearn In this notebook, we'll continue Andrew Trask's work by building a network for sentiment analysis on the movie review data. Instead of a network written with Numpy, we'll be using TFLearn, a high-level library built on top of TensorFlow. TFLearn makes it simpler to build networks just by defining the layers. It takes care of most of the details for you. We'll start off by importing all the modules we'll need, then load and prepare the data. End of explanation """ reviews = pd.read_csv('reviews.txt', header=None) labels = pd.read_csv('labels.txt', header=None) """ Explanation: Preparing the data Following along with Andrew, our goal here is to convert our reviews into word vectors. The word vectors will have elements representing words in the total vocabulary. If the second position represents the word 'the', for each review we'll count up the number of times 'the' appears in the text and set the second position to that count. I'll show you examples as we build the input data from the reviews data. Check out Andrew's notebook and video for more about this. Read the data Use the pandas library to read the reviews and postive/negative labels from comma-separated files. The data we're using has already been preprocessed a bit and we know it uses only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like The, the, and THE, all the same way. End of explanation """ reviews[0][1] from collections import Counter total_counts = Counter() for _, review in reviews.iterrows(): total_counts.update([s.lower() for s in review[0].split(' ')]) print("Total words in data set: ", len(total_counts)) """ Explanation: Counting word frequency To start off we'll need to count how often each word appears in the data. We'll use this count to create a vocabulary we'll use to encode the review data. This resulting count is known as a bag of words. We'll use it to select our vocabulary and build the word vectors. You should have seen how to do this in Andrew's lesson. Try to implement it here using the Counter class. Exercise: Create the bag of words from the reviews data and assign it to total_counts. The reviews are stores in the reviews Pandas DataFrame. If you want the reviews as a Numpy array, use reviews.values. You can iterate through the rows in the DataFrame with for idx, row in reviews.iterrows(): (documentation). When you break up the reviews into words, use .split(' ') instead of .split() so your results match ours. End of explanation """ vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000] print(vocab[:60]) """ Explanation: Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words. End of explanation """ print(vocab[-1], ': ', total_counts[vocab[-1]]) """ Explanation: What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words. 
End of explanation """ word2idx = {word: number for number, word in enumerate(vocab)} word2idx """ Explanation: The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words. Note: When you run, you may see a different word from the one shown above, but it will also have the value 30. That's because there are many words tied for that number of counts, and the Counter class does not guarantee which one will be returned in the case of a tie. Now for each review in the data, we'll make a word vector. First we need to make a mapping of word to index, pretty easy to do with a dictionary comprehension. Exercise: Create a dictionary called word2idx that maps each word in the vocabulary to an index. The first word in vocab has index 0, the second word has index 1, and so on. End of explanation """ def text_to_vector(text): vector = np.zeros(len(word2idx)) idxs = [word2idx[word.lower()] for word in text.split(' ') if word in word2idx.keys()] for i in idxs: vector[i] += 1 return vector """ Explanation: Text to vector function Now we can write a function that converts a some text to a word vector. The function will take a string of words as input and return a vector with the words counted up. Here's the general algorithm to do this: Initialize the word vector with np.zeros, it should be the length of the vocabulary. Split the input string of text into a list of words with .split(' '). Again, if you call .split() instead, you'll get slightly different results than what we show here. For each word in that list, increment the element in the index associated with that word, which you get from word2idx. Note: Since all words aren't in the vocab dictionary, you'll get a key error if you run into one of those words. You can use the .get method of the word2idx dictionary to specify a default returned value when you make a key error. For example, word2idx.get(word, None) returns None if word doesn't exist in the dictionary. End of explanation """ text_to_vector('The tea is for a party to celebrate ' 'the movie so she has no time for a cake')[:65] """ Explanation: If you do this right, the following code should return ``` text_to_vector('The tea is for a party to celebrate ' 'the movie so she has no time for a cake')[:65] array([0, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0]) ``` End of explanation """ word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_) for ii, (_, text) in enumerate(reviews.iterrows()): word_vectors[ii] = text_to_vector(text[0]) # Printing out the first 5 word vectors word_vectors[:5, :23] """ Explanation: Now, run through our entire review data set and convert each review to a word vector. 
End of explanation """ Y = (labels=='positive').astype(np.int_) records = len(labels) shuffle = np.arange(records) np.random.shuffle(shuffle) test_fraction = 0.9 train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):] trainX, trainY = word_vectors[train_split,:], np.array([to_categorical(y, 2).reshape(2) for y in Y.values[train_split]]) testX, testY = word_vectors[test_split,:], np.array([to_categorical(y, 2).reshape(2) for y in Y.values[test_split]]) """ Explanation: Train, Validation, Test sets Now that we have the word_vectors, we're ready to split our data into train, validation, and test sets. Remember that we train on the train data, use the validation data to set the hyperparameters, and at the very end measure the network performance on the test data. Here we're using the function to_categorical from TFLearn to reshape the target data so that we'll have two output units and can classify with a softmax activation function. We actually won't be creating the validation set here, TFLearn will do that for us later. End of explanation """ # Network building def build_model(): # This resets all parameters and variables, leave this here tf.reset_default_graph() net = tflearn.input_data([None, 10000]) net = tflearn.fully_connected(net, 100, activation='relu') # Hidden net = tflearn.fully_connected(net, 10, activation='relu') # Hidden net = tflearn.fully_connected(net, 2, activation='softmax') # Output net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy') model = tflearn.DNN(net) return model """ Explanation: Building the network TFLearn lets you build the network by defining the layers. Input layer For the input layer, you just need to tell it how many units you have. For example, net = tflearn.input_data([None, 100]) would create a network with 100 input units. The first element in the list, None in this case, sets the batch size. Setting it to None here leaves it at the default batch size. The number of inputs to your network needs to match the size of your data. For this example, we're using 10000 element long vectors to encode our input data, so we need 10000 input units. Adding layers To add new hidden layers, you use net = tflearn.fully_connected(net, n_units, activation='ReLU') This adds a fully connected layer where every unit in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call. It's telling the network to use the output of the previous layer as the input to this layer. You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeated calling net = tflearn.fully_connected(net, n_units). Output layer The last layer you add is used as the output layer. Therefore, you need to set the number of units to match the target data. In this case we are predicting two classes, positive or negative sentiment. You also need to set the activation function so it's appropriate for your model. Again, we're trying to predict if some input data belongs to one of two classes, so we should use softmax. net = tflearn.fully_connected(net, 2, activation='softmax') Training To set how you train the network, use net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy') Again, this is passing in the network you've been building. 
The keywords: optimizer sets the training method, here stochastic gradient descent learning_rate is the learning rate loss determines how the network error is calculated. In this example, with the categorical cross-entropy. Finally you put all this together to create the model with tflearn.DNN(net). So it ends up looking something like net = tflearn.input_data([None, 10]) # Input net = tflearn.fully_connected(net, 5, activation='ReLU') # Hidden net = tflearn.fully_connected(net, 2, activation='softmax') # Output net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy') model = tflearn.DNN(net) Exercise: Below in the build_model() function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc. End of explanation """ model = build_model() """ Explanation: Intializing the model Next we need to call the build_model() function to actually build the model. In my solution I haven't included any arguments to the function, but you can add arguments so you can change parameters in the model if you want. Note: You might get a bunch of warnings here. TFLearn uses a lot of deprecated code in TensorFlow. Hopefully it gets updated to the new TensorFlow version soon. End of explanation """ # Training model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=50) """ Explanation: Training the network Now that we've constructed the network, saved as the variable model, we can fit it to the data. Here we use the model.fit method. You pass in the training features trainX and the training targets trainY. Below I set validation_set=0.1 which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the batch_size and n_epoch keywords, respectively. Below is the code to fit our the network to our word vectors. You can rerun model.fit to train the network further if you think you can increase the validation accuracy. Remember, all hyperparameter adjustments must be done using the validation set. Only use the test set after you're completely done training the network. End of explanation """ predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_) test_accuracy = np.mean(predictions == testY[:,0], axis=0) print("Test accuracy: ", test_accuracy) """ Explanation: Testing After you're satisified with your hyperparameters, you can run the network on the test set to measure its performance. Remember, only do this after finalizing the hyperparameters. End of explanation """ # Helper function that uses your model to predict sentiment def test_sentence(sentence): positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1] print('Sentence: {}'.format(sentence)) print('P(positive) = {:.3f} :'.format(positive_prob), 'Positive' if positive_prob > 0.5 else 'Negative') sentence = "Moonlight is by far the best movie of 2016." test_sentence(sentence) sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful" test_sentence(sentence) rev = "Wonder Woman is a superhero movie, and it fulfills the heroic and mythic demands of that genre, but it's also an entry in the genre of wisdom literature that shares hard-won insights and long-pondered paradoxes of the past with a sincere intimacy." test_sentence(rev) test_sentence("quite nice movie to watch after reading the book. good music and actors play. starts a little bit slow but then the viewer gets involved. 
well presented story about italian mafia in new york after 2nd world war. ") rev = "this movie was not very good. The actors play was far from terrific. I would consider this film mediocre performance." test_sentence(rev) rev = "this movie was very good. The actors play was terrific. I would consider this film far from not mediocre performance." test_sentence(rev) """ Explanation: Try out your own text! End of explanation """
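A slightly leaner variant of the text_to_vector function defined earlier, using the dictionary .get lookup suggested in the hint; it lower-cases consistently before the lookup and avoids rebuilding the key list on every membership test (the function name is just illustrative):

def text_to_vector_fast(text):
    word_vector = np.zeros(len(vocab), dtype=np.int_)
    for word in text.split(' '):
        idx = word2idx.get(word.lower(), None)
        if idx is not None:
            word_vector[idx] += 1
    return word_vector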
jhconning/Dev-II
notebooks/Vollrath.ipynb
bsd-3-clause
import numpy as np import matplotlib.pyplot as plt from ipywidgets import interact, fixed from scipy.optimize import fsolve, brentq, minimize def F(la, A, alpha): return A * la**alpha def y(la= 0.5, Aa = 1, Am = 1, alpA = 0.5, alpM = 0.5, p = 1): '''GDP per capita''' return F(la, Aa, alpA) + p * F(1-la, Am, alpM) def optla(Aa = 1, Am = 1, alpA = 0.5, alpM = 0.5, p = 1): '''We could solve these FOC analytically for the optimal labor share, but here we solve for root numerically''' def foc(la): return alpA * Aa * la**(alpA-1) - p* alpM * Am * (1-la)**(alpM-1) return fsolve(foc, 0.05)[0] def plot_y(Aa = 1, Am = 1, alpA = 0.5, alpM = 0.5, p = 1, ax=None): if ax is None: ax = plt.gca() lla = np.linspace(0,1, 100) ax.plot(lla, y(lla, Aa, Am, alpA, alpM, p), label =r'$y(l_A | A_A, A_I )$') lao = optla(Aa, Am, alpA, alpM, p = 1) ax.scatter(lao, y(lao, Aa, Am, alpA, alpM, p) ) ax.set_xlim(-0.01, 1.01) ax.axvline(lao, linestyle='dashed') ax.set_xlabel(r'$l_a$ -- pct labor in agriculture') ax.set_ylabel(r'$y$ -- gdp per capita') ax.legend(loc='upper right', bbox_to_anchor=(1.3, 1)) ax.grid() print(f'{lao*100:0.0f}% ag employment, y = {y(lao, Aa, Am, alpA, alpM, p):0.0f}') def plot_ppf(Aa = 1, Am = 1, alpA = 0.5, alpM = 0.5, p = 1, ax=None): '''draw PPF and world price line through optimum''' if ax is None: ax = plt.gca() lla = np.linspace(0.001, 0.999, 50) ax.plot(F(lla, Aa, alpA), F(1-lla, Am, alpM) ) lao = optla(Aa, Am, alpA, alpM, p = 1) ax.scatter(F(lao, Aa, alpA), F(1-lao, Am, alpM) ) xo, yo = F(lao, Aa, alpA), F(1-lao, Am, alpM) A = yo + p*xo ax.plot([0, A/p], [A, 0], alpha = 0.5) # price line ax.set_xlim(left=0.) ax.set_ylim(bottom=0.) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.grid() AaR, AmR = 4, 10 AaP, AmP = 2, 1 """ Explanation: Volrath (2009) "How important are dual economy effects..." Vollrath, D., 2009. How important are dual economy effects for aggregate productivity? Journal of Development Economics 88, 325–334. How can we explain the inverse relationship between the share of labor in the agricultural sector and GDP per capita. <img src="./media/income_ag.png" alt="Vollrath" class="bg-primary" width="500px"> Efficient allocation explanation GDP per capita This isn't exactlyt same model (his has mobile capital across sectors), it's a slightly simpler Specific-Factors (or Ricardo Viner) model with specific capital in the manufacturing and agricultural sector (i.e. land) to show very similar diagrams. It helps make the same points. Production in the ag and manufacturing (Volrath calls it 'industrial') sectors respectively are given by: $$ A_A \cdot T_a ^\alpha L_a^{1-\alpha} $$ $$ A_M \cdot K_m ^\beta L_m^{1-\beta} $$ Economywide labor supply must in equilibrium be allocated across the sectors: $$ L_a + L_m = \bar L $$ Divide by $\bar L$ to express in per capita terms: $$ l_a + l_m = 1 $$ Land is specific to the ag sector and in fixed supply so $T_a=\bar T$, and capital is specific to manufacturing and fixed $K_m = \bar K$ Note that ag output per capita can be written: $$ \frac{A_A \cdot \bar T^\alpha \cdot L_a^{1-\alpha}}{\bar L} = A_A \bar t ^\alpha \cdot l_a ^{1-\alpha} $$ where $\bar t = \bar T / \bar L$ And a similar expression for manuf output per capita. Let's define $A_a = A_A \bar t^\alpha$ so a country with high 'TFP' has either better technology (higher $A_A$) and/or a higher endowment of land per person. Similarly, define $A_m = A_M \bar k^\beta$. 
Then we can express GDP per capita measured in agricultural goods as given by $$ y(l_a| A_a, A_m, p) = A_a l_a^\alpha + p \cdot A_m (1-l_a)^\beta $$ Where $p=\frac{P_A}{P_M}$ is the relative price of agricultural goods. Rich and Poor Countries Technology (and/or endowments of capital and land per person) are different between rich and poor countries | | Agriculture | Industry | | ---- | :---------: | :-------: | | Rich | $A_{aR}$ | $A_{mR}$ | | Poor | $A_{aP}$ | $A_{mP}$ | The Rich country might have absolute productivity advantages but the poor countries have a comparative advantage in agriculture. We could also allow for different $\alpha$ and $\beta$ but we ignore that here. $$ y_R(l_a| A_{aR}, A_{mR}, p) = A_{aR} l_a^\alpha + p \cdot A_{mR} (1-l_a)^\beta $$ $$ y_P(l_a| A_{aP}, A_{mP}, p) = A_{aP} l_a^\alpha + p \cdot A_{mP} (1-l_a)^\beta $$ Perhaps poorer countries just have a comparative advantage at agricultural production. Let's plot this per capita GDP function and see how its shape changes with the value of the $A$ End of explanation """ plot_y(Aa= 4, Am = 10, alpA = 0.4, alpM = 0.7) """ Explanation: | | Agriculture | Industry | | ---- | :---------: | :-------: | | Rich | $A_{aR}$=4 | $A_{mR}$=10| | Poor | $A_{aP}$=2 | $A_{mP}$=1 | The Rich country has 'TFP' twice as high as the poor country in agriculture. The Rich country has 'TFP' ten times higher in manufacturing. Rich has a comparative advantage in manufacturing Rich country GDP per capita is maximized at a low share of employment in agriculture. End of explanation """ plot_y(Aa=2, Am=1, alpA = 0.7, alpM = 0.4) """ Explanation: Poor country GDP per capita is maximized at a high share of employment in agriculture. Note lower level of GDP per capita than rich. End of explanation """ fig, ax = plt.subplots() plot_y(Aa=4, Am=10, alpA = 0.4, alpM = 0.7) plot_y(Aa=2, Am=1, alpA = 0.7, alpM = 0.4) ax.grid() """ Explanation: Rich and Poor plotted together Demonstrating how that the inverse relationship between labor-share in agriculture and GDP per capita might be generated, with efficient allocations. End of explanation """ plot_ppf(Aa = AaR, Am = AmR, alpA = 0.5, alpM = 0.5) plot_ppf(Aa = AaP, Am = AmP, alpA = 0.5, alpM = 0.5) plt.grid() interact(Aa=(1,5,0.1), Am=(1,5,0.1), alpA = (0.2, 0.8, 0.1), alpM = (0.2, 0.8, 0.1)) """ Explanation: PPFs Just for fun, let's draw their PPFs. We assume here they both face the same world relative price $p$ This is really just a trade model with comparative advantage (the shape of the PPF) determined my a combination of relative factor endowments and the technological productivity levels. End of explanation """
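An interior optimum sets $\frac{dy}{dl_a}=0$, i.e. $\alpha A_a l_a^{\alpha-1} = p \, \beta A_m (1-l_a)^{\beta-1}$, which is the condition that optla solves numerically. A quick check of this, using only the functions and constants defined above and the same 0.4/0.7 exponents as the 'Rich' example:

la_star = optla(Aa=AaR, Am=AmR, alpA=0.4, alpM=0.7)
mpl_ag = 0.4 * AaR * la_star**(0.4 - 1)
mpl_ind = 0.7 * AmR * (1 - la_star)**(0.7 - 1)
# With p = 1 the two marginal value products should (nearly) coincide
print(la_star, mpl_ag, mpl_ind)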
hamogu/Cepheids
CompanionPSF.ipynb
mit
import numpy as np import astropy.units as u from astropy.coordinates import SkyCoord from astroquery.gaia import Gaia from astropy import table from astropy.table import Table from astropy.wcs import WCS from astropy.io import fits from astropy.nddata import NDData from photutils.psf import extract_stars import sherpa import sherpa.astro.models import sherpa.data import sherpa.fit import matplotlib.pyplot as plt %matplotlib inline # On melkor, the datapath is hardocded in snapshot program # but to run on my laptop, I need to put it here datapath = 'data/' prflist = [['ibg402010_drz.fits', 612, 209], ['ibg402010_drz.fits', 1007, 951], ['ibg402010_drz.fits', 64, 823], ['ibg403010_drz.fits', 597, 385], ['ibg405010_drz.fits', 340, 40], ['ibg406010_drz.fits', 424, 348], ['ibg412010_drz.fits', 439, 239], ['ibg415010_drz.fits', 676, 73], ['ibg416010_drz.fits', 444, 217], ['ibg417010_drz.fits', 587, 954], ['ibg418010_drz.fits', 113, 946], ['ibg418010_drz.fits', 160, 720], ['ibg418010_drz.fits', 113, 946], ['ibg418010_drz.fits', 917, 873], ['ibg419010_drz.fits', 675, 644], ['ibg422010_drz.fits', 896, 320], ['ibg425010_drz.fits', 647, 697], ['ibg426010_drz.fits', 997, 84], ['ibg426010_drz.fits', 385, 95], ['ibg428010_drz.fits', 356, 85], ['ibg428010_drz.fits', 889, 496], ['ibg429010_drz.fits', 803, 305], ['ibg432010_drz.fits', 153, 109], ['ibg434010_drz.fits', 876, 577], ['ibg436010_drz.fits', 342, 878], ['ibg437010_drz.fits', 873, 239], ['ibg438010_drz.fits', 41, 651], ['ibg438010_drz.fits', 417, 403], ['ibg440010_drz.fits', 212, 338], ['ibg440010_drz.fits', 297, 1018], ['ibg443010_drz.fits', 358, 289], ['ibg444010_drz.fits', 329, 347], ['ibg444010_drz.fits', 724, 725], ['ibg446010_drz.fits', 276, 501], ['ibg449010_drz.fits', 75, 775], ['ibg450010_drz.fits', 669, 272], ['ibg452010_drz.fits', 158, 558], ['ibg453010_drz.fits', 812, 847], ['ibg453010_drz.fits', 333, 189], ['ibg455010_drz.fits', 567, 650], ['ibg455010_drz.fits', 263, 444], ['ibg456010_drz.fits', 530, 696], ['ibg458010_drz.fits', 162, 807], ['ibg459010_drz.fits', 375, 168], ['ibg462010_drz.fits', 683, 641], ['ibg463010_drz.fits', 647, 971], ['ibg463010_drz.fits', 319, 873], ['ibg465010_drz.fits', 588, 723], ['ibg468010_drz.fits', 150, 509], ['ibg468010_drz.fits', 280, 136], ['ibg471010_drz.fits', 600, 685], ['ibg471010_drz.fits', 892, 511], ] """ Explanation: PSFs for the companions The whole Cepheid project is focussed on the Cepheids PSF, fitting, subtracting etc. using LOCI and other complicated algorithms. However, in some cases, we also need a simple prescription of the PSF for "normal" stars, i.e. stars that are not saturated ant not insanly overexposed, so that we don't see the same number of structures in the wings that we do see for the Cepheids. In particualr, we need an analytic form of the PSF to fit the flux of any detected companions and similarly, we need a sample of "clean" stars that we can use for our fake-insert experiements to determine fit uncertianties and detection limits. In this notebook, we stars from a list of hand-selected stars in the Cepheid images. I just clicked through the images in ds9, selecting all stars that had a flux in the central pixel > 200, are not visibly impacted by the Cepheid PSF (usually that means they are far away, but if a star is very bright, then it is OK if it's close to the diffraction spike as long as it's flux is much larger than the spike), are single and look circular (no unresolved binaries). 
In this notebook, we load in that list and then perform additional filtering to get a set of clean PSF templates. End of explanation """ radius = u.Quantity(1.0, u.arcsec) gaia = [] for i, p in enumerate(prflist): wcs = WCS(fits.getheader(datapath + p[0], 1)) ra, dec = wcs.all_pix2world(p[1], p[2], 1) coord = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs') j = Gaia.cone_search(coord, radius) r = j.get_results() r['i'] = i r['file'] = p[0] r['x'] = p[1] r['y'] = p[2] gaia.append(r) gaiatab = table.vstack(gaia) gaiatab # Select only those stars that are likely good single point sources gaiatab = gaiatab[(gaiatab['ra_error'] < 0.1) & (gaiatab['dec_error'] < 0.1)] """ Explanation: First, we turn to GAIA. While the current DR2 does not explicitly mark catalog entries as multiple systems or extended sources, we can just look at the error in RA and dec. For stars as bright as the ones we've chosen, those errors are typically in the range of 0.05 milliarcsec. Anything larger than that indicates that the source is not a good point source, so we want to reject it. End of explanation """ # -1 because the above positions are measured in ds9, which counts from (1,1) # while the python code counts from (0,0) stars621 = extract_stars([NDData(fits.open(datapath + row['file'])[1].data) for row in gaiatab], [Table({'x': [row['x'] - 1], 'y': [row['y'] - 1]}) for row in gaiatab], size=25) stars845 = extract_stars([NDData(fits.open(datapath + row['file'].replace('10_', '20_'))[1].data) for row in gaiatab], [Table({'x': [row['x'] - 1], 'y': [row['y'] - 1]}) for row in gaiatab], size=25) def sherpa_phot(psf, data): # Set sensible starting values # Use hasattr so I can use the same function for e.g. Gauss2D if I want to try that if hasattr(psf, 'alpha') and not psf.alpha.frozen: psf.alpha = 2 if hasattr(psf, 'r0') and not psf.r0.frozen: psf.r0 = 2 psf.ampl = np.max(data) x, y = np.indices(data.shape) dpsf = sherpa.data.Data2D('', x.flatten(), y.flatten(), data.flatten()) fpsf = sherpa.fit.Fit(dpsf, psf, stat=sherpa.stats.LeastSq()) fpsf.fit() return data - psf(x.flatten(), y.flatten()).reshape(data.shape) beta2d = sherpa.astro.models.Beta2D() beta2d.xpos = stars621[0].data.shape[0] / 2 beta2d.ypos = stars621[0].data.shape[1] / 2 beta2d.alpha = 2 beta2d.alpha.frozen = True results = [] for i, s in enumerate(stars621): out = sherpa_phot(beta2d, s.data) results.append([p.val for p in beta2d.pars]) res621 = Table(rows=results, names = [p.name for p in beta2d.pars]) results = [] for i, s in enumerate(stars845): out = sherpa_phot(beta2d, s.data) results.append([p.val for p in beta2d.pars]) res845 = Table(rows=results, names = [p.name for p in beta2d.pars]) """ Explanation: Next, we load all remaining sources and fit each cutout with a Beta2D model. End of explanation """ plt.plot(res621['xpos']) plt.plot(res845['xpos']) plt.plot(res621['ypos']) plt.plot(res845['ypos']) # Note how (x, y) -> (y, x) because of the ordering of the indices # I don't want to find out exactly where that happens, just make this line look better. # I just check that it works (see below) and that's all that I need here.
gaiatab['x'] = gaiatab['x'] - np.rint(12.5 - res621['ypos']) gaiatab['y'] = gaiatab['y'] - np.rint(12.5 - res621['xpos']) # Now extract it again # -1 because the above positions are measured in ds9, which counts from (1,1) # while the python code counts from (0,0) stars621 = extract_stars([NDData(fits.open(datapath + row['file'])[1].data) for row in gaiatab], [Table({'x': [row['x'] - 1], 'y': [row['y'] - 1]}) for row in gaiatab], size=25) stars845 = extract_stars([NDData(fits.open(datapath + row['file'].replace('10_', '20_'))[1].data) for row in gaiatab], [Table({'x': [row['x'] - 1], 'y': [row['y'] - 1]}) for row in gaiatab], size=25) # fit it again results = [] for i, s in enumerate(stars621): beta2d.xpos = 12.5 beta2d.ypos = 12.5 out = sherpa_phot(beta2d, s.data) results.append([p.val for p in beta2d.pars]) res621 = Table(rows=results, names = [p.name for p in beta2d.pars]) results = [] for i, s in enumerate(stars845): beta2d.xpos = 12.5 beta2d.ypos = 12.5 out = sherpa_phot(beta2d, s.data) results.append([p.val for p in beta2d.pars]) res845 = Table(rows=results, names = [p.name for p in beta2d.pars]) # Check its centered plt.plot(res621['xpos']) plt.plot(res845['xpos']) plt.plot(res621['ypos']) plt.plot(res845['ypos']) """ Explanation: Since the (x,y) positions are just picked by hand in ds9, they might be off by a pixel or two. However, for the fake insertion technique later, we want sources that are reasonably centered in the extracted sub-images. Thus, in the first round, we fit the (x,y) values, calculate the difference and then adjust the input (x,y) in our catalog. End of explanation """ beta2d.alpha.frozen = False results = [] for i, s in enumerate(stars621): beta2d.xpos = 12.5 beta2d.ypos = 12.5 out = sherpa_phot(beta2d, s.data) results.append([p.val for p in beta2d.pars]) res621ar = Table(rows=results, names = [p.name for p in beta2d.pars]) results = [] for i, s in enumerate(stars845): beta2d.xpos = 12.5 beta2d.ypos = 12.5 out = sherpa_phot(beta2d, s.data) results.append([p.val for p in beta2d.pars]) res845ar = Table(rows=results, names = [p.name for p in beta2d.pars]) plt.scatter(res621ar['alpha'], res621ar['r0'], c=gaiatab['parallax']) plt.colorbar() plt.scatter(res845ar['alpha'], res845ar['r0'], c=gaiatab['parallax']) plt.colorbar() """ Explanation: Look at fit results with alpha and r0 free End of explanation """ out = plt.hist([res621['r0'], res845['r0']], bins=np.arange(1., 2.51, .05)) ind = (np.abs(res621['r0'] - 1.65) < .4) & (np.abs(res845['r0'] - 1.6) < 0.4) ind.sum(), len(res621) out = plt.hist([res621['ampl'][ind], res845['ampl'][ind]], bins=20, range=[0,2000]) plt.scatter(res621['ampl'], res845['ampl'], c=ind) plt.scatter(res621['ampl'] / res845['ampl'], res845['r0'], c=gaiatab['parallax']) plt.colorbar() psf_621 = sherpa.astro.models.Beta2D() psf_621.alpha = 2.2 psf_621.r0 = 1.7 psf_845 = sherpa.astro.models.Beta2D() psf_845.alpha = 2 psf_845.r0 = 1.625 for this_psf in [psf_621, psf_845]: # Set starting values this_psf.xpos = 12.5 this_psf.ypos = 12.5 # Freeze psf values this_psf.alpha.frozen = True this_psf.r0.frozen = True data = stars845[5].data plt.plot(data[12, :]) psf_845.ampl = np.max(data) x, y = np.indices(data.shape) dpsf = sherpa.data.Data2D('', x.flatten(), y.flatten(), data.flatten()) fpsf = sherpa.fit.Fit(dpsf, psf_845, stat=sherpa.stats.LeastSq()) fpsf.fit() plt.plot(psf_845(x.flatten(), y.flatten()).reshape(data.shape)[12, :]) """ Explanation: Look at fit results when alpha is frozen Even if the alpha is frozen to a differnet value then what I will 
ultimately use to fit the LOCI-subtracted data, it's close enough. The goal here is simply to remove sources that obviously have a different PSF shape, e.g. because they are unresolved binaries or background sources. End of explanation """ bright = (res621['ampl'] > 400) & (res845['ampl'] > 400) (ind & bright).sum() x, y = np.indices(stars621[0].data.shape) fig, axes = plt.subplots(nrows=4, ncols=15, figsize=(35,12)) for j, s in enumerate((ind & bright).nonzero()[0][:15]): for i, stars in enumerate([stars621, stars845]): image = stars[s].data imout = axes[2 * i][j].imshow(image) #axes[0][j].set_title(prftestlist[j][0]) plt.colorbar(imout, ax = axes[2 * i][j]) thispsf = [psf_621, psf_845][i] photout = sherpa_phot(thispsf, image) imout = axes[2 * i + 1][j].imshow(photout) axes[2 * i + 1][j].set_title("residual") plt.colorbar(imout, ax = axes[2 * i + 1][j]) len(gaiatab), len(bright) for row in gaiatab[ind & bright]: print("['{}', {:3.0f}, {:3.0f}],".format(row['file'], row['x'], row['y'])) """ Explanation: Select the PSF stars These stars have the following cuts applied: they look good in GAIA, they have typical r0 fit results (for fixed alpha), and they are the brightest of those. End of explanation """
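As a possible next step, which is not in the original notebook, the selected clean stars could be stacked into an empirical PSF with photutils' EPSFBuilder, giving a template to compare against the analytic Beta2D fits. This is a sketch under the assumption that an oversampled empirical PSF is useful here; the oversampling and iteration settings are arbitrary choices, and only objects defined above (gaiatab, ind, bright, datapath) are reused.

# Sketch: build an effective PSF from the stars that passed the GAIA, r0, and brightness cuts.
from photutils.psf import EPSFBuilder

good_stars = extract_stars(
    [NDData(fits.open(datapath + row['file'])[1].data) for row in gaiatab[ind & bright]],
    [Table({'x': [row['x'] - 1], 'y': [row['y'] - 1]}) for row in gaiatab[ind & bright]],
    size=25)

epsf_builder = EPSFBuilder(oversampling=2, maxiters=5, progress_bar=False)
epsf, fitted_stars = epsf_builder(good_stars)

plt.imshow(epsf.data, origin='lower')
plt.colorbar()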
laserkelvin/Molecule-Tools
Coulomb Matrix.ipynb
gpl-3.0
Atoms = ["C", "B", "H"] """ Explanation: Atoms Atoms are defined as a list of element symbols (strings) here. End of explanation """ Coordinates = [] for AtomNumber in range(len(Atoms)): Coordinates.append(np.random.rand(3)) """ Explanation: Generate some random coordinates We make up some random $x,y,z$ coordinates, one triplet per atom. End of explanation """ Molecule = dict() for Index, Atom in enumerate(Atoms): Molecule[Index] = __Atom__(Atom, Coordinates[Index]) """ Explanation: Molecule Here we've defined a molecule as a dictionary of atom objects. We'll loop over the number of atoms, and initialise each dictionary key with an atom. In an actual run, we'll need to parse information from an xyz file somehow instead. End of explanation """ CM = CalculateCoulombMatrix(Molecule) # ReadXYZ reads a plain xyz-style file with one atom per line and returns the raw string tokens # (element symbol plus coordinates) for each line; it assumes there are no header or comment lines. def ReadXYZ(File): f = open(File, "r") fc = f.readlines() f.close() NAtoms = len(fc) Coordinates = [] for line in range(NAtoms): Coordinates.append(fc[line].split()) return Coordinates """ Explanation: Calculate the Coulomb matrix Call our function, using the Molecule dictionary as input to calculate a Coulomb matrix. End of explanation """
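The CalculateCoulombMatrix helper used above is defined elsewhere in the original repository and is not shown here. For illustration only, the sketch below builds a Coulomb matrix directly from the element symbols and the random coordinates, using the standard definition M_ii = 0.5 * Z_i**2.4 and M_ij = Z_i * Z_j / |R_i - R_j|. The small symbol-to-atomic-number lookup is an assumption that only covers the atoms used in this example, and this is not the repository's own implementation.

# Sketch: a standalone Coulomb matrix built from symbols and Cartesian coordinates.
import numpy as np

atomic_numbers = {"H": 1, "B": 5, "C": 6}  # only the elements used in this example

def coulomb_matrix(symbols, coords):
    coords = np.asarray(coords, dtype=float)
    Z = np.array([atomic_numbers[s] for s in symbols], dtype=float)
    n = len(symbols)
    M = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            if i == j:
                M[i, j] = 0.5 * Z[i] ** 2.4            # diagonal: self-interaction term
            else:
                M[i, j] = Z[i] * Z[j] / np.linalg.norm(coords[i] - coords[j])
    return M

print(coulomb_matrix(Atoms, Coordinates))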
ALEXKIRNAS/DataScience
Coursera/Machine-learning-data-analysis/Course 2/Week_04/grad_boosting.ipynb
mit
import sklearn.datasets as datasets import numpy as np from matplotlib import pyplot as plt import pandas as pd %matplotlib inline data = datasets.load_boston() target = data['target'] data = data['data'] data.shape x_train, x_test, y_train, y_test = data[:380], data[380:], target[:380], target[380:] """ Explanation: Gradient boosting by hand Note: the assignment text has changed - the number of trees is different (now 50), the step-size rule in task 3 has changed, and the random_state parameter has been added to the decision tree. The correct answers have not changed, but they are now easier to obtain. A typo in the gbm_predict function has also been fixed. This assignment uses the boston dataset from sklearn.datasets. Keep the last 25% of the objects as a hold-out set for quality control, splitting X and y into X_train, y_train and X_test, y_test. The goal of the assignment is to implement a simple version of gradient boosting over regression trees for the case of a quadratic loss function. End of explanation """
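As a side note (not part of the original assignment text): for the squared loss, the target that each new tree is trained on works out to the plain residual. Writing the loss at a single object as

$$ L\bigl(y, a(x)\bigr) = \bigl(a(x) - y\bigr)^2, \qquad \frac{\partial L}{\partial a(x)} = 2\,\bigl(a(x) - y\bigr), $$

the anti-gradient is proportional to $y - a(x)$; dropping the factor of 2 (it is absorbed into the coefficient of the new base learner) gives exactly the residual target gradient = y - gbm_predict(...) used in the implementation below.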
В бустинге зачастую используются сотни и тысячи деревьев, но мы ограничимся 50, чтобы алгоритм работал быстрее, и его было проще отлаживать (т.к. цель задания разобраться, как работает метод). Каждое дерево должно обучаться на одном и том же множестве объектов, но ответы, которые учится прогнозировать дерево, будут меняться в соответствие с полученным в задании 1 правилом. Попробуйте для начала всегда брать коэффициент равным 0.9. Обычно оправдано выбирать коэффициент значительно меньшим - порядка 0.05 или 0.1, но т.к. в нашем учебном примере на стандартном датасете будет всего 50 деревьев, возьмем для начала шаг побольше. В процессе реализации обучения вам потребуется функция, которая будет вычислять прогноз построенной на данный момент композиции деревьев на выборке X: def gbm_predict(X): return [sum([coeff * algo.predict([x])[0] for algo, coeff in zip(base_algorithms_list, coefficients_list)]) for x in X] (считаем, что base_algorithms_list - список с базовыми алгоритмами, coefficients_list - список с коэффициентами перед алгоритмами) Эта же функция поможет вам получить прогноз на контрольной выборке и оценить качество работы вашего алгоритма с помощью mean_squared_error в sklearn.metrics. Возведите результат в степень 0.5, чтобы получить RMSE. Полученное значение RMSE — ответ в пункте 2. End of explanation """ coef = [0.9 / (1. + i) for i in range(num_of_estimators)] estimators = gradient_boost_fit(x_train, y_train) rmse(x_test, y_test, estimators) """ Explanation: Задание 3 Вас может также беспокоить, что двигаясь с постоянным шагом, вблизи минимума ошибки ответы на обучающей выборке меняются слишком резко, перескакивая через минимум. Попробуйте уменьшать вес перед каждым алгоритмом с каждой следующей итерацией по формуле 0.9 / (1.0 + i), где i - номер итерации (от 0 до 49). Используйте качество работы алгоритма как ответ в пункте 3. В реальности часто применяется следующая стратегия выбора шага: как только выбран алгоритм, подберем коэффициент перед ним численным методом оптимизации таким образом, чтобы отклонение от правильных ответов было минимальным. Мы не будем предлагать вам реализовать это для выполнения задания, но рекомендуем попробовать разобраться с такой стратегией и реализовать ее при случае для себя. End of explanation """ 2 3 """ Explanation: Задание 4 Реализованный вами метод - градиентный бустинг над деревьями - очень популярен в машинном обучении. Он представлен как в самой библиотеке sklearn, так и в сторонней библиотеке XGBoost, которая имеет свой питоновский интерфейс. На практике XGBoost работает заметно лучше GradientBoostingRegressor из sklearn, но для этого задания вы можете использовать любую реализацию. Исследуйте, переобучается ли градиентный бустинг с ростом числа итераций (и подумайте, почему), а также с ростом глубины деревьев. На основе наблюдений выпишите через пробел номера правильных из приведенных ниже утверждений в порядке возрастания номера (это будет ответ в п.4): 1. С увеличением числа деревьев, начиная с некоторого момента, качество работы градиентного бустинга не меняется существенно. 2. С увеличением числа деревьев, начиная с некоторого момента, градиентный бустинг начинает переобучаться. 3. С ростом глубины деревьев, начиная с некоторого момента, качество работы градиентного бустинга на тестовой выборке начинает ухудшаться. 4. 
As the depth of the trees increases, beyond some point the quality of gradient boosting stops changing substantially. End of explanation """ from sklearn.linear_model import LinearRegression estimator = LinearRegression() estimator.fit(x_train, y_train) def mse(X, Y, estimator): error = np.sum(np.power(estimator.predict(X) - Y, 2)) / X.shape[0] return error def rmse(X, Y, estimator): return np.sqrt(mse(X, Y, estimator)) rmse(x_test, y_test, estimator) """ Explanation: Task 5 Compare the quality obtained with gradient boosting to the quality of linear regression. To do this, train LinearRegression from sklearn.linear_model (with default parameters) on the training set and evaluate the RMSE of the resulting algorithm's predictions on the test set. The resulting quality is the answer for item 5. In this example the simple model should have turned out to be worse, but keep in mind that this is not always the case. In the assignments for this course you will encounter an example of the opposite situation. End of explanation """
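A sketch of the overfitting study asked for in task 4, using sklearn's own GradientBoostingRegressor rather than the hand-rolled version above. This cell is an addition, not part of the original solution; the particular grids of tree counts and depths are arbitrary choices.

# Sketch: track test RMSE while varying the number of trees and the tree depth.
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error

def test_rmse(n_estimators=50, max_depth=5):
    gbr = GradientBoostingRegressor(n_estimators=n_estimators, max_depth=max_depth,
                                    random_state=42)
    gbr.fit(x_train, y_train)
    return np.sqrt(mean_squared_error(y_test, gbr.predict(x_test)))

for n in [10, 50, 200, 1000]:
    print("n_estimators =", n, "test RMSE =", test_rmse(n_estimators=n))
for depth in [2, 5, 10, 20]:
    print("max_depth =", depth, "test RMSE =", test_rmse(max_depth=depth))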
landlab/landlab
notebooks/teaching/geomorphology_exercises/hillslope_notebooks/north_carolina_piedmont_hillslope_class_notebook.ipynb
mit
# Code Block 1 import numpy as np from landlab.io import read_esri_ascii from landlab.plot.imshow import imshow_grid import matplotlib.pyplot as plt # below is to make plots show up in the notebook %matplotlib inline """ Explanation: <a href="http://landlab.github.io"><img style="float: left" src="../../../landlab_header.png"></a> Using Landlab to explore a diffusive hillslope in the piedmont of North Carolina This notebook was developed in collaboration with Karl Wegmann at North Carolina State University. This notebook was coded by Nicole Gasparini at Tulane University. <hr> For tutorials on learning Landlab, click here: <a href="https://github.com/landlab/landlab/wiki/Tutorials">https://github.com/landlab/landlab/wiki/Tutorials</a> <hr> What is this notebook? This notebook is designed to be an exercise for students in a quantative geomorphology class. This notebook is meant to illustrate to students how a model can be used to understand and interpret real landscapes. This notebook is not designed to teach students how to code, nor is it designed to teach students how to use Landlab. This notebook provides data from a real landscape in North Carolina (below) in order to compare the shape of this hillslope with those produced by models using linear diffusion. A group of students at NC State have collected elevation data along the yellow transect in the image of the landscape below. This notebook uses that transect data and compares it with model output. This notebook steps students through exercises to quantify the diffusivity on this hillslope, assuming that sediment transport on this hillslope follows a linear diffusion law. Students should be introduced to the linear diffusion transport law before using this notebook. Application of linear diffusion transport law: The tranport law applied here is of the form: \begin{equation} q_s = -D \nabla z \end{equation} where ${q}_s$ is the transport rate with dimensions of L$^2$T$^{-1}$; $D$ is a transport coefficient with dimensions of L$^2$T$^{-1}$; and $z$ is elevation. $\nabla z$ is the gradient in the landscape, or change in elevation with change in distance. Landscape slope is $-\nabla z$ (assuming distance increases downhill), hence the negative in the above equation. Changes in elevation, or erosion, are calculated from conservation of mass: \begin{equation} \frac{dz}{dt} = U-\nabla q_s \end{equation} where $U$ is the rock uplift rate (relative to a fixed elevation at the boundary), with dimensions LT$^{-1}$, and $t$ is time. Note that at steady state the rock uplift rate sets the erosion rate. If the erosion rate in a landscape is a known, a modeling scenario in which the landscape is uplifting at the same rate as the known erosion rate will eventually lead to a landscape that is also eroding at that rate. Although we call $U$ the rate of rock uplift rate, we would get the same solution if $U$ is the rate of river erosion in the river at the base of a hillslope. For this example, it might easier to think of $U$ as a base-level lowering rate. What will we do with Landlab? Here we will use the Landlab component LinearDiffuser, to explore whether hillslopes evolved according to a linear diffusion rule look like the example North Carolina hillslope. We will just be eye-balling similarities. No rigorous comparison between the real and modeled hillslope is performed. More general background on applying hillslope process models to real landscapes can be found in this paper: Roering, Joshua J. 
(2008) "How well can hillslope evolution models “explain” topography? Simulating soil transport and production with high-resolution topographic data." Geological Society of America Bulletin. This paper has a nice compilation of measured landscape diffusivity values that can help put the values you get from this exercise in context: Perron, J. T., (2017) "Climate and the pace of erosional landscape evolution" Annual Review of Earth and Planetary Sciences. What do you need to know about this Landscape? General information on this landscape can be found in this reference: Wegmann, K. W., Lewis, R. Q., & Hunt, M. C. (2012). "Historic mill ponds and piedmont stream water quality: Making the connection near Raleigh, North Carolina". The Geological Society of America, Field Guide 29. For now the following information is provided. The site is in the Piedmont near Raleigh, North Carolina. The site is in the W.B. Umstead State Park which is in the Sycamore Creek watershed. Sycamore Creek drains into the Neuse River. The area gets ~ 1.25 m/yr of rainfall. The site is currently forested. Basin average erosion rates from the area are on the order of 10 m/million years, or 1e-5 m/yr. These rates are averaged over approximately 100,000 years. These rates are from a nearby area similar to this, but not exactly this study area. The site has been heavily impacted from agriculture due to European settlers, beginning in the 1600s. STUDENTS - Step 0 - What you need to do: You will start out by making the assumption that the landscape is eroding at a rate of 1e-5 m/yr. As you will see, the study area hillslope profile has a form similar to what one would expect from a uniformly eroding diffusive profile. Your first goal is to find the diffusivity and time required for the profile to reach steady state given the background erosion rate of 1e-5 m/yr. You can do that by running the code below. Take special note of Code Block 4 where you set the diffusivity. Make sure choose a resonable initial value, and record what your initial value was. Code Block 5 will allow you to see the analytical solution for hillslope form using different $D$ Values. You can play with the analystical solution to find the best $D$. Also be prepared to change model run time (Code Block 4) for your initial experiments. Once you iterate to a modeled profile that you feel reasonably matches the DEM profile (just eye-ball the similarity), you will be ready to answer the questions that follow all of the code. How to run a Jupyter notebook: Read the text boxes and sequentially run each code block (shift - enter OR got to the Cell pulldown menu at the top and choose Run Cells). While a code block is running there is a * in the brackets next to it. Once it has completed running there will be a number in the brackets next to it Remember that you can always go to the Kernel pulldown menu at the top and choose Restart & Clear Output or Restart & Run All if you change things and want to start afresh. If you just change one code block and rerun only that code block, only the parts of the code in that code block will be updated. (E.g. if you change parameters but don't reset the code blocks that initialize run time or topography, then these values will not be reset.) Now on to the code... We start by importing libraries that we will need to run the code. You should not need to edit this code block. 
End of explanation """ # Code Block 2 # distance and elevation data along the survey line field_dist = np.array( [ 0, 1, 2, 3, 4, 4.99, 5.99, 6.99, 7.99, 8.99, 9.99, 10.99, 11.99, 12.99, 13.99, 14.98, 15.98, 16.98, 17.98, 18.98, 19.98, 20.98, 21.98, 22.98, 23.98, 24.97, 25.97, 26.97, 27.97, 28.97, 29.97, 30.97, 31.97, 32.97, 33.97, 34.96, 35.96, 36.96, 37.96, 38.96, 39.96, 40.96, 41.96, 42.96, 43.95, 44.95, 45.95, 46.95, 49.95, 50.95, 51.95, 52.95, 53.94, 54.94, 55.94, 56.94, 57.94, 58.94, 59.94, 60.94, 61.94, 62.94, 63.93, 64.93, 65.93, 66.93, 67.93, 68.93, 69.93, 70.93, 71.93, 72.92, 73.92, 74.92, 75.92, 76.92, 77.92, 78.92, 79.92, 80.92, 81.92, 82.91, 83.91, 84.91, 85.91, 86.91, 87.91, 88.91, ] ) field_z = np.array( [ 0, 0.03, 0.3, 0.47, 0.62, 0.83, 1.09, 1.31, 1.54, 1.8, 2.14, 2.38, 2.55, 2.84, 3.15, 3.49, 3.78, 4.05, 4.41, 4.57, 4.77, 5.05, 5.29, 5.46, 5.68, 5.96, 6.4, 6.81, 6.99, 7.21, 7.45, 7.63, 7.79, 7.87, 8.06, 8.24, 8.4, 8.51, 8.65, 8.68, 8.82, 8.98, 9.01, 9.04, 9.05, 9.09, 9.07, 9.07, 9.02, 8.93, 8.9, 8.83, 8.73, 8.62, 8.47, 8.28, 8.22, 8, 7.82, 7.75, 7.39, 7.2, 7.04, 6.79, 6.6, 6.39, 6.1, 5.77, 5.5, 5.3, 5.11, 4.89, 4.64, 4.5, 4.32, 4.1, 3.96, 3.6, 3.19, 2.92, 2.73, 2.41, 2.12, 1.76, 1.21, 0.95, 0.56, 0.06, ] ) """ Explanation: Now we will use the survey data from the NC State students and compare it to model output. Below is the information from the survey. You should not need to edit this code block. End of explanation """ # Code Block 3 from landlab import RasterModelGrid mg = RasterModelGrid((90, 5), 1.0) # make grid z_vals = mg.add_ones("topographic__elevation", at="node") # initialize z values # Set initial conditions initial_elevation = np.multiply( z_vals, -1.0 ) # this line and next set elevation to 87. m z_vals += initial_elevation # Set boundary conditions mg.set_closed_boundaries_at_grid_edges(True, False, True, False) # Initialize values for plotting variables down the middle of the hillslope ycoord_rast = mg.node_vector_to_raster(mg.node_y) ys_grid = ycoord_rast[:, 2] """ Explanation: We will create a grid for our model using Landlab's RasterModelGrid class, which we need to import. We make a grid with dx = dy = 1.0 m (same resolution as the survey data). We make a grid that has 5 columns and 90 rows, to match the length of the profile from the real landscape. End of explanation """ # Code Block 4 from landlab.components import LinearDiffuser D = 0.005 # value in m^2/yr lin_diffuse = LinearDiffuser(mg, linear_diffusivity=D) # Uniform rate of rock uplift, which drives uniform erosion at steady state uplift_rate = 0.00001 # m/year, start with 1e-5 m/yr # Total time in years that the model will run for. runtime = 500000 # years # Stability criteria for timestep dt. Coefficient can be changed # depending on our tolerance for stability vs tolerance for run time. # Do not change this. dt = 0.5 * mg.dx * mg.dx / D print("dt", dt) # nt is number of time steps nt = int(runtime // dt) # Below is to keep track of time for labeling plots time_counter = 0 # length of uplift over a single time step, meters uplift_per_step = uplift_rate * dt """ Explanation: Now we import and initialize the LinearDiffuser component. In this case the units on our diffusivity coefficient, or transport coefficient, are m$^2$yr$^{-1}$. NOTE to Students: You need to chose a reasonable initial value for D (diffusivity). Remember you need to justify your initial guess for D. Supplied references should help with this. We also initialize a few more parameters. 
End of explanation """ # Code Block 5 # ANALYTICAL SOLUTION ys = np.arange(mg.number_of_node_rows * mg.dx - mg.dx) # location of divide or ridge crest -> middle of grid # based on boundary conds. divide_loc = (mg.number_of_node_rows * mg.dx - mg.dx) / 2 # half-width of the ridge half_width = (mg.number_of_node_rows * mg.dx - mg.dx) / 2 # analytical solution for elevation under linear diffusion at steady state zs = (uplift_rate / (2 * D)) * (np.power(half_width, 2) - np.power(ys - divide_loc, 2)) # PLOTTING plt.figure() imshow_grid(mg, "topographic__elevation") plt.title("initial topography, at right is the colorbar") plt.figure() elev_rast = mg.node_vector_to_raster(mg.at_node["topographic__elevation"]) plt.figure() plt.plot(ys_grid, elev_rast[:, 2], "r-", label="model") plt.plot(ys, zs, "k--", label="analytical solution") plt.plot(field_dist, field_z, "b:", label="field data") plt.xlabel("horizontal distance (m)") plt.ylabel("elevation (m)") plt.legend(loc="lower center") # plt.title('before running model') """ Explanation: Now we figure out the analytical solution for the elevation of the steady-state profile. Before we evolve the landscape, we look at the model initial topography, the analytical solution, and the field profile. End of explanation """ # Code Block 6 for i in range(nt): mg["node"]["topographic__elevation"][mg.core_nodes] += uplift_per_step lin_diffuse.run_one_step(dt) time_counter += dt print("time evolved for ", time_counter, " years") """ Explanation: Now you can update the values of $D$ in Code Block 4 and rerun Code Block 5 to fit the analytical solution to the field data. Once you have done that, run the model and evolve the landscape to make sure it also fits the analytical solution. You may need to update $runtime$ in Code Block 4 if the model does not match the analytical solution. Remember that you need to find the $D$ value and approximate time that it takes for the landscape to reach steady state. Below is the time loop that does all the calculations. End of explanation """ # Code Block 7 plt.figure() elev_rast = mg.node_vector_to_raster(mg.at_node["topographic__elevation"]) plt.plot(ys_grid, elev_rast[:, 2], "r-", label="model") plt.plot(ys, zs, "k--", label="analytical solution") plt.plot(field_dist, field_z, "b:", label="field data") plt.xlabel("horizontal distance (m)") plt.ylabel("vertical elevation (m)") plt.legend(loc="lower center") plt.title( "topographic cross section at time %s, with D = %s m^2/yr" % (time_counter, D) ) """ Explanation: Now we plot the evolved cross-section. End of explanation """
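The comparison above is done by eye. As an optional extra, not part of the original exercise, a single number summarizing the misfit can be computed by interpolating the modeled profile onto the surveyed distances; this sketch assumes the survey distance and the model's y coordinate share the same origin and orientation.

# Code Block 8 (added sketch): RMSE between the modeled profile and the field survey.
model_at_survey = np.interp(field_dist, ys_grid, elev_rast[:, 2])
profile_rmse = np.sqrt(np.mean((model_at_survey - field_z) ** 2))
print("RMSE between modeled and surveyed profile:", profile_rmse, "m")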
cxxgtxy/tensorflow
tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2019 The TensorFlow Authors. End of explanation """ !pip install tflite-model-maker """ Explanation: Image classification with TensorFlow Lite Model Maker <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/lite/tutorials/model_maker_image_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> <td> <a href="https://tfhub.dev/"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> </td> </table> Model Maker library simplifies the process of adapting and converting a TensorFlow neural-network model to particular input data when deploying this model for on-device ML applications. This notebook shows an end-to-end example that utilizes this Model Maker library to illustrate the adaption and conversion of a commonly-used image classification model to classify flowers on a mobile device. Prerequisites To run this example, we first need to install several required packages, including Model Maker package that in GitHub repo. End of explanation """ import numpy as np import tensorflow as tf assert tf.__version__.startswith('2') from tflite_model_maker import configs from tflite_model_maker import ExportFormat from tflite_model_maker import image_classifier from tflite_model_maker import ImageClassifierDataLoader from tflite_model_maker import model_spec import matplotlib.pyplot as plt """ Explanation: Import the required packages. End of explanation """ image_path = tf.keras.utils.get_file( 'flower_photos', 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', untar=True) """ Explanation: Simple End-to-End Example Get the data path Let's get some images to play with this simple end-to-end example. Hundreds of images is a good start for Model Maker while more data could achieve better accuracy. End of explanation """ data = ImageClassifierDataLoader.from_folder(image_path) train_data, test_data = data.split(0.9) """ Explanation: You could replace image_path with your own image folders. 
As for uploading data to colab, you could find the upload button in the left sidebar shown in the image below with the red rectangle. Just have a try to upload a zip file and unzip it. The root file path is the current path. <img src="https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_image_classification.png" alt="Upload File" width="800" hspace="100"> If you prefer not to upload your images to the cloud, you could try to run the library locally following the guide in GitHub. Run the example The example just consists of 4 lines of code as shown below, each of which representing one step of the overall process. Step 1. Load input data specific to an on-device ML app. Split it to training data and testing data. End of explanation """ model = image_classifier.create(train_data) """ Explanation: Step 2. Customize the TensorFlow model. End of explanation """ loss, accuracy = model.evaluate(test_data) """ Explanation: Step 3. Evaluate the model. End of explanation """ model.export(export_dir='.') """ Explanation: Step 4. Export to TensorFlow Lite model. Here, we export TensorFlow Lite model with metadata which provides a standard for model descriptions. The label file is embedded in metadata. You could download it in the left sidebar same as the uploading part for your own use. End of explanation """ image_path = tf.keras.utils.get_file( 'flower_photos', 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', untar=True) """ Explanation: After this simple 4 steps, we could further use TensorFlow Lite model file in on-device applications like in image classification reference app. Detailed Process Currently, we support several models such as EfficientNet-Lite* models, MobileNetV2, ResNet50 as pre-trained models for image classification. But it is very flexible to add new pre-trained models to this library with just a few lines of code. The following walks through this end-to-end example step by step to show more detail. Step 1: Load Input Data Specific to an On-device ML App The flower dataset contains 3670 images belonging to 5 classes. Download the archive version of the dataset and untar it. The dataset has the following directory structure: <pre> <b>flower_photos</b> |__ <b>daisy</b> |______ 100080576_f52e8ee070_n.jpg |______ 14167534527_781ceb1b7a_n.jpg |______ ... |__ <b>dandelion</b> |______ 10043234166_e6dd915111_n.jpg |______ 1426682852_e62169221f_m.jpg |______ ... |__ <b>roses</b> |______ 102501987_3cdb8e5394_n.jpg |______ 14982802401_a3dfb22afb.jpg |______ ... |__ <b>sunflowers</b> |______ 12471791574_bb1be83df4.jpg |______ 15122112402_cafa41934f.jpg |______ ... |__ <b>tulips</b> |______ 13976522214_ccec508fe7.jpg |______ 14487943607_651e8062a1_m.jpg |______ ... </pre> End of explanation """ data = ImageClassifierDataLoader.from_folder(image_path) """ Explanation: Use ImageClassifierDataLoader class to load data. As for from_folder() method, it could load data from the folder. It assumes that the image data of the same class are in the same subdirectory and the subfolder name is the class name. Currently, JPEG-encoded images and PNG-encoded images are supported. End of explanation """ train_data, rest_data = data.split(0.8) validation_data, test_data = rest_data.split(0.5) """ Explanation: Split it to training data (80%), validation data (10%, optional) and testing data (10%). 
End of explanation """ plt.figure(figsize=(10,10)) for i, (image, label) in enumerate(data.dataset.take(25)): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(image.numpy(), cmap=plt.cm.gray) plt.xlabel(data.index_to_label[label.numpy()]) plt.show() """ Explanation: Show 25 image examples with labels. End of explanation """ model = image_classifier.create(train_data, validation_data=validation_data) """ Explanation: Step 2: Customize the TensorFlow Model Create a custom image classifier model based on the loaded data. The default model is EfficientNet-Lite0. End of explanation """ model.summary() """ Explanation: Have a look at the detailed model structure. End of explanation """ loss, accuracy = model.evaluate(test_data) """ Explanation: Step 3: Evaluate the Customized Model Evaluate the result of the model, get the loss and accuracy of the model. End of explanation """ # A helper function that returns 'red'/'black' depending on if its two input # parameter matches or not. def get_label_color(val1, val2): if val1 == val2: return 'black' else: return 'red' # Then plot 100 test images and their predicted labels. # If a prediction result is different from the label provided label in "test" # dataset, we will highlight it in red color. plt.figure(figsize=(20, 20)) predicts = model.predict_top_k(test_data) for i, (image, label) in enumerate(test_data.dataset.take(100)): ax = plt.subplot(10, 10, i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(image.numpy(), cmap=plt.cm.gray) predict_label = predicts[i][0][0] color = get_label_color(predict_label, test_data.index_to_label[label.numpy()]) ax.xaxis.label.set_color(color) plt.xlabel('Predicted: %s' % predict_label) plt.show() """ Explanation: We could plot the predicted results in 100 test images. Predicted labels with red color are the wrong predicted results while others are correct. End of explanation """ model.export(export_dir='.') """ Explanation: If the accuracy doesn't meet the app requirement, one could refer to Advanced Usage to explore alternatives such as changing to a larger model, adjusting re-training parameters etc. Step 4: Export to TensorFlow Lite Model Convert the existing model to TensorFlow Lite model format with metadata. The default TFLite filename is model.tflite. End of explanation """ model.export(export_dir='.', export_format=ExportFormat.LABEL) """ Explanation: See example applications and guides of image classification for more details about how to integrate the TensorFlow Lite model into mobile apps. The allowed export formats can be one or a list of the following: ExportFormat.TFLITE ExportFormat.LABEL ExportFormat.SAVED_MODEL By default, it just exports TensorFlow Lite model with metadata. You can also selectively export different files. For instance, exporting only the label file as follows: End of explanation """ model.evaluate_tflite('model.tflite', test_data) """ Explanation: You can also evaluate the tflite model with the evaluate_tflite method. End of explanation """ config = configs.QuantizationConfig.create_full_integer_quantization(representative_data=test_data, is_integer_only=True) """ Explanation: Advanced Usage The create function is the critical part of this library. It uses transfer learning with a pretrained model similar to the tutorial. The createfunction contains the following steps: Split the data into training, validation, testing data according to parameter validation_ratio and test_ratio. 
The default value of validation_ratio and test_ratio are 0.1 and 0.1. Download a Image Feature Vector as the base model from TensorFlow Hub. The default pre-trained model is EfficientNet-Lite0. Add a classifier head with a Dropout Layer with dropout_rate between head layer and pre-trained model. The default dropout_rate is the default dropout_rate value from make_image_classifier_lib by TensorFlow Hub. Preprocess the raw input data. Currently, preprocessing steps including normalizing the value of each image pixel to model input scale and resizing it to model input size. EfficientNet-Lite0 have the input scale [0, 1] and the input image size [224, 224, 3]. Feed the data into the classifier model. By default, the training parameters such as training epochs, batch size, learning rate, momentum are the default values from make_image_classifier_lib by TensorFlow Hub. Only the classifier head is trained. In this section, we describe several advanced topics, including switching to a different image classification model, changing the training hyperparameters etc. Post-training quantization on the TensorFLow Lite model Post-training quantization is a conversion technique that can reduce model size and inference latency, while also improving CPU and hardware accelerator latency, with little degradation in model accuracy. Thus, it's widely used to optimize the model. Model Maker supports multiple post-training quantization options. Let's take full integer quantization as an instance. First, define the quantization config to enforce full integer quantization for all ops including the input and output. The input type and output type are uint8 by default. You may also change them to other types like int8 by setting inference_input_type and inference_output_type in config. End of explanation """ model.export(export_dir='.', tflite_filename='model_quant.tflite', quantization_config=config) """ Explanation: Then we export TensorFlow Lite model with such configuration. End of explanation """ model = image_classifier.create(train_data, model_spec=model_spec.mobilenet_v2_spec, validation_data=validation_data) """ Explanation: In Colab, you can download the model named model_quant.tflite from the left sidebar, same as the uploading part mentioned above. Change the model Change to the model that's supported in this library. This library supports EfficientNet-Lite models, MobileNetV2, ResNet50 by now. EfficientNet-Lite are a family of image classification models that could achieve state-of-art accuracy and suitable for Edge devices. The default model is EfficientNet-Lite0. We could switch model to MobileNetV2 by just setting parameter model_spec to mobilenet_v2_spec in create method. End of explanation """ loss, accuracy = model.evaluate(test_data) """ Explanation: Evaluate the newly retrained MobileNetV2 model to see the accuracy and loss in testing data. End of explanation """ inception_v3_spec = model_spec.ImageModelSpec( uri='https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1') inception_v3_spec.input_image_shape = [299, 299] """ Explanation: Change to the model in TensorFlow Hub Moreover, we could also switch to other new models that inputs an image and outputs a feature vector with TensorFlow Hub format. As Inception V3 model as an example, we could define inception_v3_spec which is an object of ImageModelSpec and contains the specification of the Inception V3 model. We need to specify the model name name, the url of the TensorFlow Hub model uri. 
Meanwhile, the default value of input_image_shape is [224, 224]. We need to change it to [299, 299] for Inception V3 model. End of explanation """ model = image_classifier.create(train_data, validation_data=validation_data, epochs=10) """ Explanation: Then, by setting parameter model_spec to inception_v3_spec in create method, we could retrain the Inception V3 model. The remaining steps are exactly same and we could get a customized InceptionV3 TensorFlow Lite model in the end. Change your own custom model If we'd like to use the custom model that's not in TensorFlow Hub, we should create and export ModelSpec in TensorFlow Hub. Then start to define ImageModelSpec object like the process above. Change the training hyperparameters We could also change the training hyperparameters like epochs, dropout_rate and batch_size that could affect the model accuracy. The model parameters you can adjust are: epochs: more epochs could achieve better accuracy until it converges but training for too many epochs may lead to overfitting. dropout_rate: The rate for dropout, avoid overfitting. None by default. batch_size: number of samples to use in one training step. None by default. validation_data: Validation data. If None, skips validation process. None by default. train_whole_model: If true, the Hub module is trained together with the classification layer on top. Otherwise, only train the top classification layer. None by default. learning_rate: Base learning rate. None by default. momentum: a Python float forwarded to the optimizer. Only used when use_hub_library is True. None by default. shuffle: Boolean, whether the data should be shuffled. False by default. use_augmentation: Boolean, use data augmentation for preprocessing. False by default. use_hub_library: Boolean, use make_image_classifier_lib from tensorflow hub to retrain the model. This training pipeline could achieve better performance for complicated dataset with many categories. True by default. warmup_steps: Number of warmup steps for warmup schedule on learning rate. If None, the default warmup_steps is used which is the total training steps in two epochs. Only used when use_hub_library is False. None by default. model_dir: Optional, the location of the model checkpoint files. Only used when use_hub_library is False. None by default. Parameters which are None by default like epochs will get the concrete default parameters in make_image_classifier_lib from TensorFlow Hub library or train_image_classifier_lib. For example, we could train with more epochs. End of explanation """ loss, accuracy = model.evaluate(test_data) """ Explanation: Evaluate the newly retrained model with 10 training epochs. End of explanation """
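As a final sanity check, not part of the original tutorial, the exported model.tflite can be run directly with the TensorFlow Lite interpreter. This is only a wiring sketch: whether the exported model expects raw pixel values or normalized ones depends on the export settings and the embedded metadata, so the predicted class here is not guaranteed to be meaningful without matching that preprocessing.

# Sketch: run one test image through the exported TFLite model with tf.lite.Interpreter.
interpreter = tf.lite.Interpreter(model_path='model.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]

for image, label in test_data.dataset.take(1):
    # Resize and cast to whatever the model reports as its input shape and dtype.
    height, width = input_details['shape'][1], input_details['shape'][2]
    img = tf.image.resize(image, (height, width))
    img = tf.cast(tf.expand_dims(img, 0), input_details['dtype'])
    interpreter.set_tensor(input_details['index'], img.numpy())
    interpreter.invoke()
    scores = interpreter.get_tensor(output_details['index'])[0]
    print('predicted class index:', int(np.argmax(scores)), 'true label index:', int(label))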
huilyu2/DataVisualization
Assignment+3_Hui+Lyu.ipynb
mit
%matplotlib inline import numpy as np import matplotlib.pyplot as plt import csv """ Explanation: Assignment: Week 04 This week, we will be putting to use some of the skills we have developed over the last couple weeks. You will be tasked with examining a new dataset, constructing a class that operates on it, and plotting a few different aspects of that dataset. Our dataset will be the trees owned by the City of Champaign. You will have to load in the data, filter and split based on a few criteria, and then make visualizations. End of explanation """ fn = "/home/huilyu2/work/Trees_Owned_by_the_City_of_Champaign.csv" # /home/huilyu2/work/Trees_Owned_by_the_City_of_Champaign.csv # YOUR CODE HERE data = {} with open(fn, "r") as f: reader = csv.reader(f) header = next(reader) for column in header: data[column] = [] for row in reader: for column, value in zip(header,row): data[column].append(value) np.testing.assert_equal(len(data), 6) for col in data: np.testing.assert_equal(len(data[col]), 26940) """ Explanation: In the next cell, read in the data using CSV. You do not (yet) need to apply any data-type conversions; what needs to come out of this is a dict object (call this object "data") which is composed of lists, one for every header. End of explanation """ data["Latitude"] = [] data["Longitude"] = [] for loc in data["Location"]: lat, lon = loc.split(",") data["Latitude"].append(float(lat[1:])) data["Longitude"].append(float(lon[:-1])) """ Explanation: The column Location is not in a useful form. You may use the code below to convert it to Latitude and Longitude. The original form is in a string such as: '(40.15271592360, -88.25708727630)' We will use "split" on these and strip off the leading and trailing parenthesis. Because this is material that we have not covered in class before, you may use this code directly. 
End of explanation """ class Dataset: def __init__(self, data): self.data = data.copy() def convert(self, column, dtype): # YOUR CODE HERE self.data[column] = np.array(self.data[column], dtype=dtype) #raise NotImplementedError() def columns(self): # YOUR CODE HERE return self.data.keys() #raise NotImplementedError() def filter_eq(self, column, value): # YOUR CODE HERE good = (self.data[column] == value) new_data = {} for column in self.data: new_data[column] = self.data[column][good] return Dataset(new_data) #raise NotImplementedError() def filter_lt(self, column, value): # YOUR CODE HERE good = (self.data[column] < value) new_data = {} for column in self.data: new_data[column] = self.data[column][good] return Dataset(new_data) #raise NotImplementedError() def filter_gt(self, column, value): # YOUR CODE HERE good = (self.data[column] > value) new_data = {} for column in self.data: new_data[column] = self.data[column][good] return Dataset(new_data) #raise NotImplementedError() def filter_ne(self, column, value): # YOUR CODE HERE good = (self.data[column] != value) new_data = {} for column in self.data: new_data[column] = self.data[column][good] return Dataset(new_data) #raise NotImplementedError() def size(self): # YOUR CODE HERE for key in self.data: return self.data[key].size #raise NotImplementedError() def split(self, column): # YOUR CODE HERE new_datasets = {} for split_value in np.unique(self.data[column]): new_datasets[split_value] = self.filter_eq(column, split_value) return new_datasets #raise NotImplementedError() def stats(self): statistics = {} for key in self.data: if self.data[key].dtype not in ("float", "int"): continue values = self.data[key] statistics[key] = (values.min(), values.max(), values.std(), values.mean()) return statistics def compare(self, other): stats1 = self.stats() stats2 = other.stats() for column in self.columns(): if column not in stats1: continue print("Column '{0:25s}'".format(column)) for s1, s2 in zip(stats1[column], stats2[column]): print(" {0} vs {1}".format(s1, s2)) def plot(self, x_column, y_column): plt.plot(self.data[x_column], self.data[y_column], '.') trees = Dataset(data) value_types = {'Number of Trunks': 'int', 'Diameter at Breast Height (in Feet)': 'float', 'Latitude': 'float', 'Longitude': 'float'} for v in trees.columns(): trees.convert(v, value_types.get(v, "str")) trees.columns() trees.stats() """ Explanation: Below, construct a dataset class. You will see stub functions. You may reuse what we have worked on in class, or you may develop something new. You must implement the routines convert, columns, filter_eq, filter_lt, filter_gt, filter_ne, size and split. End of explanation """ quercus_bicolor = trees.filter_eq("Tree Species", "Quercus bicolor") np.testing.assert_equal(quercus_bicolor.size(), 1239) stats = quercus_bicolor.stats() np.testing.assert_equal(stats['Diameter at Breast Height (in Feet)'], (1.0, 44.0, 4.8951987699249253, 8.3123486682808725)) np.testing.assert_equal(stats['Latitude'], (40.070883672699999, 40.156343598100001, 0.020057649584011371, 40.106116841942452)) np.testing.assert_equal(stats['Longitude'], (-88.332447309000003, -88.229326890500005, 0.023900192692977454, -88.2782118899284)) np.testing.assert_equal(stats['Number of Trunks'], (0, 2, 0.063479522525377516, 0.99757869249394671)) """ Explanation: We will now test that this works by filtering out a given tree species. 
End of explanation """ from collections import Counter def first_graph(dataset,name): if name == "All Trees": vacant_trees = dataset.filter_eq("Tree Species", "vacant site large") non_vacant_trees = dataset.filter_ne("Tree Species", "vacant site large") plt.plot(non_vacant_trees.data['Longitude'], non_vacant_trees.data['Latitude'],".",label= "non-vacant site",alpha = 0.5) plt.plot(vacant_trees.data['Longitude'], vacant_trees.data['Latitude'],".",label= "vacant site large",alpha = 0.5) plt.xlabel("Longitude",fontsize=20) plt.ylabel("Latitude",fontsize=20) plt.title("Location of {}".format(name),fontsize=22) plt.grid(True) plt.legend() else: plt.plot(dataset.data['Longitude'], dataset.data['Latitude'],".",alpha = 0.5) plt.xlabel("Longitude",fontsize=20) plt.ylabel("Latitude",fontsize=20) plt.title("Location of {}".format(name),fontsize=22) plt.grid(True) count = Counter(trees.filter_ne("Tree Species", "vacant site large").data["Tree Species"]) count.most_common(5) def second_graph(dataset,name): count = Counter(dataset.data["Tree Species"]) most_common = count.most_common(5) most_common_dict = dict((str(k), v) for k, v in most_common) number_of_trunk = {} for key in most_common_dict.keys(): number_of_trunk[key] = dataset.filter_eq("Tree Species",key).data["Number of Trunks"] n, bins, patches = plt.hist(number_of_trunk.values(), stacked = True, log = True, edgecolor = 'gray',bins = np.arange(10),align='left', label = number_of_trunk.keys(),alpha=0.75) # refer to Stack Overflow # http://stackoverflow.com/questions/18449602/matplotlib-creating-stacked-histogram-from-three-unequal-length-arrays plt.legend() plt.xticks(np.arange(10)) plt.xlabel("Number of Trunks of Each Tree",fontsize=20) plt.ylabel("Number of Trees",fontsize=20) plt.ylim(10**(-1),10**4) plt.title("Distribution of tree trunk count for\nfive most common species in {}".format(name),fontsize=22) def third_graph(dataset,name): most_common = Counter(dataset.data["Tree Species"]).most_common(1) most_common_dict = dict((str(k), v) for k, v in most_common) dataset = dataset.filter_eq("Tree Species", list(most_common_dict)[0]) # The following code is referred to the in-class notes. 
stats = dataset.stats() lon_min = stats["Longitude"][0] lon_max = stats["Longitude"][1] lat_min = stats["Latitude"][0] lat_max = stats["Latitude"][1] def bin_trees(num_bins = 16): lon = np.mgrid[lon_min:lon_max:(num_bins+1)*1j] lat = np.mgrid[lat_min:lat_max:(num_bins+1)*1j] tree_count = np.zeros((num_bins, num_bins)) for i in range(num_bins): left_lat = lat[i] right_lat = lat[i+1] filter_lat_left = dataset.filter_gt("Latitude", left_lat) filter_lat_right = filter_lat_left.filter_lt("Latitude", right_lat) for j in range(num_bins): left_lon = lon[j] right_lon = lon[j+1] filter_lon_left = filter_lat_right.filter_gt("Longitude", left_lon) filter_lon_right = filter_lon_left.filter_lt("Longitude", right_lon) tree_count[i,j] += filter_lon_right.size() plt.imshow(tree_count, extent=(lon_min,lon_max,lat_min,lat_max), origin="lower") plt.xlabel("Longitude",fontsize=20) plt.ylabel("Latitude",fontsize=20) plt.title("Most common species '{}'\nacross spatial ranges in {}".format(list(most_common_dict)[0],name), fontsize=22) color_bar = plt.colorbar() color_bar.set_label("Count") bin_trees() #import ipywidgets #ipywidgets.interact(bin_trees, num_bins = (4,128)) def fourth_graph(dataset,name): min_number = dataset.data["Diameter at Breast Height (in Feet)"].min() max_number = dataset.data["Diameter at Breast Height (in Feet)"].max() bins = np.mgrid[min_number:max_number+10:5] plt.hist(dataset.data["Diameter at Breast Height (in Feet)"], alpha = 0.75,bins=bins,edgecolor = 'k') plt.xlabel("Diameter at Breast Height (in Feet) of Each Tree",fontsize=20) plt.ylabel("Number of Trees",fontsize=20) plt.title("Distribution of tree trunk breadth in {}".format(name),fontsize=22) plt.xticks(bins) plt.rcParams["figure.figsize"] = (20, 20) def make_plots(dataset, name = ""): # YOUR CODE HERE if (name == "All Trees") | (name == "Non-Vacant Site"): plt.subplot(2,2,1) first_graph(dataset,name) plt.subplot(2,2,2) second_graph(dataset,name) plt.subplot(2,2,3) third_graph(dataset,name) plt.subplot(2,2,4) fourth_graph(dataset,name) else: plt.subplot(2,2,1) first_graph(dataset,name) plt.subplot(2,2,2) max_number = dataset.data["Number of Trunks"].max() plt.hist(dataset.data["Number of Trunks"],log=True,edgecolor='k', bins = np.arange(max_number+3),align='left',alpha=0.75) plt.title("Distribution of tree trunk count\nfor the species {}".format(name),fontsize=22) plt.xlabel("Number of Trunks of Each Tree",fontsize=20) plt.ylabel("Number of Trees",fontsize=20) plt.xticks(np.arange(max_number+3)) plt.subplot(2,2,3) # The following code is referred to the in-class notes. 
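        # Same latitude/longitude binning heat map as in third_graph, but computed here
        # for the single species that was passed in to make_plots.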
stats = dataset.stats() lon_min = stats["Longitude"][0] lon_max = stats["Longitude"][1] lat_min = stats["Latitude"][0] lat_max = stats["Latitude"][1] def bin_trees(num_bins = 16): lon = np.mgrid[lon_min:lon_max:(num_bins+1)*1j] lat = np.mgrid[lat_min:lat_max:(num_bins+1)*1j] tree_count = np.zeros((num_bins, num_bins)) for i in range(num_bins): left_lat = lat[i] right_lat = lat[i+1] filter_lat_left = dataset.filter_gt("Latitude", left_lat) filter_lat_right = filter_lat_left.filter_lt("Latitude", right_lat) for j in range(num_bins): left_lon = lon[j] right_lon = lon[j+1] filter_lon_left = filter_lat_right.filter_gt("Longitude", left_lon) filter_lon_right = filter_lon_left.filter_lt("Longitude", right_lon) tree_count[i,j] += filter_lon_right.size() plt.imshow(tree_count, extent=(lon_min,lon_max,lat_min,lat_max), origin="lower") plt.xlabel("Longitude",fontsize=20) plt.ylabel("Latitude",fontsize=20) plt.title("Tree species '{}'\nacross spatial ranges".format(name), fontsize=22) color_bar = plt.colorbar() color_bar.set_label("Count") bin_trees() plt.subplot(2,2,4) fourth_graph(dataset,name) make_plots(trees, "All Trees") make_plots(trees.filter_eq("Tree Species", "Acer rubrum"), "Acer rubrum") make_plots(trees.filter_ne("Tree Species", "vacant site large"), "Non-Vacant Site") """ Explanation: Visualization Assignment For the next phase, there will be a manually-graded section of the homework. This will be worth 50% of the point total. You should implement the function make_plots. It should accept a Dataset object, a name, and should generate four plots: Locations of trees (this can utilize just the lat/lon, and does not need to be overlaid on a map) Distribution of tree trunk count for five most common tree species Most common tree species across spatial ranges (binning) Distribution of tree trunk breadth Your plots (worth 9 points out of a total of 30 for the assignment) will be graded on: Clarity of the visual presentation All components of the plot are present to allow for interpretation of the plot (labels, etc) Reusability of the plots; could the function be used across different datasets drawn from this single dataset? Aesthetics will be discussed in feedback End of explanation """
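# Reusability check (not required by the assignment): the same plotting function can be
# pointed at any other filtered subset, e.g. the species verified earlier in the notebook.
make_plots(trees.filter_eq("Tree Species", "Quercus bicolor"), "Quercus bicolor")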
y2ee201/Deep-Learning-Nanodegree
transfer-learning/Transfer_Learning.ipynb
mit
from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm vgg_dir = 'tensorflow_vgg/' # Make sure vgg exists if not isdir(vgg_dir): raise Exception("VGG directory doesn't exist!") class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(vgg_dir + "vgg16.npy"): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar: urlretrieve( 'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy', vgg_dir + 'vgg16.npy', pbar.hook) else: print("Parameter file already exists!") """ Explanation: Transfer Learning Most of the time you won't want to train a whole convolutional network yourself. Modern ConvNets training on huge datasets like ImageNet take weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using VGGNet trained on the ImageNet dataset as a feature extractor. Below is a diagram of the VGGNet architecture. <img src="assets/cnnarchitecture.jpg" width=700px> VGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes. You can read more about transfer learning from the CS231n course notes. Pretrained VGGNet We'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. Make sure to clone this repository to the directory you're working from. You'll also want to rename it so it has an underscore instead of a dash. git clone https://github.com/machrisaa/tensorflow-vgg.git tensorflow_vgg This is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link. You'll need to clone the repo into the folder containing this notebook. Then download the parameter file using the next cell. End of explanation """ import tarfile dataset_folder_path = 'flower_photos' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile('flower_photos.tar.gz'): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar: urlretrieve( 'http://download.tensorflow.org/example_images/flower_photos.tgz', 'flower_photos.tar.gz', pbar.hook) if not isdir(dataset_folder_path): with tarfile.open('flower_photos.tar.gz') as tar: tar.extractall() tar.close() """ Explanation: Flower power Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the TensorFlow inception tutorial. 
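After extraction you should end up with a flower_photos/ directory containing one subdirectory per flower class (for example flower_photos/roses/); the class list used later in the notebook is built simply by listing those subdirectories.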
End of explanation """ import os import numpy as np import tensorflow as tf from tensorflow_vgg import vgg16 from tensorflow_vgg import utils data_dir = 'flower_photos/' contents = os.listdir(data_dir) classes = [each for each in contents if os.path.isdir(data_dir + each)] """ Explanation: ConvNet Codes Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier. Here we're using the vgg16 module from tensorflow_vgg. The network takes images of size $224 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. The network implemented here has this structure (copied from the source code): ``` self.conv1_1 = self.conv_layer(bgr, "conv1_1") self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2") self.pool1 = self.max_pool(self.conv1_2, 'pool1') self.conv2_1 = self.conv_layer(self.pool1, "conv2_1") self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2") self.pool2 = self.max_pool(self.conv2_2, 'pool2') self.conv3_1 = self.conv_layer(self.pool2, "conv3_1") self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2") self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3") self.pool3 = self.max_pool(self.conv3_3, 'pool3') self.conv4_1 = self.conv_layer(self.pool3, "conv4_1") self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2") self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3") self.pool4 = self.max_pool(self.conv4_3, 'pool4') self.conv5_1 = self.conv_layer(self.pool4, "conv5_1") self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2") self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3") self.pool5 = self.max_pool(self.conv5_3, 'pool5') self.fc6 = self.fc_layer(self.pool5, "fc6") self.relu6 = tf.nn.relu(self.fc6) ``` So what we want are the values of the first fully connected layer, after being ReLUd (self.relu6). To build the network, we use with tf.Session() as sess: vgg = vgg16.Vgg16() input_ = tf.placeholder(tf.float32, [None, 224, 224, 3]) with tf.name_scope("content_vgg"): vgg.build(input_) This creates the vgg object, then builds the graph with vgg.build(input_). 
Then to get the values from the layer, feed_dict = {input_: images} codes = sess.run(vgg.relu6, feed_dict=feed_dict) End of explanation """ # Set the batch size higher if you can fit in in your GPU memory batch_size = 10 codes_list = [] labels = [] batch = [] codes = None with tf.Session() as sess: # TODO: Build the vgg network here vgg = vgg16.Vgg16() inputs_ = tf.placeholder(tf.float32, shape=[None, 224, 224, 3]) with tf.name_scope('content_vgg'): vgg.build(inputs_) for each in classes: print("Starting {} images".format(each)) class_path = data_dir + each files = os.listdir(class_path) for ii, file in enumerate(files, 1): # Add images to the current batch # utils.load_image crops the input images for us, from the center img = utils.load_image(os.path.join(class_path, file)) batch.append(img.reshape((1, 224, 224, 3))) labels.append(each) # Running the batch through the network to get the codes if ii % batch_size == 0 or ii == len(files): # Image batch to pass to VGG network images = np.concatenate(batch) # TODO: Get the values from the relu6 layer of the VGG network codes_batch = sess.run(vgg.relu6, feed_dict={inputs_: images}) # Here I'm building an array of the codes if codes is None: codes = codes_batch else: codes = np.concatenate((codes, codes_batch)) # Reset to start building the next batch batch = [] print('{} images processed'.format(ii)) # write codes to file with open('codes', 'w') as f: codes.tofile(f) # write labels to file import csv with open('labels', 'w') as f: writer = csv.writer(f, delimiter='\n') writer.writerow(labels) """ Explanation: Below I'm running images through the VGG network in batches. Exercise: Below, build the VGG network. Also get the codes from the first fully connected layer (make sure you get the ReLUd values). End of explanation """ # read codes and labels from file import csv with open('labels') as f: reader = csv.reader(f, delimiter='\n') labels = np.array([each for each in reader if len(each) > 0]).squeeze() with open('codes') as f: codes = np.fromfile(f, dtype=np.float32) codes = codes.reshape((len(labels), -1)) """ Explanation: Building the Classifier Now that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work. End of explanation """ from sklearn.preprocessing import LabelBinarizer lb = LabelBinarizer() lb.fit(labels) labels_vecs = lb.transform(labels) # Your one-hot encoded labels array here print(labels[1600:1604]) print(labels_vecs[1600:1604]) """ Explanation: Data prep As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels! Exercise: From scikit-learn, use LabelBinarizer to create one-hot encoded vectors from the labels. 
End of explanation """ from sklearn.model_selection import StratifiedShuffleSplit ss = StratifiedShuffleSplit(n_splits = 1, test_size = 0.2) splitter = ss.split(codes, labels_vecs) for train_index, test_index in splitter: train_x, train_y = codes[train_index], labels_vecs[train_index] val_x, val_y = codes[test_index], labels_vecs[test_index] ss = StratifiedShuffleSplit(n_splits = 1, test_size = 0.5) splitter = ss.split(val_x, val_y) for val_index, test_index in splitter: test_x, test_y = val_x[test_index], val_y[test_index] val_x, val_y = val_x[val_index], val_y[val_index] print("Train shapes (x, y):", train_x.shape, train_y.shape) print("Validation shapes (x, y):", val_x.shape, val_y.shape) print("Test shapes (x, y):", test_x.shape, test_y.shape) """ Explanation: Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same the distribution of classes as it is for the whole data set. The easiest way to accomplish both these goals is to use StratifiedShuffleSplit from scikit-learn. You can create the splitter like so: ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2) Then split the data with splitter = ss.split(x, y) ss.split returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use next(splitter) to get the indices. Be sure to read the documentation and the user guide. Exercise: Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets. End of explanation """ inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]]) labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]]) learning_rate = 0.01 # TODO: Classifier layers and operations # weights = tf.Variable(tf.truncated_normal(dtype=tf.float32, mean=0, stddev=0.1, shape=[codes.shape[1], 5])) # biases = tf.Variable(tf.truncated_normal(dtype=tf.float32, mean=0, stddev=0.1, shape=[5])) fc = tf.contrib.layers.fully_connected(inputs=inputs_, num_outputs=256) logits = tf.contrib.layers.fully_connected(inputs=fc, num_outputs= labels_vecs.shape[1], activation_fn=None) # output layer logits cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels_, name=None)# cross entropy loss cost = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)# training optimizer # Operations for validation/test accuracy predicted = tf.nn.softmax(logits) correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) """ Explanation: If you did it right, you should see these sizes for the training sets: Train shapes (x, y): (2936, 4096) (2936, 5) Validation shapes (x, y): (367, 4096) (367, 5) Test shapes (x, y): (367, 4096) (367, 5) Classifier layers Once you have the convolutional codes, you just need to build a classfier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network. Exercise: With the codes and labels loaded, build the classifier. Consider the codes as your inputs, each of them are 4096D vectors. 
You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost. End of explanation """ def get_batches(x, y, n_batches=10): """ Return a generator that yields batches from arrays x and y. """ batch_size = len(x)//n_batches for ii in range(0, n_batches*batch_size, batch_size): # If we're not on the last batch, grab data with size batch_size if ii != (n_batches-1)*batch_size: X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size] # On the last batch, grab the rest of the data else: X, Y = x[ii:], y[ii:] # I love generators yield X, Y """ Explanation: Batches! Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data. End of explanation """ epochs = 10 saver = tf.train.Saver() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # TODO: Your training code here for i in range(epochs): iteration = 0 for x, y in get_batches(train_x, train_y): loss, _ = sess.run([cost, optimizer], feed_dict={inputs_: x, labels_: y}) acc = accuracy.eval(feed_dict={inputs_: val_x, labels_: val_y}) print('Epoch {}/Iteration {}: Loss = {:.4f}, Validation Accuracy = {:.2f}'.format(i + 1, iteration + 1, loss, acc)) iteration = iteration + 1 saver.save(sess, "checkpoints/flowers.ckpt") """ Explanation: Training Here, we'll train the network. Exercise: So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. Use the get_batches function I wrote before to get your batches like for x, y in get_batches(train_x, train_y). Or write your own! End of explanation """ with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) feed = {inputs_: test_x, labels_: test_y} test_acc = sess.run(accuracy, feed_dict=feed) print("Test accuracy: {:.4f}".format(test_acc)) %matplotlib inline import matplotlib.pyplot as plt from scipy.ndimage import imread """ Explanation: Testing Below you see the test accuracy. You can also see the predictions returned for images. End of explanation """ test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg' test_img = imread(test_img_path) plt.imshow(test_img) # Run this cell if you don't have a vgg graph built if 'vgg' in globals(): print('"vgg" object already exists. Will not create again.') else: #create vgg with tf.Session() as sess: input_ = tf.placeholder(tf.float32, [None, 224, 224, 3]) vgg = vgg16.Vgg16() vgg.build(inputs_) with tf.Session() as sess: img = utils.load_image(test_img_path) img = img.reshape((1, 224, 224, 3)) feed_dict = {inputs_: img} code = sess.run(vgg.relu6, feed_dict=feed_dict) saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) feed = {inputs_: code} prediction = sess.run(predicted, feed_dict=feed).squeeze() plt.imshow(test_img) plt.barh(np.arange(5), prediction) _ = plt.yticks(np.arange(5), lb.classes_) """ Explanation: Below, feel free to choose images and see how the trained classifier predicts the flowers in them. End of explanation """
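# Optional follow-up (uses only objects created above): print the single most likely
# class for the test image, to complement the bar chart of class probabilities.
print("Top prediction:", lb.classes_[np.argmax(prediction)])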
quantopian/research_public
notebooks/data/eventvestor.earnings_releases/notebook.ipynb
apache-2.0
# import the dataset from quantopian.interactive.data.eventvestor import earnings_releases # or if you want to import the free dataset, use: # from quantopian.interactivedata.eventvestor import earnings_releases_free # import data operations from odo import odo # import other libraries we will use import pandas as pd # Let's use blaze to understand the data a bit using Blaze dshape() earnings_releases.dshape # And how many rows are there? # N.B. we're using a Blaze function to do this, not len() earnings_releases.count() # Let's see what the data looks like. We'll grab the first three rows. earnings_releases[:3] """ Explanation: EventVestor: Earnings Releases In this notebook, we'll take a look at EventVestor's Earnings Releases dataset, available on the Quantopian Store. This dataset spans January 01, 2007 through the current day, and documents quarterly earnings releases. Blaze Before we dig into the data, we want to tell you about how you generally access Quantopian Store data sets. These datasets are available through an API service known as Blaze. Blaze provides the Quantopian user with a convenient interface to access very large datasets. Blaze provides an important function for accessing these datasets. Some of these sets are many millions of records. Bringing that data directly into Quantopian Research directly just is not viable. So Blaze allows us to provide a simple querying interface and shift the burden over to the server side. It is common to use Blaze to reduce your dataset in size, convert it over to Pandas and then to use Pandas for further computation, manipulation and visualization. Helpful links: * Query building for Blaze * Pandas-to-Blaze dictionary * SQL-to-Blaze dictionary. Once you've limited the size of your Blaze object, you can convert it to a Pandas DataFrames using: from odo import odo odo(expr, pandas.DataFrame) Free samples and limits One other key caveat: we limit the number of results returned from any given expression to 10,000 to protect against runaway memory usage. To be clear, you have access to all the data server side. We are limiting the size of the responses back from Blaze. There is a free version of this dataset as well as a paid one. The free one includes about three years of historical data, though not up to the current day. With preamble in place, let's get started: End of explanation """ # get apple's sid first aapl_sid = symbols('AAPL').sid aapl_earnings = earnings_releases[('2011-12-31' < earnings_releases['asof_date']) & (earnings_releases['asof_date'] <'2013-01-01') & (earnings_releases.sid==aapl_sid)] # When displaying a Blaze Data Object, the printout is automatically truncated to ten rows. aapl_earnings.sort('asof_date') """ Explanation: Let's go over the columns: - event_id: the unique identifier for this event. - asof_date: EventVestor's timestamp of event capture. - trade_date: for event announcements made before trading ends, trade_date is the same as event_date. For announcements issued after market close, trade_date is next market open day. - symbol: stock ticker symbol of the affected company. - event_type: this should always be Earnings Release/Earnings release. - event_headline: a brief description of the event - event_phase: the inclusion of this field is likely an error on the part of the data vendor. We're currently attempting to resolve this. - fiscal_period: fiscal period for the reported earnings, such as 1Q 15, 2Q 15, etc. - calendar_period: identifies the calendar period based on the fiscal period end date. E.g. 
if the fiscal period ends any time after the middle of a given calendar quarter, like 1Q 15, that calendar quarter will be assigned regardless of the fiscal quarter. - fiscal_periodend: the last date for the reported earnings period. - currency: currency used for reporting earnings. - revenue: revenue in millions - gross_income: gross income in millions - operating_income: operating income in millions - net_income: net income in millions - eps: earnings per share, in the reported currency - eps_surprisepct: the meaning of this column is presently uncertain. We're working with our data vendor to resolve this issue. - event_rating: this is always 1. The meaning of this is uncertain. - timestamp: this is our timestamp on when we registered the data. - sid: the equity's unique identifier. Use this instead of the symbol. We've done much of the data processing for you. Fields like timestamp and sid are standardized across all our Store Datasets, so the datasets are easy to combine. We have standardized the sid across all our equity databases. We can select columns and rows with ease. Below, we'll fetch all of Apple's entries from 2012. End of explanation """ # manipulate with Blaze first: big_earnings = earnings_releases[earnings_releases.revenue > 40000] # now that we've got a much smaller object (len: ~2167 rows), we can convert it to a pandas DataFrame df = odo(big_earnings, pd.DataFrame) df = df[['sid', 'asof_date','revenue']].dropna() df.sort('revenue',ascending=False) """ Explanation: Now suppose we want a DataFrame of all earnings releases with revenue over 30 billion dollars. For those earnings releases, we only want the sid and the asof_date. End of explanation """
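# A note on units for the query above: the revenue column is reported in millions of
# dollars, so the filter earnings_releases.revenue > 40000 keeps only releases of
# roughly $40 billion or more.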
henriquepgomide/caRtola
src/python/desafio_valorizacao/Descobrindo o algoritmo de valorização do Cartola FC - Parte II.ipynb
mit
# Importar bibliotecas import pprint import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import lightgbm as lgb from sklearn import linear_model from sklearn.metrics import (mean_squared_error, r2_score) from sklearn import ensemble from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import (AdaBoostRegressor, RandomForestRegressor) pd.options.mode.chained_assignment = None %matplotlib inline pd.options.display.float_format = '{:,.2f}'.format # Abrir banco de dados dados = pd.read_csv('~/caRtola/data/desafio_valorizacao/valorizacao_cartola_2018.csv') # Listar nome das variáveis str(list(dados)) # Selecionar variáveis para estudo dados = dados[['slug', 'rodada', 'posicao', 'status', 'variacao_preco', 'pontos', 'preco', 'media_pontos']] # Visualizar dados dados.head(n=10) """ Explanation: Descobrindo o algoritmo de valorização do Cartola FC - Parte II Em busca de um modelo generalizável... Olá novamente! Este é o segundo tutorial da série que tentará descobrir o algoritmo de valorização do Cartola FC. Se você ainda não leu o primeiro tutorial, recomendo leitura antes de ler este estudo. Nossos objetivos aqui são: Preparar os dados para criamos modelos; Criar modelos que possam generalizar nossos resultados promissores do estudo 1; Além disso, você estudará análise de dados usando Python com Pandas, Seaborn, Sklearn. Espero que você tenha noção sobre: Modelos lineares Análise de séries temporais Conhecimentos básicos do Cartola FC. End of explanation """ # 1. Criar atributos para modelagem def recode_position(data): '''Recodificar posicao para númerica''' PositionDictionary = { 'tec':0, 'gol':1, 'zag':2, 'lat':3, 'mei':4, 'ata':5 } return pd.Series(data['posicao'].map(PositionDictionary)) def create_atributes(data): '''Criar atributos com lags - variacao_preco_lag e pontos_lag''' try: data['variacao_preco_lag'] = data.groupby(['slug'])['variacao_preco'].shift(1) data['pontos_lag'] = data.groupby(['slug'])['pontos'].shift(1) data['jogou_partida'] = np.where(data.status.isin(['Nulo', 'Suspenso', 'Contundido', 'Dúvida']) & data.pontos_lag==0, 0, 1) data['posicao_rec'] = recode_position(data) data = data.dropna(how='any') return(data) except: print('Deu ruim - Function create_atributes failed') # 2. 
Padronizar atributos def agg_by_round(x): '''Computar preco, pontuacao media e preco''' names = { 'med_preco': x['preco'].mean(), 'med_pontos_lag': x['pontos_lag'].mean(), 'std_preco': x['preco'].std(), 'std_pontos_lag': x['pontos_lag'].std(), 'soma_pontos_lag_rp': x['pontos_lag'].sum(), 'soma_preco_rp': x['preco'].sum() } return pd.Series(names, index=['med_preco', 'med_pontos_lag','std_preco', 'std_pontos_lag','soma_pontos_lag_rp','soma_preco_rp']) def center_atributes(data): '''Centrar e escalar atributos dos dados''' data['preco_cen'] = (data['preco'] - data['med_preco']) / data['std_preco'] data['pon_lag_cen'] = (data['pontos_lag'] - data['med_pontos_lag']) / data['std_pontos_lag'] data['pon_lag_ratio'] = data['pontos_lag'] / data['soma_pontos_lag_rp'] data['preco_ratio'] = data['preco'] / data['soma_preco_rp'] return data # Processar dados create_atributes(dados) # Criar atributos agg_data = dados.groupby(['rodada','posicao']).apply(agg_by_round) # Criar atributos gerais por rodada dados = dados.join(agg_data, on=['rodada','posicao']) # Join dataframes dados = center_atributes(dados) # Centrar atributos dados = dados.dropna() # Retirar dados omissos dados.head() # Visualizar banco de dados transformado """ Explanation: Preparar dados Chegamos a etapa mais longa e que é crítica para modelagem de dados - preparação dos dados. Nós iremos repetir alguns passos do estudo 1, mas tentaremos aplicar inicialmente um modelo para todos os jogadores de uma dada posição. Nosso roteiro nestá sessão será: Criar atributos para modelagem - variacao_lag e pontos_lag Padronizar resultados dos jogadores para generalização do modelo End of explanation """ # Matriz de correlação dados[['variacao_preco_lag', 'pontos_lag', 'pon_lag_cen', 'preco_cen', 'med_preco', 'preco', 'med_pontos_lag', 'media_pontos', 'preco_ratio', 'pon_lag_ratio']].corr() """ Explanation: Modelagem dos dados Vamos começar o processo de modelagem observando a matriz de correlação entre os atributos numéricos. Isso nos dará uma ideia de quais variáveis deveremos incluir em nosso modelo. End of explanation """ # Ver ordem das colunas do banco de dados variables = list(dados) variables = {variables[i]:i for i in range(0, len(variables))} sorted_by_value = sorted(variables.items(), key=lambda kv: kv[1]) pprint.pprint(sorted_by_value) # Converter data frame para matriz train_data = dados.values # Atributos: rodada, preco, media_pontos, pontos_lag, jogou_partida, posicao_rec, med_preco, med_pontos_lag train_features = train_data[... , [1,6,7,9,10,11,12,13]] train_result = train_data[... , 8] def get_model_outcomes(model): '''Função que calcula valores usando validação cruzada k-folds para um dado modelo''' SEED=42 mean_rmse = 0.0 mean_r2 = 0.0 n = 10 for i in range(n): X_train, X_cv, y_train, y_cv = train_test_split( train_features, train_result, test_size=.20, random_state=i*SEED) # Treinar modelo e realizar predições model.fit(X_train, y_train) preds = model.predict(X_cv) print('[Fold %d/%d] Mean Squared Error: %.2f | R^2: %.2f' % (i + 1, n, mean_squared_error(y_cv, preds), r2_score(y_cv, preds))) mean_rmse += mean_squared_error(y_cv, preds) mean_r2 += r2_score(y_cv, preds) print('Mean RMSE: %.3f | Mean R^2 %.3f' % (mean_rmse/n, mean_r2/n)) """ Explanation: De acordo com a tabela acima, podemos observar que as seguintes variáveis estão correlacionadas com a variação de preço que estamos tentando prever. Em ordem de magnitude temos: pontos_lag, pon_lag_cen, preco_cen e preco. 
O resto das variáveis numéricas provavelmente não nos ajudarão no modelo. Validação cruzada e predição Está chegando a hora. Usaremos a biblioteca sckitlearn. Para usar suas funções, precisaremos adaptar nosso banco de dados. Vamos aproveitar e escrever uma função para avaliar os modelos usando validação cruzada 'k-fold'. A ideia é evitar super ajustamento do nosso modelo aos dados. Para avaliar nossos modelos, usaremos o erro médio quadrático, que é uma estimativa de quanto as predições de um dado modelo é próxima dos valores reais. End of explanation """ # Linear regression regr_0 = linear_model.LinearRegression() get_model_outcomes(regr_0) # AdaBoost rng=42 regr_1 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=8), n_estimators=100, random_state=rng) get_model_outcomes(regr_1) # Random Forest regr_2 = RandomForestRegressor(max_depth=8, random_state=0, n_estimators=100) get_model_outcomes(regr_2) # LightGBM features_names = ['rodada', 'preco', 'media_pontos', 'pontos_lag', 'jogou_partida', 'posicao_rec', 'med_preco', 'med_pontos_lag'] train_data = lgb.Dataset(train_features, label=train_result, feature_name=features_names, categorical_feature=['jogou_partida','posicao_rec'], free_raw_data=False) test_data = lgb.Dataset('test.svm', reference=train_data) params = { 'objective':'regression', 'metric':'mean_squared_error', } # Training num_round = 500 cv_results = lgb.cv(params, train_data, num_round, nfold=10, stratified=False, verbose_eval=20, early_stopping_rounds=40) # Display results print('Current parameters:\n', params) print('\nBest num_boost_round:', len(cv_results['l2-mean'])) print('Best CV score:', cv_results['l2-mean'][-1]) """ Explanation: Teste dos modelos com validação cruzada Hora de testar a capacidade preditiva de quatro modelos. Começaremos simples com uma regressão linear, depois tentaremos usar árvores de decisão com adaBoost e Random Forest. Por fim, usaremos o algoritmo lightgbm, campeão em diversas competições. Ele é uma variação do Extreme Gradient Boosting que é mais computacionalmente mais eficiente. Focaremos na métrica RMSE, erro médio quadrático. Para nós valores próximos a zero são desejáveis. Usaremos para os três primeiros modelos também o $R^2$. End of explanation """ ### Treinar LightGBM bst = lgb.train(params, train_data, 182) dados['pred_lgb'] = bst.predict(dados[features_names]) ### Treinar Random Forest model_2 = regr_2.fit(train_features, train_result) dados['pred_m2'] = regr_2.predict(train_features) ### Plotar scatter com previsão do algoritmo e resultado principal dados_plot = pd.melt(dados[['variacao_preco_lag','pred_lgb','pred_m2']], id_vars=['variacao_preco_lag'], value_vars=['pred_lgb','pred_m2']) sns.set(font_scale=3) g = sns.FacetGrid(dados_plot, col="variable", height=16) g.map(sns.regplot, 'value', 'variacao_preco_lag', scatter_kws={'alpha':0.3}) dados[['slug', 'variacao_preco_lag', 'predict_m2', 'pred_lbg']].tail(20) """ Explanation: Temos dois algoritmos para olhar com carinho. O vencedor de competições Kaggle - lightgbm e o random forest. Vamos testá-los agora em nosso conjunto de dados. End of explanation """
mne-tools/mne-tools.github.io
0.22/_downloads/542dc1082b4eb8bba1ec06e757cdf52d/plot_interpolate_bad_channels.ipynb
bsd-3-clause
# Authors: Denis A. Engemann <denis.engemann@gmail.com> # Mainak Jas <mainak.jas@telecom-paristech.fr> # # License: BSD (3-clause) import mne from mne.datasets import sample print(__doc__) data_path = sample.data_path() fname = data_path + '/MEG/sample/sample_audvis-ave.fif' evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0)) # plot with bads evoked.plot(exclude=[], picks=('grad', 'eeg')) """ Explanation: Interpolate bad channels for MEG/EEG channels This example shows how to interpolate bad MEG/EEG channels Using spherical splines from :footcite:PerrinEtAl1989 for EEG data. Using field interpolation for MEG and EEG data. In this example, the bad channels will still be marked as bad. Only the data in those channels is replaced. End of explanation """ evoked_interp = evoked.copy().interpolate_bads(reset_bads=False) evoked_interp.plot(exclude=[], picks=('grad', 'eeg')) """ Explanation: Compute interpolation (also works with Raw and Epochs objects) End of explanation """ evoked_interp_mne = evoked.copy().interpolate_bads( reset_bads=False, method=dict(eeg='MNE'), verbose=True) evoked_interp_mne.plot(exclude=[], picks=('grad', 'eeg')) """ Explanation: You can also use minimum-norm for EEG as well as MEG End of explanation """
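# A minimal variant using the same method as above: with reset_bads=True the repaired
# channels are also cleared from info['bads'], so later processing treats them as good.
evoked_interp_reset = evoked.copy().interpolate_bads(reset_bads=True)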
connordurkin/CPSC_458
final_project.ipynb
mit
%matplotlib inline import yahoo_finance from yahoo_finance import Share import numpy as np import pandas import matplotlib.pyplot as plt import datetime import cvxopt as opt from cvxopt import blas, solvers # We will do a lot of optimizations, # and don't want to see each step. solvers.options['show_progress'] = False """ Explanation: Markowitz Portfolio Optimization Final Project for CPSC 458 by Connor Durkin This project explores the use of a mean variance or Markowitz method of portfolio optimization. The goal is to employ this trading strategy for a portfolio of SPDR ETFs and track returns over historical data. More importantly, though, as this is a class in decision making, I have incporated the ability for the functions here to explain their motivations to a human being--hopefully in a palatable manner. Below are the function definitions and at the end of the notebook you will find an example of their use. These functions were written with default key values but the operations are general enough to apply this strategy to any selection of securities with return data available via yahoo finance. Be sure to read the Results and Analysis at the end! End of explanation """ def getTimeSeries( ticker, start_date='2012-01-01', end_date='2012-02-01'): # yahoo_finance API to load list of dictionaries obj = Share(ticker) ts = obj.get_historical(start_date,end_date) # yahoo_finance indexes most recent date first, reverse this ts = list(reversed(ts)) # Convert date strings to python datetime objects for easier manipulation dates = [datetime.datetime.strptime(ts[i]['Date'],'%Y-%m-%d').date() for i in range(len(ts))] # Convert close price strings to floats for numerical manipulation prices = [float(ts[i]['Adj_Close']) for i in range(len(ts))] # Create DataFrame from the list produced - python will recognize as Series time_series = pandas.DataFrame( prices, index = dates, columns = [ticker]) return time_series """ Explanation: getTimeSeries( ticker, start_date, end_date) What it does: getTimeSeries() takes in a date range and a ticker and returns a timeseries of adjusted closing prices. Inputs: ticker: a string indiciating the security for which the time series will be generated. start_date: a string of the form 'YYYY-MM-DD' declaring the beginning of the historical window. end_date: a string of the form 'YYYY-MM-DD' declaring the end of the historical window Returns: time_series: a single column Pandas DataFrame containing the time series of adjusted close prices for the indicated ticker. End of explanation """ def getMultTimeSeries( tickers = ['XLY','XLP','XLE','XLF','XLV','XLI','XLB','XLK','XLU'], start_date = '2012-01-01', end_date = '2012-02-01'): # Initialize DataFrame time_series_dataframe = pandas.DataFrame() # Iterate over all tickers and append column to DataFrame for ticker in tickers: # Use helper function to get single column DataFrame df = getTimeSeries( ticker, start_date, end_date) # Concatanate on axis = 1 time_series_dataframe = pandas.concat([time_series_dataframe,df],axis = 1) return time_series_dataframe """ Explanation: getMultTimeSeries( tickers, start_date, end_date) What it does: getMultTimeSeries() takes in a list of tickers and a specified date range and returns a Pandas DataFrame containing timeseries of adjusted closing prices. Inputs: tickers: a list of strings indicating which tickers to include. Defaults to these 9 SPDR ETFs: 'XLY','XLP','XLE','XLF','XLV','XLI','XLB','XLK','XLU'. 
start_date: a string of the form 'YYYY-MM-DD' declaring the beginning of the historical window. end_date: a string of the form 'YYYY-MM-DD' declaring the end of the historical window Returns: time_series_dataframe: a dataframe of adjusted closing price timeseries over the specified date range for the specified group of tickers End of explanation """ def markowitzReturns( returns, tickers, explain = False): n = len(returns) returns_df = returns returns = np.asmatrix(returns) mus = [10**(5.0 * t/50 - 1.0) for t in range(50)] # Convert to cvxopt matrices Sigma = opt.matrix(np.cov(returns)) q = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n ,1)) # -I*w < 0 i.e. no shorts A = opt.matrix(1.0, (1, n)) # A is all ones so A*w = w b = opt.matrix(1.0) # Dot product sums to 1 # Calculate efficient frontier weights using quadratic programming ports = [solvers.qp(mu*Sigma, -q, G, h, A, b)['x'] for mu in mus] # Calculate risks and returns of frontier returns = [blas.dot(q, x) for x in ports] risks = [np.sqrt(blas.dot(x, Sigma*x)) for x in ports] # Fit polynomial to frontier curve m = np.polyfit(returns, risks, 2) x = np.sqrt(m[2]/m[0]) # Calculate optimal portfolio weights optimal_weights = solvers.qp(opt.matrix(x * Sigma), -q, G, h, A, b)['x'] optimal_return = blas.dot(q, optimal_weights) optimal_risk = np.sqrt(blas.dot(optimal_weights, Sigma*optimal_weights)) # Method to justify this portfolio distribution if asked for if( explain ): date_text = """ -------------------------------------------------------------------------------------------------- Using returns data from {0} to {1} a careful mean - variance analysis was performed. The analysis found a number of portfolios lying on the markowitz efficient frontier and they are found below. The analysis indicates that the optimal portfolio for the next trading day will have the following distribution: """ print(date_text.format(returns_df.columns[0],returns_df.columns[len(returns_df.columns)-1])) # Print optimal weights weights = np.asarray(optimal_weights) weights = [float(weights[i]) for i in range(len(weights))] wts = dict(zip(tickers,weights)) for k in wts: weight_text = "\t{0} : {1:.4f}%" print(weight_text.format(str(k),float(wts[k])*100)) returns_text = """ This portfolio distribution has an expected return of: {0:.4f}%""" print(returns_text.format(float(optimal_return)*100)) risk_text = """ And the associated risk (standard deviation) is: {0:.4f}""" print(risk_text.format(float(optimal_risk))) break_text=""" -------------------------------------------------------------------------------------------------- """ print(break_text) plt.plot(risks, returns, 'b-o') plt.title('Efficient Portfolios on {}'.format(returns_df.columns[len(returns_df.columns)-1])) plt.ylabel('Returns (%)') plt.xlabel('Risk (STD)') return np.asarray(optimal_weights), returns, risks """ Explanation: markowitzReturns( returns) What it does: markowitzReturns() takes in a Pandas DataFrame (or any container which can be converted to a numpy matrix) of returns and uses mean-variance portfolio theory to return an optimally weighted portfolio. It does so by minimizing $\omega^{T}\Sigma\omega -qR^{T}\omega$ (the Markowitz mean - variance framework) for portfolio weights $\omega$. Where $\Sigma$ is the covariance matrix of the securities, $R$ is the expected return matrix and $q$ is the mean return vector of all securities. 
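Concretely, the code below sweeps a grid of candidate risk-aversion levels $\mu$ (the list mus) and, for each one, solves the long-only quadratic program
$$\min_{\omega}\; \tfrac{1}{2}\,\mu\,\omega^{T}\Sigma\omega \;-\; q^{T}\omega \quad \text{subject to} \quad \omega \succeq 0, \;\; \mathbf{1}^{T}\omega = 1,$$
with $q$ the vector of mean returns; tracing out these solutions is what produces the efficient frontier from which the optimal portfolio is then selected.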
The optimization is performed using the CVXOPT package employing the use of the solvers.qp() quadratic programming method. This method minimizes $(1/2)x^{T}Px + q^{T}x$ subject to $Gx \preceq h$ and $Ax = b$. It also utilizes CVXOPT's BLAS methods for performing linear algebra computations. Inspiration for this process was found in Dr. Thomas Starke, David Edwards and Dr. Thomas Wiecki's quantopian blog post located at: http://blog.quantopian.com/markowitz-portfolio-optimization-2/. Inputs: returns: a Pandas DataFrame(or other container which can be converted to a numpy matrix). NOTE: the dataframe produced by getMultTimeSeries must be transposed (returns.T) for meaningful results. justify: a True / False input determining whether to print a robust explanation of the choice for the portfolio shift. Returns: optimal_weights: the weights of the optimal portfolio in array form. returns: the returns of all portfolios calculated across the effecient frontier. risks: list of risks of all portfolios calculated across the efficient frontier. End of explanation """ def backtest( tickers = ['XLY','XLP','XLE','XLF','XLV','XLI','XLB','XLK','XLU'], start_date = '2012-01-01', end_date = '2012-01-20', start = 10, max_lookback = 100, explain = False): timeseries = getMultTimeSeries( tickers, start_date, end_date) returns = timeseries.pct_change().dropna() weights_df = pandas.DataFrame() for i in range(len(returns)): if ( i > start ): if( i < max_lookback ): returns_window = returns[0:i] else: returns_window = returns[(i-max_lookback):i] try: if( explain ): weights, returns_window, risks = markowitzReturns(returns_window.T, tickers, explain = True) else: weights, returns_window, risks = markowitzReturns(returns_window.T, tickers, explain = False) except ValueError as e: # Sometimes CVXOPT fails (infrequently) # "ValueError: Rank(A) < p or Rank([P; A; G]) < n" # In this case just do nothing (keep current weights) weights, returns_window, risks = weights_prev, returns_window_prev, risks_prev weights = [float(weights[i]) for i in range(len(weights))] wts = dict(zip(tickers,weights)) df = pandas.DataFrame(wts, index = [returns.index[i]]) weights_df = pandas.concat([weights_df, df]) weights_prev, returns_window_prev, risks_prev = weights, returns_window, risks total_returns = pandas.DataFrame(weights_df.values*returns[(start+1)::], columns = returns.columns, index = returns.index) naive_returns = [np.sum(total_returns[[i]]) for i in range(len(total_returns.columns))] naive_return = np.sum(naive_returns) return weights_df, total_returns.dropna(), naive_return weights, returns, naive_return = backtest(explain = True) """ Explanation: backtest( tickers, start_date, end_date, start, max_lookback, explain) What it does: backtest() applies the mean-variance portfolio optimization trading strategy to a list of stocks. It applies the markowitzReturns() method over a range of dates and tracks the portfolio movement and returns, outputting a DataFrame describing the portfolio over time, a DataFrame describing the returns over time and a total return amount. Backtest does not take into account commission costs. Running backtest(explain = True) produces the output below. The default dates were carefully selected so that just one explain instance would print. Inputs: tickers: a list of strings indicating which tickers to include. Defaults to these 9 SPDR ETFs: 'XLY','XLP','XLE','XLF','XLV','XLI','XLB','XLK','XLU'. start_date: a string of the form 'YYYY-MM-DD' declaring the beginning of the historical window. 
end_date: a string of the form 'YYYY-MM-DD' declaring the end of the historical window start: the minimum number of days to wait before beginning to trade (i.e. how much information is needed). Default is 10. max_lookback: the maximum number of days to look back for data, i.e. the size of the input to markowitzReturns(). Default is 100. Returns: weights_df: a pandas DataFrame containing the portfolio weights over time beginning with the start date + start$*$days. total_returns: a pandas DataFrame containing the portfolio returns over time beginning with the start date + start$*$days. naive_return: the total naive return (numpy float). End of explanation """ weights, returns, naive_return = backtest(start_date='2012-01-01',end_date='2012-12-31') def analyzeResults( weights_df, total_returns, naive_return, commission = .0004): start_date = weights_df.index[0] end_date = weights_df.index[len(weights_df.index)-1] # Get cummulative sum of returns for plotting return_sums = total_returns.cumsum() return_sums['total_return'] = return_sums.sum(axis=1) # Analyze data with commission costs weights_diff = weights_df.diff() weights_diff['total_delta'] = weights_diff.abs().sum(axis = 1) portfolio_movement = pandas.DataFrame(weights_diff['total_delta']/2) portfolio_movement['commissions'] = portfolio_movement['total_delta']*commission portfolio_movement['naive_return'] = total_returns.sum(axis=1) portfolio_movement['real_return'] = (portfolio_movement['naive_return'] - portfolio_movement['commissions']) real_sums = portfolio_movement.cumsum() real_return = portfolio_movement['real_return'].sum() # Print naive_return and real_return + analysis naive_return_text = """ -------------------------------------------------------------------------------------------------- In trading from {0} to {1} the total return ignoring commission fees was: {2:.4f}% After factoring in commission fees of {3} the total return was: {4:.4f}% -------------------------------------------------------------------------------------------------- """ print(naive_return_text.format( start_date, end_date, naive_return*100, commission ,real_return*100) ) # Get plot of naive_returns and real returns over time plt.figure(figsize=(12,6)) plt.plot(return_sums.index,return_sums['total_return'],label='Naive Returns') plt.plot(real_sums.index,real_sums['real_return'],label='Real Returns') plt.title('Returns over Time') plt.xlabel('Time') plt.ylabel('Returns (%)') plt.xticks(rotation=70) plt.legend() plt.legend(bbox_to_anchor=(1.01, .5), loc=2, borderaxespad=0.) return analyzeResults( weights, returns, naive_return, commission = .0004) """ Explanation: analyzeResults( weights_df, total_returns, naive_return, commission) What it does: analyzeResults() is the final function which analyzes and displays the results of the backtest() function. It takes the output of backtest() plus an argument for the commission wich defaults to 4 basis points. It plots the real and naive returns over time and displays the total real and naive returns over the date range from backtest(). Below is an example from 2012. Inputs: weights_df: pandas DataFrame of portfolio weights over time, returned from backtest(). total_returns: pandas DataFrame of naive returns over time, returned from backtest(). naive_return: total naive_return as returned by backtest(). commission: basis point cost on trades, defualts to 4 basis points. Returns: nothing End of explanation """
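# Illustrative follow-up (the 10-basis-point commission is an arbitrary choice, not part
# of the original analysis): re-running the summary with a higher commission shows how the
# real return depends on trading costs, since the portfolio is re-weighted every trading day.
analyzeResults(weights, returns, naive_return, commission=.0010)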
beangoben/HistoriaDatos_Higgs
Dia1/1_Computacion_con_Jupyter.ipynb
gpl-2.0
!pwd print("hola bolivia") """ Explanation: 1 Introduccion a IPython notebooks/ Jupyter Que es exactamente? Una libreta IPython/Jupyter es un ambiente interactivo para escribir y correr codigo de python. Es un historial completo y auto-contenido de un calculo y puede ser convertido a otros formatos para compartir con otros. En particular es batante popular en la comunidad cientifica porque es una herramienta interactiva, iterativa para analisis de datos, visualizacion y contar historias. Puedes combinar: - Codigo en vivo - Widgets Interactivos - Graficas - Texto Narrrativo - Ecuaciones - Imagenes - Video Un poco mas... El projecto Ipython reciente se expandio en la versio 3.0 para incluir otros kerneles de computo como R, Julia, C++ y Matlab. Para mas informacion/ideas checa los links abajo de este Ipython Notebook. Vamos a empezar! Corriendo codigo Corre tu codido usando Shift-Enter o presionando el boton <button class='btn btn-default btn-xs'><i class="icon-play fa fa-play"></i></button> en la barra de herramientas arriba. End of explanation """ un_str = "Cuanto es 2 x 4 ?" resultado= 2 * 4 """ Explanation: Computo Iterativo El "kernel" mantiene un estado de todos los calculos del la libreta. Por ejemplo puedes guardar el resultado de un calculo en una variable End of explanation """ print(un_str) print(resultado) print("Magia!") """ Explanation: y usarlo en otra celda End of explanation """ import time time.sleep(10) """ Explanation: Parar codigo El codigo se corre en un proceso separado llamado el"kernel:". Este puede ser interumpido o reseteado. Trata de correr el siguiente codigo y dale al boton <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button>. End of explanation """ a_list = [ "vaca", "taco", "gato"] #list print(a_list) """ Explanation: Resetear Puedes resetear usando el boton <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button>. Python Basico Lists and Arrays (Listas y arreglos) End of explanation """ a_list.append("pollo") # now part of the family a_list """ Explanation: podemos agregar elementos End of explanation """ import numpy as np # lista de numeros del 0 a 100, en incrementos de 1 numeros = np.arange(0,100,1) print(numeros) """ Explanation: o podemos usar el modulo Numpy para arreglos numericos (Protip: quieres saber mas? dale click a la imagen) End of explanation """ array_vacio = np.zeros((5,1)) array_vacio """ Explanation: y que tal arreglos vacios? Aqui creamos un vector de 5 x 1 End of explanation """ array_vacio[2] = 8 # manipular el tercer elemento array_vacio """ Explanation: podemos cambiar valores usando [ indice ]: End of explanation """ integers = np.random.randint(low=1,high=10, size=100000) integers """ Explanation: y que tal arreglos de numeros aleatorios? End of explanation """ print("Sortead :",np.sort(integers)) print("Max:",np.max(integers),", Min:",np.min(integers)) print("Max at:",np.argmax(integers),", Min at:",np.argmin(integers)) print("Mean:",np.mean(integers),", Std:",np.std(integers)) """ Explanation: Utilidades de Array Tenemos muchas utilidades para trabajar con arreglos... 
Sortear valores: entoces usamos np.sort(array) Encontrar Maximo, Minimo: entoces usamos np.min(array), np.max(array) Encontrar el indices del Maximo o Minimo: entoces usamos np.argmin(array), np.argmax(array) Calcular medias o desviaciones estandar : usamos np.mean(array),np.std(array) End of explanation """ for i in range(2,10,2): print(i) print("Fin!") """ Explanation: For loops (Ciclos For) Un for loop va sobre cada elemento del ciclo, Ojo! nota el espaciamiento/indentacion justo despues del for! End of explanation """ for i in a_list: for j in a_list: print i, j print("Fin!") """ Explanation: Doble! End of explanation """ for index,item in enumerate(a_list): print(index,item) print("Done!") """ Explanation: un loop pero enumerado, te rergresa un indice y un elemento End of explanation """ import random random.sample(a_list, 2) # select 1 random.sample(numeros, 10) """ Explanation: Y si quieres un elemento aleatorio de la lista? El modulo random al rescate, corre varias veces la celda y checa si son aleatorios End of explanation """ # La definimos.. def reordenar(lista): val=random.sample(lista,1) return val # la llamamos print(reordenar(numeros)) print(reordenar(numeros)) """ Explanation: Que tal funciones? End of explanation """ from IPython.display import Image Image(filename='files/large-hadron-collider.jpg') """ Explanation: Actividad 1 : Ejercicios de Programacion Meta: Obtener confianza con Python. 1.a) Usa un ciclo-for e imprime tus platillos bolivianos favoritos 1.b) Crea un arreglo de numeros aleatorios de tamano $n$ 1.c) Encapsula la funcion pasada en una funcion 1.d) Grafica un histograma de numeros aleatorios con $n=10,50,100,1k,10k$ Para graficar histogramas usa plt.hist(). Extra: Sumatoria de ondas Extra: Mas cosas para darle sabor Celdas de Texto: Latex & Markdown Celdas se crean por default como celdas de codigo, pero se pueden cambiar. Cell are by default created as code cells, can be but can be easily changed to text cells by cliking on the toolbar. In text cells you can embed narrative text using Markdown, HTML code and LaTeX equations with inline dollar signs \$ insert equation \$ and new line as \$\$ insert equation \$\$. For example: $$H\psi = E\psi$$ The code for this cell is: ```markdown Text Cells: Latex & Markdown Cell are by default created as code cells, can be but can be easily changed to text cells by cliking on the toolbar. In text cells you can embed narrative text using Markdown, HTML code and LaTeX equations with inline dollar signs \$ insert equation \$ and new line as \$\$ insert equation \$\$. For example: $$H\psi = E\psi$$ ``` Images We can work with images (JPEG, PNG) and SVG via the Image and SVG class. End of explanation """ from IPython.display import YouTubeVideo #https://www.youtube.com/watch?v=_6uKZWnJLCM YouTubeVideo('_6uKZWnJLCM') """ Explanation: Videos? End of explanation """ from IPython.display import HTML HTML('<iframe src=http://ipython.org/ width=700 height=350></iframe>') """ Explanation: External Websites, HTML? End of explanation """
EmuKit/emukit
notebooks/Emukit-tutorial-basic-use-of-the-library.ipynb
apache-2.0
# General imports and parameters of figures should be loaded at the beginning of the overview import numpy as np """ Explanation: Basic use of Emukit Overview End of explanation """ from emukit.test_functions import branin_function from emukit.core import ParameterSpace, ContinuousParameter from emukit.core.initial_designs import RandomDesign from GPy.models import GPRegression from emukit.model_wrappers import GPyModelWrapper from emukit.model_wrappers.gpy_quadrature_wrappers import BaseGaussianProcessGPy, RBFGPy import warnings warnings.filterwarnings('ignore') """ Explanation: The goal of this notebook is to illustrate how a model can wrapped and used in different tasks in Emukit. Navigation Load your objective, collect some data, build a model Load the elements to solve your problem and run the decision loop if needed Conclusions 1. Load your objective, collect some data, build a model These steps are common to all methods in Emukit. Here we illustrate how to do it with the Branin function. End of explanation """ f, _ = branin_function() """ Explanation: Define the objective function In this case we use the Branin function available in Emukit. End of explanation """ parameter_space = ParameterSpace([ContinuousParameter('x1', -5, 10), ContinuousParameter('x2', 0, 15)]) """ Explanation: Define the parameter space The parameter space contains the definition of the input variables of the function. Currently Emukit supports continuous and discrete parameters. End of explanation """ num_data_points = 30 design = RandomDesign(parameter_space) X = design.get_samples(num_data_points) Y = f(X) """ Explanation: Collect some observations of f In this step we are just collecting some initial random points in the parameter space of the objective. These points are used to initialize an emulator of the function. We use the RandomDesign class available in Emukit for this. End of explanation """ model_gpy = GPRegression(X,Y) model_gpy.optimize() model_emukit = GPyModelWrapper(model_gpy) """ Explanation: Fit and wrap a model to the collected data To conclude the steps that are common to any method in Emukit we now build an initial emulator of the objective function and we wrap the model in Emukit. In this example we use GPy but note that any modeling framework can be used here. End of explanation """ # Decision loops from emukit.experimental_design import ExperimentalDesignLoop from emukit.bayesian_optimization.loops import BayesianOptimizationLoop from emukit.quadrature.loop import VanillaBayesianQuadratureLoop # Acquisition functions from emukit.bayesian_optimization.acquisitions import ExpectedImprovement from emukit.experimental_design.acquisitions import ModelVariance from emukit.quadrature.acquisitions import IntegralVarianceReduction # Acquistion optimizers from emukit.core.optimization import GradientAcquisitionOptimizer # Stopping conditions from emukit.core.loop import FixedIterationsStoppingCondition from emukit.core.loop import ConvergenceStoppingCondition # Bayesian quadrature kernel and model from emukit.quadrature.kernels import QuadratureRBFLebesgueMeasure from emukit.quadrature.methods import VanillaBayesianQuadrature from emukit.quadrature.measures import LebesgueMeasure """ Explanation: 2. Load the package components, run the decision loop (if needed), solve your problem In this section we use the model that we have created to solve different decision tasks. 
When the model is used in a decision loop (Bayesian optimization, Bayesian quadrature, Experimental design) the loop needs to be loaded and elements like acquisitions and optimizer need to be passed in together with other parameters. Sensitivity analysis provide us with tools to interpret the model so no loops are needed. End of explanation """ # Load core elements for Bayesian optimization expected_improvement = ExpectedImprovement(model = model_emukit) optimizer = GradientAcquisitionOptimizer(space = parameter_space) # Create the Bayesian optimization object bayesopt_loop = BayesianOptimizationLoop(model = model_emukit, space = parameter_space, acquisition = expected_improvement, batch_size = 5) # Run the loop and extract the optimum # Run the loop until we either complete 10 steps or converge stopping_condition = FixedIterationsStoppingCondition(i_max = 10) | ConvergenceStoppingCondition(eps=0.01) bayesopt_loop.run_loop(f, stopping_condition) """ Explanation: Bayesian optimization Here we use the model to find the minimum of the objective using Bayesian optimization in a sequential way. We collect 10 batches of points in batches of size 5. End of explanation """ # Load core elements for Experimental design model_variance = ModelVariance(model = model_emukit) optimizer = GradientAcquisitionOptimizer(space = parameter_space) # Create the Experimental design object expdesign_loop = ExperimentalDesignLoop(space = parameter_space, model = model_emukit, acquisition = model_variance, update_interval = 1, batch_size = 5) # Run the loop stopping_condition = FixedIterationsStoppingCondition(i_max = 10) expdesign_loop.run_loop(f, stopping_condition) """ Explanation: Experimental design Here we use the same model to perform experimental design. We use the model variance. We collect 10 batches of 5 points each. After each batch is collected the model is updated. End of explanation """ # Define the lower and upper bounds of the integral. integral_bounds = [(-5, 10), (0, 15)] # Load core elements for Bayesian quadrature emukit_measure = LebesgueMeasure.from_bounds(integral_bounds) emukit_qrbf = QuadratureRBFLebesgueMeasure(RBFGPy(model_gpy.kern), emukit_measure) emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=model_gpy) emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model, X=X, Y=Y) # Create the Bayesian quadrature object bq_loop = VanillaBayesianQuadratureLoop(model=emukit_method) # Run the loop and extract the integral estimate num_iter = 5 bq_loop.run_loop(f, stopping_condition=num_iter) integral_mean, integral_variance = bq_loop.model.integrate() """ Explanation: Bayesian Quadrature Here we use vanilla BQ from the quadrature package to integrate the Branin function. Note that we have to create a slightly different Emukit model since the BQ package needs some additional information. But we can re-use the GPy GP model from above. We also need to specify the integral bounds, i.e., the domain which we want to integrate over. End of explanation """ from emukit.sensitivity.monte_carlo import MonteCarloSensitivity # No loop here, compute Sobol indices senstivity_analysis = MonteCarloSensitivity(model = model_emukit, input_domain = parameter_space) main_effects, total_effects, _ = senstivity_analysis.compute_effects(num_monte_carlo_points = 10000) """ Explanation: Sensitivity analysis To compute the sensitivity indexes of a model does not require any loop. We just load the class MonteCarloSensitivity and pass the model and the parameter space. End of explanation """
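The Bayesian optimization cell above says "Run the loop and extract the optimum" but the notebook stops after running the loops. As a minimal sketch of one way to inspect the results, assuming the loop objects expose their evaluated points through a loop_state attribute with X and Y arrays (true for recent Emukit releases, but check your installed version):

```python
import numpy as np

# All evaluated inputs/outputs: the initial design plus the points chosen by the loop
X_evaluated = bayesopt_loop.loop_state.X
Y_evaluated = bayesopt_loop.loop_state.Y

# A simple estimate of the optimum: the best point observed so far
best = np.argmin(Y_evaluated)
print('best x:', X_evaluated[best], ' best f(x):', float(Y_evaluated[best]))

# The quadrature estimate computed above can be reported alongside it
print('integral mean:', integral_mean, ' integral variance:', integral_variance)
```

The same loop_state inspection works for expdesign_loop if you want to see which points the model-variance acquisition selected.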
mne-tools/mne-tools.github.io
0.15/_downloads/plot_ems_filtering.ipynb
bsd-3-clause
# Author: Denis Engemann <denis.engemann@gmail.com> # Jean-Remi King <jeanremi.king@gmail.com> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt import mne from mne import io, EvokedArray from mne.datasets import sample from mne.decoding import EMS, compute_ems from sklearn.model_selection import StratifiedKFold print(__doc__) data_path = sample.data_path() # Preprocess the data raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' event_ids = {'AudL': 1, 'VisL': 3} # Read data and create epochs raw = io.read_raw_fif(raw_fname, preload=True) raw.filter(0.5, 45, fir_design='firwin') events = mne.read_events(event_fname) picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True, exclude='bads') epochs = mne.Epochs(raw, events, event_ids, tmin=-0.2, tmax=0.5, picks=picks, baseline=None, reject=dict(grad=4000e-13, eog=150e-6), preload=True) epochs.drop_bad() epochs.pick_types(meg='grad') # Setup the data to use it a scikit-learn way: X = epochs.get_data() # The MEG data y = epochs.events[:, 2] # The conditions indices n_epochs, n_channels, n_times = X.shape # Initialize EMS transformer ems = EMS() # Initialize the variables of interest X_transform = np.zeros((n_epochs, n_times)) # Data after EMS transformation filters = list() # Spatial filters at each time point # In the original paper, the cross-validation is a leave-one-out. However, # we recommend using a Stratified KFold, because leave-one-out tends # to overfit and cannot be used to estimate the variance of the # prediction within a given fold. for train, test in StratifiedKFold().split(X, y): # In the original paper, the z-scoring is applied outside the CV. # However, we recommend to apply this preprocessing inside the CV. # Note that such scaling should be done separately for each channels if the # data contains multiple channel types. X_scaled = X / np.std(X[train]) # Fit and store the spatial filters ems.fit(X_scaled[train], y[train]) # Store filters for future plotting filters.append(ems.filters_) # Generate the transformed data X_transform[test] = ems.transform(X_scaled[test]) # Average the spatial filters across folds filters = np.mean(filters, axis=0) # Plot individual trials plt.figure() plt.title('single trial surrogates') plt.imshow(X_transform[y.argsort()], origin='lower', aspect='auto', extent=[epochs.times[0], epochs.times[-1], 1, len(X_transform)], cmap='RdBu_r') plt.xlabel('Time (ms)') plt.ylabel('Trials (reordered by condition)') # Plot average response plt.figure() plt.title('Average EMS signal') mappings = [(key, value) for key, value in event_ids.items()] for key, value in mappings: ems_ave = X_transform[y == value] plt.plot(epochs.times, ems_ave.mean(0), label=key) plt.xlabel('Time (ms)') plt.ylabel('a.u.') plt.legend(loc='best') plt.show() # Visualize spatial filters across time evoked = EvokedArray(filters, epochs.info, tmin=epochs.tmin) evoked.plot_topomap() """ Explanation: ============================================== Compute effect-matched-spatial filtering (EMS) ============================================== This example computes the EMS to reconstruct the time course of the experimental effect as described in [1]_. This technique is used to create spatial filters based on the difference between two conditions. 
By projecting the trial onto the corresponding spatial filters, surrogate single trials are created in which multi-sensor activity is reduced to one time series which exposes experimental effects, if present. We will first plot a trials x times image of the single trials and order the trials by condition. A second plot shows the average time series for each condition. Finally a topographic plot is created which exhibits the temporal evolution of the spatial filters. References .. [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing multi-sensor data to a single time course that reveals experimental effects", BMC Neuroscience 2013, 14:122. End of explanation """ epochs.equalize_event_counts(event_ids) X_transform, filters, classes = compute_ems(epochs) """ Explanation: Note that a similar transformation can be applied with compute_ems However, this function replicates Schurger et al's original paper, and thus applies the normalization outside a leave-one-out cross-validation, which we recommend not to do. End of explanation """
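To make the phrase "projecting the trial onto the corresponding spatial filters" concrete, here is a small numpy sketch of the operation EMS performs on a single epoch. It uses hypothetical random data with illustrative shapes and is not part of the MNE API.

```python
import numpy as np

rng = np.random.RandomState(0)
n_channels, n_times = 203, 106          # hypothetical sizes
trial = rng.randn(n_channels, n_times)  # one epoch (channels x times)
w = rng.randn(n_channels, n_times)      # spatial filter at each time point
w /= np.linalg.norm(w, axis=0)          # unit-norm filter per time point

# EMS-style projection: weighted sum over channels at each time point
surrogate = np.sum(w * trial, axis=0)   # single time series, shape (n_times,)
```

In the real pipeline the filter w is learned on the training folds from the difference between the two condition averages, which is what ems.filters_ stores in the code above.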
ES-DOC/esdoc-jupyterhub
notebooks/mohc/cmip6/models/hadgem3-gc31-mm/ocean.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-mm', 'ocean') """ Explanation: ES-DOC CMIP6 Model Properties - Ocean MIP Era: CMIP6 Institute: MOHC Source ID: HADGEM3-GC31-MM Topic: Ocean Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. Properties: 133 (101 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:15 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Seawater Properties 3. Key Properties --&gt; Bathymetry 4. Key Properties --&gt; Nonoceanic Waters 5. Key Properties --&gt; Software Properties 6. Key Properties --&gt; Resolution 7. Key Properties --&gt; Tuning Applied 8. Key Properties --&gt; Conservation 9. Grid 10. Grid --&gt; Discretisation --&gt; Vertical 11. Grid --&gt; Discretisation --&gt; Horizontal 12. Timestepping Framework 13. Timestepping Framework --&gt; Tracers 14. Timestepping Framework --&gt; Baroclinic Dynamics 15. Timestepping Framework --&gt; Barotropic 16. Timestepping Framework --&gt; Vertical Physics 17. Advection 18. Advection --&gt; Momentum 19. Advection --&gt; Lateral Tracers 20. Advection --&gt; Vertical Tracers 21. Lateral Physics 22. Lateral Physics --&gt; Momentum --&gt; Operator 23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff 24. Lateral Physics --&gt; Tracers 25. Lateral Physics --&gt; Tracers --&gt; Operator 26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff 27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity 28. Vertical Physics 29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details 30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers 31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum 32. Vertical Physics --&gt; Interior Mixing --&gt; Details 33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers 34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum 35. Uplow Boundaries --&gt; Free Surface 36. Uplow Boundaries --&gt; Bottom Boundary Layer 37. Boundary Forcing 38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction 39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction 40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration 41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing 1. Key Properties Ocean key properties 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of ocean model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean model code (NEMO 3.6, MOM 5.0,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.model_family') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OGCM" # "slab ocean" # "mixed layer ocean" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Model Family Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of ocean model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.basic_approximations') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Primitive equations" # "Non-hydrostatic" # "Boussinesq" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Basic approximations made in the ocean. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Potential temperature" # "Conservative temperature" # "Salinity" # "U-velocity" # "V-velocity" # "W-velocity" # "SSH" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of prognostic variables in the ocean component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear" # "Wright, 1997" # "Mc Dougall et al." # "Jackett et al. 2006" # "TEOS 2010" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Seawater Properties Physical properties of seawater in ocean 2.1. Eos Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Potential temperature" # "Conservative temperature" # TODO - please enter value(s) """ Explanation: 2.2. Eos Functional Temp Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Temperature used in EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Practical salinity Sp" # "Absolute salinity Sa" # TODO - please enter value(s) """ Explanation: 2.3. Eos Functional Salt Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Salinity used in EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pressure (dbars)" # "Depth (meters)" # TODO - please enter value(s) """ Explanation: 2.4. Eos Functional Depth Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Depth or pressure used in EOS for sea water ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS 2010" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2.5. Ocean Freezing Point Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.6. Ocean Specific Heat Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specific heat in ocean (cpocean) in J/(kg K) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.7. Ocean Reference Density Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Boussinesq reference density (rhozero) in kg / m3 End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Present day" # "21000 years BP" # "6000 years BP" # "LGM" # "Pliocene" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Bathymetry Properties of bathymetry in ocean 3.1. Reference Dates Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Reference date of bathymetry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.type') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.2. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the bathymetry fixed in time in the ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. Ocean Smoothing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe any smoothing or hand editing of bathymetry in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.source') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Source Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe source of bathymetry in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Nonoceanic Waters Non oceanic waters treatement in ocean 4.1. Isolated Seas Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how isolated seas is performed End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. River Mouth Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how river mouth mixing or estuaries specific treatment is performed End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Software Properties Software properties of ocean code 5.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Resolution Resolution in the ocean grid 6.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Range Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.4. Number Of Horizontal Gridpoints Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.5. Number Of Vertical Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.6. Is Adaptive Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.7. Thickness Level 1 Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Thickness of first surface ocean level (in meters) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Tuning Applied Tuning methodology for ocean component 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation Conservation in the ocean component 8.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Brief description of conservation methodology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Enstrophy" # "Salt" # "Volume of ocean" # "Momentum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Properties conserved in the ocean by the numerical schemes End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Consistency Properties Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Corrected Conserved Prognostic Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Set of variables which are conserved by more than the numerical scheme alone. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.5. Was Flux Correction Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Does conservation involve flux correction ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Grid Ocean grid 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of grid in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Z-coordinate" # "Z*-coordinate" # "S-coordinate" # "Isopycnic - sigma 0" # "Isopycnic - sigma 2" # "Isopycnic - sigma 4" # "Isopycnic - other" # "Hybrid / Z+S" # "Hybrid / Z+isopycnic" # "Hybrid / other" # "Pressure referenced (P)" # "P*" # "Z**" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Grid --&gt; Discretisation --&gt; Vertical Properties of vertical discretisation in ocean 10.1. Coordinates Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of vertical coordinates in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 10.2. Partial Steps Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Using partial steps with Z or Z vertical coordinate in ocean ?* End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Lat-lon" # "Rotated north pole" # "Two north poles (ORCA-style)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Grid --&gt; Discretisation --&gt; Horizontal Type of horizontal discretisation scheme in ocean 11.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal grid type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Arakawa B-grid" # "Arakawa C-grid" # "Arakawa E-grid" # "N/a" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Staggering Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Horizontal grid staggering type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite difference" # "Finite volumes" # "Finite elements" # "Unstructured grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.3. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12. Timestepping Framework Ocean Timestepping Framework 12.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of time stepping in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Via coupling" # "Specific treatment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.2. 
Diurnal Cycle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Diurnal cycle type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Leap-frog + Asselin filter" # "Leap-frog + Periodic Euler" # "Predictor-corrector" # "Runge-Kutta 2" # "AM3-LF" # "Forward-backward" # "Forward operator" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Timestepping Framework --&gt; Tracers Properties of tracers time stepping in ocean 13.1. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracers time stepping scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.2. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracers time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Preconditioned conjugate gradient" # "Sub cyling" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Timestepping Framework --&gt; Baroclinic Dynamics Baroclinic dynamics in ocean 14.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Baroclinic dynamics type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Leap-frog + Asselin filter" # "Leap-frog + Periodic Euler" # "Predictor-corrector" # "Runge-Kutta 2" # "AM3-LF" # "Forward-backward" # "Forward operator" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Baroclinic dynamics scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. Time Step Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Baroclinic time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "split explicit" # "implicit" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. Timestepping Framework --&gt; Barotropic Barotropic time stepping in ocean 15.1. Splitting Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time splitting method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.2. 
Time Step Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Barotropic time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Timestepping Framework --&gt; Vertical Physics Vertical physics time stepping in ocean 16.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of vertical time stepping in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17. Advection Ocean advection 17.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of advection in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flux form" # "Vector form" # TODO - please enter value(s) """ Explanation: 18. Advection --&gt; Momentum Properties of lateral momemtum advection scheme in ocean 18.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of lateral momemtum advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Scheme Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean momemtum advection scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.ALE') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 18.3. ALE Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Using ALE for vertical advection ? (if vertical coordinates are sigma) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 19. Advection --&gt; Lateral Tracers Properties of lateral tracer advection scheme in ocean 19.1. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral tracer advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 19.2. Flux Limiter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Monotonic flux limiter for lateral tracer advection scheme in ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 19.3. 
Effective Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Effective order of limited lateral tracer advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.4. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ideal age" # "CFC 11" # "CFC 12" # "SF6" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19.5. Passive Tracers Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Passive tracers advected End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.6. Passive Tracers Advection Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Is advection of passive tracers different than active ? if so, describe. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.vertical_tracers.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20. Advection --&gt; Vertical Tracers Properties of vertical tracer advection scheme in ocean 20.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Descriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 20.2. Flux Limiter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Monotonic flux limiter for vertical tracer advection scheme in ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21. Lateral Physics Ocean lateral physics 21.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of lateral physics in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Eddy active" # "Eddy admitting" # TODO - please enter value(s) """ Explanation: 21.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of transient eddy representation in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Horizontal" # "Isopycnal" # "Isoneutral" # "Geopotential" # "Iso-level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Lateral Physics --&gt; Momentum --&gt; Operator Properties of lateral physics operator for momentum in ocean 22.1. Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Direction of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Harmonic" # "Bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.2. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Second order" # "Higher order" # "Flux limiter" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.3. Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Discretisation of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Space varying" # "Time + space varying (Smagorinsky)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff Properties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean 23.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Lateral physics momemtum eddy viscosity coeff type in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 23.2. Constant Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.3. Variable Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. 
Coeff Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.5. Coeff Backscatter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 24. Lateral Physics --&gt; Tracers Properties of lateral physics for tracers in ocean 24.1. Mesoscale Closure Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there a mesoscale closure in the lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 24.2. Submesoscale Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Horizontal" # "Isopycnal" # "Isoneutral" # "Geopotential" # "Iso-level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Lateral Physics --&gt; Tracers --&gt; Operator Properties of lateral physics operator for tracers in ocean 25.1. Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Direction of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Harmonic" # "Bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Second order" # "Higher order" # "Flux limiter" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Discretisation of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Space varying" # "Time + space varying (Smagorinsky)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean 26.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Lateral physics tracers eddy diffusity coeff type in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 26.2. Constant Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.3. Variable Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 26.4. Coeff Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 26.5. Coeff Backscatter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "GM" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean 27.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV in lateral physics tracers in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 27.2. 
Constant Val Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If EIV scheme for tracers is constant, specify coefficient value (M2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.3. Flux Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV flux (advective or skew) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.4. Added Diffusivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV added diffusivity (constant, flow dependent or none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28. Vertical Physics Ocean Vertical Physics 28.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of vertical physics in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details Properties of vertical physics in ocean 29.1. Langmuir Cells Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there Langmuir cells mixing in upper ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure - TKE" # "Turbulent closure - KPP" # "Turbulent closure - Mellor-Yamada" # "Turbulent closure - Bulk Mixed Layer" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers *Properties of boundary layer (BL) mixing on tracers in the ocean * 30.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of boundary layer mixing for tracers in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.2. Closure Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.3. 
Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant BL mixing of tracers, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background BL mixing of tracers coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure - TKE" # "Turbulent closure - KPP" # "Turbulent closure - Mellor-Yamada" # "Turbulent closure - Bulk Mixed Layer" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum *Properties of boundary layer (BL) mixing on momentum in the ocean * 31.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of boundary layer mixing for momentum in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 31.2. Closure Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 31.3. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant BL mixing of momentum, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 31.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background BL mixing of momentum coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Non-penetrative convective adjustment" # "Enhanced vertical diffusion" # "Included in turbulence closure" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32. Vertical Physics --&gt; Interior Mixing --&gt; Details *Properties of interior mixing in the ocean * 32.1. Convection Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of vertical convection in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32.2. Tide Induced Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how tide induced mixing is modelled (barotropic, baroclinic, none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 32.3. Double Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there double diffusion End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 32.4. Shear Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there interior shear mixing End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure / TKE" # "Turbulent closure - Mellor-Yamada" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers *Properties of interior mixing on tracers in the ocean * 33.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of interior mixing for tracers in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 33.2. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant interior mixing of tracers, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33.3. Profile Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background interior mixing of tracers coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure / TKE" # "Turbulent closure - Mellor-Yamada" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum *Properties of interior mixing on momentum in the ocean * 34.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of interior mixing for momentum in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 34.2. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant interior mixing of momentum, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34.3. Profile Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background interior mixing of momentum coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 35. Uplow Boundaries --&gt; Free Surface Properties of free surface in ocean 35.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of free surface in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear implicit" # "Linear filtered" # "Linear semi-explicit" # "Non-linear implicit" # "Non-linear filtered" # "Non-linear semi-explicit" # "Fully explicit" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 35.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Free surface scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 35.3. Embeded Seaice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the sea-ice embeded in the ocean model (instead of levitating) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36. Uplow Boundaries --&gt; Bottom Boundary Layer Properties of bottom boundary layer in ocean 36.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of bottom boundary layer in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Diffusive" # "Acvective" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.2. Type Of Bbl Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of bottom boundary layer in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 36.3. Lateral Mixing Coef Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36.4. Sill Overflow Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe any specific treatment of sill overflows End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37. Boundary Forcing Ocean boundary forcing 37.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of boundary forcing in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.2. Surface Pressure Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.3. Momentum Flux Correction Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.4. Tracers Flux Correction Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.5. Wave Effects Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how wave effects are modelled at ocean surface. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.6. River Runoff Budget Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how river runoff from land surface is routed to ocean and any global adjustment done. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.7. Geothermal Heating Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how geothermal heating is present at ocean bottom. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear" # "Non-linear" # "Non-linear (drag function of speed of tides)" # "Constant drag coefficient" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction Properties of momentum bottom friction in ocean 38.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of momentum bottom friction in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Free-slip" # "No-slip" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction Properties of momentum lateral friction in ocean 39.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of momentum lateral friction in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "1 extinction depth" # "2 extinction depth" # "3 extinction depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration Properties of sunlight penetration scheme in ocean 40.1. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of sunlight penetration scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 40.2. Ocean Colour Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the ocean sunlight penetration scheme ocean colour dependent ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 40.3. Extinction Depth Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe and list extinctions depths for sunlight penetration scheme (if applicable). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Freshwater flux" # "Virtual salt flux" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing Properties of surface fresh water forcing in ocean 41.1. From Atmopshere Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface fresh water forcing from atmos in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Freshwater flux" # "Virtual salt flux" # "Real salt flux" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41.2. From Sea Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface fresh water forcing from sea-ice in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 41.3. Forced Mode Restoring Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface salinity restoring in forced mode (OMIP) End of explanation """
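For illustration only, here is a sketch of what one of these properties looks like once the TODO is filled in. The identifier and the choice both come from the cells above; the chosen value is a placeholder, not the documentation of any real model.
# PROPERTY ID - illustrative sketch only, not a statement about a real model
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE: one of the valid choices listed above, picked as a placeholder
DOC.set_value("Freshwater flux")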
asharel/ml
LAB3/src/LAB3.ipynb
gpl-3.0
print ('benign=>', (all_classes<0.5).sum())
print ('malignant=>', (all_classes>0.5).sum())
"""
Explanation: Starting data: How many classes are there? How many examples of each?
There are two classes in our data set, malignant samples and benign samples, which are shown in the images of cell 4. We have 6691 samples, of which 4165 (62.25%) are benign and 2526 (37.75%) are malignant.
End of explanation
"""
X_vec = (np.expand_dims(all_slices,-1) /(10**4))
(X_vec>1).sum()

from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical

X_vec = (np.expand_dims(all_slices,-1) - np.mean(all_slices))/np.std(all_slices)
y_vec = to_categorical(all_classes)
X_train, X_test, y_train, y_test = train_test_split(X_vec, y_vec, train_size = 0.75, random_state = 1, stratify = all_classes)

X_tr = np.zeros((np.shape(X_train)[0], np.shape(X_train)[1]*np.shape(X_train)[2]))
for i in range(np.shape(X_train)[0]):
    X_tr[i] = X_train[i].flatten()
X_te = np.zeros((np.shape(X_test)[0], np.shape(X_test)[1]*np.shape(X_test)[2]))
for i in range(np.shape(X_test)[0]):
    X_te[i] = X_test[i].flatten()

print("X_train shape:", np.shape(X_train))
print("X_tr shape: ", np.shape(X_tr))
print("X_test shape: ", np.shape(X_test))
print("X_te shape: ", np.shape(X_te))

from sklearn.linear_model import LogisticRegression
model2 = LogisticRegression()
model2.fit(X_tr, np.argmax(y_train,1))
"""
Explanation: What is the minimum expected accuracy?
The minimum expected accuracy is what we would get by assigning every sample to a single class, i.e. always predicting the majority (benign) class, which would be correct about 62.25% of the time.
How are the input images normalized? Can you think of a better option? If so, which one? Try it.
The data are normalized with the following statement:
X_vec = (np.expand_dims(all_slices,-1) - np.mean(all_slices))/np.std(all_slices)
That is, it uses Z-score normalization, $x'=\frac{x-\mu}{\sigma}$, which standardizes the data to zero mean and unit standard deviation and is useful when the data follow a normal distribution. Another possible way to normalize would be decimal scaling, $x'=\frac{x}{10^j}$ where $max(x')<1$.
End of explanation
"""
print('Train accuracy:', model2.score(X_tr, np.argmax(y_train,1)))
print('Test accuracy :', model2.score(X_te, np.argmax(y_test,1)))
"""
Explanation: What is the format (dimensions) of the input images? Why is the flatten operation on those images needed in order to apply logistic regression?
The input data have dimensions 64x64; the flatten operation turns each image into a 4096x1 vector. The logistic regression function expects matrices of at most two dimensions, number of examples x 4096 (number of variables), whereas as the data stand we have 4-dimensional arrays of shape number of examples x 64 x 64 x 1.
Simple model
How many free parameters does the logistic regression have?
It is 4096, for the input dimension, plus 1 for the bias = 4097.
What results does it give after training? Does it overfit?
The accuracy results for the train and test data are shown below. As we can see, there is a notable difference between the accuracy on the training data and on the test data, with the former being better. This indicates that the logistic regression training is overfitting.
End of explanation
"""
from sklearn.metrics import classification_report, roc_curve, auc

print('')
print(classification_report(np.argmax(y_test,1), model2.predict(X_te)))
fpr, tpr, thresholds = roc_curve(np.argmax(y_test, 1), model2.predict_proba(X_te)[:,1])
fig, ax1 = plt.subplots(1,1)
ax1.plot(fpr, tpr, 'r-.', label = 'Simple model (%2.2f)' % auc(fpr, tpr))
ax1.set_xlabel('False Positive Rate')
ax1.set_ylabel('True Positive Rate')
ax1.plot(fpr, fpr, 'b-', label = 'Random Guess')
ax1.legend()
plt.show()
"""
Explanation: What can you conclude about this model from its ROC curve?
The ROC curve shown below gives only a fair value (its AUC is 0.78, between 0.7 and 0.8), so the classification is not good.
End of explanation
"""
from sklearn.metrics import classification_report, roc_curve, auc

print('')
print(classification_report(np.argmax(y_test,1), model2.predict(X_te)))
fpr, tpr, thresholds = roc_curve(np.argmax(y_train, 1), model2.predict_proba(X_tr)[:,1])
fig, ax1 = plt.subplots(1,1)
ax1.plot(fpr, tpr, 'r-.', label = 'Simple model (%2.2f)' % auc(fpr, tpr))
ax1.set_xlabel('False Positive Rate')
ax1.set_ylabel('True Positive Rate')
ax1.plot(fpr, fpr, 'b-', label = 'Random Guess')
ax1.legend()
plt.show()
"""
Explanation: What happens if the ROC curve is computed on the training data? Does it come out better? Why?
As we can see, the ROC curve for the training data is excellent (AUC of 1), so it does come out much better. This is because the model is overfitting the training data, in effect memorizing it, so it later classifies those samples almost perfectly, which yields an optimal ROC curve.
End of explanation
"""
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras import backend as K
from keras.losses import binary_crossentropy
from keras import optimizers
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

print("X_train shape:", np.shape(X_train))
print("X_test shape: ", np.shape(X_test))
print("y_train shape:", np.shape(y_train))
print("y_test shape: ", np.shape(y_test))
"""
Explanation: Basic CNN model
End of explanation
"""
batch_size = 128
epochs = 12
input_shape = (64,64,1)
num_classes = 2

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(Dropout(0.25))
model.add(Flatten())
#model.add(Dense(128, activation='relu'))
#model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=binary_crossentropy, optimizer=optimizers.Adadelta(), metrics=['accuracy'])
model.summary()
#SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
"""
Explanation: How many free parameters does each layer of the model have in total? Check that these numbers match what you would expect by explaining and working out the count you would do.
To calculate the free parameters of the convolutional layers we follow the formula: input channels x kernel size x output channels + bias x output channels. In this case the bias is 1 per output channel, so we have:
convolutional layer 1: 1x3x3x32 + 32x1 = 320
convolutional layer 2: 32x3x3x64 + 64x1 = 18496
Both match what the summary report shows.
The dense layer: input size x output size + bias x output size = 57600x2 + 2 = 115202.
Why is the output of the first Conv2D layer 62x62x32 and not 64x64x32? And why is it not 64x64x1?
The output size of the first Conv2D layer is 62x62x32 instead of 64x64x32 because of the kernel size (3x3): with no padding, each spatial dimension of the output shrinks by the kernel size minus one, so for the output to be 64x64x32 the kernel size would have to be 1x1. The last value is the size of the output space, that is, the number of filters of the layer, which is given as the first argument when the function is called:
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
If we want it to be 1, we only have to set it there.
What can you do so that its output is 64x64x32? Modify the parameters of the function as follows; the next cell shows an example:
model.add(Conv2D(32, kernel_size=(1, 1), activation='relu', input_shape=input_shape))
End of explanation
"""
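As a quick, illustrative cross-check of the parameter counts worked out above (a sketch that is not part of the original lab; the two helper functions are introduced here only for this check):
def conv2d_params(in_channels, kh, kw, filters):
    # kh*kw*in_channels weights per filter, plus one bias per filter
    return in_channels * kh * kw * filters + filters

def dense_params(n_in, n_out):
    # n_in weights per output unit, plus one bias per output unit
    return n_in * n_out + n_out

print(conv2d_params(1, 3, 3, 32))     # 320    -> first Conv2D layer
print(conv2d_params(32, 3, 3, 64))    # 18496  -> second Conv2D layer
print(dense_params(30 * 30 * 64, 2))  # 115202 -> Dense layer (57600 inputs after Flatten)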
gtrichards/PHYS_T480
NonlinearDimensionReduction.ipynb
mit
import numpy as np from sklearn.manifold import LocallyLinearEmbedding X = np.random.normal(size=(1000,2)) # 1000 points in 2D R = np.random.random((2,10)) # projection matrix X = np.dot(X,R) # now a 2D linear manifold in 10D space k = 5 # Number of neighbors to use in fit n = 2 # Number of dimensions to fit lle = LocallyLinearEmbedding(k,n) lle.fit(X) proj = lle.transform(X) # 100x2 projection of the data """ Explanation: Nonlinear Dimensionality Reduction G. Richards (2016), based on materials from Ivezic, Connolly, Miller, Leighly, and VanderPlas. Today we will talk about the concepts of * manifold learning * nonlinear dimensionality reduction Specifically using the following algorithms * local linear embedding (LLE) * isometric mapping (IsoMap) * t-distributed Stochastic Neighbor Embedding (t-SNE) Let's start by my echoing the brief note of caution given in Adam Miller's notebook: "astronomers will often try to derive physical insight from PCA eigenspectra or eigentimeseries, but this is not advisable as there is no physical reason for the data to be linearly and orthogonally separable". Moreover, physical components are (generally) positive definite. So, PCA is great for dimensional reduction, but for doing physics there are generally better choices. While NMF "solves" the issue of negative components, it is still a linear process. For data with non-linear correlations, an entire field, known as Manifold Learning and nonlinear dimensionality reduction, has been developed, with several algorithms available via the sklearn.manifold module. For example, if your data set looks like this: Then PCA is going to give you something like this. Clearly not very helpful! What you really want is something more like the results below. For more examples see Vanderplas & Connolly 2009 Local Linear Embedding Local Linear Embedding attempts to embed high-$D$ data in a lower-$D$ space. Crucially it also seeks to preserve the geometry of the local "neighborhoods" around each point. In the case of the "S" curve, it seeks to unroll the data. The steps are Step 1: define local geometry - local neighborhoods determined from $k$ nearest neighbors. - for each point calculate weights that reconstruct a point from its $k$ nearest neighbors via $$ \begin{equation} \mathcal{E}_1(W) = \left|X - WX\right|^2, \end{equation} $$ where $X$ is an $N\times K$ matrix and $W$ is an $N\times N$ matrix that minimizes the reconstruction error. Essentially this is finding the hyperplane that describes the local surface at each point within the data set. So, imagine that you have a bunch of square tiles and you are trying to tile the surface with them. Step 2: embed within a lower dimensional space - set all $W_{ij}=0$ except when point $j$ is one of the $k$ nearest neighbors of point $i$. - $W$ becomes very sparse for $k \ll N$ (only $Nk$ entries in $W$ are non-zero). - minimize $\begin{equation} \mathcal{E}_2(Y) = \left|Y - W Y\right|^2, \end{equation} $ with $W$ fixed to find an $N$ by $d$ matrix ($d$ is the new dimensionality). Step 1 requires a nearest-neighbor search. Step 2 requires an eigenvalue decomposition of the matrix $C_W \equiv (I-W)^T(I-W)$. LLE has been applied to data as diverse as galaxy spectra, stellar spectra, and photometric light curves. It was introduced by Roweis & Saul (2000). Skikit-Learn's call to LLE is as follows, with a more detailed example already being given above. 
End of explanation """ # Execute this cell to load the digits sample %matplotlib inline import numpy as np from sklearn.datasets import load_digits from matplotlib import pyplot as plt digits = load_digits() grid_data = np.reshape(digits.data[0], (8,8)) #reshape to 8x8 plt.imshow(grid_data, interpolation = "nearest", cmap = "bone_r") print grid_data X = digits.data y = digits.target #LLE from sklearn.manifold import LocallyLinearEmbedding # Complete """ Explanation: See what LLE does for the digits data, using the 7 nearest neighbors and 2 components. End of explanation """ # Execute this cell import numpy as np from sklearn.manifold import Isomap XX = np.random.normal(size=(1000,2)) # 1000 points in 2D R = np.random.random((2,10)) # projection matrix XX = np.dot(XX,R) # X is a 2D manifold in 10D space k = 5 # number of neighbors n = 2 # number of dimensions iso = Isomap(k,n) iso.fit(XX) proj = iso.transform(XX) # 1000x2 projection of the data """ Explanation: Isometric Mapping is based on multi-dimensional scaling (MDS) framework. It was introduced in the same volume of science as the article above. See Tenenbaum, de Silva, & Langford (2000). Geodestic curves are used to recover non-linear structure. In Scikit-Learn IsoMap is implemented as follows: End of explanation """ # IsoMap from sklearn.manifold import Isomap # Complete """ Explanation: Try 7 neighbors and 2 dimensions on the digits data. End of explanation """ # t-SNE from sklearn.manifold import TSNE # Complete """ Explanation: t-SNE t-distributed Stochastic Neighbor Embedding (t-SNE) is not discussed in the book, Scikit-Learn does have a t-SNE implementation and it is well worth mentioning this manifold learning algorithm too. SNE itself was developed by Hinton & Roweis with the "$t$" part being added by van der Maaten & Hinton. It works like the other manifold learning algorithms. Try it on the digits data. End of explanation """ # Execute this cell from matplotlib import offsetbox #---------------------------------------------------------------------- # Scale and visualize the embedding vectors def plot_embedding(X): x_min, x_max = np.min(X, 0), np.max(X, 0) X = (X - x_min) / (x_max - x_min) plt.figure() ax = plt.subplot(111) for i in range(X.shape[0]): #plt.text(X[i, 0], X[i, 1], str(digits.target[i]), color=plt.cm.Set1(y[i] / 10.), fontdict={'weight': 'bold', 'size': 9}) plt.text(X[i, 0], X[i, 1], str(digits.target[i]), color=plt.cm.nipy_spectral(y[i]/9.)) shown_images = np.array([[1., 1.]]) # just something big for i in range(digits.data.shape[0]): dist = np.sum((X[i] - shown_images) ** 2, 1) if np.min(dist) < 4e-3: # don't show points that are too close continue shown_images = np.r_[shown_images, [X[i]]] imagebox = offsetbox.AnnotationBbox(offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r), X[i]) ax.add_artist(imagebox) plt.xticks([]), plt.yticks([]) plot_embedding(X_reduced) plt.show() """ Explanation: You'll know if you have done it right if you understand Adam Miller's comment "Holy freakin' smokes. That is magic. (It's possible we just solved science)." Personally, I think that some exclamation points may be needed in there! What's even more illuminating is to make the plot using the actual digits to plot the points. Then you can see why certain digits are alike or split into multiple regions. Can you explain the patterns you see here? End of explanation """
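Since the digits cells above are left with "# Complete" placeholders, the following is one possible way to fill them in. It is a sketch rather than the official solution, and it assumes the digits data loaded earlier (X = digits.data, y = digits.target) and that the plotting helper is fed a variable named X_reduced.
# LLE on the digits data (7 neighbors, 2 components)
lle = LocallyLinearEmbedding(n_neighbors=7, n_components=2)
X_lle = lle.fit_transform(X)

# IsoMap on the digits data (7 neighbors, 2 components)
iso = Isomap(n_neighbors=7, n_components=2)
X_iso = iso.fit_transform(X)

# t-SNE down to 2 dimensions
tsne = TSNE(n_components=2, random_state=0)
X_tsne = tsne.fit_transform(X)

# Pick whichever embedding you want to visualize with plot_embedding()
X_reduced = X_tsne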
junhwanjang/DataSchool
Lecture/18. 문서 전처리/2) NLTK 자연어 처리 패키지 소개.ipynb
mit
nltk.download('averaged_perceptron_tagger')
nltk.download("gutenberg")
nltk.download('punkt')
nltk.download('reuters')
nltk.download("stopwords")
nltk.download("taggers")
nltk.download("webtext")
nltk.download("wordnet")

nltk.corpus.gutenberg.fileids()

emma_raw = nltk.corpus.gutenberg.raw("austen-emma.txt")
print(emma_raw[:1302])
"""
Explanation: Introduction to the NLTK natural language processing package
The NLTK (Natural Language Toolkit) package is a Python package for natural language processing and document analysis that was originally developed for education. It offers a wide range of features and examples and is also widely used in practice and in research. The main features provided by the NLTK package are:
sample corpora and lexicons
token generation (tokenizing)
morphological analysis (stemming/lemmatizing)
part-of-speech tagging
syntax parsing
Sample corpora
A corpus is a collection of sample documents used for analysis work. Some are simply collections of documents such as novels or newspapers, but most add auxiliary annotations such as part-of-speech and morphological information and are organized in a structured form to make analysis easier. The corpus subpackage of the NLTK package provides a variety of research corpora such as the ones below. This list is only a part of the full set.
averaged_perceptron_tagger Averaged Perceptron Tagger
book_grammars: Grammars from NLTK Book
brown: Brown Corpus
chat80: Chat-80 Data Files
city_database: City Database
comparative_sentences Comparative Sentence Dataset
dependency_treebank. Dependency Parsed Treebank
gutenberg: Project Gutenberg Selections
hmm_treebank_pos_tagger Treebank Part of Speech Tagger (HMM)
inaugural: C-Span Inaugural Address Corpus
large_grammars: Large context-free and feature-based grammars for parser comparison
mac_morpho: MAC-MORPHO: Brazilian Portuguese news text with part-of-speech tags
masc_tagged: MASC Tagged Corpus
maxent_ne_chunker: ACE Named Entity Chunker (Maximum entropy)
maxent_treebank_pos_tagger Treebank Part of Speech Tagger (Maximum entropy)
movie_reviews: Sentiment Polarity Dataset Version 2.0
names: Names Corpus, Version 1.3 (1994-03-29)
nps_chat: NPS Chat
omw: Open Multilingual Wordnet
opinion_lexicon: Opinion Lexicon
pros_cons: Pros and Cons
ptb: Penn Treebank
punkt: Punkt Tokenizer Models
reuters: The Reuters-21578 benchmark corpus, ApteMod version
sample_grammars: Sample Grammars
sentence_polarity: Sentence Polarity Dataset v1.0
sentiwordnet: SentiWordNet
snowball_data: Snowball Data
stopwords: Stopwords Corpus
subjectivity: Subjectivity Dataset v1.0
tagsets: Help on Tagsets
treebank: Penn Treebank Sample
twitter_samples: Twitter Samples
unicode_samples: Unicode Samples
universal_tagset: Mappings to the Universal Part-of-Speech Tagset
universal_treebanks_v20 Universal Treebanks Version 2.0
verbnet: VerbNet Lexicon, Version 2.1
webtext: Web Text Corpus
word2vec_sample: Word2Vec Sample
wordnet: WordNet
words: Word Lists
These corpus resources are not shipped with the package at install time; the user has to download them with the download command.
End of explanation
"""
from nltk.tokenize import word_tokenize
word_tokenize(emma_raw[50:100])

from nltk.tokenize import RegexpTokenizer
t = RegexpTokenizer("[\w]+")
t.tokenize(emma_raw[50:100])

from nltk.tokenize import sent_tokenize
print(sent_tokenize(emma_raw[:1000])[3])
"""
Explanation: Token generation (tokenizing)
To analyze a document, the long character string must first be split into smaller units suitable for analysis. These string units are called tokens.
End of explanation
"""
from nltk.stem import PorterStemmer
st = PorterStemmer()
st.stem("eating")

from nltk.stem import LancasterStemmer
st = LancasterStemmer()
st.stem("shopping")

from nltk.stem import RegexpStemmer
st = RegexpStemmer("ing")
st.stem("cooking")

from nltk.stem import WordNetLemmatizer
lm = WordNetLemmatizer()
print(lm.lemmatize("cooking"))
print(lm.lemmatize("cooking", pos="v"))
print(lm.lemmatize("cookbooks"))

print(WordNetLemmatizer().lemmatize("believes"))
print(LancasterStemmer().stem("believes"))
"""
Explanation: Morphological analysis
Morphological analysis is the task of working out the structure of a word's linguistic attributes, such as its root, prefixes/suffixes, and part of speech (POS). Concretely it is divided into the following tasks:
stemming (extracting the root)
lemmatizing (recovering the base form)
POS tagging (part-of-speech tagging)
### Stemming and lemmatizing
End of explanation
"""
from nltk.tag import pos_tag
tagged_list = pos_tag(word_tokenize(emma_raw[:100]))
tagged_list

from nltk.tag import untag
untag(tagged_list)
"""
Explanation: POS tagging
POS (part-of-speech) means the word class. Part-of-Speech Tagset
https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.htm
http://www.ibm.com/support/knowledgecenter/ko/SS5RWK_3.5.0/com.ibm.discovery.es.ta.doc/iiysspostagset.htm
End of explanation
"""
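To tie the pieces above together, here is a minimal illustrative pipeline that tokenizes, stems, and POS-tags a single sentence. The sentence itself is made up; only functions already used in this notebook appear.
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.tag import pos_tag

sentence = "Emma was cooking dinner and reading books."
tokens = word_tokenize(sentence)                   # tokenizing
stems = [PorterStemmer().stem(t) for t in tokens]  # stemming
tagged = pos_tag(tokens)                           # POS tagging
print(tokens)
print(stems)
print(tagged)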
IST256/learn-python
content/lessons/13-Visualization/WMC1-Matplotlib.ipynb
mit
# Jupyter Directive %matplotlib inline # imports import matplotlib import pandas as pd import numpy as np import matplotlib.pyplot as plt matplotlib.rcParams['figure.figsize'] = (20.0, 10.0) # larger figure size """ Explanation: Watch Me Code 1: Matplotlib We will demonstrate Pythons data visualization library Matplotlib using it two ways Standalone With Pandas End of explanation """ # Matplotlib requires lists to plot x = [1,2,3,4,5] xsquared = [1,4,9,16,25] plt.plot(x,xsquared) # default is a blue line # this can be overridden. consult help(plt.plot) for details #MATLAB MATPLOTLIB plt.plot(x, xsquared, 'ro') # red dots # we can manipulate the axis too, rather than auto scale. In this case we must call plt.show() to display the plot plt.plot(x, xsquared, 'ro') # red dots plt.axis([0,6,0,26]) # a list in the form [xmin, xmax, ymin, ymax] plt.show() # Labels are simple plt.bar(x, xsquared) #,'r--') # red dashes plt.axis([0,6,0,26]) # a list in the form [xmin, xmax, ymin, ymax] plt.xlabel("Value of X", fontsize=36) plt.ylabel("Value of X Squared", fontsize=36) plt.title("Plot of X versus X Squared", fontsize=48) plt.grid(True) plt.show() """ Explanation: Manual Plotting in Matplotlib End of explanation """ plt.bar(x,xsquared) plt.pie(x) plt.scatter(x, xsquared) """ Explanation: Plotting chart types End of explanation """ scores = pd.read_csv("https://raw.githubusercontent.com/mafudge/datasets/master/exam-scores/exam-scores.csv") scores.sample(10) # Plotting with Pandas is a bit more expressive scores.plot.scatter(x ='Completion_Time', y ='Student_Score' ) scores.corr() ## Labels too small, we can fall back to Matplot lib! p = scores.plot.scatter(x ='Completion Time', y ='Student Score', fontsize=20) p.set_xlabel('Completetion Time', fontsize=20) p.set_ylabel('Student Score', fontsize=20) p # Take the value counts of letter grade and create a data frame letter_grades = pd.DataFrame( { 'Letter' : scores['Letter_Grade'].value_counts() } ).sort_index() letter_grades.plot.bar(sort_columns=True) letter_grades.plot.pie( y = 'Letter', fontsize = 20) """ Explanation: Plotting with Pandas End of explanation """
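As an extra sketch that is not part of the original lesson, the x and xsquared lists defined above can be drawn with the four chart types shown, side by side on a single figure:
# One figure, four panels, reusing x and xsquared from the cells above
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 8))
axes[0, 0].plot(x, xsquared, 'r--')
axes[0, 0].set_title('plot')
axes[0, 1].bar(x, xsquared)
axes[0, 1].set_title('bar')
axes[1, 0].scatter(x, xsquared)
axes[1, 0].set_title('scatter')
axes[1, 1].pie(x)
axes[1, 1].set_title('pie')
plt.tight_layout()
plt.show()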
balarsen/pymc_learning
Counting/Counting multi detector Dead TIme.ipynb
bsd-3-clause
%matplotlib inline from pprint import pprint import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc3 as mc import spacepy.toolbox as tb import spacepy.plot as spp import tqdm from scipy import stats import seaborn as sns sns.set() matplotlib.pyplot.rc('figure', figsize=(10,10)) matplotlib.pyplot.rc('lines', lw=3) matplotlib.pyplot.rc('font', size=20) %matplotlib inline """ Explanation: Counting with dead time How much does it matter and can we understand it with MCMC Using wikipedia and others names below: https://en.wikipedia.org/wiki/Dead_time Bottom work is based on Adams RP, Murray I, MacKay DJC. Tractable nonparametric Bayesian inference in Poisson processes with Gaussian process intensities. Proceedings of the 26th Annual International Conference on Machine Learning; Montreal, Quebec, Canada. 1553376: ACM; 2009. p. 9-16. End of explanation """ np.random.seed(8675309) Rate1 = 10 Rate2 = 20 deadtime = 0.05 times1 = np.random.exponential(1/Rate1, size=1000) times2 = np.random.exponential(1/Rate2, size=1000) times = pd.DataFrame({'between1':times1, 'between2':times2}) times['times1'] = np.cumsum(times['between1']) times['times2'] = np.cumsum(times['between2']) # get the times of bunch of hits fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(20,8), squeeze=False) print(ax[0][0]) times[['between1', 'between2']].plot(ax=ax[0][0]) ax[0][0].set_ylabel('Time between') times[['times1', 'times2']].plot(ax=ax[0][1]) ax[0][1].set_ylabel('Wall time') plt.tight_layout() plt.figure(figsize=(20,8)) times[['between1', 'between2']].hist(bins=30, figsize=(20,8)) # make cts per unit realcps1 = np.histogram(times['times1'], bins=np.arange(0, times['times1'].max()))[0] realcps2 = np.histogram(times['times2'], bins=np.arange(0, times['times2'].max()))[0] # then cull out all points and are closer than deadtime to each other times['between1dt'] = times['between1'].copy() times['between1dt'].loc[times['between1dt'] < deadtime] = np.nan times['between2dt'] = times['between2'].copy() times['between2dt'].loc[times['between2dt'] < deadtime] = np.nan times['times1dt'] = np.cumsum(times['between1dt']) times['times2dt'] = np.cumsum(times['between2dt']) times[['times1', 'times1dt']].plot(figsize=(10,7)) times[['times2', 'times2dt']].plot(figsize=(10,7)) dtcps1 = np.histogram(times['times1dt'].dropna(), bins=np.arange(0, times['times1dt'].dropna().max()))[0] dtcps2 = np.histogram(times['times2dt'].dropna(), bins=np.arange(0, times['times2dt'].dropna().max()))[0] df = pd.DataFrame({'dtcps1':dtcps1, 'realcps1':realcps1[:len(dtcps1)]}) df.plot() # TODO looks wrong!! np.random.seed(8675309) size = 100 n_rate = 6 data = np.empty((n_rate, size)) for ii, d in enumerate(tb.linspace(1, 100, n_rate)): data[ii] = np.random.poisson(d, size=size) print(data.shape) plt.plot(data.T); """ Explanation: Generate some data End of explanation """ with mc.Model() as model0: mu = mc.Uniform('mu', 0, 1e6, shape=n_rate) dat = mc.Poisson('dat', mu=mu, observed=data.T, shape=n_rate) # det2 = mc.Poisson('d2', mu=mu, observed=d2[0:10]) start = mc.find_MAP() trace0 = mc.sample(10000, start=start) mc.summary(trace0) ax = mc.traceplot(trace0, lines={'mu':tb.linspace(1, 100, n_rate)}) """ Explanation: From the reference: The relationship of the real counting rates with the measured counting rates is well known for these two basic models. 
If we denote by N the real counting rate, by M the measured counting rate, and by τ the dead time and considering that the non-dead time disturbed distribution is Poissonian, M and N are related by $N=\frac{M}{1-M\tau}$ for the case of a non-paralyzable dead time, and by $M=Ne^{-N\tau}$ No dead time just run End of explanation """
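As a small illustrative sketch (not from the original notebook), the non-paralyzable relation quoted above, $N=\frac{M}{1-M\tau}$, can be applied directly to a measured rate. The measured rate below is made up; the dead time reuses the deadtime value defined earlier.
def correct_nonparalyzable(M, tau):
    """Return the true rate N given measured rate M (counts/s) and dead time tau (s)."""
    return M / (1.0 - M * tau)

M_measured = 8.0    # illustrative measured rate, counts per second
print(correct_nonparalyzable(M_measured, deadtime))  # deadtime = 0.05 s -> ~13.3 counts/s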
prasants/pyds
.ipynb_checkpoints/01.Unix_and_Shell_Command_Basics-checkpoint.ipynb
mit
!pwd """ Explanation: Table of Contents <p><div class="lev1 toc-item"><a href="#Overview" data-toc-modified-id="Overview-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Overview</a></div><div class="lev2 toc-item"><a href="#pwd---Print-Working-Directory" data-toc-modified-id="pwd---Print-Working-Directory-11"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>pwd - Print Working Directory</a></div><div class="lev2 toc-item"><a href="#ls---List-files-and-directory-names,-attributes" data-toc-modified-id="ls---List-files-and-directory-names,-attributes-12"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>ls - List files and directory names, attributes</a></div><div class="lev2 toc-item"><a href="#mkdir---Make-a-new-directory" data-toc-modified-id="mkdir---Make-a-new-directory-13"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>mkdir - Make a new directory</a></div><div class="lev2 toc-item"><a href="#cd---Change-to-a-particular-directory" data-toc-modified-id="cd---Change-to-a-particular-directory-14"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>cd - Change to a particular directory</a></div><div class="lev2 toc-item"><a href="#rmdir---Remove-a-directory" data-toc-modified-id="rmdir---Remove-a-directory-15"><span class="toc-item-num">1.5&nbsp;&nbsp;</span>rmdir - Remove a directory</a></div><div class="lev2 toc-item"><a href="#cp---Copy-Files" data-toc-modified-id="cp---Copy-Files-16"><span class="toc-item-num">1.6&nbsp;&nbsp;</span>cp - Copy Files</a></div><div class="lev2 toc-item"><a href="#rm---Remove-files" data-toc-modified-id="rm---Remove-files-17"><span class="toc-item-num">1.7&nbsp;&nbsp;</span>rm - Remove files</a></div><div class="lev2 toc-item"><a href="#mv-:-Move-a-file" data-toc-modified-id="mv-:-Move-a-file-18"><span class="toc-item-num">1.8&nbsp;&nbsp;</span>mv : Move a file</a></div><div class="lev2 toc-item"><a href="#CURL---Getting-Data-from-the-Command-Line" data-toc-modified-id="CURL---Getting-Data-from-the-Command-Line-19"><span class="toc-item-num">1.9&nbsp;&nbsp;</span>CURL - Getting Data from the Command Line</a></div><div class="lev2 toc-item"><a href="#head/tail" data-toc-modified-id="head/tail-110"><span class="toc-item-num">1.10&nbsp;&nbsp;</span>head/tail</a></div><div class="lev2 toc-item"><a href="#grep:" data-toc-modified-id="grep:-111"><span class="toc-item-num">1.11&nbsp;&nbsp;</span>grep:</a></div><div class="lev2 toc-item"><a href="#Redirection-(or-Downloading)" data-toc-modified-id="Redirection-(or-Downloading)-112"><span class="toc-item-num">1.12&nbsp;&nbsp;</span>Redirection (or Downloading)</a></div> # Overview This is by no means an exhaustive list, the point is just to give you a feeler for what's possible. If you have used Linux or Mac, or have written code in Ruby, chances are you have used Unix commands already. If you're a Windows user, here's a good resource: https://www.howtogeek.com/249966/how-to-install-and-use-the-linux-bash-shell-on-windows-10/ Another great resource in general on the basics of unix commands: * http://matt.might.net/articles/basic-unix/ ## pwd - Print Working Directory End of explanation """ ls !ls ls -al ls -Al """ Explanation: ls - List files and directory names, attributes Some commonly used commands are below: * -A: list all of the contents of the queried directory, even hidden files. * -l: detailed format, display additional info for all files and directories. * -R: recursively list the contents of any subdirectories. * -t: sort files by the time of the last modification. * -S: sort files by size. 
* -r: reverse any sort order. * -h: when used in conjunction with -l, gives a more human-readable output. You can also combine the commands/flags. For example: * -al * -Al Read more on this topic here: https://www.mkssoftware.com/docs/man1/ls.1.asp End of explanation """ !mkdir NewFolder ls """ Explanation: mkdir - Make a new directory End of explanation """ cd NewFolder cd .. ls """ Explanation: cd - Change to a particular directory End of explanation """ rmdir NewFolder ls """ Explanation: rmdir - Remove a directory If the folder is not empty, it need the "-r" flag. Example: rmdir -r NewFolder End of explanation """ ls # Copy in the same directory !cp 01.Unix_and_Shell_Command_Basics.ipynb Notebook01.ipynb ls rm Notebook01.ipynb ls # Copy to another directory !mkdir TempFolder !cp 01.Unix_and_Shell_Command_Basics.ipynb TempFolder/File01.ipynb !ls cd TempFolder ls """ Explanation: cp - Copy Files Careful with the filenames! Will be overwritten without warning. End of explanation """ ls pwd !rm File01.ipynb !ls !pwd !ls -al !cd .. !pwd !ls cd .. ls cp -i 01.Unix_and_Shell_Command_Basics.ipynb TempFolder/NewFile01.ipynb cd Tempfolder ls cp -i NewFile01.ipynb NewFile01.ipynb """ Explanation: rm - Remove files Note that this is different to rmdir, which exists to remove a directory End of explanation """ pwd ls rm -r TempFolder ls cp 01.Unix_and_Shell_Command_Basics.ipynb NewFile01.ipynb ls mkdir TempFolder02 ls mv NewFile01.ipynb TempFolder02 ls cd TempFolder02 ls cd .. """ Explanation: mv : Move a file This is close to the the 'cut' function available for files on Windows. When you use the 'mv' command, a file is copied to a new location, and removed from it's original location. End of explanation """ !curl -L 'https://dl.dropboxusercontent.com/s/j2yh7nvlli1nsa5/gdp.txt' !curl -L 'https://dl.dropboxusercontent.com/s/eqyhkf3tpgre0jb/foo.txt' !curl -s "http://freegeoip.net/json/" | jq . !curl -s "http://api.open-notify.org/iss-now.json" !curl -s "http://api.open-notify.org/astros.json" """ Explanation: CURL - Getting Data from the Command Line Let's begin by copying a simple tab-separated file. The format is as below: !curl -OptionalFlag 'http://url' End of explanation """ !curl -X POST --include 'https://community-sentiment.p.mashape.com/text/' \ -H 'X-Mashape-Key: YFWRiIyfNemshsFin8iTJy0XFUjNp1rXoY7jsnoPlVphvWnKY6' \ -H 'Content-Type: application/x-www-form-urlencoded' \ -H 'Accept: application/json' \ -d 'txt=My team lost! :(' """ Explanation: Register for the Mashape API Market here: https://market.mashape.com End of explanation """ pwd ls cd Data ls !head -n 3 sample.txt !tail -n 3 sample.txt !cat sample.txt # Selecting specific fields !cut -f2,3 sample.txt !sort sample.txt !sort -k 2 sample.txt !wc sample.txt !wc -w sample.txt !find ~ -sample.txt 'sample.txt' """ Explanation: Note: This is a free API, so I have exposed my API key in the code. In practice, if you are ever sharing code, please take adequate precautions, and never expose your private key. head/tail End of explanation """ pwd ls !cat nyt.txt # Count the number of matches !grep -c 'Kennedy' nyt.txt !grep -o 'Kennedy' nyt.txt """ Explanation: grep: Grep is a pattern matching utility built into unix and it's flavors. The typical format is: grep [option] [pattern] [file/s] End of explanation """ !curl -s 'http://freegeoip.net/json/' > location.json !jq . location.json !curl -s 'http://freegeoip.net/json/' | jq . """ Explanation: More options for grep: * -c Print only a count of matched lines. 
* -l List only filenames * -i Ignore lowercase and uppercase distinctions * -o prints only the matching part of the line * -n Print matching line with its line number * -v Negate matches; print lines that do not match the regex * -r Recursively Search subdirectories listed End of explanation """ !curl "https://data.cityofnewyork.us/resource/2pc8-n4xe.json" > venues.json !cat venues.json !grep 'Ripley' venues.json !grep -i 'Theater' venues.json # Multiple flags, and multiple conditions !grep -v -e 'Theater' -e 'Theatre' venues.json """ Explanation: Redirection (or Downloading) This is really useful to quickly download a dataset using what is called an API Endpoint. Let's download the 'Times Square Entertainment Venues' dataset from New York City's Open Data Portal to demonstrate this. https://data.cityofnewyork.us/Business/Times-Square-Entertainment-Venues/jxdc-hnze End of explanation """
M4573R/BerkeleyX-CS190.1x-Scalable-Machine-Learning
Week 1 - Course Overview and Machine Learning Basics/ML_lab1_review_student.ipynb
mit
labVersion = 'cs190_week1_v_1_2' """ Explanation: Math and Python review and CTR data download This notebook reviews vector and matrix math, the NumPy Python package, and Python lambda expressions. It also covers downloading the data required for Lab 4, where you will analyze website click-through rates. Part 1 covers vector and matrix math, and you'll do a few exercises by hand. In Part 2, you'll learn about NumPy and use ndarray objects to solve the math exercises. Part 3 provides additional information about NumPy and how it relates to array usage in Spark's MLlib. Part 4 provides an overview of lambda expressions, and you'll wrap up by downloading the dataset for Lab 4. To move through the notebook just run each of the cells. You can run a cell by pressing "shift-enter", which will compute the current cell and advance to the next cell, or by clicking in a cell and pressing "control-enter", which will compute the current cell and remain in that cell. You should move through the notebook from top to bottom and run all of the cells. If you skip some cells, later cells might not work as expected. Note that there are several exercises within this notebook. You will need to provide solutions for cells that start with: # TODO: Replace &lt;FILL IN&gt; with appropriate code. This notebook covers: Part 1: Math review Part 2: NumPy Part 3: Additional NumPy and Spark linear algebra Part 4: Python lambda expressions Part 5: CTR data download End of explanation """ # TODO: Replace <FILL IN> with appropriate code # Manually calculate your answer and represent the vector as a list of integers values. # For example, [2, 4, 8]. x = [3, -6, 0] y = [4, 8, 16] # TEST Scalar multiplication: vectors (1a) # Import test library from test_helper import Test Test.assertEqualsHashed(x, 'e460f5b87531a2b60e0f55c31b2e49914f779981', 'incorrect value for vector x') Test.assertEqualsHashed(y, 'e2d37ff11427dbac7f833a5a7039c0de5a740b1e', 'incorrect value for vector y') """ Explanation: Part 1: Math review (1a) Scalar multiplication: vectors In this exercise, you will calculate the product of a scalar and a vector by hand and enter the result in the code cell below. Scalar multiplication is straightforward. The resulting vector equals the product of the scalar, which is a single value, and each item in the original vector. In the example below, $ a $ is the scalar (constant) and $ \mathbf{v} $ is the vector. $$ a \mathbf{v} = \begin{bmatrix} a v_1 \\ a v_2 \\ \vdots \\ a v_n \end{bmatrix} $$ Calculate the value of $ \mathbf{x} $: $$ \mathbf{x} = 3 \begin{bmatrix} 1 \\ -2 \\ 0 \end{bmatrix} $$ Calculate the value of $ \mathbf{y} $: $$ \mathbf{y} = 2 \begin{bmatrix} 2 \\ 4 \\ 8 \end{bmatrix} $$ End of explanation """ # TODO: Replace <FILL IN> with appropriate code # Manually calculate your answer and represent the vector as a list of integers values. z = [4, 10, 18] # TEST Element-wise multiplication: vectors (1b) Test.assertEqualsHashed(z, '4b5fe28ee2d274d7e0378bf993e28400f66205c2', 'incorrect value for vector z') """ Explanation: (1b) Element-wise multiplication: vectors In this exercise, you will calculate the element-wise multiplication of two vectors by hand and enter the result in the code cell below. You'll later see that element-wise multiplication is the default method when two NumPy arrays are multiplied together. 
Note we won't be performing element-wise multiplication in future labs, but we are introducing it here to distinguish it from other vector operators, and to because it is a common operations in NumPy, as we will discuss in Part (2b). The element-wise calculation is as follows: $$ \mathbf{x} \odot \mathbf{y} = \begin{bmatrix} x_1 y_1 \\ x_2 y_2 \\ \vdots \\ x_n y_n \end{bmatrix} $$ Calculate the value of $ \mathbf{z} $: $$ \mathbf{z} = \begin{bmatrix} 1 \\ 2 \\ 3 \end{bmatrix} \odot \begin{bmatrix} 4 \\ 5 \\ 6 \end{bmatrix} $$ End of explanation """ # TODO: Replace <FILL IN> with appropriate code # Manually calculate your answer and set the variables to their appropriate integer values. c1 = -11 c2 = 26 # TEST Dot product (1c) Test.assertEqualsHashed(c1, '8d7a9046b6a6e21d66409ad0849d6ab8aa51007c', 'incorrect value for c1') Test.assertEqualsHashed(c2, '887309d048beef83ad3eabf2a79a64a389ab1c9f', 'incorrect value for c2') """ Explanation: (1c) Dot product In this exercise, you will calculate the dot product of two vectors by hand and enter the result in the code cell below. Note that the dot product is equivalent to performing element-wise multiplication and then summing the result. Below, you'll find the calculation for the dot product of two vectors, where each vector has length $ n $: $$ \mathbf{w} \cdot \mathbf{x} = \sum_{i=1}^n w_i x_i $$ Note that you may also see $ \mathbf{w} \cdot \mathbf{x} $ represented as $ \mathbf{w}^\top \mathbf{x} $ Calculate the value for $ c_1 $ based on the dot product of the following two vectors: $$ c_1 = \begin{bmatrix} 1 \\ -3 \end{bmatrix} \cdot \begin{bmatrix} 4 \\ 5 \end{bmatrix}$$ Calculate the value for $ c_2 $ based on the dot product of the following two vectors: $$ c_2 = \begin{bmatrix} 3 \\ 4 \\ 5 \end{bmatrix} \cdot \begin{bmatrix} 1 \\ 2 \\ 3 \end{bmatrix}$$ End of explanation """ # TODO: Replace <FILL IN> with appropriate code # Represent matrices as lists within lists. For example, [[1,2,3], [4,5,6]] represents a matrix with # two rows and three columns. Use integer values. X = [[22, 28], [49, 64]] Y = [[1,2,3],[2,4,6],[3,6,9]] # TEST Matrix multiplication (1d) Test.assertEqualsHashed(X, 'c2ada2598d8a499e5dfb66f27a24f444483cba13', 'incorrect value for matrix X') Test.assertEqualsHashed(Y, 'f985daf651531b7d776523836f3068d4c12e4519', 'incorrect value for matrix Y') """ Explanation: (1d) Matrix multiplication In this exercise, you will calculate the result of multiplying two matrices together by hand and enter the result in the code cell below. Below, you'll find the calculation for multiplying two matrices together. Note that the number of columns for the first matrix and the number of rows for the second matrix have to be equal and are are represented by $ n $: $$ [\mathbf{X} \mathbf{Y}]{i,j} = \sum{r=1}^n \mathbf{X}{i,r} \mathbf{Y}{r,j} $$ First, you'll calculate the value for $ \mathbf{X} $. $$ \mathbf{X} = \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \end{bmatrix} \begin{bmatrix} 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{bmatrix} $$ Next, you'll perform an outer product and calculate the value for $ \mathbf{Y} $. Note that outer product is just a special case of general matrix multiplication and follows the same rules as normal matrix multiplication. 
$$ \mathbf{Y} = \begin{bmatrix} 1 \\ 2 \\ 3 \end{bmatrix} \begin{bmatrix} 1 & 2 & 3 \end{bmatrix} $$ End of explanation """ # It is convention to import NumPy with the alias np import numpy as np # TODO: Replace <FILL IN> with appropriate code # Create a numpy array with the values 1, 2, 3 simpleArray = np.array([1, 2, 3]) # Perform the scalar product of 5 and the numpy array timesFive = simpleArray * 5 print simpleArray print timesFive # TEST Scalar multiplication (2a) Test.assertTrue(np.all(timesFive == [5, 10, 15]), 'incorrect value for timesFive') """ Explanation: Part 2: NumPy (2a) Scalar multiplication NumPy is a Python library for working with arrays. NumPy provides abstractions that make it easy to treat these underlying arrays as vectors and matrices. The library is optimized to be fast and memory efficient, and we'll be using it throughout the course. The building block for NumPy is the ndarray, which is a multidimensional array of fixed-size that contains elements of one type (e.g. array of floats). For this exercise, you'll create a ndarray consisting of the elements [1, 2, 3] and multiply this array by 5. Use np.array() to create the array. Note that you can pass a Python list into np.array(). To perform scalar multiplication with an ndarray just use *. Note that if you create an array from a Python list of integers you will obtain a one-dimensional array, which is equivalent to a vector for our purposes. End of explanation """ # TODO: Replace <FILL IN> with appropriate code # Create a ndarray based on a range and step size. u = np.arange(0, 5, .5) v = np.arange(5, 10, .5) elementWise = u * v dotProduct = np.dot(u, v) print 'u: {0}'.format(u) print 'v: {0}'.format(v) print '\nelementWise\n{0}'.format(elementWise) print '\ndotProduct\n{0}'.format(dotProduct) # TEST Element-wise multiplication and dot product (2b) Test.assertTrue(np.all(elementWise == [ 0., 2.75, 6., 9.75, 14., 18.75, 24., 29.75, 36., 42.75]), 'incorrect value for elementWise') Test.assertEquals(dotProduct, 183.75, 'incorrect value for dotProduct') """ Explanation: (2b) Element-wise multiplication and dot product NumPy arrays support both element-wise multiplication and dot product. Element-wise multiplication occurs automatically when you use the * operator to multiply two ndarray objects of the same length. To perform the dot product you can use either np.dot() or np.ndarray.dot(). For example, if you had NumPy arrays x and y, you could compute their dot product four ways: np.dot(x, y), np.dot(y, x), x.dot(y), or y.dot(x). For this exercise, multiply the arrays u and v element-wise and compute their dot product. End of explanation """ # TODO: Replace <FILL IN> with appropriate code from numpy.linalg import inv A = np.matrix([[1,2,3,4],[5,6,7,8]]) print 'A:\n{0}'.format(A) # Print A transpose print '\nA transpose:\n{0}'.format(A.T) # Multiply A by A transpose AAt = A * A.T print '\nAAt:\n{0}'.format(AAt) # Invert AAt with np.linalg.inv() AAtInv = np.linalg.inv(AAt) print '\nAAtInv:\n{0}'.format(AAtInv) # Show inverse times matrix equals identity # We round due to numerical precision print '\nAAtInv * AAt:\n{0}'.format((AAtInv * AAt).round(4)) # TEST Matrix math (2c) Test.assertTrue(np.all(AAt == np.matrix([[30, 70], [70, 174]])), 'incorrect value for AAt') Test.assertTrue(np.allclose(AAtInv, np.matrix([[0.54375, -0.21875], [-0.21875, 0.09375]])), 'incorrect value for AAtInv') """ Explanation: (2c) Matrix math With NumPy it is very easy to perform matrix math. You can use np.matrix() to generate a NumPy matrix. 
Just pass a two-dimensional ndarray or a list of lists to the function. You can perform matrix math on NumPy matrices using *. You can transpose a matrix by calling numpy.matrix.transpose() or by using .T on the matrix object (e.g. myMatrix.T). Transposing a matrix produces a matrix where the new rows are the columns from the old matrix. For example: $$ \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \end{bmatrix}^\mathbf{\top} = \begin{bmatrix} 1 & 4 \\ 2 & 5 \\ 3 & 6 \end{bmatrix} $$ Inverting a matrix can be done using numpy.linalg.inv(). Note that only square matrices can be inverted, and square matrices are not guaranteed to have an inverse. If the inverse exists, then multiplying a matrix by its inverse will produce the identity matrix. $ \scriptsize ( \mathbf{A}^{-1} \mathbf{A} = \mathbf{I_n} ) $ The identity matrix $ \scriptsize \mathbf{I_n} $ has ones along its diagonal and zero elsewhere. $$ \mathbf{I_n} = \begin{bmatrix} 1 & 0 & 0 & \dots & 0 \\ 0 & 1 & 0 & \dots & 0 \\ 0 & 0 & 1 & \dots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \dots & 1 \end{bmatrix} $$ For this exercise, multiply $ \mathbf{A} $ times its transpose $ ( \mathbf{A}^\top ) $ and then calculate the inverse of the result $ ( [ \mathbf{A} \mathbf{A}^\top ]^{-1} ) $. End of explanation """ # TODO: Replace <FILL IN> with appropriate code features = np.array([1, 2, 3, 4]) print 'features:\n{0}'.format(features) # The last three elements of features lastThree = features[-3:] print '\nlastThree:\n{0}'.format(lastThree) # TEST Slices (3a) Test.assertTrue(np.all(lastThree == [2, 3, 4]), 'incorrect value for lastThree') """ Explanation: Part 3: Additional NumPy and Spark linear algebra (3a) Slices You can select a subset of a one-dimensional NumPy ndarray's elements by using slices. These slices operate the same way as slices for Python lists. For example, [0, 1, 2, 3][:2] returns the first two elements [0, 1]. NumPy, additionally, has more sophisticated slicing that allows slicing across multiple dimensions; however, you'll only need to use basic slices in future labs for this course. Note that if no index is placed to the left of a :, it is equivalent to starting at 0, and hence [0, 1, 2, 3][:2] and [0, 1, 2, 3][0:2] yield the same result. Similarly, if no index is placed to the right of a :, it is equivalent to slicing to the end of the object. Also, you can use negative indices to index relative to the end of the object, so [-2:] would return the last two elements of the object. For this exercise, return the last 3 elements of the array features. End of explanation """ # TODO: Replace <FILL IN> with appropriate code zeros = np.zeros(8) ones = np.ones(8) print 'zeros:\n{0}'.format(zeros) print '\nones:\n{0}'.format(ones) zerosThenOnes = np.hstack((zeros, ones)) # A 1 by 16 array zerosAboveOnes = np.vstack((zeros, ones)) # A 2 by 8 array print '\nzerosThenOnes:\n{0}'.format(zerosThenOnes) print '\nzerosAboveOnes:\n{0}'.format(zerosAboveOnes) # TEST Combining ndarray objects (3b) Test.assertTrue(np.all(zerosThenOnes == [0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]), 'incorrect value for zerosThenOnes') Test.assertTrue(np.all(zerosAboveOnes == [[0,0,0,0,0,0,0,0],[1,1,1,1,1,1,1,1]]), 'incorrect value for zerosAboveOnes') """ Explanation: (3b) Combining ndarray objects NumPy provides many functions for creating new arrays from existing arrays. We'll explore two functions: np.hstack(), which allows you to combine arrays column-wise, and np.vstack(), which allows you to combine arrays row-wise. 
Note that both np.hstack() and np.vstack() take in a tuple of arrays as their first argument. To horizontally combine three arrays a, b, and c, you would run np.hstack((a, b, c)). If we had two arrays: a = [1, 2, 3, 4] and b = [5, 6, 7, 8], we could use np.vstack((a, b)) to produce the two-dimensional array: $$ \begin{bmatrix} 1 & 2 & 3 & 4 \\ 5 & 6 & 7 & 8 \end{bmatrix} $$ For this exercise, you'll combine the zeros and ones arrays both horizontally (column-wise) and vertically (row-wise). Note that the result of stacking two arrays is an ndarray. If you need the result to be a matrix, you can call np.matrix() on the result, which will return a NumPy matrix. End of explanation """ from pyspark.mllib.linalg import DenseVector # TODO: Replace <FILL IN> with appropriate code numpyVector = np.array([-3, -4, 5]) print '\nnumpyVector:\n{0}'.format(numpyVector) # Create a DenseVector consisting of the values [3.0, 4.0, 5.0] myDenseVector = DenseVector([3.0, 4.0, 5.0]) # Calculate the dot product between the two vectors. denseDotProduct = myDenseVector.dot(numpyVector) print 'myDenseVector:\n{0}'.format(myDenseVector) print '\ndenseDotProduct:\n{0}'.format(denseDotProduct) # TEST PySpark's DenseVector (3c) Test.assertTrue(isinstance(myDenseVector, DenseVector), 'myDenseVector is not a DenseVector') Test.assertTrue(np.allclose(myDenseVector, np.array([3., 4., 5.])), 'incorrect value for myDenseVector') Test.assertTrue(np.allclose(denseDotProduct, 0.0), 'incorrect value for denseDotProduct') """ Explanation: (3c) PySpark's DenseVector PySpark provides a DenseVector class within the module pyspark.mllib.linalg. DenseVector is used to store arrays of values for use in PySpark. DenseVector actually stores values in a NumPy array and delegates calculations to that object. You can create a new DenseVector using DenseVector() and passing in an NumPy array or a Python list. DenseVector implements several functions. The only function needed for this course is DenseVector.dot(), which operates just like np.ndarray.dot(). Note that DenseVector stores all values as np.float64, so even if you pass in an NumPy array of integers, the resulting DenseVector will contain floating-point numbers. Also, DenseVector objects exist locally and are not inherently distributed. DenseVector objects can be used in the distributed setting by either passing functions that contain them to resilient distributed dataset (RDD) transformations or by distributing them directly as RDDs. You'll learn more about RDDs in the spark tutorial. For this exercise, create a DenseVector consisting of the values [3.0, 4.0, 5.0] and compute the dot product of this vector with numpyVector. End of explanation """ # Example function def addS(x): return x + 's' print type(addS) print addS print addS('cat') # As a lambda addSLambda = lambda x: x + 's' print type(addSLambda) print addSLambda print addSLambda('cat') # TODO: Replace <FILL IN> with appropriate code # Recall that: "lambda x, y: x + y" creates a function that adds together two numbers multiplyByTen = lambda x: x * 10 print multiplyByTen(5) # Note that the function still shows its name as <lambda> print '\n', multiplyByTen # TEST Python lambda expressions (4a) Test.assertEquals(multiplyByTen(10), 100, 'incorrect definition for multiplyByTen') """ Explanation: Part 4: Python lambda expressions (4a) Lambda is an anonymous function We can use a lambda expression to create a function. 
To do this, you type lambda followed by the names of the function's parameters separated by commas, followed by a :, and then the expression statement that the function will evaluate. For example, lambda x, y: x + y is an anonymous function that computes the sum of its two inputs. Lambda expressions return a function when evaluated. The function is not bound to any variable, which is why lambdas are associated with anonymous functions. However, it is possible to assign the function to a variable. Lambda expressions are particularly useful when you need to pass a simple function into another function. In that case, the lambda expression generates a function that is bound to the parameter being passed into the function. Below, we'll see an example of how we can bind the function returned by a lambda expression to a variable named addSLambda. From this example, we can see that lambda provides a shortcut for creating a simple function. Note that the behavior of the function created using def and the function created using lambda is equivalent. Both functions have the same type and return the same results. The only differences are the names and the way they were created. For this exercise, first run the two cells below to compare a function created using def with a corresponding anonymous function. Next, write your own lambda expression that creates a function that multiplies its input (a single parameter) by 10. Here are some additional references that explain lambdas: Lambda Functions, Lambda Tutorial, and Python Functions. End of explanation """ # Code using def that we will recreate with lambdas def plus(x, y): return x + y def minus(x, y): return x - y functions = [plus, minus] print functions[0](4, 5) print functions[1](4, 5) # TODO: Replace <FILL IN> with appropriate code # The first function should add two values, while the second function should subtract the second # value from the first value. lambdaFunctions = [lambda a, b: a + b , lambda a, b : a -b] print lambdaFunctions[0](4, 5) print lambdaFunctions[1](4, 5) # TEST lambda fewer steps than def (4b) Test.assertEquals(lambdaFunctions[0](10, 10), 20, 'incorrect first lambdaFunction') Test.assertEquals(lambdaFunctions[1](10, 10), 0, 'incorrect second lambdaFunction') """ Explanation: (4b) lambda fewer steps than def lambda generates a function and returns it, while def generates a function and assigns it to a name. The function returned by lambda also automatically returns the value of its expression statement, which reduces the amount of code that needs to be written. For this exercise, recreate the def behavior using lambda. Note that since a lambda expression returns a function, it can be used anywhere an object is expected. For example, you can create a list of functions where each function in the list was generated by a lambda expression. End of explanation """ # Examples. Note that the spacing has been modified to distinguish parameters from tuples. # One-parameter function a1 = lambda x: x[0] + x[1] a2 = lambda (x0, x1): x0 + x1 print 'a1( (3,4) ) = {0}'.format( a1( (3,4) ) ) print 'a2( (3,4) ) = {0}'.format( a2( (3,4) ) ) # Two-parameter function b1 = lambda x, y: (x[0] + y[0], x[1] + y[1]) b2 = lambda (x0, x1), (y0, y1): (x0 + y0, x1 + y1) print '\nb1( (1,2), (3,4) ) = {0}'.format( b1( (1,2), (3,4) ) ) print 'b2( (1,2), (3,4) ) = {0}'.format( b2( (1,2), (3,4) ) ) # TODO: Replace <FILL IN> with appropriate code # Use both syntaxes to create a function that takes in a tuple of two values and swaps their order # E.g. 
(1, 2) => (2, 1) swap1 = lambda x: (x[1], x[0]) swap2 = lambda (x0, x1): (x1, x0) print 'swap1((1, 2)) = {0}'.format(swap1((1, 2))) print 'swap2((1, 2)) = {0}'.format(swap2((1, 2))) # Using either syntax, create a function that takes in a tuple with three values and returns a tuple # of (2nd value, 3rd value, 1st value). E.g. (1, 2, 3) => (2, 3, 1) swapOrder = lambda x: (x[1], x[2], x[0]) print 'swapOrder((1, 2, 3)) = {0}'.format(swapOrder((1, 2, 3))) # Using either syntax, create a function that takes in three tuples each with two values. The # function should return a tuple with the values in the first position summed and the values in the # second position summed. E.g. (1, 2), (3, 4), (5, 6) => (1 + 3 + 5, 2 + 4 + 6) => (9, 12) sumThree = lambda a, b, c: (a[0] + b[0] + c[0], a[1] + b[1] + c[1]) print 'sumThree((1, 2), (3, 4), (5, 6)) = {0}'.format(sumThree((1, 2), (3, 4), (5, 6))) # TEST Lambda expression arguments (4c) Test.assertEquals(swap1((1, 2)), (2, 1), 'incorrect definition for swap1') Test.assertEquals(swap2((1, 2)), (2, 1), 'incorrect definition for swap2') Test.assertEquals(swapOrder((1, 2, 3)), (2, 3, 1), 'incorrect definition fo swapOrder') Test.assertEquals(sumThree((1, 2), (3, 4), (5, 6)), (9, 12), 'incorrect definition for sumThree') """ Explanation: (4c) Lambda expression arguments Lambda expressions can be used to generate functions that take in zero or more parameters. The syntax for lambda allows for multiple ways to define the same function. For example, we might want to create a function that takes in a single parameter, where the parameter is a tuple consisting of two values, and the function adds the two values together. The syntax could be either: lambda x: x[0] + x[1] or lambda (x0, x1): x0 + x1. If we called either function on the tuple (3, 4) it would return 7. Note that the second lambda relies on the tuple (3, 4) being unpacked automatically, which means that x0 is assigned the value 3 and x1 is assigned the value 4. As an other example, consider the following parameter lambda expressions: lambda x, y: (x[0] + y[0], x[1] + y[1]) and lambda (x0, x1), (y0, y1): (x0 + y0, x1 + y1). The result of applying either of these functions to tuples (1, 2) and (3, 4) would be the tuple (4, 6). For this exercise: you'll create one-parameter functions swap1 and swap2 that swap the order of a tuple; a one-parameter function swapOrder that takes in a tuple with three values and changes the order to: second element, third element, first element; and finally, a three-parameter function sumThree that takes in three tuples, each with two values, and returns a tuple containing two values: the sum of the first element of each tuple and the sum of second element of each tuple. End of explanation """ # Just run this code # This code will fail with a syntax error, as we can't use print in a lambda expression import traceback try: exec "lambda x: print x" except: traceback.print_exc() """ Explanation: (4d) Restrictions on lambda expressions Lambda expressions consist of a single expression statement and cannot contain other simple statements. In short, this means that the lambda expression needs to evaluate to a value and exist on a single logical line. If more complex logic is necessary, use def in place of lambda. Expression statements evaluate to a value (sometimes that value is None). Lambda expressions automatically return the value of their expression statement. In fact, a return statement in a lambda would raise a SyntaxError. 
The following Python keywords refer to simple statements that cannot be used in a lambda expression: assert, pass, del, print, return, yield, raise, break, continue, import, global, and exec. Also, note that assignment statements (=) and augmented assignment statements (e.g. +=) cannot be used either. End of explanation """ # Create a class to give our examples the same syntax as PySpark class FunctionalWrapper(object): def __init__(self, data): self.data = data def map(self, function): """Call `map` on the items in `data` using the provided `function`""" return FunctionalWrapper(map(function, self.data)) def reduce(self, function): """Call `reduce` on the items in `data` using the provided `function`""" return reduce(function, self.data) def filter(self, function): """Call `filter` on the items in `data` using the provided `function`""" return FunctionalWrapper(filter(function, self.data)) def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def __getattr__(self, name): return getattr(self.data, name) def __getitem__(self, k): return self.data.__getitem__(k) def __repr__(self): return 'FunctionalWrapper({0})'.format(repr(self.data)) def __str__(self): return 'FunctionalWrapper({0})'.format(str(self.data)) # Map example # Create some data mapData = FunctionalWrapper(range(5)) # Define a function to be applied to each element f = lambda x: x + 3 # Imperative programming: loop through and create a new object by applying f mapResult = FunctionalWrapper([]) # Initialize the result for element in mapData: mapResult.append(f(element)) # Apply f and save the new value print 'Result from for loop: {0}'.format(mapResult) # Functional programming: use map rather than a for loop print 'Result from map call: {0}'.format(mapData.map(f)) # Note that the results are the same but that the map function abstracts away the implementation # and requires less code # TODO: Replace <FILL IN> with appropriate code dataset = FunctionalWrapper(range(10)) # Multiply each element by 5 mapResult = dataset.map(lambda x: x * 5) # Keep the even elements # Note that "x % 2" evaluates to the remainder of x divided by 2 filterResult = dataset.filter(lambda x: x % 2 == 0) # Sum the elements reduceResult = dataset.reduce(lambda a, b: a + b) print 'mapResult: {0}'.format(mapResult) print '\nfilterResult: {0}'.format(filterResult) print '\nreduceResult: {0}'.format(reduceResult) # TEST Functional programming (4e) Test.assertEquals(mapResult, FunctionalWrapper([0, 5, 10, 15, 20, 25, 30, 35, 40, 45]), 'incorrect value for mapResult') Test.assertEquals(filterResult, FunctionalWrapper([0, 2, 4, 6, 8]), 'incorrect value for filterResult') Test.assertEquals(reduceResult, 45, 'incorrect value for reduceResult') """ Explanation: (4e) Functional programming The lambda examples we have shown so far have been somewhat contrived. This is because they were created to demonstrate the differences and similarities between lambda and def. An excellent use case for lambda expressions is functional programming. In functional programming, you will often pass functions to other functions as parameters, and lambda can be used to reduce the amount of code necessary and to make the code more readable. Some commonly used functions in functional programming are map, filter, and reduce. Map transforms a series of elements by applying a function individually to each element in the series. It then returns the series of transformed elements. 
Filter also applies a function individually to each element in a series; however, with filter, this function evaluates to True or False and only elements that evaluate to True are retained. Finally, reduce operates on pairs of elements in a series. It applies a function that takes in two values and returns a single value. Using this function, reduce is able to, iteratively, "reduce" a series to a single value. For this exercise, you'll create three simple lambda functions, one each for use in map, filter, and reduce. The map lambda will multiply its input by 5, the filter lambda will evaluate to True for even numbers, and the reduce lambda will add two numbers. Note that we have created a class called FunctionalWrapper so that the syntax for this exercise matches the syntax you'll see in PySpark. Note that map requires a one parameter function that returns a new value, filter requires a one parameter function that returns True or False, and reduce requires a two parameter function that combines the two parameters and returns a new value. End of explanation """ # Example of a mult-line expression statement # Note that placing parentheses around the expression allow it to exist on multiple lines without # causing a syntax error. (dataset .map(lambda x: x + 2) .reduce(lambda x, y: x * y)) # TODO: Replace <FILL IN> with appropriate code # Multiply the elements in dataset by five, keep just the even values, and sum those values finalSum = dataset.map(lambda x: x * 5).filter(lambda x: x % 2 == 0).reduce(lambda a, b: a + b) print finalSum # TEST Composability (4f) Test.assertEquals(finalSum, 100, 'incorrect value for finalSum') """ Explanation: (4f) Composability Since our methods for map and filter in the FunctionalWrapper class return FunctionalWrapper objects, we can compose (or chain) together our function calls. For example, dataset.map(f1).filter(f2).reduce(f3), where f1, f2, and f3 are functions or lambda expressions, first applies a map operation to dataset, then filters the result from map, and finally reduces the result from the first two operations. Note that when we compose (chain) an operation, the output of one operation becomes the input for the next operation, and operations are applied from left to right. It's likely you've seen chaining used with Python strings. For example, 'Split this'.lower().split(' ') first returns a new string object 'split this' and then split(' ') is called on that string to produce ['split', 'this']. For this exercise, reuse your lambda expressions from (4e) but apply them to dataset in the sequence: map, filter, reduce. Note that since we are composing the operations our result will be different than in (4e). Also, we can write our operations on separate lines to improve readability. End of explanation """ # Run this code to view Criteo's agreement # Note that some ad blocker software will prevent this IFrame from loading. # If this happens, open the webpage in a separate tab and follow the instructions from above. 
from IPython.lib.display import IFrame IFrame("http://labs.criteo.com/downloads/2014-kaggle-display-advertising-challenge-dataset/", 600, 350) # TODO: Replace <FILL IN> with appropriate code # Just replace <FILL IN> with the url for dac_sample.tar.gz import glob import os.path import tarfile import urllib import urlparse # Paste url, url should end with: dac_sample.tar.gz url = 'http://labs.criteo.com/wp-content/uploads/2015/04/dac_sample.tar.gz' url = url.strip() baseDir = os.path.join('data') inputPath = os.path.join('cs190', 'dac_sample.txt') fileName = os.path.join(baseDir, inputPath) inputDir = os.path.split(fileName)[0] def extractTar(check = False): # Find the zipped archive and extract the dataset tars = glob.glob('dac_sample*.tar.gz*') if check and len(tars) == 0: return False if len(tars) > 0: try: tarFile = tarfile.open(tars[0]) except tarfile.ReadError: if not check: print 'Unable to open tar.gz file. Check your URL.' return False tarFile.extract('dac_sample.txt', path=inputDir) print 'Successfully extracted: dac_sample.txt' return True else: print 'You need to retry the download with the correct url.' print ('Alternatively, you can upload the dac_sample.tar.gz file to your Jupyter root ' + 'directory') return False if os.path.isfile(fileName): print 'File is already available. Nothing to do.' elif extractTar(check = True): print 'tar.gz file was already available.' elif not url.endswith('dac_sample.tar.gz'): print 'Check your download url. Are you downloading the Sample dataset?' else: # Download the file and store it in the same directory as this notebook try: urllib.urlretrieve(url, os.path.basename(urlparse.urlsplit(url).path)) except IOError: print 'Unable to download and store: {0}'.format(url) extractTar() import os.path baseDir = os.path.join('data') inputPath = os.path.join('cs190', 'dac_sample.txt') fileName = os.path.join(baseDir, inputPath) if os.path.isfile(fileName): rawData = (sc .textFile(fileName, 2) .map(lambda x: x.replace('\t', ','))) # work with either ',' or '\t' separated data print rawData.take(1) rawDataCount = rawData.count() print rawDataCount # This line tests that the correct number of observations have been loaded assert rawDataCount == 100000, 'incorrect count for rawData' if rawDataCount == 100000: print 'Criteo data loaded successfully!' """ Explanation: Part 5: CTR data download Lab four will explore website click-through data provided by Criteo. To obtain the data, you must first accept Criteo's data sharing agreement. Below is the agreement from Criteo. After you accept the agreement, you can obtain the download URL by right-clicking on the "Download Sample" button and clicking "Copy link address" or "Copy Link Location", depending on your browser. Paste the URL into the # TODO cell below. The file is 8.4 MB compressed. The script below will download the file to the virtual machine (VM) and then extract the data. If running the cell below does not render a webpage, open the Criteo agreement in a separate browser tab. After you accept the agreement, you can obtain the download URL by right-clicking on the "Download Sample" button and clicking "Copy link address" or "Copy Link Location", depending on your browser. Paste the URL into the # TODO cell below. Note that the download could take a few minutes, depending upon your connection speed. End of explanation """
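# Optional sanity check (not part of the lab): the cell above leaves `rawData` as an
# RDD of comma-separated strings, so the lambda and map ideas from Part 4 apply to it
# directly. The sketch below is only an illustration and assumes the Criteo sample was
# downloaded and loaded successfully by the previous cell.
if rawDataCount == 100000:
    # split the first record into its fields with a one-parameter lambda
    firstRecordFields = rawData.map(lambda line: line.split(',')).first()
    print 'number of fields in the first record: {0}'.format(len(firstRecordFields))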
waltervh/BornAgain-tutorial
talks/day_3/complex_samples_2_M/diffuse_scattering.ipynb
gpl-3.0
# %load density_grad.py import numpy as np import bornagain as ba from bornagain import deg, angstrom, nm # define used SLDs sld_D2O = 6.34e-06 sld_polymer = 4.0e-06 sld_Si = 2.07e-06 h = 100.0*nm # thickness of the non-uniform polymer layer nslices = 100 # number of slices to slice the polymer layer def get_sld(z): """ function to calculate SLD(z) for the polymer layer """ return sld_polymer*np.exp(-z/h) def add_slices(multilayer): dz = h/nslices zvals = np.linspace(0, h, nslices, endpoint=False) + 0.5*dz for z in zvals: sld = get_sld(z) material = ba.MaterialBySLD("Polymer_{:.1f}".format(z), sld, 0.0) layer = ba.Layer(material, dz) multilayer.addLayer(layer) def get_sample(): # Defining Materials m_Si = ba.MaterialBySLD("Si", sld_Si, 0.0) m_Polymer = ba.MaterialBySLD("Polymer-0", sld_polymer, 0.0) m_D2O = ba.MaterialBySLD("D2O", sld_D2O, 0.0) # Defining Layers layer_si = ba.Layer(m_Si) layer_polymer = ba.Layer(m_Polymer, 2.0*nm) layer_d2o = ba.Layer(m_D2O) # Defining Multilayers multiLayer = ba.MultiLayer() multiLayer.addLayer(layer_si) multiLayer.addLayer(layer_polymer) add_slices(multiLayer) multiLayer.addLayer(layer_d2o) return multiLayer def get_simulation(): simulation = ba.SpecularSimulation() alpha_i_axis = ba.FixedBinAxis("alpha_i", 500, 0.0*deg, 6.5*deg) simulation.setBeamParameters(8.0*angstrom, alpha_i_axis) simulation.setBeamIntensity(1.0) # add wavelength distribution distr_1 = ba.DistributionCosine(8.0*angstrom, 0.8*angstrom/2.355) simulation.addParameterDistribution("*/Beam/Wavelength", distr_1, 50, 2.0, ba.RealLimits.positive()) return simulation def run_simulation(): sample = get_sample() simulation = get_simulation() simulation.setSample(sample) simulation.runSimulation() return simulation.result() if __name__ == '__main__': results = run_simulation() ba.plot_simulation_result(results, units=ba.AxesUnits.QSPACE) """ Explanation: SLD gradients For the moment, BornAgain does not support input of SLD profiles. However, one can approximate the smooth SLD profile by a large number of layers. See the example script below. End of explanation """ # plot an SLD profile import matplotlib.pyplot as plt x = np.linspace(0, h, nslices) y = get_sld(x) plt.plot(x, y*1e+6, color='k') plt.xlabel(r'$z$ (nm)') plt.ylabel(r'SLD$\cdot 10^6$') plt.title("SLD profile"); """ Explanation: Exercise Play with the script above. Change the function for SLD profile, add/remove/vary the beam divergence. How does it influence the simulation result? End of explanation """ # %load https://www.bornagainproject.org/files/python/simulation/ex01_BasicParticles/RotatedPyramids.py """ Rotated pyramids on top of substrate """ import bornagain as ba from bornagain import deg, angstrom, nm def get_sample(): """ Returns a sample with rotated pyramids on top of a substrate. 
""" # defining materials m_ambience = ba.HomogeneousMaterial("Air", 0.0, 0.0) m_substrate = ba.HomogeneousMaterial("Substrate", 6e-6, 2e-8) m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8) # collection of particles pyramid_ff = ba.FormFactorPyramid(40*nm, 20*nm, 54.73*deg) pyramid = ba.Particle(m_particle, pyramid_ff) transform = ba.RotationZ(45.*deg) particle_layout = ba.ParticleLayout() particle_layout.addParticle( pyramid, 1.0, ba.kvector_t(0.0, 0.0, 0.0), transform) air_layer = ba.Layer(m_ambience) air_layer.addLayout(particle_layout) substrate_layer = ba.Layer(m_substrate) multi_layer = ba.MultiLayer() multi_layer.addLayer(air_layer) multi_layer.addLayer(substrate_layer) return multi_layer def get_simulation(): """ Returns a GISAXS simulation with beam and detector defined. """ simulation = ba.GISASSimulation() simulation.setDetectorParameters(200, -2.0*deg, 2.0*deg, 200, 0.0*deg, 2.0*deg) simulation.setBeamParameters(1.0*angstrom, 0.2*deg, 0.0*deg) return simulation def run_simulation(): """ Runs simulation and returns intensity map. """ simulation = get_simulation() simulation.setSample(get_sample()) simulation.runSimulation() return simulation.result() if __name__ == '__main__': result = run_simulation() ba.plot_simulation_result(result) """ Explanation: Diffuse scattering: disordered samples Understanding the diffuse scattering in GISAS is a challenging task. The reason of the diffuse scattering is any kind of disorder in the sample. Possible reasons of diffuse scattering - Particle size distribution - Different kinds of particles - Disordered particle layout - Variety of particle rotations - Variety of lattice rotation - Polymer density fluctuations Particle rotation Let's take the Rotated Pyramids example and modiy it to account for rotational distribution of particles. First, we increase a bit the size of pyramids to get nicer images. Set the pyramid BaseEdge to be 40 nm and the pyramid Height to 20 nm: python pyramid_ff = ba.FormFactorPyramid(40*nm, 20*nm, 54.73*deg) and run the script below. End of explanation """ %load RotatedPyramids.py """ Explanation: Exercise Add the rotational distribution. If got stucked, see the solution or just run the line below. End of explanation """ # %load https://www.bornagainproject.org/files/python/simulation/ex03_InterferenceFunctions/SpheresAtHexLattice.py """ Spheres on a hexagonal lattice """ import bornagain as ba from bornagain import deg, angstrom, nm def get_sample(): """ Returns a sample with spherical particles on a substrate, forming a hexagonal 2D lattice. 
""" m_air = ba.HomogeneousMaterial("Air", 0.0, 0.0) m_substrate = ba.HomogeneousMaterial("Substrate", 6e-6, 2e-8) m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8) sphere_ff = ba.FormFactorFullSphere(10.0*nm) sphere = ba.Particle(m_particle, sphere_ff) particle_layout = ba.ParticleLayout() particle_layout.addParticle(sphere) interference = ba.InterferenceFunction2DLattice.createHexagonal(35.0*nm) pdf = ba.FTDecayFunction2DCauchy(100*nm, 100*nm) interference.setDecayFunction(pdf) particle_layout.setInterferenceFunction(interference) air_layer = ba.Layer(m_air) air_layer.addLayout(particle_layout) substrate_layer = ba.Layer(m_substrate, 0) multi_layer = ba.MultiLayer() multi_layer.addLayer(air_layer) multi_layer.addLayer(substrate_layer) return multi_layer def get_simulation(): """ Create and return GISAXS simulation with beam and detector defined """ simulation = ba.GISASSimulation() simulation.setDetectorParameters(200, -1.0*deg, 1.0*deg, 200, 0.0*deg, 1.0*deg) simulation.setBeamParameters(1.0*angstrom, 0.2*deg, 0.0*deg) return simulation def run_simulation(): """ Runs simulation and returns intensity map. """ simulation = get_simulation() simulation.setSample(get_sample()) simulation.runSimulation() return simulation.result() if __name__ == '__main__': result = run_simulation() ba.plot_simulation_result(result) """ Explanation: Lattice rotation Imaging techniques deliver information from a few $\mu m^2$, while GISAS typically averages the information over the whole sample surface. This explains the effect of diffuse scattering for samples that look well ordered on SEM/AFM images. Let's take a BornAgain example and modify it to account for variety of lattice rotations. First, we increase a bit lattice length to see more peaks and DecayLengths of the decay function to make peaks more narrow: python interference = ba.InterferenceFunction2DLattice.createHexagonal(35.0*nm) pdf = ba.FTDecayFunction2DCauchy(100*nm, 100*nm) End of explanation """ sample = get_sample() print(sample.parametersToString()) """ Explanation: Exercise Modify the script above to account for the lattice rotational distribution. Let's distribute lattice rotation angles using Gaussian PDF. Hint: the code below helps to get the list of sample parameters End of explanation """ %load rotated_lattice.py """ Explanation: Solution See the solution or just run the line below. 
End of explanation """ # %load polymer.py import numpy as np import bornagain as ba from bornagain import deg, angstrom, nm # KWS-1 detector parameters npx, npy = 128, 128 # number of detector pixels psize = 5.3 # pixel size, mm det_width, det_height = npx*psize, npy*psize # mm, detector size sdd = 20000.0 # mm, sample-detector distance # direct beam position beam_xpos, beam_ypos = 64.5, 64.5 # pixel # incident angle ai = 0.2 # degree wavelength = 5.0 # angstrom # beam beam_intensity = 1.0 # SLDs sld_Si = 2.074e-6 sld_Si_im = -2.3819e-11 sld_D2O = 6.356e-6 sld_D2O_im = -1.1295e-13 sld_polymer = 4.0e-6 sld_polymer_im = 0.0 def get_sample(): """ Returns a sample """ # defining materials m_si = ba.MaterialBySLD("Si", sld_Si, sld_Si_im) m_d2o = ba.MaterialBySLD("D2O", sld_D2O, sld_D2O_im) m_polymer = ba.MaterialBySLD("Polymer", sld_polymer, sld_polymer_im) # particle layout microgel_layout = ba.ParticleLayout() # weights for components w_particles = 0.005 w_oz =0.5 w_db = 1.0 - w_oz - w_particles # fluctuation component ff_oz = ba.FormFactorOrnsteinZernike(1000, 10.0*nm, 5.0*nm) particle_oz = ba.Particle(m_polymer, ff_oz) microgel_layout.addParticle(particle_oz, w_oz) # Debye-Buche component ff_db = ba.FormFactorDebyeBueche(1000, 20.0*nm) particle_db = ba.Particle(m_polymer, ff_db) microgel_layout.addParticle(particle_db, w_db) # collection of particles radius = 100.0*nm ff = ba.FormFactorTruncatedSphere(radius=radius, height=radius) particle = ba.Particle(m_polymer, ff) particle.setPosition(ba.kvector_t(0.0, 0.0, -1.0*radius)) microgel_layout.addParticle(particle, w_particles) # no interference function interference = ba.InterferenceFunctionNone() microgel_layout.setInterferenceFunction(interference) microgel_layout.setTotalParticleSurfaceDensity(1e-6) d2o_layer = ba.Layer(m_d2o) d2o_layer.addLayout(microgel_layout) si_layer = ba.Layer(m_si) multi_layer = ba.MultiLayer() multi_layer.addLayer(si_layer) multi_layer.addLayer(d2o_layer) return multi_layer def create_detector(): """ Creates and returns KWS-1 detector """ u0 = beam_xpos*psize # in mm v0 = beam_ypos*psize # in mm detector = ba.RectangularDetector(npx, det_width, npy, det_height) detector.setPerpendicularToDirectBeam(sdd, u0, v0) return detector def get_simulation(wl=5.0, alpha_i=ai): """ Returns a GISAS simulation with beam and detector defined """ simulation = ba.GISASSimulation() simulation.setBeamParameters(wl*ba.angstrom, alpha_i*ba.deg, 0.0*ba.deg) simulation.setDetector(create_detector()) simulation.setBeamIntensity(beam_intensity) return simulation def run_simulation(): """ Runs simulation and returns resulting intensity map. """ sample = get_sample() simulation = get_simulation(wavelength) simulation.setDetectorResolutionFunction(ba.ResolutionFunction2DGaussian(2.0*psize, 1.0*psize)) simulation.setSample(sample) simulation.setRegionOfInterest(20, 400, 650, 650) # options simulation.getOptions().setUseAvgMaterials(True) #simulation.getOptions().setIncludeSpecular(True) simulation.setTerminalProgressMonitor() simulation.runSimulation() return simulation.result() if __name__ == '__main__': result = run_simulation() ba.plot_simulation_result(result, units=ba.AxesUnits.QSPACE) """ Explanation: Remember, peak on the detector is observed when the reciprocal lattice is aligned so that the Ewald sphere intersects the peak. Thus, lattice rotations cause additional peaks at the GISAS pattern. Image from: K. 
Yager, GISAXS/GIWAX Data Analysis: Thinking in Reciprocal Space. Feel free to play with the example: change the kind of distribution, add beam divergence. How does it influence the simulation result? Polymer density fluctuations Polymer density fluctuations smear out the peaks and cause a lot of diffuse scattering. An example GISANS pattern is shown in the figure below [1]. The figure below illustrates the kinds of inhomogeneities found in polymer solutions. Blobs of polymer chains are represented as black lines and blobs of crosslinks as red dots. Schematic representations of (a) a two-dimensional reaction bath well above the chain gelation threshold, (b) an overswollen gel obtained by the addition of solvent and (c) dynamic, static and total concentration fluctuations with space coordinate r. For the sake of simplicity, the chains, which are random walks on this lattice, are not shown in the figure. Black dots represent the interchain crosslinks placed at random [2]. These inhomogeneities account for the diffuse scattering. To take them into account, two form factors are available in BornAgain: Form factor Ornstein-Zernike The Born form factor is implemented in BornAgain as $$F_{OZ}(\mathbf{q}) = \sqrt{\frac{I_0}{1 + \xi_{xy}^2\cdot(q_x^2 + q_y^2) + \xi_z^2\cdot q_z^2}}$$ where $\xi_{xy}$ and $\xi_z$ represent the inhomogeneity blob size (in nm) in the azimuthal and vertical directions, respectively. To create the Ornstein-Zernike form factor, use the statement python import bornagain as ba myff = ba.FormFactorOrnsteinZernike(I0, xi_xy, xi_z) Form factor Debye-Bueche The Born form factor is implemented in BornAgain as $$F_{DB}(\mathbf{q}) = \frac{\sqrt{I_0}}{1 + \xi^2\cdot|\mathbf{q}|^2}$$ To create it, use the statement python import bornagain as ba myff = ba.FormFactorDebyeBueche(I0, xi) Example script End of explanation """
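# A small, self-contained illustration (not part of the tutorial itself): the two form
# factors above are amplitudes, so the measured intensity goes as |F(q)|^2. The sketch
# below plots that q-dependence for isotropic correlation lengths; the numerical values
# are of the same order as those used in polymer.py (I0 = 1000, lengths of 10-20 nm)
# and are arbitrary examples, not recommendations.
import numpy as np
import matplotlib.pyplot as plt

q = np.linspace(0.01, 2.0, 200)          # momentum transfer, 1/nm
I0, xi_oz, xi_db = 1000.0, 10.0, 20.0    # amplitude and correlation lengths (nm)
I_oz = I0/(1.0 + xi_oz**2*q**2)          # |F_OZ|^2 with xi_xy = xi_z = xi_oz
I_db = I0/(1.0 + xi_db**2*q**2)**2       # |F_DB|^2
plt.loglog(q, I_oz, label="Ornstein-Zernike")
plt.loglog(q, I_db, label="Debye-Bueche")
plt.xlabel("q (1/nm)")
plt.ylabel("intensity (a.u.)")
plt.legend()
plt.show()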
cjbrasher/LipidFinder
docs/LipidFinder2_manual.ipynb
mit
config_params.py -m peakfilter """ Explanation: Welcome to LipidFinder 2 LipidFinder is an open-source Python workflow designed to facilitate further targeted lipidomics analysis. LipidFinder categorises and removes noise and artifacts from liquid chromatography/mass spectrometry (LC/MS) datasets, searches the outcome in different databases to obtain putative identification of lipids, and assigns them to a class based on the LIPID MAPS classification system. The software quickly distinguishes and quantifies lipid-like features from contaminants, adducts and noise in LC/MS datasets that have been pre-processed using XCMS. Although we advise users to use XCMS, LipidFinder accepts input files from other pre-processing software tools (e.g. SIEVE™ from ThermoFisher). LipidFinder 1.0: O'Connor et al., 2017. LipidFinder on LIPIDMAPS: Fahy et al., 2018. This Jupyter notebook will explain how to get your computer ready to use LipidFinder and will guide you through LipidFinder's workflow to process your data. Before continuing, note that your data needs to be high resolution MS (e.g. at least 60000ppm) and with long chromatography to separate isobaric lipids. The approach is not suitable for shotgun lipidomics, MS/MS or low resolution datasets. The demo data provided in the tests folder was generated with an Orbitrap Elite Mass Spectrometer, but the software is MS-platform independent. It is composed by 12 LC/MS runs of macrophage-like cells: first 6 samples from RAW cells (264.7) and last 6 samples from mouse wildtype (C57BL/6). Afterwards, the data was pre-processed with XCMS Online using the set of parameters Orbitrap II. LipidFinder can be downloaded from GitHub or accessed online via LIPID MAPS: http://www.lipidmaps.org/resources/tools/lipidfinder. For questions or suggestions regarding LipidFinder please contact us at lipidfinder@cardiff.ac.uk. LipidFinder is distributed under the MIT license (see LICENSE file for more information). Read before using LipidFinder for the first time LipidFinder has been designed for extensive clean-up of LC/MS datasets where a high degree of artifact removal is desired (e.g. discovery lipidomics). ESI-high resolution MS experiments contain many spurious signals that can arise from diverse sources, including common contaminants, adducts, in-source fragments, etc. LipidFinder is devised to work primarily as an add-on to XCMS, focusing on the clean-up of MS data files which have already been pre-processed for peak alignment and integration. Removal of these artifacts results in significantly cleaner datasets that perform better in downstream statistical analysis pipelines. Key points for users: LipidFinder shares some functionalities with XCMS (e.g. isotope removal or retention time correction), however these use different algorithms and perform differently. Thus, running these functionalities again in LipidFinder significantly improves the quality of XCMS datasets. LipidFinder includes extra functionalities specifically designed to improve artifact removal that are not in XCMS, including: contaminant, adduct and stack removal, solvent removal, mass reassignment and outlier correction. Extensive LC chromatography is essential with the LipidFinder approach to separate isobaric lipids which are a major complicating issue in lipidomics MS. This method is not suitable for “shotgun” applications. 
Qualitative and semi-quantitative comparison of lipids using high resolution MS is a powerful approach for screening all lipids, both unknown and known in an unbiased manner so long as it is used appropriately and its benefits and limitations are appreciated and acknowledged in full by the user. When correctly applied, it is an MS approach that is extremely powerful for lipid discovery and comparative profiling of biological samples. Nowadays, targeted MS/MS based methods can measure up to 500 or more known lipids, however, we know that lipidomes from mammalian cells and plasma can contain thousands, perhaps up to 5,000 or more per sample, with approximately 50% of these not appearing in any database (true unknowns). Thus there is huge potential for discovery of new lipids using untargeted approaches, something that cannot be accomplished using MS/MS. The LipidFinder approach is a hypothesis-generating screening tool designed specifically to clean up MS datasets that initially present with around 60K datapoints of which we estimate around 10% to be real lipids (Slatter et al., 2016). All observations of interest obtained using this method require rigorous validation using (i) manual examination of chromatographic data where significant differences are detected to ensure peak quality, followed by (ii) gold standard MS/MS methods, where the differences seen are for known lipids that can be purchased as standards. Database matches provided using LipidFinder are putative, and ions are assigned to putative lipid classes, not to actual molecular species because the isobaric nature of lipids makes this impossible. The LipidFinder approach is analogous to the older Affymetrix array methods in genomics, which also require strict validation using qPCR, etc. The approach is semi-quantitative, and reports on relative changes between datasets. Internal standards can be used if desired to calculate A/IS. Statistical analysis post-LipidFinder, which can be found on its online version, can be used to identify significantly-different lipids that then need to be followed up using targeted methods and fully validated, etc. 1. Configuring your computer LipidFinder has been tested for Python 3.6.3. This doesn't mean it won't work in earlier versions, but you might get errors or significant differences in the results. Some computer’s operating systems come bundled with Python, but it can also be downloaded and installed from the Python Software Foundation. The first step is to download LipidFinder's latest package file from GitHub: link Default installation LipidFinder's package includes all the instructions to install all the dependencies required. The easiest way to install LipidFinder is to open a command prompt/terminal, go to the folder where the downloaded Wheel file is located, and run the following command: shell pip install LipidFinder-2.*-py3-none-any.whl Alternative option: Anaconda Many users prefer to use Anaconda, an open-source distribution aimed to do Python data science and machine learning in Windows, Linux, and MacOS. To install LipidFinder, open an Anaconda prompt/terminal and run the following command: shell pip install LipidFinder-2.*-py3-none-any.whl Note: All the scripts include the .py extension that needs to be removed in Windows systems. 2. Pre-processing the input files We have included a thorough manual on how to pre-process your input files to leave them ready for LipidFinder in the docs folder, in a PDF document named Input_preparation_manual.pdf. 3. 
Setting up your parameters There are two different ways to set the parameters of each module of LipidFinder's workflow. The first one is to run the config_params script. This script requires as argument the module you want to configure: End of explanation """ config_params.py -m peakfilter -p my_parameters.json """ Explanation: Additionally, if you already have a parameters JSON file, you can load its values instead of LipidFinder's defaults (see example below). Once launched, the process will guide you through a question-answering system to configure each parameter. At the end, the program will ask for the path and file name in which you want to save the new set of parameters: End of explanation """ from LipidFinder.Configuration.LFParametersGUI import LFParametersGUI LFParametersGUI(module='amalgamator'); """ Explanation: The second option is through a Jupyter notebook (like this one). The Configuration module includes a graphical user interface (GUI) class to set up each parameter of the selected module interactively based on Jupyter's widgets. The following code shows an example of how to launch the GUI to set Amalgamator's parameters based on default values: End of explanation """ run_peakfilter.py -i tests/XCMS/negative.csv -o results -p tests/XCMS/params_peakfilter_negative.json """ Explanation: To use an existing parameters JSON file instead of the default values, you need to add the argument src=x, where x is the path to the JSON file, to the LFParametersGUI() call. Hint: once you have configured PeakFilter's parameters, you can use that JSON file as template for the other modules so you do not need to type in again the value of the parameters they all share (e.g. m/z column name). Warning: parameter firstSampleIndex needs to be changed when using PeakFilter's summary output file as input. We have included a help option to display the description, usage and other information of each Python script included in LipidFinder. For instance, for the previous script, the command to run would be the following: shell config_params.py -h 3.1. Backwards compatibility A user that has used LipidFinder 1.0 might be interested in repeating their experiments with the new version or run new ones under a similar parameter configuration. Thus, we have developed a script to transform the old parameters CSV file for PeakFilter and Amalgamator to the new parameters JSON files for the same modules. To run it you will also need the old adducts CSV file to update the lists of adduct pairs. We have included an example of these two files in the tests folder (available on GitHub) to illustrate how to use the script: shell update_params.py -p tests/LipidFinder-1.0/old_parameters.csv -a test/LipidFinder-1.0/old_adducts.csv -o results The script will generate two files: peakfilter.json and amalgamator.json. Be aware that these new parameters JSON files are incomplete (some new parameters have been introduced in LipidFinder 2.0) and will raise an error when used for their corresponding module. They should be handled first by config_params.py (-p argument) to fill in the missing parameters and generate a complete version. 4. LipidFinder's workflow LipidFinder's complete workflow is composed by three modules: PeakFilter, Amalgamator and MSSearch. We have developed one script for each one to ease their usage. Each module will create a log file (named after the module) that will save any information that might be useful for the user, e.g. which frames that have been removed by which stages during PeakFilter. 
A new run will append the new information at the end of the log file if it already exists, so no information is lost. A standard LipidFinder workflow would first process the pre-aligned data with PeakFilter (once for negative and once for positive ion polarity), afterwards Amalgamator would merge both files' information based on matching m/z values, and finally, MSSearch would identify and classify lipid-like features with the selected LIPID MAPS database. Alternatively, LipidFinder can also process a single file with PeakFilter and run MSSearch afterwards. The following examples are all based on the demo data pre-processed with XCMS, but we also provide an alternative to show LipidFinder's flexibility with SIEVE™ pre-processed files (just replace XCMS by SIEVE in each command). 4.1. PeakFilter PeakFilter is intended to clean-up the data from contaminants, adducts, stacks and other artifacts like in-source ion fragments and salt clusters. Among its parameters, PeakFilter has several "switches" for determined filtering functionalities that should be configured based on the experimental set-up that generated the input dataset. In most scenarios, an experiment involving LC/MS will generate two sets of data with different ion polarity: one negative and one positive. After they have been pre-processed separately with XCMS, we need to process each file individually with PeakFilter. Using our demo data available on GitHub under the tests folder, we show first how to process the negative polarity CSV file: End of explanation """ run_peakfilter.py -i tests/XCMS/positive.csv -o results -p tests/XCMS/params_peakfilter_positive.json """ Explanation: And then the positive one: End of explanation """ run_amalgamator.py -neg results/peakfilter_negative_summary.csv \ -pos results/peakfilter_positive_summary.csv \ -p tests/XCMS/params_amalgamator.json -o results """ Explanation: By default, PeakFilter will generate the complete filtered file and a summary output CSV file with the relevant information of each remaining frame. The output file names will always contain ion polarity, so running PeakFilter once for each polarity will not be a problem when choosing the same output folder (e.g. results in the previous examples). However, if we change the parameters and run PeakFilter again with the same output folder, we will overwrite any previous output file for the same polarity. 4.2. Amalgamator Amalgamator merges the output files for both negative and positive ion polarities generated with PeakFilter. By default, it will keep every frame that exists in only one of the input files, and for those with a match in both files, Amalgamator will retain the information of the one with the highest mean intensity for all samples tagging the selected source in the output file's Polarity column. End of explanation """ run_mssearch.py -i results/amalgamated.csv -o results \ -p tests/XCMS/params_mssearch.json """ Explanation: Duplicates are identified by comparing the negative file with the positive file within a small retention time tolerance and a corrected m/z tolerance (negative m/z + 2H<sup>+</sup>, followed by negative m/z + H<sup>+</sup> + CH3<sup>+</sup> for phosphotidylcholine and sphingomyelins with phosphocholine head group). Any hits are classed as a match. Alternatively, you can use the complete output files generated by PeakFilter as input files if you want to keep every column of your source data file. 4.3. 
MSSearch MSSearch has been designed to identify and classify lipid-like features from either PeakFilter or Amalgamator output file, using the knowledge available in LIPID MAPS. The output file will include all the matches for each m/z value in the input file (within the indicated tolerance in the parameters JSON file). The output file will also include every frame not found in the selected database, and they will be classified as unknown. Finally, MSSearch will create a lipid-category scatter plot of the results by m/z and retention time in a PDF file (by default). End of explanation """
AtmaMani/pyChakras
udemy_ml_bootcamp/Python-for-Data-Visualization/Matplotlib/Advanced Matplotlib Concepts.ipynb
mit
import matplotlib.pyplot as plt import numpy as np %matplotlib inline x = np.linspace(0, 5, 11) y = x**2 fig, axes = plt.subplots(1, 2, figsize=(10,4)) axes[0].plot(x, x**2, x, np.exp(x)) axes[0].set_title("Normal scale") axes[1].plot(x, x**2, x, np.exp(x)) axes[1].set_yscale("log") axes[1].set_title("Logarithmic scale (y)"); """ Explanation: Advanced Matplotlib Concepts Lecture In this lecture we cover some more advanced topics which you won't usually use as often. You can always reference the documentation for more resources! Logarithmic scale It is also possible to set a logarithmic scale for one or both axes. This functionality is in fact only one application of a more general transformation system in Matplotlib. Each of the axes' scales are set seperately using set_xscale and set_yscale methods which accept one parameter (with the value "log" in this case): End of explanation """ fig, ax = plt.subplots(figsize=(10, 4)) ax.plot(x, x**2, x, x**3, lw=2) ax.set_xticks([1, 2, 3, 4, 5]) ax.set_xticklabels([r'$\alpha$', r'$\beta$', r'$\gamma$', r'$\delta$', r'$\epsilon$'], fontsize=18) yticks = [0, 50, 100, 150] ax.set_yticks(yticks) ax.set_yticklabels(["$%.1f$" % y for y in yticks], fontsize=18); # use LaTeX formatted labels """ Explanation: Placement of ticks and custom tick labels We can explicitly determine where we want the axis ticks with set_xticks and set_yticks, which both take a list of values for where on the axis the ticks are to be placed. We can also use the set_xticklabels and set_yticklabels methods to provide a list of custom text labels for each tick location: End of explanation """ fig, ax = plt.subplots(1, 1) ax.plot(x, x**2, x, np.exp(x)) ax.set_title("scientific notation") ax.set_yticks([0, 50, 100, 150]) from matplotlib import ticker formatter = ticker.ScalarFormatter(useMathText=True) formatter.set_scientific(True) formatter.set_powerlimits((-1,1)) ax.yaxis.set_major_formatter(formatter) """ Explanation: There are a number of more advanced methods for controlling major and minor tick placement in matplotlib figures, such as automatic placement according to different policies. See http://matplotlib.org/api/ticker_api.html for details. Scientific notation With large numbers on axes, it is often better use scientific notation: End of explanation """ # distance between x and y axis and the numbers on the axes matplotlib.rcParams['xtick.major.pad'] = 5 matplotlib.rcParams['ytick.major.pad'] = 5 fig, ax = plt.subplots(1, 1) ax.plot(x, x**2, x, np.exp(x)) ax.set_yticks([0, 50, 100, 150]) ax.set_title("label and axis spacing") # padding between axis label and axis numbers ax.xaxis.labelpad = 5 ax.yaxis.labelpad = 5 ax.set_xlabel("x") ax.set_ylabel("y"); # restore defaults matplotlib.rcParams['xtick.major.pad'] = 3 matplotlib.rcParams['ytick.major.pad'] = 3 """ Explanation: Axis number and axis label spacing End of explanation """ fig, ax = plt.subplots(1, 1) ax.plot(x, x**2, x, np.exp(x)) ax.set_yticks([0, 50, 100, 150]) ax.set_title("title") ax.set_xlabel("x") ax.set_ylabel("y") fig.subplots_adjust(left=0.15, right=.9, bottom=0.1, top=0.9); """ Explanation: Axis position adjustments Unfortunately, when saving figures the labels are sometimes clipped, and it can be necessary to adjust the positions of axes a little bit. 
This can be done using subplots_adjust: End of explanation """ fig, axes = plt.subplots(1, 2, figsize=(10,3)) # default grid appearance axes[0].plot(x, x**2, x, x**3, lw=2) axes[0].grid(True) # custom grid appearance axes[1].plot(x, x**2, x, x**3, lw=2) axes[1].grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5) """ Explanation: Axis grid With the grid method in the axis object, we can turn on and off grid lines. We can also customize the appearance of the grid lines using the same keyword arguments as the plot function: End of explanation """ fig, ax = plt.subplots(figsize=(6,2)) ax.spines['bottom'].set_color('blue') ax.spines['top'].set_color('blue') ax.spines['left'].set_color('red') ax.spines['left'].set_linewidth(2) # turn off axis spine to the right ax.spines['right'].set_color("none") ax.yaxis.tick_left() # only ticks on the left side """ Explanation: Axis spines We can also change the properties of axis spines: End of explanation """ fig, ax1 = plt.subplots() ax1.plot(x, x**2, lw=2, color="blue") ax1.set_ylabel(r"area $(m^2)$", fontsize=18, color="blue") for label in ax1.get_yticklabels(): label.set_color("blue") ax2 = ax1.twinx() ax2.plot(x, x**3, lw=2, color="red") ax2.set_ylabel(r"volume $(m^3)$", fontsize=18, color="red") for label in ax2.get_yticklabels(): label.set_color("red") """ Explanation: Twin axes Sometimes it is useful to have dual x or y axes in a figure; for example, when plotting curves with different units together. Matplotlib supports this with the twinx and twiny functions: End of explanation """ fig, ax = plt.subplots() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.spines['bottom'].set_position(('data',0)) # set position of x spine to x=0 ax.yaxis.set_ticks_position('left') ax.spines['left'].set_position(('data',0)) # set position of y spine to y=0 xx = np.linspace(-0.75, 1., 100) ax.plot(xx, xx**3); """ Explanation: Axes where x and y is zero End of explanation """ n = np.array([0,1,2,3,4,5]) fig, axes = plt.subplots(1, 4, figsize=(12,3)) axes[0].scatter(xx, xx + 0.25*np.random.randn(len(xx))) axes[0].set_title("scatter") axes[1].step(n, n**2, lw=2) axes[1].set_title("step") axes[2].bar(n, n**2, align="center", width=0.5, alpha=0.5) axes[2].set_title("bar") axes[3].fill_between(x, x**2, x**3, color="green", alpha=0.5); axes[3].set_title("fill_between"); """ Explanation: Other 2D plot styles In addition to the regular plot method, there are a number of other functions for generating different kind of plots. See the matplotlib plot gallery for a complete list of available plot types: http://matplotlib.org/gallery.html. Some of the more useful ones are show below: End of explanation """ fig, ax = plt.subplots() ax.plot(xx, xx**2, xx, xx**3) ax.text(0.15, 0.2, r"$y=x^2$", fontsize=20, color="blue") ax.text(0.65, 0.1, r"$y=x^3$", fontsize=20, color="green"); """ Explanation: Text annotation Annotating text in matplotlib figures can be done using the text function. 
It supports LaTeX formatting just like axis label texts and titles: End of explanation """ fig, ax = plt.subplots(2, 3) fig.tight_layout() """ Explanation: Figures with multiple subplots and insets Axes can be added to a matplotlib Figure canvas manually using fig.add_axes or using a sub-figure layout manager such as subplots, subplot2grid, or gridspec: subplots End of explanation """ fig = plt.figure() ax1 = plt.subplot2grid((3,3), (0,0), colspan=3) ax2 = plt.subplot2grid((3,3), (1,0), colspan=2) ax3 = plt.subplot2grid((3,3), (1,2), rowspan=2) ax4 = plt.subplot2grid((3,3), (2,0)) ax5 = plt.subplot2grid((3,3), (2,1)) fig.tight_layout() """ Explanation: subplot2grid End of explanation """ import matplotlib.gridspec as gridspec fig = plt.figure() gs = gridspec.GridSpec(2, 3, height_ratios=[2,1], width_ratios=[1,2,1]) for g in gs: ax = fig.add_subplot(g) fig.tight_layout() """ Explanation: gridspec End of explanation """ fig, ax = plt.subplots() ax.plot(xx, xx**2, xx, xx**3) fig.tight_layout() # inset inset_ax = fig.add_axes([0.2, 0.55, 0.35, 0.35]) # X, Y, width, height inset_ax.plot(xx, xx**2, xx, xx**3) inset_ax.set_title('zoom near origin') # set axis range inset_ax.set_xlim(-.2, .2) inset_ax.set_ylim(-.005, .01) # set axis tick locations inset_ax.set_yticks([0, 0.005, 0.01]) inset_ax.set_xticks([-0.1,0,.1]); """ Explanation: add_axes Manually adding axes with add_axes is useful for adding insets to figures: End of explanation """ alpha = 0.7 phi_ext = 2 * np.pi * 0.5 def flux_qubit_potential(phi_m, phi_p): return 2 + alpha - 2 * np.cos(phi_p) * np.cos(phi_m) - alpha * np.cos(phi_ext - 2*phi_p) phi_m = np.linspace(0, 2*np.pi, 100) phi_p = np.linspace(0, 2*np.pi, 100) X,Y = np.meshgrid(phi_p, phi_m) Z = flux_qubit_potential(X, Y).T """ Explanation: Colormap and contour figures Colormaps and contour figures are useful for plotting functions of two variables. In most of these functions we will use a colormap to encode one dimension of the data. There are a number of predefined colormaps. It is relatively straightforward to define custom colormaps. For a list of pre-defined colormaps, see: http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps End of explanation """ fig, ax = plt.subplots() p = ax.pcolor(X/(2*np.pi), Y/(2*np.pi), Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max()) cb = fig.colorbar(p, ax=ax) """ Explanation: pcolor End of explanation """ fig, ax = plt.subplots() im = ax.imshow(Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1]) im.set_interpolation('bilinear') cb = fig.colorbar(im, ax=ax) """ Explanation: imshow End of explanation """ fig, ax = plt.subplots() cnt = ax.contour(Z, cmap=matplotlib.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1]) """ Explanation: contour End of explanation """ from mpl_toolkits.mplot3d.axes3d import Axes3D """ Explanation: 3D figures To use 3D graphics in matplotlib, we first need to create an instance of the Axes3D class. 3D axes can be added to a matplotlib figure canvas in exactly the same way as 2D axes; or, more conveniently, by passing a projection='3d' keyword argument to the add_axes or add_subplot methods. 
End of explanation """ fig = plt.figure(figsize=(14,6)) # `ax` is a 3D-aware axis instance because of the projection='3d' keyword argument to add_subplot ax = fig.add_subplot(1, 2, 1, projection='3d') p = ax.plot_surface(X, Y, Z, rstride=4, cstride=4, linewidth=0) # surface_plot with color grading and color bar ax = fig.add_subplot(1, 2, 2, projection='3d') p = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, linewidth=0, antialiased=False) cb = fig.colorbar(p, shrink=0.5) """ Explanation: Surface plots End of explanation """ fig = plt.figure(figsize=(8,6)) ax = fig.add_subplot(1, 1, 1, projection='3d') p = ax.plot_wireframe(X, Y, Z, rstride=4, cstride=4) """ Explanation: Wire-frame plot End of explanation """ fig = plt.figure(figsize=(8,6)) ax = fig.add_subplot(1,1,1, projection='3d') ax.plot_surface(X, Y, Z, rstride=4, cstride=4, alpha=0.25) cset = ax.contour(X, Y, Z, zdir='z', offset=-np.pi, cmap=matplotlib.cm.coolwarm) cset = ax.contour(X, Y, Z, zdir='x', offset=-np.pi, cmap=matplotlib.cm.coolwarm) cset = ax.contour(X, Y, Z, zdir='y', offset=3*np.pi, cmap=matplotlib.cm.coolwarm) ax.set_xlim3d(-np.pi, 2*np.pi); ax.set_ylim3d(0, 3*np.pi); ax.set_zlim3d(-np.pi, 2*np.pi); """ Explanation: Contour plots with projections End of explanation """
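Supplementary sketch (not part of the original lecture notebook above): the snippet below combines several of the techniques covered — a custom tick formatter, a twin y-axis with a logarithmic scale, and a manually placed inset — in one self-contained figure. The data, the FuncFormatter unit suffix, and the inset coordinates are illustrative choices of mine, not values from the lecture.

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

x = np.linspace(0, 5, 100)

fig, ax1 = plt.subplots(figsize=(8, 4))

# Left axis: x**2 with custom tick labels produced by a FuncFormatter
ax1.plot(x, x**2, color="blue", lw=2)
ax1.set_ylabel(r"$x^2$", fontsize=14, color="blue")
ax1.yaxis.set_major_formatter(FuncFormatter(lambda val, pos: "%.0f u" % val))

# Right axis: exp(x) on a logarithmic scale, sharing the x axis via twinx
ax2 = ax1.twinx()
ax2.plot(x, np.exp(x), color="red", lw=2)
ax2.set_yscale("log")
ax2.set_ylabel(r"$e^x$ (log scale)", fontsize=14, color="red")

# Manually added inset axes zooming in near the origin
inset_ax = fig.add_axes([0.2, 0.55, 0.25, 0.25])  # [left, bottom, width, height]
inset_ax.plot(x, x**2, color="blue")
inset_ax.set_xlim(0, 1)
inset_ax.set_ylim(0, 1)
inset_ax.set_title("zoom near origin", fontsize=8)
```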
trolldbois/python-haystack
docs/Haystack_basic_usage.ipynb
gpl-3.0
!cat ../test/src/test-ctypes3.c """ Explanation: Let's look at a basic usage example for Haystack. First we have a program which allocates structures on the HEAP: End of explanation """ !clang2py ../test/src/test-ctypes3.c #-o ../test/src/clang3_gen64.py """ Explanation: Then, using ctypeslib, we have generated python ctypes records for the same structures. End of explanation """ !ls -hsw 1 ../test/src/test-ctypes3.64.dump/ print '' !cat ../test/src/test-ctypes3.64.stdout """ Explanation: We have run the compiled C program, and dumped its memory mappings to files: End of explanation """ import haystack memdumpname = '../test/src/test-ctypes3.64.dump' # we need a memory dump loader from haystack import dump_loader memory_handler = dump_loader.load(memdumpname) print memory_handler """ Explanation: Let's load that memory dump in haystack. End of explanation """ # we need to add our test path to the env import sys sys.path.append('../test/src/') py_modulename = 'ctypes3_gen64' # load this module with haystack my_model = memory_handler.get_model() test3 = my_model.import_module("ctypes3_gen64") print test3.__dict__.keys() """ Explanation: Now the first feature of haystack is to search a specific structure in memory. End of explanation """ py_class = test3.struct_test3 results = haystack.search_record(memory_handler, py_class) print results """ Explanation: Now we can search for the structure in memory. End of explanation """ out = haystack.output_to_string(memory_handler, results) print out """ Explanation: The output is a list of ctypes records and their memory offset. We can also get a better formatted string output: End of explanation """ out = haystack.output_to_python(memory_handler, results) print out for x in out: print "@0x%x val1: 0x%x , val1b: 0x%x "%(x[1], x[0].val1, x[0].val1b) """ Explanation: Or even a translation of these records to plain old python dynamically generated objects. End of explanation """ !cat ../test/src/ctypes3.constraints """ Explanation: There should only be 3 of struct_test3. The true instance are at 0x202a030, 0x202a070 and 0x202a0b0 oh-oh, there is a bit too many chunks in memory which could be possible struct_test3. Let's apply more constraints to the search End of explanation """ from haystack import constraints handler = constraints.ConstraintsConfigHandler() my_constraints = handler.read('../test/src/ctypes3.constraints') # now use them results = haystack.search_record(memory_handler, py_class, my_constraints) print haystack.output_to_string(memory_handler, results) for x in results: print "@0x%x val1: 0x%x , val1b: 0x%x "%(x[1], x[0].val1, x[0].val1b) """ Explanation: Here we say that val1 and val1b fields should both be 0xdeadbeef. Let's apply these constraints to the search. End of explanation """
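As a convenience, the workflow demonstrated above (load a dump, import the ctypes-generated module, optionally read a constraints file, search, convert to plain Python objects) can be bundled into one helper. This is only a sketch built from the calls shown in this notebook, not part of the haystack API; the function name find_records and its arguments are my own, and the generated module must already be importable (e.g. via the sys.path.append step used above).

```python
import haystack
from haystack import dump_loader, constraints

def find_records(dumpname, module_name, class_name, constraints_file=None):
    """Search one record type in a memory dump and return (record, address) pairs."""
    memory_handler = dump_loader.load(dumpname)
    model = memory_handler.get_model()
    module = model.import_module(module_name)
    py_class = getattr(module, class_name)
    if constraints_file is not None:
        my_constraints = constraints.ConstraintsConfigHandler().read(constraints_file)
        results = haystack.search_record(memory_handler, py_class, my_constraints)
    else:
        results = haystack.search_record(memory_handler, py_class)
    # Convert the ctypes records to plain python objects, as shown above
    return haystack.output_to_python(memory_handler, results)

# Example call, reusing the paths from this notebook:
# hits = find_records('../test/src/test-ctypes3.64.dump', 'ctypes3_gen64',
#                     'struct_test3', '../test/src/ctypes3.constraints')
# for record, address in hits:
#     print "@0x%x val1: 0x%x" % (address, record.val1)
```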
keras-team/keras-io
examples/nlp/ipynb/lstm_seq2seq.ipynb
apache-2.0
import numpy as np import tensorflow as tf from tensorflow import keras """ Explanation: Character-level recurrent sequence-to-sequence model Author: fchollet<br> Date created: 2017/09/29<br> Last modified: 2020/04/26<br> Description: Character-level recurrent sequence-to-sequence model. Introduction This example demonstrates how to implement a basic character-level recurrent sequence-to-sequence model. We apply it to translating short English sentences into short French sentences, character-by-character. Note that it is fairly unusual to do character-level machine translation, as word-level models are more common in this domain. Summary of the algorithm We start with input sequences from a domain (e.g. English sentences) and corresponding target sequences from another domain (e.g. French sentences). An encoder LSTM turns input sequences to 2 state vectors (we keep the last LSTM state and discard the outputs). A decoder LSTM is trained to turn the target sequences into the same sequence but offset by one timestep in the future, a training process called "teacher forcing" in this context. It uses as initial state the state vectors from the encoder. Effectively, the decoder learns to generate targets[t+1...] given targets[...t], conditioned on the input sequence. In inference mode, when we want to decode unknown input sequences, we: Encode the input sequence into state vectors Start with a target sequence of size 1 (just the start-of-sequence character) Feed the state vectors and 1-char target sequence to the decoder to produce predictions for the next character Sample the next character using these predictions (we simply use argmax). Append the sampled character to the target sequence Repeat until we generate the end-of-sequence character or we hit the character limit. Setup End of explanation """ !!curl -O http://www.manythings.org/anki/fra-eng.zip !!unzip fra-eng.zip """ Explanation: Download the data End of explanation """ batch_size = 64 # Batch size for training. epochs = 100 # Number of epochs to train for. latent_dim = 256 # Latent dimensionality of the encoding space. num_samples = 10000 # Number of samples to train on. # Path to the data txt file on disk. data_path = "fra.txt" """ Explanation: Configuration End of explanation """ # Vectorize the data. input_texts = [] target_texts = [] input_characters = set() target_characters = set() with open(data_path, "r", encoding="utf-8") as f: lines = f.read().split("\n") for line in lines[: min(num_samples, len(lines) - 1)]: input_text, target_text, _ = line.split("\t") # We use "tab" as the "start sequence" character # for the targets, and "\n" as "end sequence" character. 
target_text = "\t" + target_text + "\n" input_texts.append(input_text) target_texts.append(target_text) for char in input_text: if char not in input_characters: input_characters.add(char) for char in target_text: if char not in target_characters: target_characters.add(char) input_characters = sorted(list(input_characters)) target_characters = sorted(list(target_characters)) num_encoder_tokens = len(input_characters) num_decoder_tokens = len(target_characters) max_encoder_seq_length = max([len(txt) for txt in input_texts]) max_decoder_seq_length = max([len(txt) for txt in target_texts]) print("Number of samples:", len(input_texts)) print("Number of unique input tokens:", num_encoder_tokens) print("Number of unique output tokens:", num_decoder_tokens) print("Max sequence length for inputs:", max_encoder_seq_length) print("Max sequence length for outputs:", max_decoder_seq_length) input_token_index = dict([(char, i) for i, char in enumerate(input_characters)]) target_token_index = dict([(char, i) for i, char in enumerate(target_characters)]) encoder_input_data = np.zeros( (len(input_texts), max_encoder_seq_length, num_encoder_tokens), dtype="float32" ) decoder_input_data = np.zeros( (len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype="float32" ) decoder_target_data = np.zeros( (len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype="float32" ) for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)): for t, char in enumerate(input_text): encoder_input_data[i, t, input_token_index[char]] = 1.0 encoder_input_data[i, t + 1 :, input_token_index[" "]] = 1.0 for t, char in enumerate(target_text): # decoder_target_data is ahead of decoder_input_data by one timestep decoder_input_data[i, t, target_token_index[char]] = 1.0 if t > 0: # decoder_target_data will be ahead by one timestep # and will not include the start character. decoder_target_data[i, t - 1, target_token_index[char]] = 1.0 decoder_input_data[i, t + 1 :, target_token_index[" "]] = 1.0 decoder_target_data[i, t:, target_token_index[" "]] = 1.0 """ Explanation: Prepare the data End of explanation """ # Define an input sequence and process it. encoder_inputs = keras.Input(shape=(None, num_encoder_tokens)) encoder = keras.layers.LSTM(latent_dim, return_state=True) encoder_outputs, state_h, state_c = encoder(encoder_inputs) # We discard `encoder_outputs` and only keep the states. encoder_states = [state_h, state_c] # Set up the decoder, using `encoder_states` as initial state. decoder_inputs = keras.Input(shape=(None, num_decoder_tokens)) # We set up our decoder to return full output sequences, # and to return internal states as well. We don't use the # return states in the training model, but we will use them in inference. 
decoder_lstm = keras.layers.LSTM(latent_dim, return_sequences=True, return_state=True) decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states) decoder_dense = keras.layers.Dense(num_decoder_tokens, activation="softmax") decoder_outputs = decoder_dense(decoder_outputs) # Define the model that will turn # `encoder_input_data` & `decoder_input_data` into `decoder_target_data` model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs) """ Explanation: Build the model End of explanation """ model.compile( optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"] ) model.fit( [encoder_input_data, decoder_input_data], decoder_target_data, batch_size=batch_size, epochs=epochs, validation_split=0.2, ) # Save model model.save("s2s") """ Explanation: Train the model End of explanation """ # Define sampling models # Restore the model and construct the encoder and decoder. model = keras.models.load_model("s2s") encoder_inputs = model.input[0] # input_1 encoder_outputs, state_h_enc, state_c_enc = model.layers[2].output # lstm_1 encoder_states = [state_h_enc, state_c_enc] encoder_model = keras.Model(encoder_inputs, encoder_states) decoder_inputs = model.input[1] # input_2 decoder_state_input_h = keras.Input(shape=(latent_dim,)) decoder_state_input_c = keras.Input(shape=(latent_dim,)) decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c] decoder_lstm = model.layers[3] decoder_outputs, state_h_dec, state_c_dec = decoder_lstm( decoder_inputs, initial_state=decoder_states_inputs ) decoder_states = [state_h_dec, state_c_dec] decoder_dense = model.layers[4] decoder_outputs = decoder_dense(decoder_outputs) decoder_model = keras.Model( [decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states ) # Reverse-lookup token index to decode sequences back to # something readable. reverse_input_char_index = dict((i, char) for char, i in input_token_index.items()) reverse_target_char_index = dict((i, char) for char, i in target_token_index.items()) def decode_sequence(input_seq): # Encode the input as state vectors. states_value = encoder_model.predict(input_seq) # Generate empty target sequence of length 1. target_seq = np.zeros((1, 1, num_decoder_tokens)) # Populate the first character of target sequence with the start character. target_seq[0, 0, target_token_index["\t"]] = 1.0 # Sampling loop for a batch of sequences # (to simplify, here we assume a batch of size 1). stop_condition = False decoded_sentence = "" while not stop_condition: output_tokens, h, c = decoder_model.predict([target_seq] + states_value) # Sample a token sampled_token_index = np.argmax(output_tokens[0, -1, :]) sampled_char = reverse_target_char_index[sampled_token_index] decoded_sentence += sampled_char # Exit condition: either hit max length # or find stop character. if sampled_char == "\n" or len(decoded_sentence) > max_decoder_seq_length: stop_condition = True # Update the target sequence (of length 1). target_seq = np.zeros((1, 1, num_decoder_tokens)) target_seq[0, 0, sampled_token_index] = 1.0 # Update states states_value = [h, c] return decoded_sentence """ Explanation: Run inference (sampling) encode input and retrieve initial decoder state run one step of decoder with this initial state and a "start of sequence" token as target. Output will be the next target token. Repeat with the current target token and current states End of explanation """ for seq_index in range(20): # Take one sequence (part of the training set) # for trying out decoding. 
input_seq = encoder_input_data[seq_index : seq_index + 1] decoded_sentence = decode_sequence(input_seq) print("-") print("Input sentence:", input_texts[seq_index]) print("Decoded sentence:", decoded_sentence) """ Explanation: You can now generate decoded sentences as such: End of explanation """
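A possible extension (not part of the original example): to translate a sentence that is not in the training set, the raw string first has to be one-hot encoded exactly the way encoder_input_data was built above. The helper below is a hedged sketch that reuses input_token_index, max_encoder_seq_length and num_encoder_tokens from this notebook; characters the encoder never saw are mapped to the space token and over-long inputs are truncated.

```python
def encode_input_text(text):
    seq = np.zeros((1, max_encoder_seq_length, num_encoder_tokens), dtype="float32")
    text = text[:max_encoder_seq_length]  # truncate inputs longer than the training maximum
    for t, char in enumerate(text):
        # fall back to the space token for unseen characters
        seq[0, t, input_token_index.get(char, input_token_index[" "])] = 1.0
    # pad the remaining timesteps with spaces, as done for encoder_input_data
    seq[0, len(text):, input_token_index[" "]] = 1.0
    return seq

print("Decoded sentence:", decode_sequence(encode_input_text("How are you?")))
```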
Kaggle/learntools
notebooks/ml_intermediate/raw/tut2.ipynb
apache-2.0
#$HIDE$ import pandas as pd from sklearn.model_selection import train_test_split # Load the data data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv') # Select target y = data.Price # To keep things simple, we'll use only numerical predictors melb_predictors = data.drop(['Price'], axis=1) X = melb_predictors.select_dtypes(exclude=['object']) # Divide data into training and validation subsets X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0) """ Explanation: In this tutorial, you will learn three approaches to dealing with missing values. Then you'll compare the effectiveness of these approaches on a real-world dataset. Introduction There are many ways data can end up with missing values. For example, - A 2 bedroom house won't include a value for the size of a third bedroom. - A survey respondent may choose not to share his income. Most machine learning libraries (including scikit-learn) give an error if you try to build a model using data with missing values. So you'll need to choose one of the strategies below. Three Approaches 1) A Simple Option: Drop Columns with Missing Values The simplest option is to drop columns with missing values. Unless most values in the dropped columns are missing, the model loses access to a lot of (potentially useful!) information with this approach. As an extreme example, consider a dataset with 10,000 rows, where one important column is missing a single entry. This approach would drop the column entirely! 2) A Better Option: Imputation Imputation fills in the missing values with some number. For instance, we can fill in the mean value along each column. The imputed value won't be exactly right in most cases, but it usually leads to more accurate models than you would get from dropping the column entirely. 3) An Extension To Imputation Imputation is the standard approach, and it usually works well. However, imputed values may be systematically above or below their actual values (which weren't collected in the dataset). Or rows with missing values may be unique in some other way. In that case, your model would make better predictions by considering which values were originally missing. In this approach, we impute the missing values, as before. And, additionally, for each column with missing entries in the original dataset, we add a new column that shows the location of the imputed entries. In some cases, this will meaningfully improve results. In other cases, it doesn't help at all. Example In the example, we will work with the Melbourne Housing dataset. Our model will use information such as the number of rooms and land size to predict home price. We won't focus on the data loading step. Instead, you can imagine you are at a point where you already have the training and validation data in X_train, X_valid, y_train, and y_valid. End of explanation """ #$HIDE$ from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error # Function for comparing different approaches def score_dataset(X_train, X_valid, y_train, y_valid): model = RandomForestRegressor(n_estimators=10, random_state=0) model.fit(X_train, y_train) preds = model.predict(X_valid) return mean_absolute_error(y_valid, preds) """ Explanation: Define Function to Measure Quality of Each Approach We define a function score_dataset() to compare different approaches to dealing with missing values. This function reports the mean absolute error (MAE) from a random forest model. 
End of explanation """ # Get names of columns with missing values cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()] # Drop columns in training and validation data reduced_X_train = X_train.drop(cols_with_missing, axis=1) reduced_X_valid = X_valid.drop(cols_with_missing, axis=1) print("MAE from Approach 1 (Drop columns with missing values):") print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid)) """ Explanation: Score from Approach 1 (Drop Columns with Missing Values) Since we are working with both training and validation sets, we are careful to drop the same columns in both DataFrames. End of explanation """ from sklearn.impute import SimpleImputer # Imputation my_imputer = SimpleImputer() imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train)) imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid)) # Imputation removed column names; put them back imputed_X_train.columns = X_train.columns imputed_X_valid.columns = X_valid.columns print("MAE from Approach 2 (Imputation):") print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid)) """ Explanation: Score from Approach 2 (Imputation) Next, we use SimpleImputer to replace missing values with the mean value along each column. Although it's simple, filling in the mean value generally performs quite well (but this varies by dataset). While statisticians have experimented with more complex ways to determine imputed values (such as regression imputation, for instance), the complex strategies typically give no additional benefit once you plug the results into sophisticated machine learning models. End of explanation """ # Make copy to avoid changing original data (when imputing) X_train_plus = X_train.copy() X_valid_plus = X_valid.copy() # Make new columns indicating what will be imputed for col in cols_with_missing: X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull() X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull() # Imputation my_imputer = SimpleImputer() imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus)) imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus)) # Imputation removed column names; put them back imputed_X_train_plus.columns = X_train_plus.columns imputed_X_valid_plus.columns = X_valid_plus.columns print("MAE from Approach 3 (An Extension to Imputation):") print(score_dataset(imputed_X_train_plus, imputed_X_valid_plus, y_train, y_valid)) """ Explanation: We see that Approach 2 has lower MAE than Approach 1, so Approach 2 performed better on this dataset. Score from Approach 3 (An Extension to Imputation) Next, we impute the missing values, while also keeping track of which values were imputed. End of explanation """ # Shape of training data (num_rows, num_columns) print(X_train.shape) # Number of missing values in each column of training data missing_val_count_by_column = (X_train.isnull().sum()) print(missing_val_count_by_column[missing_val_count_by_column > 0]) """ Explanation: As we can see, Approach 3 performed slightly worse than Approach 2. So, why did imputation perform better than dropping the columns? The training data has 10864 rows and 12 columns, where three columns contain missing data. For each column, less than half of the entries are missing. Thus, dropping the columns removes a lot of useful information, and so it makes sense that imputation would perform better. End of explanation """
cwhite1026/Py2PAC
examples/Calculating_correlation_functions.ipynb
bsd-3-clause
import AngularCatalog_class as ac import numpy.random as rand import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10, 6) """ Explanation: Calculating correlation functions This document walks through using Py2PAC to calculate correlation functions with or without error estimates. We'll do this with the AngularCatalog class. First, import the things that we'll need End of explanation """ #Set the seed rand.seed(seed=1234) #Generate the catalog cat = ac.AngularCatalog.random_catalog(2e3, ra_range=[0, 1], dec_range=[-.5, .5]) #Show it as a scatter plot cat.scatterplot_points(sample="data") """ Explanation: Creating an AngularCatalog with randomly placed data The first catalog we'll look at will be random data- the correlation functions should be 0 at all scales. We do this with the class method random_catalog in the AngularCatalog class. In this case, we're just making it in a rectangle over RA = 0 to 1 degrees and Dec = -0.5 to 0.5 degrees. End of explanation """ cat.generate_random_sample(number_to_make=1e4) #Show it as a scatter plot cat.scatterplot_points(sample="both") """ Explanation: Calculating a correlation function Now we want to get the correlation function from this. The things we need are a binning in separation and a random sample. The theta bins are set with AngularCatalog.set_theta_bins(min, max, nbins) and the randoms are generated with AngularCatalog.generate_random_sample(&lt;number of randoms&gt;). Generating randoms The number of randoms can be defined in a few ways. - number_to_make=N: The set number of points to lay down. - multiple_of_data=N: Makes N_data * N randoms. - density_on_sky=N: Number of randoms per square degree. In this case, we'll just use a set number, so number_to_make=1e4. End of explanation """ #Set the theta bins- this has default unit='a' (for arcseconds) and logbins=True cat.set_theta_bins(20, 1000, 10) #Examples of other ways you might call this #cat.set_theta_bins(5.56e-3, 0.278, 15, unit='d') #same as above (modulo rounding) but in deg #cat.set_theta_bins(20, 1000, 15, logbins=False) #Same as above but with linear bins """ Explanation: Setting the theta binning Pretty simple- you just use AngularCatalog.set_theta_bins and tell it the minimum and maximum separations and the number of bins and it sets it all up. The required parameters are the min and max separation and the number of bins. The keyword parameters are logbins and unit, by default 'a' and True respectively. The logbins parameter sets whether the bins are evenly spaced in log separation (True) or linear separation (False). The unit argument sets the unit of the input minimum and maximum separations and can be arcseconds, degrees, or radians. End of explanation """ cat.subdivide_mask(n_shortside=3, n_longside=3, preview=True) #Actually do this subdivision cat.subdivide_mask(n_shortside=3, n_longside=3) """ Explanation: Different methods for calculating correlation functions The AngularCatalog class has four functions that calculate correlation functions that differ mainly by the error estimation. $N_{gals}$ is the total number of galaxies in the data catalogs. AngularCatalog.cf: calculates the correlation function without errors. AngularCatalog.cf_bootstrap: calculates the correlation function with errors estimated with single-galaxy bootstrapping. 
What this means is that it calculates $N_{boots}$ correlation functions, each with $N_{gals}$ galaxies randomly selected from the data catalog with replacement so that an individual galaxy might appear several times or not at all in an interation. The mean and standard deviation in each bin of separation are the value of and error on the correlation function at that separation. AngularCatalog.cf_block_bootstrap: calculates the correlation function with errors from block bootstrapping. Block bootstrapping is like single-galaxy bootstrapping, but works with large areas of the image rather than single galaxies. Before you run a block bootstrapped correlation function, you divide the image into $N_{blocks}$ blocks that should be as close to square and as close to equal areas as possible. Requires mask subdivision. AngularCatalog.cf_jackknife: calculates the correlation function with errors from jackknifing. Jackknifing is another error estimator that requires the image area to be subdivided into $N_{blocks}$ blocks. Then, $N_{blocks}$ correlation functions are calculated, each one omitting a different spatial block. The mean and standard deciation again estimate the correlation function value and error. Requires mask subdivision. Subdividing the mask Before we do block bootstrap or jackknife, we have to subdivide the catalog. We do this with AngularCatalog.subdivide_mask. If you want to try a subdivision method, you can use preview=True, which will show you a plot of that subdivision but will not store it. If you preview a subdivision and you decide you want to keep it, you must run the routine again with preview=False (the default value). End of explanation """ %%capture # ^ To hide the long output that we don't really care about #Without error bars cat.cf(n_iter=20, clobber=True, name='noerr_cf') #Single-galaxy bootstrapping cat.cf_bootstrap(n_boots=20, clobber=True, name="single_gal_cf") #Block bootstrapping cat.cf_block_bootstrap(n_boots=20, clobber=True, name="block_bs_cf") #Jackknife cat.cf_jackknife(clobber=True, name="jackknife") """ Explanation: Performing the calculations Now we're set to actually calculate the correlation functions. For more information on exactly how the individual functions work, see the documentation. End of explanation """ cat.plot_cfs(which_cfs=['noerr_cf', 'single_gal_cf', 'block_bs_cf', 'jackknife'], labels=["No errors", "Single gal bootstrap", "Block bootstrap", "Jackknife"], fmt='o-', log_yscale=False) """ Explanation: Plotting correlation functions The CorrelationFunction class has a plotting routine. For convenience, the AngularCatalog class can use it to plot multiple correlation functions at once. In general, correlation functions are plotted with a logarithmic x- and y-axis, but in this case we're plotting a correlation function that we expect to be zero. The log_yscale=False keyword argument does this for us. End of explanation """ #Simple correlation function calculation cat.cfs={} #Clear the correlation functions in case you're re-running this block. print "Dictionary keys before: ", cat.cfs.keys() print "" cat.cf() print "" print "Dictionary keys after: ", cat.cfs.keys() """ Explanation: This clearly isn't exactly 0, but is generally consistent with zero. Correlation function management in AngularCatalogs All the correlation functions stored in an AngularCatalog object have a name that identifies them. 
Each of the methods has a distinct default name, but you can also specify the name explicitly with the name keyword argument in the call to the correlation function routine. In addition, AngularCatalogs protect the already-calculated correlation functions. If a correlation function with that name already exists in the object, you must set clobber=True in the function call in order to overwrite it. Correlation functions are stored as CorrelationFunction objects in the AngularCatalog.cfs dictionary. Below, we show that the cfs dictionary is empty, calculate a correlation function with no error bars with all default arguments, and then show that afterwards there is a correlation function in the dictionary with the default name for the cf() function. End of explanation """ print "Without clobber or a different name: " cat.cf() print "With clobber:" print "" cat.cf(clobber=True) print "Without clobber but with a different name: " print "" cat.cf(name='cf2') print "" print "Correlation function names: ", cat.cfs.keys() """ Explanation: If you try to do the same thing again, it fails unless you use clobber=True End of explanation """
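A possible follow-up experiment (a sketch of mine, not from the Py2PAC documentation): since block-bootstrap errors depend on how the mask is subdivided, the loop below recomputes the block-bootstrapped correlation function for a few different subdivisions and overlays them with plot_cfs. It only uses calls demonstrated above, and it assumes subdivide_mask can simply be called again to replace the previous subdivision; the strings passed to name= are arbitrary labels of mine.

```python
subdivisions = [(2, 2), (3, 3), (4, 4)]
cf_names = []

for n_short, n_long in subdivisions:
    cat.subdivide_mask(n_shortside=n_short, n_longside=n_long)
    cf_name = "block_bs_{0}x{1}".format(n_short, n_long)
    cat.cf_block_bootstrap(n_boots=20, clobber=True, name=cf_name)
    cf_names.append(cf_name)

cat.plot_cfs(which_cfs=cf_names,
             labels=["{0}x{1} blocks".format(ns, nl) for ns, nl in subdivisions],
             fmt='o-', log_yscale=False)
```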
molgor/spystats
notebooks/Sandboxes/TensorFlow/BiospytialGaussianModels.ipynb
bsd-2-clause
run ../../../../traversals/tests.py """ Explanation: In this notebook I´ll create functions for easing the development of geostatistical models using the GPFlow (James H, et.al )the library for modelling gaussian processes in Tensor Flow (Google) (Great Library, btw). Requirements Inputs Design Matrix X composed of coovariates and spatio-temporal coordinates. A desired hypespace $A \subseteq \mathbb{R}^{n}$ (e.g. Borelian, Closed, Discrete,Partition) An aditional set of hyperparameters and initializations. Processing A wrapper with GPflow regressor (This will be experimental) Outputs The fitted GPR model. A tensor composed of the coordinates of two dimensions and the predicted field given a initial condition (tensor of rank two. Get some sample data End of explanation """ import tensorflow as tf import GPflow as gf import pandas as pd #k = gf.kernels.Matern12(2, lengthscales=1, active_dims = [3,4]) X = pd.concat((rd[['MeanTemperature_mean','Precipitation_mean','WindSpeed_mean']],s[['Longitude','Latitude']]),axis=1) k = gf.kernels.Matern12(2, lengthscales=1, active_dims = [0,1]) X = s[['Longitude','Latitude']] Y = rd['Elevation_mean'] mx = X.as_matrix() my = Y.as_matrix().reshape(16,1) mx.shape meanf = gf.mean_functions.Linear(np.ones((2,1)), np.ones(1)) m = gf.gpr.GPR(mx,my,k,mean_function=meanf) m.likelihood.variance = 70 m.optimize() print(m) """ Explanation: GPFlow first approximation End of explanation """ plt.style.use('ggplot') X.plot.scatter('Longitude','Latitude') """ Explanation: Buidling a grid for the interpolation (prediction) The first step is to inspect the range of the geographical space. End of explanation """ Nn = 300 predicted_x = np.linspace(min(X.Longitude),max(X.Longitude),Nn) predicted_y = np.linspace(min(X.Latitude),max(X.Latitude),Nn) Xx, Yy = np.meshgrid(predicted_x,predicted_y) predicted_coordinates = np.vstack([Xx.ravel(), Yy.ravel()]).transpose() predicted_coordinates.shape means,variances = m.predict_y(predicted_coordinates) upperl = (np.sqrt(variances))/2.0 lowerl = -1 * upperl ### Let´s plot #X.plot.scatter('Longitude','Latitude') plt.pcolor(Xx,Yy,means.reshape(Nn,Nn)) plt.colorbar() plt.scatter(X.Longitude,X.Latitude,s=Y*0.05,c=Y,cmap=plt.cm.binary) ## ## Upper limit plt.pcolor(Xx,Yy,variances.reshape(Nn,Nn)) plt.colorbar() plt.scatter(X.Longitude,X.Latitude,s=Y*0.05,c=Y,cmap=plt.cm.binary) ## Upper limit plt.pcolor(Xx,Yy,upperl.reshape(Nn,Nn)) plt.colorbar() plt.scatter(X.Longitude,X.Latitude,s=Y*0.05,c=Y,cmap=plt.cm.binary) ## Lower limit plt.pcolor(Xx,Yy,lowerl.reshape(Nn,Nn)) plt.colorbar() plt.scatter(X.Longitude,X.Latitude,s=Y*0.05,c=Y,cmap=plt.cm.binary) min(upperl) """ Explanation: Lets build a mesh grid and then a pcolor using that meshgrid. End of explanation """ elev = big_t.associatedData.getAssociatedRasterAreaData('Elevation') elev.display_field() print(elev.rasterdata.bands[0].data().shape) ## But we can extract directly the info from this raster. 
from django.contrib.gis.geos import Point true_elevs = map(lambda p : elev.getValue(Point(*p)),predicted_coordinates) # so the errors are: errors= means - true_elevs plt.hist(errors,bins=50) plt.scatter(range(len(errors)),errors) """ Explanation: We can get the direct Elevation data with: End of explanation """ k = gf.kernels.Matern12(2, lengthscales=1, active_dims = [6,7]) X = pd.concat((rd[['MaxTemperature_mean', u'MeanTemperature_mean', u'MinTemperature_mean', u'Precipitation_mean', u'SolarRadiation_mean', u'Vapor_mean']],s[['Longitude','Latitude']]),axis=1) mx = X.as_matrix() #Y is still elevation (4,4) matrix my = Y.as_matrix().reshape(16,1) meanf = gf.mean_functions.Linear(np.ones((8,1)), np.ones(1)) m = gf.gpr.GPR(mx,my,k,mean_function=meanf) m.likelihood.variance = 10 m.optimize() print(m) X.columns mx = X.as_matrix() my = Y.as_matrix().reshape(16,1) mx.shape meanf = gf.mean_functions.Linear(np.ones((8,1)), np.ones(1)) m = gf.gpr.GPR(mx,my,k,mean_function=meanf) m.likelihood.variance = 10 m.optimize() print(m) # Now Let´s do a Logistic Regression s k = gf.kernels.Matern12(2, lengthscales=1, active_dims = [0,1]) X = s[['Longitude','Latitude']] Y = s[['Falconidae']] mx = X.as_matrix() my = Y.as_matrix().reshape(16,1) meanf = gf.mean_functions.Linear(np.ones((2,1)), np.ones(1)) ## I need a likelihood function ! m = gf.gpmc.GPMC(mx,my,k,mean_function=meanf) #m.likelihood.variance = 10 m.optimize() #print(m) """ Explanation: Using all* covariates for predicting elevation End of explanation """
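The grid construction and prediction steps above are repeated almost verbatim for every surface plotted, so they can be factored into a small helper. This is only a sketch of mine (the name predict_surface is not part of GPflow or Biospytial), and it assumes a fitted model whose inputs are just the two coordinates, like the first elevation GPR above — not the 8-column covariate model.

```python
import numpy as np

def predict_surface(model, lon, lat, n=300):
    """Predict the GP mean and variance on an n x n grid covering the data extent."""
    grid_x = np.linspace(lon.min(), lon.max(), n)
    grid_y = np.linspace(lat.min(), lat.max(), n)
    Xx, Yy = np.meshgrid(grid_x, grid_y)
    coords = np.vstack([Xx.ravel(), Yy.ravel()]).transpose()
    means, variances = model.predict_y(coords)
    return Xx, Yy, means.reshape(n, n), variances.reshape(n, n)

# Example usage with the first elevation model fitted earlier in this notebook:
# Xx, Yy, mean_field, var_field = predict_surface(m, X.Longitude, X.Latitude)
# plt.pcolor(Xx, Yy, mean_field); plt.colorbar()
# plt.scatter(X.Longitude, X.Latitude, s=Y*0.05, c=Y, cmap=plt.cm.binary)
```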
googledatalab/notebooks
tutorials/BigQuery/BigQuery APIs.ipynb
apache-2.0
import google.datalab.bigquery as bq """ Explanation: BigQuery APIs Google Cloud Datalab provides an integrated environment for working with Google BigQuery for both ad-hoc, exploratory work as well as pipeline development. You've already seen the use of %%bq in the Hello BigQuery notebook, and various commands in the BigQuery Commands notebook. These BigQuery commands are in fact built using the same BigQuery APIs that are available for your own use. This notebook introduces some examples of using those APIs. Importing the API The Datalab APIs are provided in the google.datalab Python module, and the BigQuery functionality is contained within the google.datalab.bigquery module. End of explanation """ # Create and run a SQL query httplogs_query = bq.Query('SELECT * FROM `cloud-datalab-samples.httplogs.logs_20140615` LIMIT 3') """ Explanation: Querying Data The most important BigQuery-related API is the one that allows you to execute a SQL query. The google.datalab.bigquery.Query class provides that functionality. To run a query using BigQuery Standard SQL, create a new Query object with the desired SQL string, or use an object that has already been defined by the %%bq query --name command. Let's take a look at an example: End of explanation """ output_options = bq.QueryOutput.table(use_cache=False) result = httplogs_query.execute(output_options=output_options).result() result """ Explanation: Let's run the query created above with caching turned off, so we're sure to be able to retrieve metadata, such as bytes processed from resulting query job. For this, we'll need to use a QueryOutput object. End of explanation """ # Inspecting the result, and the associated job print(result.sql) print(str(result.length) + ' rows') print(str(result.job.bytes_processed) + ' bytes processed') # Inspect the programmatic representation. # Converting the QueryResultsTable to a vanilla list enables viewing the literal data, # as well as side-stepping the HTML rendering seen above. list(result) """ Explanation: The result object is a QueryResultsTable class, and can be enumerated in the same manner a regular Python list, in addition to retrieving metadata about the result. End of explanation """ result.to_dataframe() type(result.to_dataframe()) """ Explanation: The QueryResultsTable has more functionality you can explore, such as converting it to a pandas dataframe, or exporting to a file. End of explanation """ UniqueNames2013 = bq.Query(sql=''' WITH UniqueNames2013 AS (SELECT DISTINCT name FROM `bigquery-public-data.usa_names.usa_1910_2013` WHERE Year = 2013) SELECT * FROM UniqueNames2013 ''') """ Explanation: Sampling Data Let's take a look at a sampling query. Consider the following query: End of explanation """ sampling = bq.Sampling.random(percent=2) job = UniqueNames2013.execute(sampling=sampling) job.result() """ Explanation: To execute the query and view a sample from the result table, use a Sampling object, let's use random sampling for this example: End of explanation """ output_options = bq.QueryOutput.dataframe(max_rows=10) job = UniqueNames2013.execute(output_options=output_options) job.result() """ Explanation: Notice every time we run the query above, we get a different set of results, since we chose a random sampling of 2%. We can also run the query and copy the sampled result into a pandas DataFrame directly. 
For that, we use a QueryOutput object of type dataframe: End of explanation """ from google.datalab import Context datasets = bq.Datasets(Context('cloud-datalab-samples', Context.default().credentials)) for ds in datasets: print(ds.name) sample_dataset = list(datasets)[1] tables = sample_dataset.tables() for table in tables: print('%s (%d rows - %d bytes)' % (table.name.table_id, table.metadata.rows, table.metadata.size)) table = bq.Table('cloud-datalab-samples.httplogs.logs_20140615') fields = map(lambda tsf: tsf.name, table.schema) list(fields) """ Explanation: Datasets and Tables In addition to executing queries, BigQuery objects like Datasets, Tables and their Schemas can be accessed programmatically as well. Listing Resources End of explanation """ # Create a new dataset (this will be deleted later in the notebook) sample_dataset = bq.Dataset('apisample') sample_dataset.create(friendly_name = 'Sample Dataset', description = 'Created from Sample Notebook') sample_dataset.exists() # To create a table, we also need to create a schema. # We've seen before how to create a schema from some existing data, # let's now try creating it from a list of records: schema = [ {'name': 'name', 'type': 'STRING'}, {'name': 'value', 'type': 'INT64'}, {'name': 'flag', 'type': 'BOOL', 'mode': 'NULLABLE'} ] sample_schema = bq.Schema.from_data(schema) sample_table = bq.Table("apisample.sample_table").create(schema = sample_schema, overwrite = True) sample_table.schema """ Explanation: Creating Resources End of explanation """ list(sample_dataset.tables()) """ Explanation: You can run the cell, below, to see the contents of the new dataset: End of explanation """ # Clear out sample resources sample_dataset.delete(delete_contents = True) """ Explanation: Deleting Resources End of explanation """
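The query patterns above can be condensed into a small utility. This is a sketch of mine built only from the calls demonstrated in this notebook (bq.Query, QueryOutput.dataframe, Sampling.random), not an official Datalab helper; the function name and its defaults are arbitrary.

```python
import google.datalab.bigquery as bq

def query_to_dataframe(sql, max_rows=1000, sample_percent=None):
    """Run a Standard SQL query and return the (optionally sampled) result as a DataFrame."""
    query = bq.Query(sql)
    output_options = bq.QueryOutput.dataframe(max_rows=max_rows)
    if sample_percent is not None:
        sampling = bq.Sampling.random(percent=sample_percent)
        job = query.execute(output_options=output_options, sampling=sampling)
    else:
        job = query.execute(output_options=output_options)
    return job.result()

df = query_to_dataframe(
    'SELECT * FROM `cloud-datalab-samples.httplogs.logs_20140615` LIMIT 100',
    max_rows=10)
df
```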
zacwentzell/BIA-660-C-Spring2017
In-class Lectures/March 09/Lecture_March_9.ipynb
mit
import matplotlib import matplotlib.pyplot as plt matplotlib.style.use('ggplot') # Pandas has a ton of built-in visualizations # Play and Learn # http://pandas.pydata.org/pandas-docs/stable/visualization.html df.plot.scatter(x='Start_Date', y='Price') df['Price'].plot.box() color = dict(boxes='DarkGreen', whiskers='DarkOrange', medians='DarkBlue', caps='Gray') df['Price'].plot.box(color=color, sym='r+') """ Explanation: Exploratory Data Analysis (EDA) is where you should always begin As well as where you should spend most of your time, see what's happening in the data. Your brain is better than most ML algorithms (at least for now) End of explanation """ df = df.set_value(49, 'Price', 55) # Time for a Google Investigation # "IQR Outlier" """ Explanation: Let's see what happens when we manually add an outlier End of explanation """ # Check out the gallery: import seaborn as sns # this can break matplotlib for some reason... g = sns.jointplot(df['Start_Date'], df['Price'], kind="kde", size=7, space=0) import seaborn as sns # don't blindly set parameters, please read and understand what they mean and how it works # http://seaborn.pydata.org/tutorial/distributions.html # lots of great tutorials: http://seaborn.pydata.org/tutorial.html g = sns.jointplot(df['Start_Date'], df['Price'], kind="kde", size=7, space=0, bw=100) import matplotlib.pyplot as plt g = sns.jointplot(x="Start_Date", y="Price", data=df, kind="kde", color="MediumTurquoise") # https://en.wikipedia.org/wiki/Web_colors g.plot_joint(plt.scatter, c="w", s=30, linewidth=1, marker="+") g.ax_joint.collections[0].set_alpha(0) g.set_axis_labels("$X$", "$Y$"); from sklearn.cluster import DBSCAN from sklearn.preprocessing import StandardScaler import numpy as np # All of pandas' viz is built on top of matplotlib as you might have noticed # You can get started learning matplotlib here: http://matplotlib.org/users/pyplot_tutorial.html # df = df.set_value(49, 'Price', 255) X = StandardScaler().fit_transform(df[['Start_Date', 'Price']]) db = DBSCAN(eps=.5, min_samples=3).fit(X) labels = db.labels_ clusters = len(set(labels)) unique_labels = set(labels) colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels))) plt.subplots(figsize=(12,8)) for k, c in zip(unique_labels, colors): class_member_mask = (labels == k) xy = X[class_member_mask] plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=c, markeredgecolor='k', markersize=14) plt.title("Total Clusters: {}".format(clusters), fontsize=14, y=1.01) df['dbscan_labels'] = db.labels_ df.head() df.dbscan_labels.unique() t = X[df.dbscan_labels == 1,:] t.mean(axis=0) df from pandas.tools.plotting import parallel_coordinates df2 = df[['Trip_Length','Start_Date', 'Price', 'dbscan_labels']] scaled = StandardScaler().fit_transform(df2[df2.columns[:-1]]) df2 = pd.DataFrame(scaled, columns=df2.columns[:-1]) df2['dbscan_labels'] = df.dbscan_labels parallel_coordinates(df2, 'dbscan_labels') for result in results: bars = result.find_elements_by_class_name('LJTSM3-w-x') for bar in bars: ActionChains(driver).move_to_element(bar).perform() time.sleep(0.0001) print (result.find_element_by_class_name('LJTSM3-w-k').find_elements_by_tag_name('div')[0].text, result.find_element_by_class_name('LJTSM3-w-k').find_elements_by_tag_name('div')[1].text) """ Explanation: Introducing seaborn seaborn makes it easy to create some really high quality visualizations End of explanation """
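One natural next step after attaching the DBSCAN labels above (a sketch of mine, not from the class notebook): summarise each cluster with a pandas groupby. This assumes df still carries the dbscan_labels, Start_Date and Price columns created earlier; DBSCAN marks noise points, i.e. likely outliers, with the label -1.

```python
# How many points landed in each cluster, and what do their prices look like?
cluster_summary = df.groupby('dbscan_labels')['Price'].agg(['count', 'mean', 'min', 'max'])
print(cluster_summary)

# The rows DBSCAN could not assign to any cluster are the candidate outliers
outliers = df[df.dbscan_labels == -1]
print(outliers[['Start_Date', 'Price']])
```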
paolorivas/homeworkfoundations
classes and excersises/class9/.ipynb_checkpoints/09 - Functions-checkpoint.ipynb
mit
len """ Explanation: Class 9: Functions A painful analogy What do you do when you wake up in the morning? I don't know about you, but I get ready. "Obviously," you say, a little too snidely for my liking. You're particular, very detail-oriented, and need more information out of me. Fine, then. Since you're going to be nitpicky, I might be able to break it down a little bit more for you... I get out of bed I take a shower I get dressed I eat breakfast Unfortunately that's not good enough for you. "But how do you eat breakfast?" Well, maybe I... Get a bowl out of a cabinet Get some cereal out of the pantry Get some milk out of the fridge Pour some cereal into a bowl Pour some milk into the bowl Sit down at the table and start eating "Are you eating with a spoon?" you interrupt. "When did you get the spoon out? Was that after the milk, or before the bowl?" It's annoying people like this that make us have functions. FUN FACT: The joke's on you, because I don't even actually eat cereal. Maybe I don't even get ready in the morning, either. What is a function? Functions are chunks of code that do something. They're different than the code we've written so far because they have names. Instead of detailing each and every step involved in eating breakfast, I just use "I eat breakfast" as a shorthand for many, many detailed steps. Functions are the same - they allow us to take complicated parts of code, give it a name, and type just_eat_breakfast() every morning instead of twenty-five lines of code. What are some examples of functions? We've used a lot of functions in our time with Python. You remember our good buddy len? It's a function that gives back the length of whatever you send its way, e.g. len("ghost") is 5 and len("cartography") is 11. End of explanation """ max print import requests requests.get # And if we just wanted to use them, for some reason n = -34 print(n, "in absolute value is", abs(n)) print("We can add after casting to int:", 55 + int("55")) n = 4.4847 print(n, "can be rounded to", round(n)) print(n, "can also be rounded to 2 decimal points", round(n, 2)) numbers = [4, 22, 40, 54] print("The total of the list is", sum(numbers)) """ Explanation: Almost everything useful is a function. Python has a ton of other built-in functions! Along with len, a couple you might have seen are: abs(...) takes a number and returns the absolute value of the number int(...) takes a string or float and returns it as an integer round(...) takes a float and returns a rounded version of it sum(...) takes a list and returns the sum of all of its elements max(...) takes a list and returns the largest of all of its selements print(...) takes whatever you want to give it and displays it on the screen Functions can also come from packages and libraries. The .get part of requests.get is a function, too! And here, to prove it to you? End of explanation """ def urlretrieve(url, filename=None, reporthook=None, data=None): url_type, path = splittype(url) with contextlib.closing(urlopen(url, data)) as fp: headers = fp.info() # Just return the local path and the "headers" for file:// # URLs. No sense in performing a copy unless requested. if url_type == "file" and not filename: return os.path.normpath(path), headers # Handle temporary file setup. 
if filename: tfp = open(filename, 'wb') else: tfp = tempfile.NamedTemporaryFile(delete=False) filename = tfp.name _url_tempfiles.append(filename) with tfp: result = filename, headers bs = 1024*8 size = -1 read = 0 blocknum = 0 if "content-length" in headers: size = int(headers["Content-Length"]) if reporthook: reporthook(blocknum, bs, size) while True: block = fp.read(bs) if not block: break read += len(block) tfp.write(block) blocknum += 1 if reporthook: reporthook(blocknum, bs, size) if size >= 0 and read < size: raise ContentTooShortError( "retrieval incomplete: got only %i out of %i bytes" % (read, size), result) return result """ Explanation: See? Functions make the world run. One useful role they play is functions hide code that you wouldn't want to type a thousand times. For example, you might have used urlretrieve from urllib to download files from around the internet. If you didn't use urlretrieve you'd have to type all of this: End of explanation """ # A function to multiply a number by two """ Explanation: Horrifying, right? Thank goodness for functions. Writing your own functions I've always been kind of jealous of len(...) and its crowd. It seemed unfair that Python made a list of cool, important functions, and neither me nor you had any say in the matter. What if I want a function that turns all of the periods in a sentence into exclamation points, or prints out a word a hundred million times? Well, turns out that isn't a problem. We can do that. Easily! And we will. If you can type def and use a colon, you can write a function. A function that you write yourself looks like this: End of explanation """ print("2 times two is", double(2)) print("10 times two is", double(10)) print("56 times two is", double(56)) age = 76 print("Double your age is", double(age)) """ Explanation: It has a handful of parts: def - tells Python "hey buddy, we're about to define a function! Get ready." And Python appropriately prepares itself. double - is the name of the function, and it's how you'll refer to the function later on. For example, len's function name is (obviously) len. (number) - defines the parameters that the function "takes." You can see that this function is called double, and you send it one parameter that will be called number. return bigger - is called the return statement. If the function is a factory, this is the shipping department - return tells you what to send back to the main program. You'll see it doesn't do anything, though. That's because we haven't called the function, which is a programmer's way of saying use the function. Let's use it! End of explanation """ def greet(name): return "Hello " + name # This one works print(greet("Soma")) # Overwrite the function greet with a string greet = "blah" # Trying the function again breaks print(greet("Soma")) """ Explanation: Function Naming Your function name has to be unique, otherwise Python will get confused. No other functions or variabels can share its name! For example, if you call it len it'll forget about the built-in len function, and if you give one of your variables the name print suddenly Python won't understand how print(...) works anymore. If you end up doing this, you'll get errors like the one below End of explanation """ def exclaim(potato_soup): return potato_soup + "!!!!!!!!!!" invitation = "I hope you can come to my wedding" print(exclaim(invitation)) line = "I am sorry to hear you have the flu" print(exclaim(line)) """ Explanation: Parameters In our function double, we have a parameter called number. 
py def double(number): bigger = number * 2 return bigger Notice in the last example up above, though, we called double(age). Those don't match!!! The thing is, your function doesn't care what the variable you send it is called. Whatever you send it, it will rename. It's like if someone adopted my cat Smushface, they might think calling her Petunia would be a little bit nicer (it wouldn't be, but I wouldn't do anything about it). Here's an example with my favorite variable name potato_soup End of explanation """ name = "Nancy" name_length = len(name) print("Hello", name, "your name is", name_length, "letters long") name = "Brick" name_length = len(name) print("Hello", name, "your name is", name_length, "letters long") name = "Saint Augustine" name_length = len(name) print("Hello", name, "your name is", name_length, "letters long") """ Explanation: invitation and line both get renamed to potato_soup inside of the function, so you can reuse the function with any variable of any name. Let's say I have a function that does some intense calculations: py def sum_times_two(a, b): added = a + b return added * 2 To reiterate: a and b have nothing to do with the values outside of the function. You don't have to make variables called a and b and then send them to the function, the function takes care of that by itself. For example, the below examples are perfectly fine. py sum_times_two(2, 3) r = 4 y = 7 sum_times_two(r, y) When you're outside of the function, you almost never have to think about what's inside the function. You don't care about what variabels are called or anything. It's a magic box. Think about how you don't know what len looks like inside, or print, but you use them all of the time! Why functions? Two reasons to use functions, since maybe you'll ask: Don't Repeat Yourself - If you find yourself writing the same code again and again, it's a good time to put that code into a function. len(...) is a function because Python people decided that you shouldn't have to write length-calculating code every time you wanted to see how many characters were in a string. Code Modularity - sometimes it's just nice to organize your code. All of your parts that deal with counting dog names can go over here, and all of the stuff that has to do with boroughs goes over there. In the end it can make for more readable and maintanable code. (Maintainable code = code you can edit in the future without thinking real hard) Those reasons probably don't mean much to you right now, and I sure don't blame you. Abstract programming concepts are just dumb abstract things until you actually start using them. Let's say I wanted to greet someone and then tell them how long their name is, because I'm pedantic. End of explanation """ def weird_greeting(name): name_length = len(name) print("Hello", name, "your name is", name_length, "letters long") weird_greeting("Nancy") weird_greeting("Brick") weird_greeting("Saint Augustine") """ Explanation: Do you know how exhausted I got typing all of that out? And how it makes no sense at all? Luckily, functions save us: all of our code goes into one place so we don't have to repeat ourselves, and we can give it a descriptive name. End of explanation """ # Our cool function def size_comparison(a, b): if a > b: return "Larger" else: return "Smaller" print(size_comparison(4, 5.5)) print(size_comparison(65, 2)) print(size_comparison(34.2, 33)) """ Explanation: return The role of a function is generally to do something and then send the result back to us. 
len sends us back the length of the string, requests.get sends us back the web page we requested. py def double(a): return a * 2 This is called the return statement. You don't have to send something back (print doesn't) but you usually want to. Writing a custom function Let's say we have some code that compares the number of boats you have to the number of cars you have. python if boat_count &gt; car_count: print "Larger" else: print "Smaller" Simple, right? But unfortunately we're at a rich people convention where they're always comparing the number of boats to the number of cars to the number of planes etc etc etc. If we have to check again and again and again and again for all of those people and always print Larger or Smaller I'm sure we'd get bored of typing all that. So let's convert it to a function! Let's give our function a name of size_comparison. Remember: We can name our functions whatever we want, as long as it's unique. Our function will take two parameters. they're boat_coat and car_count above, but we want generic, re-usable names, so maybe like, uh, a and b? For our function's return value, let's have it send back "Larger" or "Smaller". End of explanation """ def to_kmh(speeed): "YOUR CODE HERE" mph = 40 print("You are driving", mph, "in mph") print("You are driving", to_kmh(mph), "in kmh") """ Explanation: Your Turn This is a do-now even though it's not the beginning of class! 1a. Driving Speed With the code below, it tells you how fast you're driving. I figure that a lot of people are more familiar with kilometers an hour, though, so let's write a function that does the conversion. I wrote a skeleton, now you can fill in the conversion. Make it display a whole number. End of explanation """ # You have to wash ten cars on every street, along with the cars in your driveway. # With the following list of streets, how many cars do we have? def total(n): return n * 10 # Here are the streets streets = ['10th Ave', '11th Street', '45th Ave'] # Let's count them up total = len(streets) # And add one count = total + 1 # And see how many we have print(total(count)) """ Explanation: 1b. Driving Speed Part II Now write a function called to_mpm that, when given miles per hour, computes the meters per minute. 1c. Driving Speed Part III Rewrite to_mpm to use the to_kmh function. D.R.Y.! 2. Broken Function The code below won't work. Why not? End of explanation """
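One possible way to fill in the exercises above (a sketch, not an official answer key): a mile is 1.609344 kilometres, so the conversions are simple multiplications, and to_mpm can reuse to_kmh to stay D.R.Y. As for exercise 2, the code breaks because total = len(streets) overwrites the function named total with an integer — the same problem as the greet example earlier — so total(count) is no longer callable.

```python
def to_kmh(mph):
    # 1 mile = 1.609344 km; round so it displays a whole number, as the exercise asks
    return round(mph * 1.609344)

def to_mpm(mph):
    # Reuse to_kmh: km/h -> metres per minute is *1000 metres / 60 minutes
    # (the rounding inside to_kmh makes this slightly approximate)
    return to_kmh(mph) * 1000 / 60

mph = 40
print("You are driving", mph, "in mph")
print("You are driving", to_kmh(mph), "in kmh")
print("You are driving", to_mpm(mph), "in metres per minute")
```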
keras-team/keras-io
examples/generative/ipynb/cyclegan.ipynb
apache-2.0
import os import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import tensorflow_addons as tfa import tensorflow_datasets as tfds tfds.disable_progress_bar() autotune = tf.data.AUTOTUNE """ Explanation: CycleGAN Author: A_K_Nain<br> Date created: 2020/08/12<br> Last modified: 2020/08/12<br> Description: Implementation of CycleGAN. CycleGAN CycleGAN is a model that aims to solve the image-to-image translation problem. The goal of the image-to-image translation problem is to learn the mapping between an input image and an output image using a training set of aligned image pairs. However, obtaining paired examples isn't always feasible. CycleGAN tries to learn this mapping without requiring paired input-output images, using cycle-consistent adversarial networks. Paper Original implementation Setup End of explanation """ # Load the horse-zebra dataset using tensorflow-datasets. dataset, _ = tfds.load("cycle_gan/horse2zebra", with_info=True, as_supervised=True) train_horses, train_zebras = dataset["trainA"], dataset["trainB"] test_horses, test_zebras = dataset["testA"], dataset["testB"] # Define the standard image size. orig_img_size = (286, 286) # Size of the random crops to be used during training. input_img_size = (256, 256, 3) # Weights initializer for the layers. kernel_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02) # Gamma initializer for instance normalization. gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02) buffer_size = 256 batch_size = 1 def normalize_img(img): img = tf.cast(img, dtype=tf.float32) # Map values in the range [-1, 1] return (img / 127.5) - 1.0 def preprocess_train_image(img, label): # Random flip img = tf.image.random_flip_left_right(img) # Resize to the original size first img = tf.image.resize(img, [*orig_img_size]) # Random crop to 256X256 img = tf.image.random_crop(img, size=[*input_img_size]) # Normalize the pixel values in the range [-1, 1] img = normalize_img(img) return img def preprocess_test_image(img, label): # Only resizing and normalization for the test images. img = tf.image.resize(img, [input_img_size[0], input_img_size[1]]) img = normalize_img(img) return img """ Explanation: Prepare the dataset In this example, we will be using the horse to zebra dataset. 
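Note that the training images are randomly flipped, resized to 286x286, randomly cropped back to 256x256, and rescaled from [0, 255] to [-1, 1], which matches the tanh output range of the generator. A tiny sanity check of that scaling and its inverse (just an illustrative snippet, not part of the original example):

```python
import numpy as np

pixels = np.array([0.0, 127.5, 255.0])
scaled = (pixels / 127.5) - 1.0       # same mapping as normalize_img
print(scaled)                         # [-1.  0.  1.]
print(scaled * 127.5 + 127.5)         # back to [  0.  127.5 255. ] for plotting
```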
End of explanation """ # Apply the preprocessing operations to the training data train_horses = ( train_horses.map(preprocess_train_image, num_parallel_calls=autotune) .cache() .shuffle(buffer_size) .batch(batch_size) ) train_zebras = ( train_zebras.map(preprocess_train_image, num_parallel_calls=autotune) .cache() .shuffle(buffer_size) .batch(batch_size) ) # Apply the preprocessing operations to the test data test_horses = ( test_horses.map(preprocess_test_image, num_parallel_calls=autotune) .cache() .shuffle(buffer_size) .batch(batch_size) ) test_zebras = ( test_zebras.map(preprocess_test_image, num_parallel_calls=autotune) .cache() .shuffle(buffer_size) .batch(batch_size) ) """ Explanation: Create Dataset objects End of explanation """ _, ax = plt.subplots(4, 2, figsize=(10, 15)) for i, samples in enumerate(zip(train_horses.take(4), train_zebras.take(4))): horse = (((samples[0][0] * 127.5) + 127.5).numpy()).astype(np.uint8) zebra = (((samples[1][0] * 127.5) + 127.5).numpy()).astype(np.uint8) ax[i, 0].imshow(horse) ax[i, 1].imshow(zebra) plt.show() """ Explanation: Visualize some samples End of explanation """ class ReflectionPadding2D(layers.Layer): """Implements Reflection Padding as a layer. Args: padding(tuple): Amount of padding for the spatial dimensions. Returns: A padded tensor with the same type as the input tensor. """ def __init__(self, padding=(1, 1), **kwargs): self.padding = tuple(padding) super(ReflectionPadding2D, self).__init__(**kwargs) def call(self, input_tensor, mask=None): padding_width, padding_height = self.padding padding_tensor = [ [0, 0], [padding_height, padding_height], [padding_width, padding_width], [0, 0], ] return tf.pad(input_tensor, padding_tensor, mode="REFLECT") def residual_block( x, activation, kernel_initializer=kernel_init, kernel_size=(3, 3), strides=(1, 1), padding="valid", gamma_initializer=gamma_init, use_bias=False, ): dim = x.shape[-1] input_tensor = x x = ReflectionPadding2D()(input_tensor) x = layers.Conv2D( dim, kernel_size, strides=strides, kernel_initializer=kernel_initializer, padding=padding, use_bias=use_bias, )(x) x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x) x = activation(x) x = ReflectionPadding2D()(x) x = layers.Conv2D( dim, kernel_size, strides=strides, kernel_initializer=kernel_initializer, padding=padding, use_bias=use_bias, )(x) x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x) x = layers.add([input_tensor, x]) return x def downsample( x, filters, activation, kernel_initializer=kernel_init, kernel_size=(3, 3), strides=(2, 2), padding="same", gamma_initializer=gamma_init, use_bias=False, ): x = layers.Conv2D( filters, kernel_size, strides=strides, kernel_initializer=kernel_initializer, padding=padding, use_bias=use_bias, )(x) x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x) if activation: x = activation(x) return x def upsample( x, filters, activation, kernel_size=(3, 3), strides=(2, 2), padding="same", kernel_initializer=kernel_init, gamma_initializer=gamma_init, use_bias=False, ): x = layers.Conv2DTranspose( filters, kernel_size, strides=strides, padding=padding, kernel_initializer=kernel_initializer, use_bias=use_bias, )(x) x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x) if activation: x = activation(x) return x """ Explanation: Building blocks used in the CycleGAN generators and discriminators End of explanation """ def get_resnet_generator( filters=64, num_downsampling_blocks=2, num_residual_blocks=9, 
num_upsample_blocks=2, gamma_initializer=gamma_init, name=None, ): img_input = layers.Input(shape=input_img_size, name=name + "_img_input") x = ReflectionPadding2D(padding=(3, 3))(img_input) x = layers.Conv2D(filters, (7, 7), kernel_initializer=kernel_init, use_bias=False)( x ) x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x) x = layers.Activation("relu")(x) # Downsampling for _ in range(num_downsampling_blocks): filters *= 2 x = downsample(x, filters=filters, activation=layers.Activation("relu")) # Residual blocks for _ in range(num_residual_blocks): x = residual_block(x, activation=layers.Activation("relu")) # Upsampling for _ in range(num_upsample_blocks): filters //= 2 x = upsample(x, filters, activation=layers.Activation("relu")) # Final block x = ReflectionPadding2D(padding=(3, 3))(x) x = layers.Conv2D(3, (7, 7), padding="valid")(x) x = layers.Activation("tanh")(x) model = keras.models.Model(img_input, x, name=name) return model """ Explanation: Build the generators The generator consists of downsampling blocks: nine residual blocks and upsampling blocks. The structure of the generator is the following: c7s1-64 ==&gt; Conv block with `relu` activation, filter size of 7 d128 ====| |-&gt; 2 downsampling blocks d256 ====| R256 ====| R256 | R256 | R256 | R256 |-&gt; 9 residual blocks R256 | R256 | R256 | R256 ====| u128 ====| |-&gt; 2 upsampling blocks u64 ====| c7s1-3 =&gt; Last conv block with `tanh` activation, filter size of 7. End of explanation """ def get_discriminator( filters=64, kernel_initializer=kernel_init, num_downsampling=3, name=None ): img_input = layers.Input(shape=input_img_size, name=name + "_img_input") x = layers.Conv2D( filters, (4, 4), strides=(2, 2), padding="same", kernel_initializer=kernel_initializer, )(img_input) x = layers.LeakyReLU(0.2)(x) num_filters = filters for num_downsample_block in range(3): num_filters *= 2 if num_downsample_block < 2: x = downsample( x, filters=num_filters, activation=layers.LeakyReLU(0.2), kernel_size=(4, 4), strides=(2, 2), ) else: x = downsample( x, filters=num_filters, activation=layers.LeakyReLU(0.2), kernel_size=(4, 4), strides=(1, 1), ) x = layers.Conv2D( 1, (4, 4), strides=(1, 1), padding="same", kernel_initializer=kernel_initializer )(x) model = keras.models.Model(inputs=img_input, outputs=x, name=name) return model # Get the generators gen_G = get_resnet_generator(name="generator_G") gen_F = get_resnet_generator(name="generator_F") # Get the discriminators disc_X = get_discriminator(name="discriminator_X") disc_Y = get_discriminator(name="discriminator_Y") """ Explanation: Build the discriminators The discriminators implement the following architecture: C64-&gt;C128-&gt;C256-&gt;C512 End of explanation """ class CycleGan(keras.Model): def __init__( self, generator_G, generator_F, discriminator_X, discriminator_Y, lambda_cycle=10.0, lambda_identity=0.5, ): super(CycleGan, self).__init__() self.gen_G = generator_G self.gen_F = generator_F self.disc_X = discriminator_X self.disc_Y = discriminator_Y self.lambda_cycle = lambda_cycle self.lambda_identity = lambda_identity def compile( self, gen_G_optimizer, gen_F_optimizer, disc_X_optimizer, disc_Y_optimizer, gen_loss_fn, disc_loss_fn, ): super(CycleGan, self).compile() self.gen_G_optimizer = gen_G_optimizer self.gen_F_optimizer = gen_F_optimizer self.disc_X_optimizer = disc_X_optimizer self.disc_Y_optimizer = disc_Y_optimizer self.generator_loss_fn = gen_loss_fn self.discriminator_loss_fn = disc_loss_fn self.cycle_loss_fn = 
keras.losses.MeanAbsoluteError() self.identity_loss_fn = keras.losses.MeanAbsoluteError() def train_step(self, batch_data): # x is Horse and y is zebra real_x, real_y = batch_data # For CycleGAN, we need to calculate different # kinds of losses for the generators and discriminators. # We will perform the following steps here: # # 1. Pass real images through the generators and get the generated images # 2. Pass the generated images back to the generators to check if we # we can predict the original image from the generated image. # 3. Do an identity mapping of the real images using the generators. # 4. Pass the generated images in 1) to the corresponding discriminators. # 5. Calculate the generators total loss (adverserial + cycle + identity) # 6. Calculate the discriminators loss # 7. Update the weights of the generators # 8. Update the weights of the discriminators # 9. Return the losses in a dictionary with tf.GradientTape(persistent=True) as tape: # Horse to fake zebra fake_y = self.gen_G(real_x, training=True) # Zebra to fake horse -> y2x fake_x = self.gen_F(real_y, training=True) # Cycle (Horse to fake zebra to fake horse): x -> y -> x cycled_x = self.gen_F(fake_y, training=True) # Cycle (Zebra to fake horse to fake zebra) y -> x -> y cycled_y = self.gen_G(fake_x, training=True) # Identity mapping same_x = self.gen_F(real_x, training=True) same_y = self.gen_G(real_y, training=True) # Discriminator output disc_real_x = self.disc_X(real_x, training=True) disc_fake_x = self.disc_X(fake_x, training=True) disc_real_y = self.disc_Y(real_y, training=True) disc_fake_y = self.disc_Y(fake_y, training=True) # Generator adverserial loss gen_G_loss = self.generator_loss_fn(disc_fake_y) gen_F_loss = self.generator_loss_fn(disc_fake_x) # Generator cycle loss cycle_loss_G = self.cycle_loss_fn(real_y, cycled_y) * self.lambda_cycle cycle_loss_F = self.cycle_loss_fn(real_x, cycled_x) * self.lambda_cycle # Generator identity loss id_loss_G = ( self.identity_loss_fn(real_y, same_y) * self.lambda_cycle * self.lambda_identity ) id_loss_F = ( self.identity_loss_fn(real_x, same_x) * self.lambda_cycle * self.lambda_identity ) # Total generator loss total_loss_G = gen_G_loss + cycle_loss_G + id_loss_G total_loss_F = gen_F_loss + cycle_loss_F + id_loss_F # Discriminator loss disc_X_loss = self.discriminator_loss_fn(disc_real_x, disc_fake_x) disc_Y_loss = self.discriminator_loss_fn(disc_real_y, disc_fake_y) # Get the gradients for the generators grads_G = tape.gradient(total_loss_G, self.gen_G.trainable_variables) grads_F = tape.gradient(total_loss_F, self.gen_F.trainable_variables) # Get the gradients for the discriminators disc_X_grads = tape.gradient(disc_X_loss, self.disc_X.trainable_variables) disc_Y_grads = tape.gradient(disc_Y_loss, self.disc_Y.trainable_variables) # Update the weights of the generators self.gen_G_optimizer.apply_gradients( zip(grads_G, self.gen_G.trainable_variables) ) self.gen_F_optimizer.apply_gradients( zip(grads_F, self.gen_F.trainable_variables) ) # Update the weights of the discriminators self.disc_X_optimizer.apply_gradients( zip(disc_X_grads, self.disc_X.trainable_variables) ) self.disc_Y_optimizer.apply_gradients( zip(disc_Y_grads, self.disc_Y.trainable_variables) ) return { "G_loss": total_loss_G, "F_loss": total_loss_F, "D_X_loss": disc_X_loss, "D_Y_loss": disc_Y_loss, } """ Explanation: Build the CycleGAN model We will override the train_step() method of the Model class for training via fit(). 
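The same pattern works for any custom training loop: subclass keras.Model, run the forward pass and loss computation under a GradientTape inside train_step(), apply the gradients yourself, and return a dict of metrics for fit() to report. A minimal stand-alone sketch of the idea (simplified to one network and one optimizer; the class and attribute names here are illustrative, not taken from this example):

```python
import tensorflow as tf
from tensorflow import keras

class CustomTrainStepModel(keras.Model):
    """Minimal illustration of training a single network via fit() with a custom train_step."""

    def __init__(self, network, loss_fn):
        super().__init__()
        self.network = network
        self.loss_fn = loss_fn

    def compile(self, optimizer):
        super().compile()
        self.opt = optimizer

    def train_step(self, data):
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self.network(x, training=True)
            loss = self.loss_fn(y, y_pred)
        grads = tape.gradient(loss, self.network.trainable_variables)
        self.opt.apply_gradients(zip(grads, self.network.trainable_variables))
        # Whatever is returned here shows up in the fit() progress bar
        return {"loss": loss}
```

The CycleGan class in this example does exactly this, just with two generators, two discriminators, and the combined adversarial, cycle, and identity losses.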
End of explanation """ class GANMonitor(keras.callbacks.Callback): """A callback to generate and save images after each epoch""" def __init__(self, num_img=4): self.num_img = num_img def on_epoch_end(self, epoch, logs=None): _, ax = plt.subplots(4, 2, figsize=(12, 12)) for i, img in enumerate(test_horses.take(self.num_img)): prediction = self.model.gen_G(img)[0].numpy() prediction = (prediction * 127.5 + 127.5).astype(np.uint8) img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8) ax[i, 0].imshow(img) ax[i, 1].imshow(prediction) ax[i, 0].set_title("Input image") ax[i, 1].set_title("Translated image") ax[i, 0].axis("off") ax[i, 1].axis("off") prediction = keras.preprocessing.image.array_to_img(prediction) prediction.save( "generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch + 1) ) plt.show() plt.close() """ Explanation: Create a callback that periodically saves generated images End of explanation """ # Loss function for evaluating adversarial loss adv_loss_fn = keras.losses.MeanSquaredError() # Define the loss function for the generators def generator_loss_fn(fake): fake_loss = adv_loss_fn(tf.ones_like(fake), fake) return fake_loss # Define the loss function for the discriminators def discriminator_loss_fn(real, fake): real_loss = adv_loss_fn(tf.ones_like(real), real) fake_loss = adv_loss_fn(tf.zeros_like(fake), fake) return (real_loss + fake_loss) * 0.5 # Create cycle gan model cycle_gan_model = CycleGan( generator_G=gen_G, generator_F=gen_F, discriminator_X=disc_X, discriminator_Y=disc_Y ) # Compile the model cycle_gan_model.compile( gen_G_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5), gen_F_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5), disc_X_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5), disc_Y_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5), gen_loss_fn=generator_loss_fn, disc_loss_fn=discriminator_loss_fn, ) # Callbacks plotter = GANMonitor() checkpoint_filepath = "./model_checkpoints/cyclegan_checkpoints.{epoch:03d}" model_checkpoint_callback = keras.callbacks.ModelCheckpoint( filepath=checkpoint_filepath ) # Here we will train the model for just one epoch as each epoch takes around # 7 minutes on a single P100 backed machine. cycle_gan_model.fit( tf.data.Dataset.zip((train_horses, train_zebras)), epochs=1, callbacks=[plotter, model_checkpoint_callback], ) """ Explanation: Train the end-to-end model End of explanation """ # This model was trained for 90 epochs. We will be loading those weights # here. Once the weights are loaded, we will take a few samples from the test # data and check the model's performance. 
!curl -LO https://github.com/AakashKumarNain/CycleGAN_TF2/releases/download/v1.0/saved_checkpoints.zip !unzip -qq saved_checkpoints.zip # Load the checkpoints weight_file = "./saved_checkpoints/cyclegan_checkpoints.090" cycle_gan_model.load_weights(weight_file).expect_partial() print("Weights loaded successfully") _, ax = plt.subplots(4, 2, figsize=(10, 15)) for i, img in enumerate(test_horses.take(4)): prediction = cycle_gan_model.gen_G(img, training=False)[0].numpy() prediction = (prediction * 127.5 + 127.5).astype(np.uint8) img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8) ax[i, 0].imshow(img) ax[i, 1].imshow(prediction) ax[i, 0].set_title("Input image") ax[i, 0].set_title("Input image") ax[i, 1].set_title("Translated image") ax[i, 0].axis("off") ax[i, 1].axis("off") prediction = keras.preprocessing.image.array_to_img(prediction) prediction.save("predicted_img_{i}.png".format(i=i)) plt.tight_layout() plt.show() """ Explanation: Test the performance of the model. You can use the trained model hosted on Hugging Face Huband try the demo on Hugging Face Spaces. End of explanation """
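Once the checkpoint weights are loaded, only the horse-to-zebra generator is needed for inference, so it can be kept (or its weights saved) on its own. A possible sketch (the file name is an arbitrary choice):

```python
# Keep only the horse -> zebra generator for inference
cycle_gan_model.gen_G.save_weights("gen_G_horse2zebra.h5")

# Translate one preprocessed test image (values already in [-1, 1])
sample = next(iter(test_horses))
fake_zebra = cycle_gan_model.gen_G(sample, training=False)
fake_zebra = (fake_zebra[0].numpy() * 127.5 + 127.5).astype(np.uint8)

plt.imshow(fake_zebra)
plt.axis("off")
plt.show()
```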
mbakker7/timml
notebooks/timml_xsection.ipynb
mit
from pylab import * %matplotlib inline from timml import * """ Explanation: TimML Notebook Cross-sectional model End of explanation """ ml = ModelMaq(kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], \ topboundary='semi', hstar=5) ls = HeadLineSink1D(ml, xls=0, hls=2, layers=0) ml.solve() x = linspace(-200, 200, 101) h = ml.headalongline(x, zeros_like(x)) plot(x, h[0], label='layer 0') plot(x, h[1], label='layer 1') legend(loc='best') """ Explanation: Two-layer model with head-specified line-sink Two-layer aquifer bounded on top by a semi-confined layer. Head above the semi-confining layer is 5. Head line-sink located at $x=0$ with head equal to 2, cutting through layer 0 only. End of explanation """ ml = ModelMaq(kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], topboundary='semi', hstar=5) StripInhomMaq(ml, x1=-inf, x2=-50, kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], npor=0.3, topboundary='semi', hstar=15) StripInhomMaq(ml, x1=-50, x2=50, kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], npor=0.3, topboundary='semi', hstar=13) StripInhomMaq(ml, x1=50, x2=inf, kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], npor=0.3, topboundary='semi', hstar=11) ml.solve() x = linspace(-200, 200, 101) h = ml.headalongline(x, zeros(101)) plot(x, h[0], label='layer 0') plot(x, h[1], label='layer 1') legend(loc='best'); ml.vcontoursf1D(x1=-200, x2=200, nx=100, levels=20) """ Explanation: 1D inhomogeneity Three strips with semi-confined conditions on top of all three End of explanation """ ml = ModelMaq(kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], topboundary='semi', hstar=5) StripInhomMaq(ml, x1=-inf, x2=-50, kaq=[1, 2], z=[3, 2, 1, 0], c=[1000], npor=0.3, topboundary='conf') StripInhomMaq(ml, x1=-50, x2=50, kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], npor=0.3, topboundary='semi', hstar=3) StripInhomMaq(ml, x1=50, x2=inf, kaq=[1, 2], z=[3, 2, 1, 0], c=[1000], npor=0.3, topboundary='conf') rf1 = Constant(ml, -100, 0, 4) rf2 = Constant(ml, 100, 0, 4) ml.solve() x = linspace(-200, 200, 101) h = ml.headalongline(x, zeros_like(x)) Qx, _ = ml.disvecalongline(x, zeros_like(x)) figure(figsize=(12, 4)) subplot(121) plot(x, h[0], label='layer 0') plot(x, h[1], label='layer 1') plot([-100, 100], [4, 4], 'b.', label='fixed heads') legend(loc='best') subplot(122) title('Qx') plot(x, Qx[0], label='layer 0') plot(x, Qx[1], label='layer 1') ml.vcontoursf1D(x1=-200, x2=200, nx=100, levels=20) """ Explanation: Three strips with semi-confined conditions at the top of the strip in the middle only. The head is specified in the strip on the left and in the strip on the right. 
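The fixed head of 4 at $x=-100$ and $x=100$ is imposed by the two Constant elements; a quick check that the solved model honors those conditions (a small sketch reusing the ml object and the headalongline call shown above):

```python
# The Constant elements fix the head in layer 0 to 4 at x = -100 and x = 100;
# the solved model should return (essentially) 4 there.
h_check = ml.headalongline(array([-100.0, 100.0]), zeros(2))
print(h_check[0])
```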
End of explanation """ from timml import * from pylab import * ml = ModelMaq(kaq=[1, 2, 4], z=[5, 4, 3, 2, 1, 0], c=[5000, 1000]) uf = Uflow(ml, 0.002, 0) rf = Constant(ml, 100, 0, 20) ld1 = ImpLineDoublet1D(ml, xld=0, layers=[0, 1]) ml.solve() x = linspace(-100, 100, 101) h = ml.headalongline(x, zeros_like(x)) Qx, _ = ml.disvecalongline(x, zeros_like(x)) figure(figsize=(12, 4)) subplot(121) title('head') plot(x, h[0], label='layer 0') plot(x, h[1], label='layer 1') plot(x, h[2], label='layer 2') legend(loc='best') subplot(122) title('Qx') plot(x, Qx[0], label='layer 0') plot(x, Qx[1], label='layer 1') plot(x, Qx[2], label='layer 2') legend(loc='best') ml.vcontoursf1D(x1=-200, x2=200, nx=100, levels=20) ml = ModelMaq(kaq=[1, 2], z=[3, 2, 1, 0], c=[1000], topboundary='conf') StripInhomMaq(ml, x1=-inf, x2=-50, kaq=[1, 2], z=[3, 2, 1, 0], c=[1000], npor=0.3, topboundary='conf') StripInhomMaq(ml, x1=-50, x2=50, kaq=[1, 2], z=[3, 2, 1, 0], c=[1000], npor=0.3, topboundary='conf', N=0.001) StripInhomMaq(ml, x1=50, x2=inf, kaq=[1, 2], z=[3, 2, 1, 0], c=[1000], npor=0.3, topboundary='conf') Constant(ml, -100, 0, 10) Constant(ml, 100, 0, 10) ml.solve() ml.vcontoursf1D(x1=-100, x2=100, nx=100, levels=20) # ml2 = ModelMaq(kaq=[1, 2], z=[3, 2, 1, 0], c=[1000], topboundary='conf') StripAreaSink(ml2, -50, 50, 0.001) Constant(ml2, -100, 0, 10) ml2.solve() ml2.vcontoursf1D(x1=-100, x2=100, nx=100, levels=20) # x = np.linspace(-100, 100, 100) plt.figure() plt.plot(x, ml.headalongline(x, 0)[0], 'C0') plt.plot(x, ml.headalongline(x, 0)[1], 'C0') plt.plot(x, ml2.headalongline(x, 0)[0], '--C1') plt.plot(x, ml2.headalongline(x, 0)[1], '--C1') ml = Model3D(kaq=1, z=np.arange(5, -0.1, -0.1)) StripInhom3D(ml, x1=-inf, x2=-5, kaq=1, z=np.arange(5, -0.1, -0.1), kzoverkh=0.1) StripInhom3D(ml, x1=-5, x2=5, kaq=1, z=np.arange(5, -0.1, -0.1), kzoverkh=0.1, topboundary='semi', hstar=3, topres=3) StripInhom3D(ml, x1=5, x2=inf, kaq=1, z=np.arange(5, -0.1, -0.1), kzoverkh=0.1) rf1 = Constant(ml, -100, 0, 3.2) rf2 = Constant(ml, 100, 0, 2.97) ml.solve() ml.vcontoursf1D(x1=-20, x2=20, nx=100, levels=20) ml = Model3D(kaq=1, z=np.arange(5, -0.1, -1)) StripInhom3D(ml, x1=-inf, x2=-5, kaq=1, z=np.arange(5, -0.1, -1), kzoverkh=0.1) StripInhom3D(ml, x1=-5, x2=5, kaq=1, z=np.arange(5, -0.1, -1), kzoverkh=0.1, topboundary='semi', hstar=3, topres=3) StripInhom3D(ml, x1=5, x2=inf, kaq=1, z=np.arange(5, -0.1, -1), kzoverkh=0.1) rf1 = Constant(ml, -100, 0, 3.2) rf2 = Constant(ml, 100, 0, 2.97) ml.solve() ml.vcontoursf1D(x1=-20, x2=20, nx=100, levels=20) ml = ModelMaq(kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], topboundary='semi', hstar=5) StripInhomMaq(ml, x1=-inf, x2=-50, kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], npor=0.3, topboundary='semi', hstar=15) StripInhomMaq(ml, x1=-50, x2=50, kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], npor=0.3, topboundary='semi', hstar=13) StripInhomMaq(ml, x1=50, x2=inf, kaq=[1, 2], z=[4, 3, 2, 1, 0], c=[1000, 1000], npor=0.3, topboundary='semi', hstar=11) ml.solve() """ Explanation: Impermeable wall Flow from left to right in three-layer aquifer with impermeable wall in bottom 2 layers End of explanation """
SKA-ScienceDataProcessor/crocodile
examples/notebooks/predict.ipynb
apache-2.0
%matplotlib inline import sys sys.path.append('../..') from matplotlib import pylab from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt pylab.rcParams['figure.figsize'] = 12, 10 import functools import numpy import scipy import scipy.special from crocodile.clean import * from crocodile.synthesis import * from crocodile.simulate import * from util.visualize import * from arl.test_support import create_named_configuration """ Explanation: Illustration of predict End of explanation """ vlas = create_named_configuration('VLAA_north') ha_range = numpy.arange(numpy.radians(0), numpy.radians(90), numpy.radians(90 / 36)) dec = numpy.radians(90) vobs = xyz_to_baselines(vlas.data['xyz'], ha_range, dec) # Wavelength: 5 metres wvl=5 uvw = vobs / wvl ax = plt.figure().add_subplot(111, projection='3d') ax.scatter(uvw[:,0], uvw[:,1] , uvw[:,2]) max_uvw = numpy.amax(uvw) ax.set_xlabel('U [$\lambda$]'); ax.set_xlim((-max_uvw, max_uvw)) ax.set_ylabel('V [$\lambda$]'); ax.set_ylim((-max_uvw, max_uvw)) ax.set_zlabel('W [$\lambda$]'); ax.set_zlim((-max_uvw, max_uvw)) ax.view_init(20, 20); """ Explanation: Generate baseline coordinates for an observation with a hypothetical north-pole VLA over 6 hours, with a visibility recorded every 10 minutes. The phase center is fixed in the zenith. This results in constant $w$-values of basically zero. End of explanation """ import itertools theta = 2*0.01 lam = 18000 grid_size = int(theta * lam) image = numpy.zeros((grid_size, grid_size)) vis = numpy.zeros(len(uvw), dtype=complex) for l,m in theta/10 * numpy.array(list(itertools.product(range(-3, 4), range(-3, 4)))): vis += 1.0*simulate_point(uvw, l, m) image[grid_size//2 + int(m*lam), grid_size//2 + int(l*lam)] += 1.0 show_image(image, "image", theta) """ Explanation: We then make an image with a grid of sources to generate visibilities from. For reference we also use a direct Fourier Transform to generate visibilities: End of explanation """ vis_simple = do_predict(theta, lam, uvw, None, image, simple_predict) print("Simple: ", numpy.sum(numpy.abs(vis_simple - vis)**2) / numpy.sum(numpy.abs(vis)**2)) oversample = 1024 supports = numpy.arange(2,14) mrange = numpy.arange(0.9, 2.0, 0.1) conv_errs = [] for support in supports: condition = support aaf = anti_aliasing_function(grid_size, 0, support) kv1 = kernel_oversample(aaf, oversample, support) kv1 /= numpy.sum(kv1[0]) vis_conv = do_predict(theta, lam, uvw, None, image/numpy.outer(aaf,aaf), conv_predict, kv=kv1) conv_errs.append(numpy.sum(numpy.abs(vis_conv - vis)**2)) print("Convolution %dx%d: " % (support, support), numpy.sum(numpy.abs(vis_conv - vis)**2) / numpy.sum(numpy.abs(vis)**2), " (mean off-centre", numpy.abs(1-numpy.mean(vis_conv / vis)),")") # Show how error changes with support plt.semilogy(supports, conv_errs / numpy.sum(numpy.abs(vis)**2)) plt.xlabel("Support, PSWF c"); plt.ylabel("Error"); plt.show() # Show error distribution plt.scatter(uvw[:,0], uvw[:,1], c=numpy.abs(vis_conv - vis)) plt.scatter(-uvw[:,0], -uvw[:,1], c=numpy.abs(vis_conv - vis)); """ Explanation: Now we can attempt to generate visibilities from the image. The quality of this depends quite a bit on the quality of the used anti-aliasing function: End of explanation """
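For reference, the "exact" visibilities used for comparison in this notebook come from evaluating the measurement equation directly for each point source. A stripped-down sketch of that direct evaluation (my own helper; the sign convention and w-term handling may differ slightly from crocodile's simulate_point):

```python
def direct_point_visibility(uvw, l, m, flux=1.0):
    # Phase-tracking DFT for a single point source at direction cosines (l, m)
    u, v, w = uvw[:, 0], uvw[:, 1], uvw[:, 2]
    n = numpy.sqrt(1.0 - l * l - m * m)
    phase = 2.0 * numpy.pi * (u * l + v * m + w * (n - 1.0))
    return flux * numpy.exp(-1j * phase)

vis_check = numpy.zeros(len(uvw), dtype=complex)
for l, m in theta / 10 * numpy.array(list(itertools.product(range(-3, 4), range(-3, 4)))):
    vis_check += direct_point_visibility(uvw, l, m)
# vis_check should agree closely with the 'vis' built from simulate_point above
```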
adolfoguimaraes/machinelearning
SupervisedLearning/Tutorial03_ArvoreDeDecisao.ipynb
mit
from sklearn import tree import pandas as pd data = pd.read_csv('http://www.data2learning.com/machinelearning/datasets/exemplo_jogar.csv') data """ Explanation: Aprendizado Supervisionado Árvore de Decisão Na primeira parte do curso apresentamos dois algoritmos clássicos de aprendizagem de máquian: o KNN e o algoritmo de regressão linear. O primeiro é voltado para o problema de classificação e o segundo para o de regressão. Neste tutorial, vamos explorar mais uma técnica para o problema de classificação. Para isso, vamos trabalhar com um outro método de classificação: as árvores de decisão. Nas árvores de decisão, o problema de aprendizado de máquina é visto com um problema de busca em um espaço de possíveis soluções. Este método faz uso da estratégia dividir para conquistar. A idéia básica é dividir um problema maior em problemas mais simples, aos quais são aplicadas, recursivamente, a mesma estratégia anterior. As soluções dos subproblemas podem ser combinadas, na forma de uma árvore, para produzir uma solução do problema complexo. A força dessa proposta vem da capacidade de dividir o espaço de instâncias em subespaços e cada subespaço é ajustado usando diferentes modelos. Essa é a idéia básica por trás dos principais algoritmos de ávore de decisão, tais como: ID3, ASSITANT, CART, C4.5. A imagem a seguir mostra um exemplo de árvore de decisão e as regiões de decisão dentro do espaço de soluções definidos pelos atributos $x_1$ e $x_2$. <img src="http://www.data2learning.com/machinelearning/images_notebook/print_arvores_decisao.png" width="70%"> Cada nó da árvore corresponde a uma região no espaço de decisão que foi definido pelos atributos. É importante ressaltar que as regiões nesse espaço são mutuamente excludentes, e a reunião delas compõe todo o espaço definido pelos atributos. A árvore de decisão consegue abranger todo o espaço de instâncias. Isso implica que uma árvore de decisão é capaz de fazer predição para qualquer exemplo de entrada. Algoritmo para construção da árvore de decisão O algoritmo para a construção da árvore de decisão consiste em escolher os atributos que melhor separam os dados. O ideal é que o primeiro atributo seja aquele que quando selecionado já consiga classificar o maior número de instâncias possível. Para isto, vamos calcular, para cada atributo, o que chamamos de ganho de informação (GI). O GI mede a efetividade de um atributo em classificar um conjunto de treinamento. Em outras palavras, permite avaliar o quão bom um atributo é para classificar um conjunto de treinamento. O Ganho de Informação é dado pela seguinte equação: $GI(S,A) = E(S) - \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)}$, onde E é a entropia dada por: $E = \sum_{i}^{c}{-p_i\log_2{p_i}}$ A entropia mede o nível de incerteza que temos sobre um determinado evento. $S$ é o conjunto de dados que estamos considerando na análise do GI do atributo $A$. Para ilustrar, considere um evento qualquer que possui 14 exemplos ($S$). Destes exemplos, 9 são positivos e 5 são negativos. A entropia desse conjunto é dada por: $E(S) = -\frac{9}{14}\log_2{\frac{9}{14}} - \frac{5}{14}\log_2{\frac{5}{14}} = 0.94$ E se tivéssemos 7 instâncias positivas e 7 negativas ([7+, 7-]): $E(S) = -\frac{7}{14}\log_2{\frac{7}{14}} - \frac{7}{14}\log_2{\frac{7}{14}} = 0.999... \approx 1$ E para [0+,14-] ou [14+, 0-]: $E(S) = -\frac{14}{14}\log_2{\frac{14}{14}} = 0$ Entropia 0 significa nenhum grau de incerteza. Já a entropia 1 significa alto grau de incerteza. O cálculo do GI está relacionado com a Entropia. 
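Podemos conferir esses valores de entropia com uma pequena função em Python (um esboço ilustrativo, que não faz parte do cálculo manual):

```python
import numpy as np

def entropia(contagens):
    contagens = np.array(contagens, dtype=float)
    p = contagens / contagens.sum()
    p = p[p > 0]               # 0 * log2(0) é tratado como 0
    return -(p * np.log2(p)).sum()

print(entropia([9, 5]))    # aproximadamente 0.940
print(entropia([7, 7]))    # 1.0
print(entropia([14, 0]))   # 0.0
```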
Na verdade, GI é a redução da entropia, causada pelo particionamento de exemplos de acordo com este atributo. Quanto maior a redução da entropia melhor o atributo. Exemplo 1 Dado um conjunto de treinamento $S$ contendo o atributo Vento (que pode receber dois valores: Forte ou Fraco). Considere também que $S$ possui 9 instâncias positivas e 5 instâncias negativas (classe para classificação). Considere também que 6 dos exemplos positivos e 2 exemplos dos negativos são associados a vento Fraco. Além disso, 3 exemplos negativos e 3 exemplos positivos estão associados a vento Forte. Pretende-se calcular o ganho de informação ao selecionar o valor atributo Vento para a raiz de uma árvore de decisão. Cálculo O conjunto S está dividido da seguinte forma: $S = [9+, 5-]$ e a divisão de acordo com o atributo é dada por: $S_{fraco} \leftarrow [6+, 2-]$ $S_{forte} \leftarrow [3+, 3-]$ Sabendo que o cálculo do GI é dado por: $GI(S,A) = E(S) - \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)}$, onde S é o conjunto de treinamento que vai ser dividido e A é o atributo a ser considerado. O primeiro passo é calcular E(S). $E(S) = -\frac{9}{14}\log_2{\frac{9}{14}} - \frac{5}{14}\log_2{\frac{5}{14}} = 0.940$ Agora vamos trabalhar com os atributos: O atributo Vento possui dois valores: Forte e Fraco. Devemos calcular a entropia para cada um desses atributos: $E(S_{fraco}) = -\frac{2}{8}\log_2{\frac{2}{8}} - \frac{6}{8}\log_2{\frac{6}{8}} = 0.811$ $E(S_{forte}) = -\frac{3}{6}\log_2{\frac{3}{6}} - \frac{3}{6}\log_2{\frac{3}{6}} = 1$ Com isso podemos calcular: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)}$ $- \frac{8}{14}0.811 - \frac{6}{14}1 = -0.892$ Por fim, temos que o Ganho de Informação é: $GI(S,A) = E(S) - \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = 0.940 - 0.892 = 0.048$ Essa valor significa que selecionar esse atributo reduz muito pouco o nível de incerteza que tínhamos. Sendo assim, ele não é um atributo muito bom para ser selecionado. Construindo uma árvore de decisão Exemplo retirado de: http://conteudo.icmc.usp.br/pessoas/mello/courses/scc5879-aula05.pdf Vamos considerar o seguinte dataset: End of explanation """ from sklearn import preprocessing #Ler a base de dados data = pd.read_csv('http://www.data2learning.com/machinelearning/datasets/exemplo_jogar.csv') # O pacote preprocessing possui o método LabelEncoder que faz a transformação automática dos atributos textuais em # valores numéricos. le_P = preprocessing.LabelEncoder() # Panorama le_T = preprocessing.LabelEncoder() # Temperatura le_U = preprocessing.LabelEncoder() # Umidade le_V = preprocessing.LabelEncoder() # Vento le_J = preprocessing.LabelEncoder() # Jogar # Transforma os atributos de string em numéricos. Os números são atribuídos de acordo com a posição do label # na lista que está sendo impressa. 
data.Panorama = le_P.fit_transform(data.Panorama) print("Valores para o atributo Panorama: ", list(le_P.classes_)) data.Temperatura = le_T.fit_transform(data.Temperatura) print("Valores para o atributo Temperatura: ", list(le_T.classes_)) data.Umidade = le_U.fit_transform(data.Umidade) print("Valores para o atributo Umidade: ", list(le_U.classes_)) data.Vento = le_V.fit_transform(data.Vento) print("Valores para o atributo Vento: ", list(le_V.classes_)) data.Jogar = le_J.fit_transform(data.Jogar) print("Valores para o atributo Jogar: ", list(le_J.classes_)) data # Criando X e Y feature_columns = ['Panorama','Temperatura','Umidade','Vento'] X = data[feature_columns] y = data.Jogar """ Explanation: O dataset possui 4 atributos: (1) Panorama, (2) Temperatura, (3) Umidade e (4) Vento. A classe é o atributo Jogar. A tarefa de classificação consiste em dado um panorama sobre o clima, devemos jogar ou não jogar tênis. Para construir uma árvore de decisão o primeiro passo é escolher qual atributo será colocado na raiz da árvore. Dentre os atribuitos, o ideal é que seja escolhido sempre aquele que possui o maior ganho de informação. Vamos calcular GI para cada um dos atributos. Atributo Vento [Forte, Fraco] O GI de informação do atributo vento já foi calculado no exemplo anterior. O valor dele é 0.048. Já sabemos que ele não é um bom atributo, mas só descartamos a escolha deste quando fizermos a comparação de seu valor com o GI dos demais atributos. Atributo Panorama [Ensolarado, Nublado, Chuvoso] O atributo panorama está dividido da seguinte forma na base de treinamento: Ensolarado: $\frac{5}{14}$ onde $[2+, 3-]$ Nublado: $\frac{4}{14}$ onde $[4+, 0-]$ Chuvoso: $\frac{5}{14}$ onde $[3+, 2-]$ Vamos calcular a entropia e o ganho de informação de cada atributo: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{5}{14}E(S_{Ensolarado}) - \frac{4}{14}E(S_{Nublado}) - \frac{5}{14}*E(S_{Chuvoso})$ Calculando a entropia de cada atributo temos: $E(S_{Ensolarado}) = - \frac{2}{5}log_2{\frac{2}{5}} - \frac{3}{5}log_2{\frac{3}{5}} = 0.971$ $E(S_{Nublado}) = - \frac{4}{4}log_2{\frac{4}{4}} - \frac{0}{4}log_2{\frac{0}{4}} = 0$ $E(S_{Chuvoso}) = - \frac{3}{5}log_2{\frac{3}{5}} - \frac{2}{5}log_2{\frac{2}{5}} = 0.971$ Calculando o ganho de informação: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{5}{14}E(S_{Ensolarado}) - \frac{4}{14}E(S_{Nublado}) - \frac{5}{14}*E(S_{Chuvoso})$ $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{5}{14}0.971 - \frac{4}{14}0 - \frac{5}{14}*0.971 = -0.693$ Por fim, temos que o GI de informação para o atributo Panorama é: $GI(S,A) = E(S) - \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)}$ $GI(S,A) = 0.940 - 0.693 = 0.247$ Até o momento, temos: $GI(Vento) = 0.048$ $GI(Panorama) = 0.247$ E o atributo temperatura? E o atributo umidade? 
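Um esboço rápido (reaproveitando a função `entropia` esboçada mais acima e usando apenas as contagens da tabela) permite conferir todos os ganhos de informação de uma vez:

```python
def ganho_de_informacao(total, particoes):
    # total: [positivos, negativos] de S; particoes: [pos, neg] para cada valor do atributo
    n = float(sum(sum(parte) for parte in particoes))
    media = sum((sum(parte) / n) * entropia(parte) for parte in particoes)
    return entropia(total) - media

S = [9, 5]
print(ganho_de_informacao(S, [[6, 2], [3, 3]]))          # Vento: ~0.048
print(ganho_de_informacao(S, [[2, 3], [4, 0], [3, 2]]))  # Panorama: ~0.247
print(ganho_de_informacao(S, [[3, 4], [6, 1]]))          # Umidade: ~0.152
print(ganho_de_informacao(S, [[2, 2], [4, 2], [3, 1]]))  # Temperatura: ~0.03
```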
O Atributo Umidade [Alta, Normal] O atributo umidade está dividido da seguinte forma na base de treinamento: Alta: $\frac{7}{14}$ onde $[3+, 4-]$ Normal: $\frac{7}{14}$ onde $[6+, 1-]$ Vamos calcular: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{7}{14}E(S_{Alta}) - \frac{7}{14}E(S_{Normal})$ Calculando a entropia de cada atributo temos: $E(S_{Alta}) = - \frac{3}{7}log_2{\frac{3}{7}} - \frac{4}{7}log_2{\frac{4}{7}} = 0.985$ $E(S_{Normal}) = - \frac{6}{7}log_2{\frac{6}{7}} - \frac{1}{7}log_2{\frac{1}{7}} = 0.591$ Agora podemos calcular o ganho de informação: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{7}{14}E(S_{Alta}) - \frac{7}{14}E(S_{Normal})$ $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{7}{14}0.985 - \frac{7}{14}0.591 = -0.788$ Por fim, temos que o GI de informação para o atributo Umidade é: $GI(S,A) = E(S) - \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)}$ $GI(S,A) = 0.940 - 0.788 = 0.152$ Atributo Temperatura [Quente, Intermediária, Fria] O atributo Temperatura está dividido da seguinte forma na base de treinamento: Quente: $\frac{4}{14}$ onde $[2+, 2-]$ Intermediária: $\frac{6}{14}$ onde $[4+, 2-]$ Fria: $\frac{4}{14}$ onde $[3+, 1-]$ Vamos calcular: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{4}{14}E(S_{Quente}) - \frac{6}{14}E(S_{Intermediaria}) - \frac{4}{14}*E(S_{Fria})$ Calculando a entropia de cada atributo temos: $E(S_{Quente}) = - \frac{2}{4}log_2{\frac{2}{4}} - \frac{2}{4}log_2{\frac{2}{4}} = 1$ $E(S_{Intermediaria}) = - \frac{4}{6}log_2{\frac{4}{6}} - \frac{2}{6}log_2{\frac{2}{6}} = 0.918$ $E(S_{Fria}) = - \frac{3}{4}log_2{\frac{3}{4}} - \frac{1}{4}log_2{\frac{1}{4}} = 0.811$ Agora podemos calcular o ganho de informação: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{4}{14}E(S_{Quente}) - \frac{6}{14}E(S_{Intermediaria}) - \frac{4}{14}*E(S_{Fria})$ $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{4}{14}1 - \frac{6}{14}0.918 - \frac{4}{14}*0.811 = -0.910$ Por fim, temos que o GI de informação para o atributo Temperatura é: $GI(S,A) = E(S) - \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)}$ $GI(S,A) = 0.940 - 0.910 = 0.030$ Dessa forma, já temos a análise dos 4 atributos: $GI(S, Panorama) = 0.247$ $GI(S, Umidade) = 0.152$ $GI(S, Vento) = 0.048$ $GI(S, Temperatura) = 0.03$ Dos 4 atributos, devemos selecionar aquele que proporciona um maior ganho de informação. Neste caso, escolhemos o atributo Panorama. Ele é que deve ser a raiz da árvore. <img src="http://www.data2learning.com/machinelearning/images_notebook/arvoredecisao_01.png"> Observe que com a escolha desse atributo já conseguimos classificar um conjunto de dados da base de treinamento. Se o panorama for Nublado, sempre jogará tênis. No entanto, ainda não temos certeza quando o panorama for ensolarado ou chuvoso. Nestes casos, devemos analisar novamente os atributos do conjunto de treinamento para decidir qual atributo devemos escolher para os nós ensolarado e chuvoso. Para este cálculo, o atributo Panorama não entra na computação do Ganho de Informação. O processo de cálculo é o mesmo. No entanto, seu conjunto S muda. S passa ser agora, no caso de ensolarado, $ S = {D1, D2, D8, D9, D11}$ que são divididos em $[2+, 3-]$. No caso de chuvoso, o conjunto S passa a ser $S = {D4, D5, D6, D10, D14}$ que são divididos em $[3+, 2-]$. Vamos trabalhar com o caso Ensolarado. 
O primeiro passo é calcular a entropia de S ($E(S)$): $E = \sum_{i}^{c}{-p_i\log_2{p_i}} = -\frac{2}{5}\log_2{\frac{2}{5}} - \frac{3}{5}\log_2{\frac{3}{5}} = 0.971$ Com o valor da Entropia de $S$, calculamos o $GI$ para cada um dos atributos restantes: Umidade, Temperatura e Vento. O Atributo Umidade [Alta, Normal] O atributo umidade está dividido da seguinte forma na base de treinamento: Alta: $\frac{3}{5}$ onde $[0+, 3-]$ Normal: $\frac{2}{5}$ onde $[2+, 0-]$ Vamos calcular: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{3}{5}E(S_{Alta}) - \frac{2}{5}E(S_{Normal})$ Calculando a entropia de cada atributo temos: $E(S_{Alta}) = - \frac{0}{3}log_2{\frac{0}{3}} - \frac{3}{3}log_2{\frac{3}{3}} = 0$ $E(S_{Normal}) = - \frac{2}{2}log_2{\frac{2}{2}} - \frac{0}{2}log_2{\frac{0}{2}} = 0$ Agora podemos calcular o Ganho de Informação: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{3}{5}E(S_{Alta}) - \frac{3}{5}E(S_{Normal})$ $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{3}{5}0 - \frac{3}{5}0 = 0$ Por fim, temos que o GI de informação para o atributo Umidade é: $GI(S,A) = E(S) - \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)}$ $GI(S,A) = 0.971 - 0 = 0.971$ Atributo Temperatura [Quente, Intermediária, Fria] O atributo Temperatura está dividido da seguinte forma na base de treinamento: Quente: $\frac{2}{5}$ onde $[0+, 2-]$ Intermediária: $\frac{2}{5}$ onde $[1+, 1-]$ Fria: $\frac{1}{5}$ onde $[1+, 0-]$ É fácil definir as entropiais individiuais: $E_(S_{Quente}) = 0$ $E_(S_{Intermediaria}) = 1$ $E_(S_{Fria}) = 0$ Vamos calcular o Ganho de Informação: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{2}{5}E(S_{Quente}) - \frac{2}{5}E(S_{Intermediaria}) - \frac{1}{5}*E(S_{Fria})$ $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{2}{5}0 - \frac{2}{5}1 - \frac{1}{5}*0 = -0.4$ Por fim, temos que o GI de informação para o atributo Temperatura é: $GI(S,A) = E(S) - \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)}$ $GI(S,A) = 0.971 - 0.4 = 0.571$ Atributo Vento [Fraco, Forte] O atributo Vento está dividido da seguinte forma na base de treinamento: Fraco: $\frac{3}{5}$ onde $[1+, 2-]$ Forte: $\frac{2}{5}$ onde $[1+, 1-]$ É fácil definir as entropiais individiuais: $E_(S_{Fraco}) = 0.918$ $E_(S_{Forte}) = 1$ Vamos calcular o Ganho de Informação: $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{3}{5}E(S_{Quente}) - \frac{2}{5}E(S_{Intermediaria})$ $- \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)} = -\frac{3}{5}0.918 - \frac{2}{5}1 = -0.951$ Por fim, temos que o GI de informação para o atributo Vento é: $GI(S,A) = E(S) - \sum_{v \in Valores(A)}{\frac{S_v}{S}E(S_v)}$ $GI(S,A) = 0.971 - 0.950 = 0.020$ Dessa forma, já temos a análise dos 3 atributos restantes: $GI(S, Umidade) = 0.971$ $GI(S, Vento) = 0.571$ $GI(S, Temperatura) = 0.020$ Dos 3 atributos, devemos selecionar aquele que proporciona um maior ganho de informação. Neste caso, escolhemos o atributo Umidade. Ele é que deve ser o atributo quando ensolarado. <img src="http://www.data2learning.com/machinelearning/images_notebook/arvoredecisao_02.png"> E no caso do ramo Chuvoso, como fica a separação? Essa tarefa será passada como atividade desta aula. Implementando a árvore de decisão no Scikit Learn Apresentados os conceitos teóricos sobre como as árvores de decisão são formadas, vamos usar a biblioteca do Scikit Learn para construir um classificador baseado neste modelo. O primeiro passo é preparar a nossa base de dados. Vamos trabalhar no primeiro exemplo com a base do exemplo de Jogar Tênis. 
Essa base trabalha com valores dos atributos em texto. No entanto, o scikit learn trabalhar apenas com valores numéricos para os atributos e classes. O código a seguir faz esse processamento da base. Como foi dito anteriormente, a explicação dos métodos utilizados para processar estas bases fogem do escopo deste material. Para quem quiser mais detalhes do processo utilizado nesta etapa, visite este link End of explanation """ from sklearn import tree clf_tree = tree.DecisionTreeClassifier(criterion='entropy') clf_tree = clf_tree.fit(X,y) """ Explanation: Os algoritmos de aprendizagem baseados em árvores é implementado no pacote tree do scikit-learn. Vamos utilizar o DecisionTreeClassifier. O atribuito criterion passado como parâmetro indica qual métrica será utilizada para avaliar a qualidade da divisão. O critério ganho de informação é identificado por entropy. End of explanation """ from IPython.display import Image import pydotplus def print_tree(classifier, fn, cn): dot_data = tree.export_graphviz(classifier, out_file=None, feature_names=fn, class_names=cn, filled=True, rounded=True, special_characters=True) graph = pydotplus.graph_from_dot_data(dot_data) return graph.create_png() """ Explanation: Podemos gerar uma imagem que representa a árvore de decisão criada a partir da função a seguir. End of explanation """ img_png = print_tree(clf_tree, feature_columns, ['Nao','Sim']) Image(img_png, width=500) """ Explanation: A árvore criada não é a mesma criada manualmente porque o scikit-learn trabalhar com dados numéricos. A árvore que criamos manualmente trabalhar com dados categóricos. End of explanation """ # Importando a base da Iris from sklearn.datasets import load_iris dataset_iris = load_iris() X = dataset_iris.data y = dataset_iris.target """ Explanation: Aplicando a árvore de decisão na base da Iris O exemplo anterior foi só para mostrar como podemos construir manualmente a árvore de decisão. Vamos trabalhar agora com uma base mais completa já vista anteriormente: a base Iris. O propósito é treinarmos o modelo e analisar a acurácia deste utilizando a validação cruzada. End of explanation """ # Aplicando a validação cruzada na base from sklearn.model_selection import cross_val_score clf_tree = tree.DecisionTreeClassifier(criterion='entropy') clf_tree = clf_tree.fit(X, y) scores = cross_val_score(clf_tree, X, y, cv=5, scoring='accuracy') print("Acurácia da Ávore de Decisão: ", scores.mean()) """ Explanation: Vamos fazer o treinamento usando validação cruzada com 5 folds. Em seguida vamos imprimir a acurácia na base de treino (a média dos 5 folds). End of explanation """ tree_png = print_tree(clf_tree, dataset_iris.feature_names, dataset_iris.target_names) Image(tree_png, width=800) """ Explanation: Imprimindo a árvore treinada. End of explanation """
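Com a árvore treinada, também podemos classificá-la sobre novas amostras. Um esboço simples (os valores da amostra abaixo são apenas ilustrativos):

```python
# [comprimento da sépala, largura da sépala, comprimento da pétala, largura da pétala], em cm
nova_amostra = [[5.1, 3.5, 1.4, 0.2]]

indice_previsto = clf_tree.predict(nova_amostra)[0]
print("Classe prevista:", dataset_iris.target_names[indice_previsto])

# Também é possível inspecionar a probabilidade estimada para cada classe
print(clf_tree.predict_proba(nova_amostra))
```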
mathinmse/mathinmse.github.io
Lecture-10A-Taylors-Series.ipynb
mit
%matplotlib inline import numpy as np import sympy as sp import matplotlib.pyplot as plt # You can change the default figure size to be a bit larger if you want, # uncomment the next line for that: plt.rc('figure', figsize=(10, 6)) """ Explanation: In-Class Demonstration and Visualization End of explanation """ def plot_taylor_approximations(func, x0=None, orders=(2, 4), xrange=(0,1), yrange=None, npts=200): """Plot the Taylor series approximations to a function at various orders. Parameters ---------- func : a sympy function x0 : float Origin of the Taylor series expansion. If not given, x0=xrange[0]. orders : list List of integers with the orders of Taylor series to show. Default is (2, 4). xrange : 2-tuple or array. Either an (xmin, xmax) tuple indicating the x range for the plot (default is (0, 1)), or the actual array of values to use. yrange : 2-tuple (ymin, ymax) tuple indicating the y range for the plot. If not given, the full range of values will be automatically used. npts : int Number of points to sample the x range with. Default is 200. """ if not callable(func): raise ValueError('func must be callable') if isinstance(xrange, (list, tuple)): x = np.linspace(float(xrange[0]), float(xrange[1]), npts) else: x = xrange if x0 is None: x0 = x[0] xs = sp.Symbol('x') # Make a numpy-callable form of the original function for plotting fx = func(xs) f = sp.lambdify(xs, fx, modules=['numpy']) # We could use latex(fx) instead of str(), but matploblib gets confused # with some of the (valid) latex constructs sympy emits. So we play it safe. plt.plot(x, f(x), label=str(fx), lw=2) # Build the Taylor approximations, plotting as we go apps = {} for order in orders: app = fx.series(xs, x0, n=order).removeO() apps[order] = app # Must be careful here: if the approximation is a constant, we can't # blindly use lambdify as it won't do the right thing. In that case, # evaluate the number as a float and fill the y array with that value. if isinstance(app, sp.numbers.Number): y = np.zeros_like(x) y.fill(app.evalf()) else: fa = sp.lambdify(xs, app, modules=['numpy']) y = fa(x) tex = sp.latex(app).replace('$', '') plt.plot(x, y, label=r'$n=%s:\, %s$' % (order, tex) ) # Plot refinements if yrange is not None: plt.ylim(*yrange) plt.grid() plt.legend(loc='best').get_frame().set_alpha(0.8) # For an expression made from elementary functions, we must first make it into # a callable function, the simplest way is to use the Python lambda construct. # plot_taylor_approximations(lambda x: 1/sp.cos(x), 0, [2,4,6,8], (0, 2*sp.pi), (-5,5)) plot_taylor_approximations(sp.sin, 0, [2, 4, 6, 8], (0, 2*sp.pi), (-2,2)) """ Explanation: The function plot_taylor_approximations included here was written by Fernando Perez and was part of work on the original IPython project. Although attribution seems to have been lost over time, we gratefully acknowledge FP and thank him for this code! End of explanation """ import sympy as sp x, y, z, t, a = sp.symbols('x y z t a') k, m, n = sp.symbols('k m n', integer=True) f, g, h = sp.symbols('f g h', cls=sp.Function) sp.var('a1:6') sp.init_printing() """ Explanation: Lecture 10: Taylor's Series and Discrete Calculus Background It is common in physics and engineering to represent transcendental functions and other nonlinear expressions using a few terms from a Taylor series. This series provides a fast and efficient way to compute quantities such as $\mathrm{sin}(x)$ or $e^x$ to a prescribed error. 
Learning how to calculate the series representation of these functions will provide practical experience with the Taylor series and help the student understand the results of Python methods designed to accelerate and simplify computations. The series can be written generally as: $$f(x) = f{\left (0 \right )} + x \left. \frac{d}{d x} f{\left (x \right )} \right|{\substack{ x=0 }} + \frac{x^{2}}{2} \left. \frac{d^{2}}{d x^{2}} f{\left (x \right )} \right|{\substack{ x=0 }} + \frac{x^{3}}{6} \left. \frac{d^{3}}{d x^{3}} f{\left (x \right )} \right|{\substack{ x=0 }} + \frac{x^{4}}{24} \left. \frac{d^{4}}{d x^{4}} f{\left (x \right )} \right|{\substack{ x=0 }} + \frac{x^{5}}{120} \left. \frac{d^{5}}{d x^{5}} f{\left (x \right )} \right|_{\substack{ x=0 }} + \mathcal{O}\left(x^{6}\right)$$ Of equal importance the Taylor series permits discrete representation of derivatives and is a common way to perform numerical integration of partial and ordinary differential equations. Expansion of a general function $f(x)$ about a point, coupled with algebraic manipulation, will produce expressions that can be used to approximate derivative quantities. Although any order of derivative can be computed, this lesson will focus on first and second derivatives that will be encountered in the diffusion equation. What Skills Will I Learn? You will practice the following skills: Defining and determining the limits of infinite sequences, series and power series. Define the Taylor series and write the general form about any point and to any order. Derive the central and forward difference formulae for numerical derivatives using the Taylor's series. What Steps Should I Take? Learn to use Sympy to define and find the limits of sequences are series. Learn how to approximate transcendental functions about center points of your choosing. Differentiate an explicit series representation of a function to see that the coefficients of such a series can be determined algebraically. Use Sympy to compute a power series symbolically Derive the finite difference expressions for the first and second derivatives. Read the relevant pages from Hornbeck's text on numerical methods. Generate a list of values that approximate the function $f(x)=x^8$ on the domain ${x | 0 \leq x \leq 1}$. Using these values, numerically compute the derivative at your selected grid points and compare it to the analytical solution. Using this technique, examine how the observed error changes as the number of grid points is varied. Visualize and explain the results. Prepare a new notebook (not just modifications to this one) that describes your approach. Optional challenge: A list is one of the fundamental data structures within Python. Numpy (a Python library) and other parts of Python libraries use vectorized computations. From Wikipedia, vectorization is "a style of computer programming where operations are applied to whole arrays instead of individual elements." With this in mind, we certainly can iterate over our list of points and apply the function that you will soon write in an element by element fashion, however, it is a more common practice in Python and other modern languages to write vectorized code. If this is your first exposure to vectorized computation, I recommend two initial strategies: write out your algorithms and use "classic" flow control and iteration to compute the results. From that point you will more easily see the strategy you should use to write vectorized code. 
Using the discrete forms of the first and second derivatives (based on central differences) can you devise a vectorized operation that computes the derivative without looping in Python? A Sucessful Jupyter Notebook Will Present a description of the essential elements of Taylor's series and how to compute numerical derivatives; Identify the audience for which the work is intended; Run the code necessary to compute and visualize the error associated with the second order approximation and the changes in grid point spacing; Provide a narrative and equations to explain why your approach is relevant to solving the problem; Provide references and citations to any others' work you use to complete the assignment; Be checked into your GitHub repository by the due date (one week from assignment). A high quality communication provides an organized, logically progressing, blend of narrative, equations, and code that teaches the reader a particular topic or idea. You will be assessed on: * The functionality of the code (i.e. it should perform the task assigned). * The narrative you present. I should be able to read and learn from it. Choose your audience wisely. * The supporting equations and figures you choose to include. If your notebook is just computer code your assignment will be marked incomplete. Reading and Reference Essential Mathematical Methods for Physicists, H. Weber and G. Arfken, Academic Press, 2003 Advanced engineering Mathematics, E. Kreyszig, John wiley and Sons, 2010 Numerical Recipes, W. Press, Cambridge University Press, 1986 Numerical Methods, R. Hornbeck, Quantum Publishers, 1975 Infinite Sequences Ideas relating to sequences, series, and power series are used in the formulation of integral calculus and in the construction of polynomial representations of functions. The limit of functions will also be investigated as boundary conditions for differential equations. For this reason understanding concepts related to sequences and series are important to review. A sequence is an ordered list of numbers. A list such as the following represents a sequence: $$a_1, a_2, a_3, a_4, \dots, a_n, \dots $$ The sequence maps one value $a_n$ for every integer $n$. It is typical to provide a formula for construction of the nth term in the sequence. While ad-hoc strategies could be used to develop sequences using SymPy and lists in Python, SymPy has a sequence class that can be used. A short demonstration is provided next: End of explanation """ a1 = sp.SeqFormula(n**2, (n,0,5)) list(a1) """ Explanation: It is important to read about SymPy symbols at this time. We can generate a sequence using SeqFormula. End of explanation """ sp.limit_seq(a1.formula, n) """ Explanation: if we want the limit of the sequence at infinity: $$[0, 1, 4, 9, \ldots ]$$ we can use limit_seq: End of explanation """ # Your code here. """ Explanation: DIY: Determine if the following sequences are convergent or divergent. If convergent, what is the limit? $$ \begin{aligned} a_n = & \frac{1}{n} \ a_n = & 1 - (0.2)^n \ a_n = & \frac{1}{2n+1} \end{aligned} $$ End of explanation """ a2 = sp.Sum(1/2**n, (n,0,1)) a2 a2.doit() a4 = sp.Sum(n**2, (n,0,5)) a4 a5 = sp.Sum(k**2, (k, 1, m)) a5 a4.doit() a5.doit() """ Explanation: Infinite Series A series is the sum of a sequence. An infinite series will converge if the partial sums of the series has a finite limit. 
For example, examine the partial sums of the series: $$ \sum^{\infty}_{n=1} \frac{1}{2^n} $$ End of explanation """ M = sp.IndexedBase('M') sp.Sum(M[n]*x**n, (n,0,m)) """ Explanation: A power series is of the form: $$ \sum_{n=0}^{\infty} M_{n} x^{n} = M_0 + M_1 x + M_2 x^2 + \cdots $$ End of explanation """ sp.Sum(M[n]*(x-a)**n, (n,0,m)) """ Explanation: We can define the series about the point $a$ as follows: End of explanation """ sp.series(f(x), x, x0=0) """ Explanation: SymPy has a function that can take SymPy expressions and represent them as power series: End of explanation """ # Your code here. """ Explanation: DIY: Use SymPy to determine series approximations to $e^x$, $sin(x)$, and $cos(x)$ about the point $x=0$. End of explanation """ import sympy as sp sp.init_printing() x, A, B, C, D, E = sp.symbols('x, A, B, C, D, E') """ Explanation: Taylor's Series Below we present a derivation of Taylor's series and small algebraic argument for series representations of functions. In contrast to the ability to use sympy functions without any deeper understanding, these presentations are intended to give you insight into the origin of the series representation and the factors present within each term. While the algebraic presentation isn't a general case, the essential elements of a general polynomial representation are visible. The function $f(x)$ can be expanded into an infinite series or a finite series plus an error term. Assume that the function has a continuous nth derivative over the interval $a \le x \le b$. Integrate the nth derivative n times: $$\int_a^x f^n(x) dx = f^{(n-1)}(x) - f^{(n-1)}(a)$$ The power on the function $f$ in the equation above indicates the order of the derivative. Do this n times and then solve for f(x) to recover Taylor's series. One of the key features in this derivation is that the integral is definite. This derivation is outlined on Wolfram’s Mathworld. As a second exercise, assume that we wish to expand sin(x) about x=0. First, assume that the series exists and can be written as a power series with unknown coefficients. As a first step, differentiate the series and the function we are expanding. Next, let the value of x go to the value of the expansion point and it will be possible to evaluate the coefficients in turn: $$ \sin x = A+Bx+Cx^2+Dx^3+Ex^4 $$ We can choose an expansion point (e.g. $x = 0$) and differentiate to get a set of simultaneous equations permitting determination of the coefficients. The computer algebra system can help us with this activity: End of explanation """ sp.diff(sp.sin(x),x) """ Explanation: To help us get our work done we can use sympy's diff function. Testing this function with a known result, we can write: End of explanation """ orderOfDifferentiation = 1 powerSeries = A+B*x+C*x**2+D*x**3+E*x**4 # Differentiate, element by element, the list [sp.sin(x),powerSeries] [sp.diff(a,x,orderOfDifferentiation) for a in [sp.sin(x),powerSeries]] """ Explanation: A list comprehension is used to organize the results. In each iteration the exact function and the power series are differentiated and stored as an element of a list. The list can be inspected and a set of simultaneous equations can be written down and solved to determine the values of the coefficients. Casting the list as a sympy Matrix object clarifies the correspondance between entries in the list. 
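A familiar concrete case is the geometric series, the power series of $1/(1-x)$ for $|x| < 1$, in which every coefficient $M_n$ equals 1. SymPy recovers it directly (a small illustrative check):

```python
import sympy as sp
x = sp.symbols('x')

# The power series of 1/(1 - x) about x = 0: every M_n equals 1
sp.series(1/(1 - x), x, 0, 6)   # 1 + x + x**2 + x**3 + x**4 + x**5 + O(x**6)
```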
End of explanation """ maximumOrder = 5 funcAndSeries = [[sp.diff(a,x,order) for a in [sp.sin(x),powerSeries]] for order in range(maximumOrder)] funcAndSeries """ Explanation: A list comprehension can be used to organize and extend the results further. We can wrap the list above into another list that changes the order of differentiation each iteration. End of explanation """ sp.Matrix(funcAndSeries) """ Explanation: Casting the results as a sympy Matrix object the list is more easily viewed in the Jupyter notebook: End of explanation """ # Your code here if you feel you need it. """ Explanation: DIY: Determine the coefficients in the above power series. You don't necessarily need to write code to complete this DIY problem. End of explanation """ from sympy import init_printing, symbols, Function init_printing() x, c = symbols("x,c") f = Function("f") f(x).series(x, x0=c, n=3) """ Explanation: Your markdown here. Computing a Taylor's Series Symbolically Using sympy the Taylor's series can be computed symbolically. End of explanation """ x, h, c = sp.symbols("x,h,c") f = sp.Function("f") # the .subs() method replaces occurences of 'x' with something else taylorExpansionPlus = f(x).series(x, x0=c, n=3).removeO().subs(x,c+h) taylorExpansionMinus = f(x).series(x, x0=c, n=3).removeO().subs(x,c-h) taylorExpansionPlus """ Explanation: One of the major uses of Taylor's series in computation is for the evaluation of derivatives. Take note of the fact that the derivatives of a function appear in the evaluation of the series. Computing Derivatives of Discrete Data It may be straightforward to compute the derivative of some functions. For example: $$f(x) = x^2$$ $$f'(x) = 2x$$ In numerical computing situations there is no analytical solution to the problem being solved and therefore no function to integrate or differentiate. The approximate solution is available as a list of discrete points in the domain of the problem's independent variables (e.g. space, time). The values could be represented as a list of numbers: $${f(x_0), f(x_1), f(x_2), ...}$$ The neighboring points $f(x_0)$ and $f(x_1)$ are seperated by a distance $\Delta x = x_1 - x_0$ in the independent variable. Although this will not be apparent from the values, it is implicit in the structure of the data. Taylor's series can be used to compute approximate derivatives of the unknown function directly from the list of points in this situation. We are going to compute a series expansion for an unknown function $f(x)$ in the vicinity of the point $c$ and then examine the relationship between that function and it's derivative quantities at a point $c\pm h$. The goal of the activity is to see if we can find expressions for the derivatives using the data point of interest ($c$) and its neighbors ($c \pm h$). We are going to use the idea of forward and backward differences. Forward differences are computed by expanding an unknown function in a Taylor series about a point “x=c” and then letting x go to c+h. Then, for backward differences, let x go to c-h. Symbolically Computing Forward and Backward Differences In the figure below we indicate the following: the unknown function $f(x)$ as a dashed line the point about which the unknown function is expanded at $x=c$ the distance between successive points is shown as $h$ the approximate values of the function given at the filled squares Imagine that we take the above series expansion and use it to compute the value of the function near the point $c$. 
Let us evaluate this series by adding and subtracting to the independent variable the quantity $h$. To accomplish this we write down the series expansion for our function about the point $c$, then we let the independent variable $x \rightarrow c+h$ and $c-h$.
End of explanation
"""
taylorExpansionMinus
"""
Explanation: Meaning that:
$$ f(c+h) = \frac{h^{2}}{2} \left. \frac{d^{2}}{d \xi_{1}^{2}} f{\left(\xi_{1}\right)} \right|_{\substack{\xi_{1}=c}} + h \left. \frac{d}{d \xi_{1}} f{\left(\xi_{1}\right)} \right|_{\substack{\xi_{1}=c}} + f{\left(c\right)} $$
End of explanation
"""
(taylorExpansionMinus-f(c-h))-(taylorExpansionPlus-f(c+h))
"""
Explanation: Meaning that:
$$ f(c-h) = \frac{h^{2}}{2} \left. \frac{d^{2}}{d \xi_{1}^{2}} f{\left(\xi_{1}\right)} \right|_{\substack{\xi_{1}=c}} - h \left. \frac{d}{d \xi_{1}} f{\left(\xi_{1}\right)} \right|_{\substack{\xi_{1}=c}} + f{\left(c\right)} $$
Solving for First and Second Derivatives
Inspection of the results shows that the signs on the terms containing the first derivative are different between the two expressions. We can use this to our advantage in solving for the derivative terms explicitly. Note that each grouped expression is equal to zero, as is the default in sympy. Find the first derivative in this expression:
End of explanation
"""
(taylorExpansionMinus-f(c-h))+(taylorExpansionPlus-f(c+h))
"""
Explanation: Remember that sympy expressions are zero by default. So this is true:
$$ - 2 h \left. \frac{d}{d \xi_{1}} f{\left(\xi_{1}\right)} \right|_{\substack{\xi_{1}=c}} - f{\left(c - h\right)} + f{\left(c + h\right)} = 0 $$
Find the second derivative in this expression:
End of explanation
"""
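The opening question of this notebook asks for a loop-free way to evaluate these finite-difference formulas on sampled data. The short sketch below is added purely for illustration and is not part of the original assignment; the grid, the test function `np.sin`, and the variable names are my own assumptions.

```python
import numpy as np

# Hypothetical uniformly spaced samples of a known function.
x = np.linspace(0.0, 2.0 * np.pi, 101)
h = x[1] - x[0]
f = np.sin(x)

# First derivative at interior points, O(h^2): (f[i+1] - f[i-1]) / (2 h).
# Array slicing shifts the whole sample set at once, replacing the Python loop.
dfdx = (f[2:] - f[:-2]) / (2.0 * h)

# Second derivative at interior points, O(h^2): (f[i+1] - 2 f[i] + f[i-1]) / h^2.
d2fdx2 = (f[2:] - 2.0 * f[1:-1] + f[:-2]) / h**2

# Compare against the exact derivatives on the interior points x[1:-1].
print(np.max(np.abs(dfdx - np.cos(x[1:-1]))))
print(np.max(np.abs(d2fdx2 + np.sin(x[1:-1]))))
```

Because the shifted slices are subtracted element-wise in compiled NumPy code, no explicit Python loop over grid points is needed.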
opengeostat/pygslib
pygslib/Ipython_templates/.ipynb_checkpoints/nscore_ttable_raw-checkpoint.ipynb
mit
#general imports import matplotlib.pyplot as plt import pygslib from matplotlib.patches import Ellipse import numpy as np import pandas as pd #make the plots inline %matplotlib inline """ Explanation: Testing the nscore transformation table End of explanation """ #get the data in gslib format into a pandas Dataframe mydata= pygslib.gslib.read_gslib_file('../datasets/cluster.dat') # This is a 2D file, in this GSLIB version we require 3D data and drillhole name or domain code # so, we are adding constant elevation = 0 and a dummy BHID = 1 mydata['Zlocation']=0 mydata['bhid']=1 # printing to verify results print (' \n **** 5 first rows in my datafile \n\n ', mydata.head(n=5)) #view data in a 2D projection plt.scatter(mydata['Xlocation'],mydata['Ylocation'], c=mydata['Primary']) plt.colorbar() plt.grid(True) plt.show() """ Explanation: Getting the data ready for work If the data is in GSLIB format you can use the function pygslib.gslib.read_gslib_file(filename) to import the data into a Pandas DataFrame. End of explanation """ print (pygslib.gslib.__dist_transf.ns_ttable.__doc__) """ Explanation: The nscore transformation table function End of explanation """ dtransin,dtransout, error = pygslib.gslib.__dist_transf.ns_ttable(mydata['Primary'],mydata['Declustering Weight']) dttable= pd.DataFrame({'z': dtransin,'y': dtransout}) print (dttable.head(3)) print (dttable.tail(3) ) print ('there was any error?: ', error!=0) dttable.hist(bins=30) """ Explanation: Note that the input can be data or a reference distribution function Normal score transformation table using delustering wight End of explanation """ transin,transout, error = pygslib.gslib.__dist_transf.ns_ttable(mydata['Primary'],np.ones(len(mydata['Primary']))) ttable= pd.DataFrame({'z': transin,'y': transout}) print (ttable.head(3)) print (ttable.tail(3)) ttable.hist(bins=30) """ Explanation: Normal score transformation table without delustering wight End of explanation """ parameters_probplt = { 'iwt' : 0, #int, 1 use declustering weight 'va' : ttable.y, # array('d') with bounds (nd) 'wt' : np.ones(len(ttable.y))} # array('d') with bounds (nd), wight variable (obtained with declust?) parameters_probpltl = { 'iwt' : 0, #int, 1 use declustering weight 'va' : dttable.y, # array('d') with bounds (nd) 'wt' : np.ones(len(dttable.y))} # array('d') with bounds (nd), wight variable (obtained with declust?) binval,cl,xpt025,xlqt,xmed,xuqt,xpt975,xmin,xmax, \ xcvr,xmen,xvar,error = pygslib.gslib.__plot.probplt(**parameters_probplt) binvall,cll,xpt025l,xlqtl,xmedl,xuqtl,xpt975l,xminl, \ xmaxl,xcvrl,xmenl,xvarl,errorl = pygslib.gslib.__plot.probplt(**parameters_probpltl) fig = plt.figure() ax = fig.add_subplot(1,1,1) plt.plot (cl, binval, label = 'gaussian non-declustered') plt.plot (cll, binvall, label = 'gaussian declustered') plt.legend(loc=4) plt.grid(True) fig.show """ Explanation: Comparing results End of explanation """
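As a rough cross-check of what the Fortran routine is doing conceptually, the sketch below builds a declustering-weighted normal-score table with plain NumPy/SciPy. It is only an illustration of the idea (rank the data, map cumulative weight to standard-normal quantiles); it is not the GSLIB implementation, and the function name and the plotting-position convention are my own assumptions.

```python
import numpy as np
from scipy.stats import norm

def normal_score_transform(z, weights=None):
    """Rank-based normal-score transform: map data to standard-normal quantiles.

    Illustrative sketch only; pygslib.gslib.__dist_transf.ns_ttable is the
    reference implementation used in this notebook.
    """
    z = np.asarray(z, dtype=float)
    if weights is None:
        weights = np.ones_like(z)
    weights = np.asarray(weights, dtype=float)
    order = np.argsort(z)
    w = weights[order] / weights.sum()
    # cumulative probability at the centre of each weighted class
    cp = np.cumsum(w) - 0.5 * w
    y = norm.ppf(cp)              # Gaussian quantiles
    return z[order], y            # transformation table: sorted z and matching y

# Hypothetical usage with the declustered data loaded above:
# zt, yt = normal_score_transform(mydata['Primary'].values,
#                                 mydata['Declustering Weight'].values)
```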
csdms/coupling
docs/demos/cem_and_waves.ipynb
mit
%matplotlib inline import numpy as np """ Explanation: <img src="../_static/pymt-logo-header-text.png"> Coastline Evolution Model + Waves Link to this notebook: https://github.com/csdms/pymt/blob/master/docs/demos/cem_and_waves.ipynb Install command: $ conda install notebook pymt_cem This example explores how to use a BMI implementation to couple the Waves component with the Coastline Evolution Model component. Links CEM source code: Look at the files that have deltas in their name. CEM description on CSDMS: Detailed information on the CEM model. Interacting with the Coastline Evolution Model BMI using Python Some magic that allows us to view images within the notebook. End of explanation """ from pymt import models cem, waves = models.Cem(), models.Waves() """ Explanation: Import the Cem class, and instantiate it. In Python, a model with a BMI will have no arguments for its constructor. Note that although the class has been instantiated, it's not yet ready to be run. We'll get to that later! End of explanation """ waves.get_output_var_names() cem.get_input_var_names() """ Explanation: Even though we can't run our waves model yet, we can still get some information about it. Just don't try to run it. Some things we can do with our model are get the names of the input variables. End of explanation """ args = cem.setup(number_of_rows=100, number_of_cols=200, grid_spacing=200.) cem.initialize(*args) args = waves.setup() waves.initialize(*args) """ Explanation: We can also get information about specific variables. Here we'll look at some info about wave direction. This is the main input of the Cem model. Notice that BMI components always use CSDMS standard names. The CSDMS Standard Name for wave angle is, "sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity" Quite a mouthful, I know. With that name we can get information about that variable and the grid that it is on (it's actually not a one). OK. We're finally ready to run the model. Well not quite. First we initialize the model with the BMI initialize method. Normally we would pass it a string that represents the name of an input file. For this example we'll pass None, which tells Cem to use some defaults. End of explanation """ def plot_coast(spacing, z): import matplotlib.pyplot as plt xmin, xmax = 0., z.shape[1] * spacing[1] * 1e-3 ymin, ymax = 0., z.shape[0] * spacing[0] * 1e-3 plt.imshow(z, extent=[xmin, xmax, ymin, ymax], origin='lower', cmap='ocean') plt.colorbar().ax.set_ylabel('Water Depth (m)') plt.xlabel('Along shore (km)') plt.ylabel('Cross shore (km)') """ Explanation: Here I define a convenience function for plotting the water depth and making it look pretty. You don't need to worry too much about it's internals for this tutorial. It just saves us some typing later on. End of explanation """ grid_id = cem.get_var_grid('sea_water__depth') spacing = cem.get_grid_spacing(grid_id) shape = cem.get_grid_shape(grid_id) z = np.empty(shape) cem.get_value('sea_water__depth', out=z) plot_coast(spacing, z) """ Explanation: It generates plots that look like this. We begin with a flat delta (green) and a linear coastline (y = 3 km). The bathymetry drops off linearly to the top of the domain. End of explanation """ qs = np.zeros_like(z) qs[0, 100] = 750 """ Explanation: Allocate memory for the sediment discharge array and set the discharge at the coastal cell to some value. 
End of explanation """ cem.get_var_units('land_surface_water_sediment~bedload__mass_flow_rate') waves.set_value('sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_asymmetry_parameter', .3) waves.set_value('sea_shoreline_wave~incoming~deepwater__ashton_et_al_approach_angle_highness_parameter', .7) cem.set_value("sea_surface_water_wave__height", 2.) cem.set_value("sea_surface_water_wave__period", 7.) """ Explanation: The CSDMS Standard Name for this variable is: "land_surface_water_sediment~bedload__mass_flow_rate" You can get an idea of the units based on the quantity part of the name. "mass_flow_rate" indicates mass per time. You can double-check this with the BMI method function get_var_units. End of explanation """ for time in range(3000): waves.update() angle = waves.get_value('sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity') cem.set_value('sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity', angle) cem.set_value('land_surface_water_sediment~bedload__mass_flow_rate', qs) cem.update() cem.get_value('sea_water__depth', out=z) plot_coast(spacing, z) """ Explanation: Set the bedload flux and run the model. End of explanation """ qs[0, 150] = 500 for time in range(3750): waves.update() angle = waves.get_value('sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity') cem.set_value('sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity', angle) cem.set_value('land_surface_water_sediment~bedload__mass_flow_rate', qs) cem.update() cem.get_value('sea_water__depth', out=z) plot_coast(spacing, z) """ Explanation: Let's add another sediment source with a different flux and update the model. End of explanation """ qs.fill(0.) for time in range(4000): waves.update() angle = waves.get_value('sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity') cem.set_value('sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity', angle) cem.set_value('land_surface_water_sediment~bedload__mass_flow_rate', qs) cem.update() cem.get_value('sea_water__depth', out=z) plot_coast(spacing, z) """ Explanation: Here we shut off the sediment supply completely. End of explanation """
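Since the same update-exchange-update pattern is repeated three times above, it can be bundled into a small helper. This is only a convenience sketch built from the calls already used in this notebook; the function name and argument order are mine and are not part of pymt.

```python
# Convenience wrapper (not part of the original notebook) around the coupling loop.
ANGLE_NAME = 'sea_surface_water_wave__azimuth_angle_of_opposite_of_phase_velocity'

def run_coupled(waves, cem, qs, n_steps):
    """Advance the two components together for n_steps, exchanging the wave
    angle and the bedload flux each step (same pattern as the loops above)."""
    for _ in range(n_steps):
        waves.update()
        angle = waves.get_value(ANGLE_NAME)
        cem.set_value(ANGLE_NAME, angle)
        cem.set_value('land_surface_water_sediment~bedload__mass_flow_rate', qs)
        cem.update()

# e.g. run_coupled(waves, cem, qs, 1000); then fetch 'sea_water__depth' and plot it.
```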
HaoMood/cs231n
assignment1/assignment1sol/features/main.ipynb
gpl-3.0
# Run some setup code import numpy as np import matplotlib.pyplot as plt # This is a bit of magic to make matplotlib figures appear inline in the notebook # rather than in a new window. %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 # bool var. to let program show debug info. debug = True show_img = True """ Explanation: Image features exercise Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the assignments page on the course website. We have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels. All of your work for this exercise will be done in this notebook. End of explanation """ import cifar10 # Load the raw CIFAR-10 data m, m_val, m_dev, m_test = 49000, 1000, 500, 1000 m_spec = (m, m_val, m_dev, m_test) data = cifar10.load_raw('../cifar-10-batches-py', m_spec, debug = debug) X, y, X_test, y_test, X_val, y_val, X_dev, y_dev = data """ Explanation: Load data Similar to previous exercises, we will load CIFAR-10 data from disk. End of explanation """ from features import * num_color_bins = 10 # Number of bins in the color histogram feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)] X_train_feats = extract_features(X, feature_fns, verbose=True) X_val_feats = extract_features(X_val, feature_fns) X_test_feats = extract_features(X_test, feature_fns) # Preprocessing: Subtract the mean feature mean_feat = np.mean(X_train_feats, axis=0, keepdims=True) X_train_feats -= mean_feat X_val_feats -= mean_feat X_test_feats -= mean_feat # Preprocessing: Divide by standard deviation. This ensures that each feature # has roughly the same scale. std_feat = np.std(X_train_feats, axis=0, keepdims=True) X_train_feats /= std_feat X_val_feats /= std_feat X_test_feats /= std_feat # Preprocessing: Add a bias dimension # X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))]) # X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))]) # X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))]) """ Explanation: Extract Features For each image we will compute a Histogram of Oriented Gradients (HOG) as well as a color histogram using the hue channel in HSV color space. We form our final feature vector for each image by concatenating the HOG and color histogram feature vectors. Roughly speaking, HOG should capture the texture of the image while ignoring color information, and the color histogram represents the color of the input image while ignoring texture. As a result, we expect that using both together ought to work better than using either alone. Verifying this assumption would be a good thing to try for the bonus section. The hog_feature and color_histogram_hsv functions both operate on a single image and return a feature vector for that image. 
The extract_features function takes a set of images and a list of feature functions and evaluates each feature function on each image, storing the results in a matrix where each column is the concatenation of all feature vectors for a single image. End of explanation """ from svm import SVM n = X_train_feats.shape[1] K = 10 # Use the validation set to set the learning rate and regularization strength. # # This should be identical to the validation that you did for the SVM; save # # the best trained classifer in best_svm. You might also want to play # # with different numbers of bins in the color histogram. If you are careful # # you should be able to get accuracy of near 0.44 on the validation set. # best_model = None best_val = -1 alpha, lamda, T, B = 1e-7, 3e4, 1000, 200 for lamda in [3e4]: hpara = (alpha, lamda, T, B) print hpara model = SVM(n, K) model.train(X_train_feats, y, hpara, show_img = False, debug = False) train_acc = np.mean(model.predict(X_val_feats) == y_val) val_acc = np.mean(model.predict(X_val_feats) == y_val) print 'train acc.:', train_acc, 'val. acc.:', val_acc if val_acc > best_val: best_model = model best_val = val_acc # Evaluate your trained SVM on the test set print 'test acc.', np.mean(best_model.predict(X_test_feats) == y_test) """ Explanation: Train SVM on features Using the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels. End of explanation """ from nnet import NNet best_model = None best_acc = -1 # TODO: Tune hyperparameters using the validation set. Store your best trained # model in best_net. # # To help debug your network, it may help to use visualizations similar to the # ones we used above; these visualizations will have significant qualitative # differences from the ones we saw above for the poorly tuned network. # # Tweaking hyperparameters by hand can be fun, but you might find it useful to # write code to sweep through possible combinations of hyperparameters # automatically like we did on the previous exercises. n0 = X_train_feats.shape[1] n1 = 500 n2 = 10 alpha, lamda, T, B, rho = 2e-3, 1e-3, 1000, 200, 0.95 for alpha in [1e-2, 1e-1, 1e0]: hpara = (alpha, lamda, T, B, rho) print hpara model = NNet(n0, n1, n2, std = 1e-1) model.train(X_train_feats, y, X_val_feats, y_val, hpara, debug, show_img) # Predict on the val. set val_acc = np.mean(model.predict(X_val_feats) == y_val) print 'val. acc.:', val_acc print '\n' if val_acc > best_acc: best_acc = val_acc best_model = model # Run your neural net classifier on the test set. You should be able to # get more than 55% accuracy. print 'test acc.', np.mean(best_model.predict(X_test_feats) == y_test) """ Explanation: Neural Network on image features Earlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels. For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy. End of explanation """
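The assignment hints at sweeping hyperparameter combinations automatically rather than by hand. Below is a minimal sketch of such a sweep for the feature-based SVM; it reuses the SVM interface exactly as it is called above, while the particular grid values and helper variable names are arbitrary assumptions, not part of the assignment.

```python
# Hedged sketch: grid search over (learning rate, regularization) for the SVM
# trained on image features, using the same interface as the cells above.
import itertools

results = {}
best_val, best_model = -1, None
for alpha, lamda in itertools.product([1e-8, 1e-7, 1e-6], [1e4, 3e4, 1e5]):
    hpara = (alpha, lamda, 1000, 200)        # (lr, reg, iterations, batch size)
    model = SVM(X_train_feats.shape[1], 10)
    model.train(X_train_feats, y, hpara, show_img=False, debug=False)
    val_acc = np.mean(model.predict(X_val_feats) == y_val)
    results[(alpha, lamda)] = val_acc
    if val_acc > best_val:
        best_val, best_model = val_acc, model

for (alpha, lamda), acc in sorted(results.items()):
    print('lr %.0e reg %.0e -> val acc %.3f' % (alpha, lamda, acc))
```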
peterdalle/mij
1 Introduction to Python/Introduction.ipynb
gpl-3.0
print("Hello World!") # We can write comments like this. print("Hello World!") # Or here. You can write anything you want. # It's a good way of describing what your code does. # Note: we can't use upper case. We will get a "NameError". PRINT("Hello World!") """ Explanation: Introduction to Python and Jupyter Notebook Install We will use Anaconda which contains Python, Jupyter Notebooks, and common libraries. Download Anaconda from https://www.continuum.io/downloads. Choose Python 3.5 installer 64-bit (or 32-bit if you are unsure). Hello World! Lets make sure everything is working. End of explanation """ # Get help with a command by putting ? in front of it. ?print # Run file on your computer. %run 00-hello-world.py %%time # How long does the code take to execute? Put %%time at the top to find out. # Loop 10 million times. for i in range(10000000): pass """ Explanation: Jupyter Notebooks Some things are specific to Jupyter Notebooks, like these: Keyboard Shortcuts Ctrl+Enter = run cell ESC = exit out of a cell Tab = autocomplete a = insert cell above b = insert cell below dd = delete cell The Jupyter Notebook files are saved on your computer, in your home directory. Read more at 28 Jupyter Notebook tips, tricks and shortcuts End of explanation """ 10 + 5 """ Explanation: Python as a calculator End of explanation """ Name = "John Doe" # String. Age = 40 # Integer. Height = 180.3 # Float. Married = True # Boolean (True/False). Children = ["Emma", "Thomas"] # List. # Print the contents of variable. Age # Print many at the same time. print(Name) print(Age) print(Height) print(Married) print(Children) """ Explanation: Variabes Assign values to a variable. Variable names must start with a letter. No spaces allowed. Digits are OK. End of explanation """ # Change the value. Age = Age + 10 print(Age) # We can't add numbers to strings. We will get a "TypeError". Name + Age # We need to convert age to string. mytext = Name + str(Age) print(mytext) """ Explanation: Change variables End of explanation """ text = "The quick brown fox jumps over the lazy dog" # Get single characters from the text string. print(text[0]) # Get the first character print(text[4]) # Get the fifth character # Show the characters between 4th and 9th position (the word "quick"). print(text[4:9]) # Replace "dog" with "journalist. text.replace("dog", "journalist") # Make the text upper case, lower case, or title case. print(text.upper()) print(text.lower()) print(text.title()) # Get the length of the text string. len(text) # Find where the position where "dog" starts. print(text.find("dog")) # If we search for something that is not in the text string, -1 will be returned. print(text.find("candy")) # Lets combine many things! # First, we save the text to a new variable so we can keep the old. newtext = text # Then, we do a lot of replacements. newtext = newtext.replace("quick", "depressed") newtext = newtext.replace("fox", "elephant") newtext = newtext.replace("lazy", "even more depressed") newtext = newtext.replace("dog", "journalist") # After that, we title case it to make it look prettier. newtext = newtext.title() print(newtext) """ Explanation: Strings and string manipulation End of explanation """ # First, we assign a value to the variable "Name" and "Age". Name = "John Doe" Age = 40 # Check if age equals 40. if Age == 40: print(Name + " is 40 years old.") else: print(Name + " is not 40 years old.") # Lets change the age. Age = 24 # Check many things at once: Is 40 years old? If not, is he above 40? 
If not, # is he above 20 and below 40? If we can't find any match, run the code under "else". if Age == 40: print(Name + " is 40 years old.") elif Age > 40: print(Name + " is above 40 years old.") elif Age > 20 and Age < 40: print(Name + " is above 20 and below 40.") else: print(Name + " is 20 years or younger.") """ Explanation: If-statements if, elif, and else Don't forget to indent Checks whether expression is True or False Comparisons age == 25 # Equal age != 25 # Does not equal age &gt; 25 # Above 25 age &lt; 25 # Below 25 age &gt;= 25 # 25 or above age &lt;= 25 # 25 or below Combine comparisons or means that any conditional have to be True and means that all conditional have to be True age == 18 and height == 175 or name == "Jonas" End of explanation """ # Loop 5 times. for i in range(5): print(i) # Loop from 0 to 100, by ever 25. for i in range(0, 100, 25): print(i) # We can use for loops on lists. Children = ["Emma", "Thomas", "Nicole"] # Make a list with 3 text strings. for child in Children: print(child) # We can use for loops on numbers. YearsOld = [14, 5, 4] # Make a list with 3 numbers. for age in YearsOld: print(age) """ Explanation: For loops range(to) range(from, to, step) can also use for with list End of explanation """ # A function that we name "calc" that multiples two numbers together. # It takes to variables as input (x and y). # The function then returns the results of the multiplication. def calc(x, y): return(x * y) # Now we just use the name of the function. calc(10, 5) """ Explanation: Functions Group things together with functions. End of explanation """ # Long list with names. names = ["Adelia", "Agustin", "Ahmed", "Alethea", "Aline", "Alton", "Annett", "Arielle", "Billie", "Blake", "Brianne", "Bronwyn", "Charlesetta", "Cleopatra", "Colene", "Corina", "Cruz", "Curt", "Dawn", "Delisa", "Dolores", "Doloris", "Dominic", "Donetta", "Dusti", "Edna", "Eliana", "Elna", "Emma", "Eugenio", "Francie", "Francisca", "Georgeanna", "Gerald", "Gerry", "Gisele", "Hee", "Heidy", "Howard", "Iris", "Irving", "Izola", "Ja", "Jacinta", "Jamey", "Jana", "Jeanie", "Jeffry", "Joeann", "Jonna", "Juliann", "Kacey", "Kandra", "Karissa", "Kecia", "Kisha", "Leila", "Leslee", "Lisbeth", "Lizzette", "Lorie", "Luanna", "Lynelle", "Lynna", "Lynnette", "Maire", "Maricela", "Mario", "Marsha", "Maxwell", "Meggan", "Miquel", "Mireya", "Nakisha", "Natacha", "Nathanial", "Niesha", "Norberto", "Norene", "Patrick", "Phoebe", "Phylicia", "Rashad", "Reatha", "Rebecka", "Renate", "Riley", "Rochel", "Sadie", "Shawanna", "Sherri", "Sunshine", "Tamala", "Tish", "Vincent", "Yun"] # Create a variable that counts the number of names. count = 0 for current_name in names: # For loop that goes through each name in the list "names". if current_name[0] == "E": # Is the first character the letter E? count = count + 1 # Add 1 to the variable "counter". print("There are " + str(count) + " names that starts with the letter 'E'.") # Lets create a function so we can reuse the code. # The function takes to inputs (names and letters), # which is a list of names and the letter we want to look for. def countnames(names, letter): count = 0 # Create a variable that counts the number of names. for current_name in names: # For loop that goes through each name in the list "names". if current_name[0] == letter: # Is the first character the letter stored in the variable "letter"? count = count + 1 # Add 1 to the variable "counter". return(count) # At last, we return the counter. 
# Call the functions with different letters (and save the results to variables). e = countnames(names, "E") f = countnames(names, "F") p = countnames(names, "P") # Print the results. print("There are " + str(e) + " names that starts with the letter 'E'.") print("There are " + str(f) + " names that starts with the letter 'F'.") print("There are " + str(p) + " names that starts with the letter 'P'.") """ Explanation: Combine everything Now it's time to combine everything we learned so far. By combining these techniques we can write quite complex programs. Lets say we have a list of names. How many of the names start with the letter E? This is how you do it in principle: You go through the list, name by name, with a for loop. For each name, you extract the first letter. If the first letter is an "E", you increase a counter with 1. That's it! End of explanation """
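As a small follow-up (not part of the original notebook), the same counting can be done for every starting letter at once with `collections.Counter` from the standard library, instead of calling the function once per letter:

```python
# Count how many names start with each letter, in one pass over the list.
from collections import Counter

first_letter_counts = Counter(name[0] for name in names)

for letter in sorted(first_letter_counts):
    print(letter, first_letter_counts[letter])

# first_letter_counts["E"] gives the same number as countnames(names, "E")
```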
weleen/mxnet
example/notebooks/recommendation_systems/cdl/collaborative-dl.ipynb
apache-2.0
import mxnet as mx import numpy as np import logging import data from math import sqrt from autoencoder import AutoEncoderModel import os """ Explanation: MXNet for Collaborative Deep Learning in Recommender Systems In this tutorial, we build on MXNet to implement the Collaborative Deep Learning (CDL) [1] model for recommender systems. Brief Introduction of CDL In CDL, a probabilistic stacked denoising autoencoder (pSDAE) is connected to a matrix factorization (MF) component. Model training will alternate between pSDAE and MF. In each epoch, a pSDAE with a reconstruction target at the end and a regression target in the bottleneck will be udpated before updating the latent factors U and V in the regularized MF. Below is the graphical model for CDL. The part in the red rectangle is pSDAE and the rest is the MF component regularized by pSDAE. Essentially, the updating will alternate between pSDAE (updating $W^+$) and the MF component (updating $u$ and $v$). Some Notation: $x_0$: the input vectors to pSDAE (corrupted data, e.g., randomly deleting some entries of the input) $x_c$: the reconstruction target vectors (the uncorrupted data) $x_{L/2}$: the output vectors of pSDAE's middle layer (bottleneck layer) $X_0$: the matrix consists of vectors $x_0$ $X_c$: the matrix consists of vectors $x_c$ $W^+$: weights and biases of pSDAE $v$: latent item vectors $u$: latent user vectors $R$: rating matrix ('1' if the article is in the user's library and '0' otherwise) $I$: number of users $J$: number of items $\lambda_u$, $\lambda_v$, $\lambda_w$, $\lambda_n$: hyperparameters Below we show a special case of CDL (from a neural network perspective), where it degenerates to simultaneously training two neural networks overlaid together with a common input layer (the corrupted input) but different output layers. This might be a lot easier to understand for people not familiar with graphical models. The objective function (which we use in this implementation) for this special case is: \begin{align} \mathscr{L}=&-\frac{\lambda_u}{2}\sum\limits_i \|u_i\|2^2 -\frac{\lambda_w}{2}\sum\limits_l(\|W_l\|_F^2+\|b_l\|_2^2)\nonumber \ &-\frac{\lambda_v}{2}\sum\limits_j\|v_j-f_e(X{0,j},W^+)^T\|2^2 \nonumber \ &-\frac{\lambda_n}{2}\sum\limits_j\|f_r(X{0,j},W^+)-X_{c,j}\|2^2 \nonumber \ &-\sum\limits{i,j}\frac{C_{ij}}{2}(R_{ij}-u_i^Tv_j)^2, \end{align} where the encoder function $f_e(\cdot,W^+)$ takes the corrupted content vector $X_{0,j}$ of item $j$ as input and computes the encoding of the item, and the function $f_r(\cdot,W^+)$ also takes $X_{0,j}$ as input, computes the encoding and then the reconstructed content vector of item $j$. Here $\lambda_w$, $\lambda_n$, $\lambda_u$, and $\lambda_v$ are hyperparameters and $C_{ij}$ is a confidence parameter ($C_{ij} = a$ if $R_{ij}=1$ and $C_{ij}=b$ otherwise). For example, if the number of layers $L=6$, $f_e(X_{0,j},W^+)$ is the output of the third layer while $f_r(X_{0,j*},W^+)$ is the output of the sixth layer. To learn CDL, we have to implement the block coordinate descent (BCD) update using numpy/mshadow and call this BCD procedure after each epoch of pSDAE. Besides the MF part, another difference between CDL and conventional deep learning models is that pSDAE has a fixed target at the end and a dynamic target (the latent item factors V) in the bottleneck layer. It might need some hacking to make this work. [1] H. Wang, N. Wang, and D. Yeung. Collaborative deep learning for recommender systems. In KDD, 2015. 
Implementing CDL in MXNet for Recommender Systerms End of explanation """ lambda_u = 1 # lambda_u in CDL lambda_v = 10 # lambda_v in CDL K = 50 p = 1 is_dummy = False num_iter = 100 # about 68 iterations/epoch, the recommendation results at the end need 100 epochs batch_size = 256 np.random.seed(1234) # set seed lv = 1e-2 # lambda_v/lambda_n in CDL dir_save = 'cdl%d' % p """ Explanation: Setting Hyperparameters lambda_u: regularization coefficent for user latent matrix U lambda_v: regularization coefficent for item latent matrix V K: number of latent factors is_dummy: whether to use a dummy dataset for demo num_iter: number of iterations (minibatches) to train (a epoch in the used dataset takes about 68 iterations) batch_size: minibatch size dir_save: directory to save training results lv: lambda_v/lambda_n in CDL; this controls the trade-off between reconstruction error in pSDAE and recommendation accuracy during training End of explanation """ if not os.path.isdir(dir_save): os.system('mkdir %s' % dir_save) fp = open(dir_save+'/cdl.log','w') print 'p%d: lambda_v/lambda_u/ratio/K: %f/%f/%f/%d' % (p,lambda_v,lambda_u,lv,K) fp.write('p%d: lambda_v/lambda_u/ratio/K: %f/%f/%f/%d\n' % \ (p,lambda_v,lambda_u,lv,K)) fp.close() """ Explanation: Create the directory and the log file. End of explanation """ # download data import os data_url='https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/cdl' for filename in ('mult.dat', 'cf-train-1-users.dat', 'cf-test-1-users.dat', 'raw-data.csv'): if not os.path.exists(filename): os.system("wget %s/%s" % (data_url, filename)) # read data X = data.get_mult() R = data.read_user() """ Explanation: Loading Data Here we load the text information (as input to pSDAE) in the file mult.dat and the rating matrix (as input for the MF part) in the file cf-train-1-users.dat. Code for loading the data are packed in data.py. We use the CiteULike dataset here. The input text is bag-of-words vectors normalized to [0,1]. Some details: - task: recommend articles to users - number of users: 5551 - number of items: 16980 - number of ratings for training: ~169800 - number of terms: 8000 End of explanation """ logging.basicConfig(level=logging.INFO) cdl_model = AutoEncoderModel(mx.cpu(2), [X.shape[1],100,K], pt_dropout=0.2, internal_act='relu', output_act='relu') """ Explanation: Network Definition Here we deine the logging level and construct the network. As mentioned before, pSDAE has multiple targets, we used mx.symbol.Group to group both loss. The codes snippets are shown as following, refer to autoencode.py for more details. python fe_loss = mx.symbol.LinearRegressionOutput( data=self.lambda_v_rt*self.encoder, label=self.lambda_v_rt*self.V) fr_loss = mx.symbol.LinearRegressionOutput( data=self.decoder, label=self.data) loss = mx.symbol.Group([fe_loss, fr_loss]) End of explanation """ train_X = X V = np.random.rand(train_X.shape[0],K)/10 lambda_v_rt = np.ones((train_X.shape[0],K))*sqrt(lv) """ Explanation: Initializing Variables Here we initialize several variables. V is the latent item matrix and lambda_v_rt is an ndarray with entries equal to sqrt(lv). We need this lambda_v_rt to hack the trade-off between two targets in pSDAE. End of explanation """ U, V, theta, BCD_loss = cdl_model.finetune(train_X, R, V, lambda_v_rt, lambda_u, lambda_v, dir_save, batch_size, num_iter, 'sgd', l_rate=0.1, decay=0.0, lr_scheduler=mx.misc.FactorScheduler(20000,0.1)) """ Explanation: Training the CDL Train the whole CDL (joint training of pSDAE and the connected MF). 
We use SGD for pSDAE and BCD for the MF part. U is the user latent matrix, V is the item latent matrix, theta is the output of pSDAE's middle layer, and BCD_loss equals rating_loss+reg_loss_for_U+reg_loss_for_V. For demonstration we train for only 100 iterations (about 1.5 epochs) here. The shown recommendations in later parts are results after 100 epochs.
The function finetune below will call the function 'solve' in solver.py, where the customized training loop resides. In the training loop, we call the following code after each epoch of pSDAE to update U and V using BCD. The BCD updating procedure is wrapped up in the function BCD_one. Note that after each epoch, we update U and V for only one iteration.
```python
theta = model.extract_feature(sym[0], args, auxs, data_iter, X.shape[0], xpu).values()[0]
# update U, V and get BCD loss
U, V, BCD_loss = BCD_one(R, U, V, theta, lambda_u, lambda_v, dir_save, True)
# get recon' loss
Y = model.extract_feature(sym[1], args, auxs, data_iter, X.shape[0], xpu).values()[0]
Recon_loss = lambda_v/np.square(lambda_v_rt_old[0,0])*np.sum(np.square(Y-X))/2.0
lambda_v_rt[:] = lambda_v_rt_old[:] # back to normal lambda_v_rt
data_iter = mx.io.NDArrayIter({'data': X, 'V': V, 'lambda_v_rt': lambda_v_rt},
    batch_size=batch_size, shuffle=False, last_batch_handle='pad')
data_iter.reset()
batch = data_iter.next()
```
End of explanation
"""
cdl_model.save(dir_save+'/cdl_pt.arg')
np.savetxt(dir_save+'/final-U.dat.demo',U,fmt='%.5f',comments='')
np.savetxt(dir_save+'/final-V.dat.demo',V,fmt='%.5f',comments='')
np.savetxt(dir_save+'/final-theta.dat.demo',theta,fmt='%.5f',comments='')
"""
Explanation: Saving Models and Parameters
Save the network (pSDAE) parameters, latent matrices, and middle-layer output.
End of explanation
"""
Recon_loss = lambda_v/lv*cdl_model.eval(train_X,V,lambda_v_rt)
print "Training error: %.3f" % (BCD_loss+Recon_loss)
fp = open(dir_save+'/cdl.log','a')
fp.write("Training error: %.3f\n" % (BCD_loss+Recon_loss))
fp.close()
"""
Explanation: Computing Training Error
The training loss consists of the loss in pSDAE and that in MF.
End of explanation
"""
import numpy as np
from data import read_user
def cal_rec(p,cut):
    R_true = read_user('cf-test-1-users.dat')
    dir_save = 'cdl'+str(p)
    #U = np.mat(np.loadtxt(dir_save+'/final-U.dat'))
    #V = np.mat(np.loadtxt(dir_save+'/final-V.dat'))
    R = U*V.T
    num_u = R.shape[0]
    num_hit = 0
    fp = open(dir_save+'/rec-list.dat','w')
    for i in range(num_u):
        if i!=0 and i%100==0:
            print 'User '+str(i)
        l_score = R[i,:].A1.tolist()
        pl = sorted(enumerate(l_score),key=lambda d:d[1],reverse=True)
        l_rec = list(zip(*pl)[0])[:cut]
        s_rec = set(l_rec)
        s_true = set(np.where(R_true[i,:]>0)[1].A1)
        cnt_hit = len(s_rec.intersection(s_true))
        fp.write('%d:' % cnt_hit)
        fp.write(' '.join(map(str,l_rec)))
        fp.write('\n')
    fp.close()
cal_rec(1,8)
"""
Explanation: Generating Recommendations
Load the latent matrices (U and V), compute the predicted ratings R=UV^T, and generate recommendation lists for each user. There are 5551 users in the dataset.
End of explanation """ import csv from data import read_user import numpy as np p = 1 # read predicted results dir_save = 'cdl%d' % p csvReader = csv.reader(open('raw-data.csv','rb')) d_id_title = dict() for i,row in enumerate(csvReader): if i==0: continue d_id_title[i-1] = row[3] R_test = read_user('cf-test-1-users.dat') R_train = read_user('cf-train-1-users.dat') fp = open(dir_save+'/rec-list.dat') lines = fp.readlines() """ Explanation: Show Recommendations Load the article titles (raw-data.csv), ratings (cf-train-1-users.dat and cf-test-1-users.dat), and recommendation lists (rec-list.dat). End of explanation """ user_id = 3 s_test = set(np.where(R_test[user_id,:]>0)[1].A1) l_train = np.where(R_train[user_id,:]>0)[1].A1.tolist() l_pred = map(int,lines[user_id].strip().split(':')[1].split(' ')) print '########## User '+str(user_id)+' ##########\n' print '##### Articles in the Training Sets #####' for i in l_train: print d_id_title[i] print '\n##### Articles Recommended (Correct Ones Marked by Asterisks) #####' for i in l_pred: if i in s_test: print '* '+d_id_title[i] else: print d_id_title[i] fp.close() """ Explanation: Show the titles of articles in the training set and titles of recommended articles. Correctly recommended articles are marked by asterisks. End of explanation """
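The text above refers to the BCD_one routine, whose code lives in the repository's solver module and is not shown in this notebook. Purely as a hedged illustration, the NumPy sketch below implements the standard closed-form block-coordinate updates implied by the CDL objective stated at the top, with confidence weights C_ij = a for observed ratings and b otherwise; the actual BCD_one may differ in details such as sparsity handling, confidence values, and bookkeeping.

```python
import numpy as np

def bcd_one_sketch(R, U, V, theta, lambda_u, lambda_v, a=1.0, b=0.01):
    # Hedged sketch of one BCD pass for the CDL objective above;
    # NOT the repository's BCD_one. C_ij = a where R_ij = 1, else b.
    R = np.asarray(R, dtype=float)
    U = np.array(U, dtype=float)
    V = np.array(V, dtype=float)
    theta = np.asarray(theta, dtype=float)
    K = U.shape[1]
    C = np.where(R > 0, a, b)
    # u_i <- (V^T C_i V + lambda_u I)^-1 V^T C_i R_i
    for i in range(R.shape[0]):
        Ci = C[i]
        A = np.dot((V * Ci[:, None]).T, V) + lambda_u * np.eye(K)
        rhs = np.dot(V.T, Ci * R[i])
        U[i] = np.linalg.solve(A, rhs)
    # v_j <- (U^T C_j U + lambda_v I)^-1 (U^T C_j R_j + lambda_v theta_j)
    for j in range(R.shape[1]):
        Cj = C[:, j]
        A = np.dot((U * Cj[:, None]).T, U) + lambda_v * np.eye(K)
        rhs = np.dot(U.T, Cj * R[:, j]) + lambda_v * theta[j]
        V[j] = np.linalg.solve(A, rhs)
    return U, V
```

The regression toward theta in the V update is what couples the matrix factorization to the bottleneck layer of pSDAE.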
JoseGuzman/myIPythonNotebooks
MachineLearning/PCA_new.ipynb
gpl-2.0
%pylab inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import style
style.use('https://raw.githubusercontent.com/JoseGuzman/minibrain/master/minibrain/paper.mplstyle')
from scipy.stats import norm
"""
Explanation: Table of Contents: 1 Gaussian Distribution (1.1 The variance, 1.2 The covariance, 1.3 Linear transformation); 2 Plot
End of explanation
"""
mu = 0
sigma = 1 # std
rv = norm(loc = mu, scale = sigma)
x = np.linspace(-4,15, 200)
plt.plot(x, rv.pdf(x), label=f'$\mu$ = {mu} $\sigma^2$ = {sigma**2:}')
plt.hist(rv.rvs(10000), density=True, alpha=0.5);
mu = 6
sigma = 2.
rv = norm(loc = mu, scale = sigma)
plt.plot(x, rv.pdf(x), label=f'$\mu$ = {mu} $\sigma^2$ = {sigma**2:}')
plt.hist(rv.rvs(10000), density=True, alpha=0.5);
plt.xlabel('x'); plt.ylabel('PDF')
plt.legend()
"""
Explanation: Gaussian Distribution
The function that describes a Gaussian or normal distribution is:
\begin{equation} \mathcal{N}(x;\mu,\sigma^2) = \frac{1}{\sqrt{2\pi\sigma^2}}\exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right) \end{equation}
The independent variable $x$ is a value of the random variable $X$, and the distribution has two parameters: the mean $\mu$ and the variance $\sigma^2$, which is simply the square of the standard deviation. The mean of a Gaussian distribution shifts the center of the Gaussian, whereas the standard deviation is a measure of its spread. The probability density function (PDF) tells us how likely it is to observe an input $x$ under a given normal distribution. Because it is a probability density, its values are non-negative and it integrates to one.
End of explanation
"""
np.random.seed(2020)
x = np.random.normal(loc = 0, scale = 1, size = 700)
y = np.random.normal(loc = 6, scale = 2, size = 700)
X = np.vstack((x,y)).T
plt.figure(figsize=(4,4))
plt.scatter(x, y, s=3, color='k')
plt.xticks(np.arange(-4,15, 4))
plt.yticks(np.arange(-4,15, 4))
plt.xlabel('x'), plt.ylabel('y', rotation = 0)
plt.grid('on')
def mycov(x,y):
    """
    Computes the covariance between x and y, where x and y are two vectors.
    Assumes x.size == y.size
    """
    cov = lambda x,y: np.sum( ( x-x.mean() ) * ( y-y.mean() ) )/(len(x)-1)
    C = np.array( [ [cov(x,x), cov(x,y) ], [cov(y,x), cov(y,y)] ])
    return(C)
"""
Explanation: The variance
The variance measures the dispersion of a random variable, like the height of a person in a population. It is given by:
\begin{equation} \operatorname{Var}(x) = \sigma^2 = \operatorname{E}\left[(x - \mu)^2 \right] \end{equation}
The variance of a set of equally likely values can be written as
\begin{equation} \sigma^2 = \frac{1}{n-1} \sum_{i=1}^n (x_i - \mu)^2 \end{equation}
where $n$ is the number of samples and $\mu$ is the mean of the random variable $x$.
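As a quick numerical aside (added for illustration, not in the original notebook), this unbiased estimator is exactly what NumPy computes when ddof=1:

```python
samples = np.random.normal(loc=0.0, scale=2.0, size=10000)
manual = np.sum((samples - samples.mean())**2) / (len(samples) - 1)
print(manual, np.var(samples, ddof=1))   # both are close to sigma**2 = 4
```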
The covariance
When we discuss multivariate data we add dimensions, e.g. taking both the height and the weight of a person. The resulting multivariate Gaussian is described by a vector of means and a vector of variances. A further source of variability arises from the tendency of the two dimensions to vary together. This is called the covariance, and it is calculated for the x, y dimensions as follows:
\begin{equation} \operatorname{cov}(x, y) = \frac{\sum_i (x_i-\bar{x})(y_i-\bar{y})}{n-1} \end{equation}
End of explanation
"""
mycov(x,y) #x_var = 0.99, y_var = 3.64 (almost 4)
"""
Explanation: The covariance is always measured between two dimensions. If we have a dataset with more than two dimensions there is more than one covariance to calculate, and we can collect them in a matrix. The covariance matrix is a square matrix: the diagonal entries are the variances of the random variables, and the off-diagonal entries are the covariances.
\begin{equation} \Sigma = \begin{pmatrix} \text{var}(x) & \text{cov}(x,y) \\ \text{cov}(y,x) & \text{var}(y) \end{pmatrix} \end{equation}
End of explanation
"""
X.shape # matrix of 700 observations and 2 dimensions (features)
np.matmul(X.T,X)/(len(x)-1) # didn't subtract the mean!
F = X - np.mean(X, axis=0)
np.matmul(F.T,F)/(len(x)-1) # now the mean is subtracted!
np.cov(x,y) # numpy subtracts the mean
np.cov(X.T)
"""
Explanation: The covariance matrix can be computed from a zero-mean data set as
\begin{equation} C = \frac{X^{T}X}{n-1} \end{equation}
where $X$ holds one observation per row and one variable per column (the layout used in the code above); with variables in rows, the equivalent expression is $XX^{T}/(n-1)$.
End of explanation
"""
C = np.cov(X.T)
X = np.random.multivariate_normal(mean = [0, 0], cov = C, size = 700)
plt.figure(figsize=(4,4))
plt.scatter(X[:,0], X[:,1], s=3, color='k')
plt.xticks(np.arange(-4,15, 4))
plt.yticks(np.arange(-4,15, 4))
plt.xlabel('x'), plt.ylabel('y', rotation = 0)
plt.grid('on')
rho = 0.8
var1, var2 = 2, 6
cov = rho*np.sqrt(var1*var2)
C = np.array([[var1, cov],[cov, var2]])
X = np.random.multivariate_normal(mean = [0, 0], cov = C, size = 700)
plt.figure(figsize=(4,4))
plt.scatter(X[:,0], X[:,1], s=3, color='k')
plt.xticks(np.arange(-4,15, 4))
plt.yticks(np.arange(-4,15, 4))
plt.xlabel('x'), plt.ylabel('y', rotation = 0)
plt.grid('on')
"""
Explanation: The covariance is ultimately related to the correlation coefficient. The correlation coefficient is the normalized version of the covariance and ranges between -1 and 1.
\begin{align} \rho = \frac{\text{cov}(x_1,x_2)}{\sqrt{\sigma_1^2 \sigma_2^2}}. \end{align}
We can use the correlation coefficient to calculate a new covariance matrix.
End of explanation """ C2 = np.cov(X.T) C np.cov(X.T)#C2 """ Explanation: Linear transformation We can transform the covariance matrix with End of explanation """ A = np.array([[0.6165, 0.6154],[0.617554, 0.7165]]) np.linalg.eig(A) np.linalg.norm([-0.73454232, -0.67728279]) A = np.array([[2, 3],[2,1]]) v = np.array([1,3]) np.matmul(A,v) C2 eigVal, eigVect = np.linalg.eig(C2) eigVal eigVect comp0, comp1 = eigVect[:,0]*eigVal[0], eigVect[:,1]*eigVal[1] comp1 plt.figure(figsize=(4,4)) plt.scatter(X[:,0], X[:,1], s=3, color='gray', alpha=0.4) plt.arrow(x=0, y=0, dx =comp0[0], dy =comp0[1], lw=2, color='g') plt.arrow(x=0, y=0, dx =comp1[0], dy =comp1[1], lw=2, color='r') mysum = comp0 + comp1 plt.arrow(x=0, y=0, dx = mysum[0], dy = mysum[1], lw = 2, color='b') #plt.plot([0, e*v[0]],[0, e*v[1]], 'r-', lw=2) plt.xticks(np.arange(-4,9, 4)) plt.yticks(np.arange(-4,9, 4)) plt.xlabel('x'), plt.ylabel('y', rotation =0) plt.grid('on') np.std(X[:,1])**2 eVe[0]*eVa.T[0] """ Explanation: Plot End of explanation """
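To close the loop on the eigen-decomposition above, the short sketch below (an added illustration, not part of the original notebook) projects the data onto the eigenvectors, which is the actual PCA step, and checks that the variance along each new axis matches the corresponding eigenvalue.

```python
# Project the correlated sample X (from the cells above) onto its principal axes.
eigVal, eigVect = np.linalg.eig(np.cov(X.T))
order = np.argsort(eigVal)[::-1]            # sort components by explained variance
eigVal, eigVect = eigVal[order], eigVect[:, order]

scores = (X - X.mean(axis=0)) @ eigVect     # coordinates in the principal axes

print(np.var(scores, axis=0, ddof=1))       # approximately equal to eigVal
print(eigVal / eigVal.sum())                # fraction of variance explained per axis
```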
IS-ENES-Data/submission_forms
dkrz_forms/Templates/CORDEX_submission_form.ipynb
apache-2.0
# Evaluate this cell to identifiy your form from dkrz_forms import form_widgets, form_handler, checks form_infos = form_widgets.show_selection() # Evaluate this cell to generate your personal form instance form_info = form_infos[form_widgets.FORMS.value] sf = form_handler.init_form(form_info) form = sf.sub.entity_out.report """ Explanation: CORDEX ESGF submission form .. outdated .. needs adaption to future use .. General Information Data to be submitted for ESGF data publication must follow the rules outlined in the Cordex Archive Design Document <br /> (https://verc.enes.org/data/projects/documents/cordex-archive-design) Thus file names have to follow the pattern:<br /> VariableName_Domain_GCMModelName_CMIP5ExperimentName_CMIP5EnsembleMember_RCMModelName_RCMVersionID_Frequency[_StartTime-EndTime].nc <br /> Example: tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc The directory structure in which these files are stored follow the pattern:<br /> activity/product/Domain/Institution/ GCMModelName/CMIP5ExperimentName/CMIP5EnsembleMember/ RCMModelName/RCMVersionID/Frequency/VariableName <br /> Example: CORDEX/output/AFR-44/MPI-CSC/MPI-M-MPI-ESM-LR/rcp26/r1i1p1/MPI-CSC-REMO2009/v1/mon/tas/tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc Notice: If your model is not yet registered, please contact contact cordex-registration@cordex.org specifying: Full institution name, Short institution name (acronym), Contact person and e-mail, RCM Name (acronym), Terms of Use (unrestricted or non-commercial only) and the CORDEX domains in which you are interested. At some CORDEX ESGF data centers a 'data submission form' is in use in order to improve initial information exchange between data providers and the data center. The form has to be filled before the publication process can be started. In case you have questions pleas contact the individual data centers: o at DKRZ: cordex@dkrz.de o at SMHI: rossby.cordex@smhi.se Start submission procedure The submission is based on this interactive document consisting of "cells" you can modify and then evaluate evaluation of cells is done by selecting the cell and then press the keys "Shift" + "Enter" <br /> please evaluate the following cell to initialize your form End of explanation """ sf.submission_type = "..." # example: sf.submission_type = "initial_version" """ Explanation: please provide information on the contact person for this CORDEX data submission request Type of submission please specify the type of this data submission: - "initial_version" for first submission of data - "new _version" for a re-submission of previousliy submitted data - "retract" for the request to retract previously submitted data End of explanation """ sf.institution = "..." # example: sf.institution = "Alfred Wegener Institute" """ Explanation: Requested general information Please provide model and institution info as well as an example of a file name institution The value of this field has to equal the value of the optional NetCDF attribute 'institution' (long version) in the data files if the latter is used. End of explanation """ sf.institute_id = "..." # example: sf.institute_id = "AWI" """ Explanation: institute_id The value of this field has to equal the value of the global NetCDF attribute 'institute_id' in the data files and must equal the 4th directory level. It is needed before the publication process is started in order that the value can be added to the relevant CORDEX list of CV1 if not yet there. 
Note that 'institute_id' has to be the first part of 'model_id' End of explanation """ sf.model_id = "..." # example: sf.model_id = "AWI-HIRHAM5" """ Explanation: model_id The value of this field has to be the value of the global NetCDF attribute 'model_id' in the data files. It is needed before the publication process is started in order that the value can be added to the relevant CORDEX list of CV1 if not yet there. Note that it must be composed by the 'institute_id' follwed by the RCM CORDEX model name, separated by a dash. It is part of the file name and the directory structure. End of explanation """ sf.experiment_id = "..." # example: sf.experiment_id = "evaluation" # ["value_a","value_b"] in case of multiple experiments sf.time_period = "..." # example: sf.time_period = "197901-201412" # ["time_period_a","time_period_b"] in case of multiple values """ Explanation: experiment_id and time_period Experiment has to equal the value of the global NetCDF attribute 'experiment_id' in the data files. Time_period gives the period of data for which the publication request is submitted. If you intend to submit data from multiple experiments you may add one line for each additional experiment or send in additional publication request sheets. End of explanation """ sf.example_file_name = "..." # example: sf.example_file_name = "tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc" # Please run this cell as it is to check your example file name structure # to_do: implement submission_form_check_file function - output result (attributes + check_result) form_handler.cordex_file_info(sf,sf.example_file_name) """ Explanation: Example file name Please provide an example file name of a file in your data collection, this name will be used to derive the other End of explanation """ sf.grid_mapping_name = "..." # example: sf.grid_mapping_name = "rotated_latitude_longitude" """ Explanation: information on the grid_mapping the NetCDF/CF name of the data grid ('rotated_latitude_longitude', 'lambert_conformal_conic', etc.), i.e. either that of the native model grid, or 'latitude_longitude' for the regular -XXi grids End of explanation """ sf.grid_as_specified_if_rotated_pole = "..." # example: sf.grid_as_specified_if_rotated_pole = "yes" """ Explanation: Does the grid configuration exactly follow the specifications in ADD2 (Table 1) in case the native grid is 'rotated_pole'? If not, comment on the differences; otherwise write 'yes' or 'N/A'. If the data is not delivered on the computational grid it has to be noted here as well. End of explanation """ sf.data_qc_status = "..." # example: sf.data_qc_status = "QC2-CORDEX" sf.data_qc_comment = "..." # any comment of quality status of the files """ Explanation: Please provide information on quality check performed on the data you plan to submit Please answer 'no', 'QC1', 'QC2-all', 'QC2-CORDEX', or 'other'. 'QC1' refers to the compliancy checker that can be downloaded at http://cordex.dmi.dk. 'QC2' refers to the quality checker developed at DKRZ. If your answer is 'other' give some informations. End of explanation """ sf.terms_of_use = "..." # example: sf.terms_of_use = "unrestricted" """ Explanation: Terms of use Please give the terms of use that shall be asigned to the data. The options are 'unrestricted' and 'non-commercial only'. For the full text 'Terms of Use' of CORDEX data refer to http://cordex.dmi.dk/joomla/images/CORDEX/cordex_terms_of_use.pdf End of explanation """ sf.directory_structure = "..." 
# example: sf.directory_structure = "compliant" """ Explanation: Information on directory structure and data access path (and other information needed for data transport and data publication) If there is any directory structure deviation from the CORDEX standard please specify here. Otherwise enter 'compliant'. Please note that deviations MAY imply that data can not be accepted. End of explanation """ sf.data_path = "..." # example: sf.data_path = "mistral.dkrz.de:/mnt/lustre01/work/bm0021/k204016/CORDEX/archive/" sf.data_information = "..." # ...any info where data can be accessed and transfered to the data center ... " """ Explanation: Give the path where the data reside, for example: blizzard.dkrz.de:/scratch/b/b364034/. If not applicable write N/A and give data access information in the data_information string End of explanation """ sf.exclude_variables_list = "..." # example: sf.exclude_variables_list=["bnds", "vertices"] """ Explanation: Exclude variable list In each CORDEX file there may be only one variable which shall be published and searchable at the ESGF portal (target variable). In order to facilitate publication, all non-target variables are included in a list used by the publisher to avoid publication. A list of known non-target variables is [time, time_bnds, lon, lat, rlon ,rlat ,x ,y ,z ,height, plev, Lambert_Conformal, rotated_pole]. Please enter other variables into the left field if applicable (e.g. grid description variables), otherwise write 'N/A'. End of explanation """ sf.uniqueness_of_tracking_id = "..." # example: sf.uniqueness_of_tracking_id = "yes" """ Explanation: Uniqueness of tracking_id and creation_date In case any of your files is replacing a file already published, it must not have the same tracking_id nor the same creation_date as the file it replaces. Did you make sure that that this is not the case ? Reply 'yes'; otherwise adapt the new file versions. 
End of explanation """ sf.variable_list_day = [ "clh","clivi","cll","clm","clt","clwvi", "evspsbl","evspsblpot", "hfls","hfss","hurs","huss","hus850", "mrfso","mrro","mrros","mrso", "pr","prc","prhmax","prsn","prw","ps","psl", "rlds","rlus","rlut","rsds","rsdt","rsus","rsut", "sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund", "tas","tasmax","tasmin","tauu","tauv","ta200","ta500","ta850","ts", "uas","ua200","ua500","ua850", "vas","va200","va500","va850","wsgsmax", "zg200","zg500","zmla" ] sf.variable_list_mon = [ "clt", "evspsbl", "hfls","hfss","hurs","huss","hus850", "mrfso","mrro","mrros","mrso", "pr","psl", "rlds","rlus","rlut","rsds","rsdt","rsus","rsut", "sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund", "tas","tasmax","tasmin","ta200", "ta500","ta850", "uas","ua200","ua500","ua850", "vas","va200","va500","va850", "zg200","zg500" ] sf.variable_list_sem = [ "clt", "evspsbl", "hfls","hfss","hurs","huss","hus850", "mrfso","mrro","mrros","mrso", "pr","psl", "rlds","rlus","rlut","rsds","rsdt","rsus","rsut", "sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund", "tas","tasmax","tasmin","ta200","ta500","ta850", "uas","ua200","ua500","ua850", "vas","va200","va500","va850", "zg200","zg500" ] sf.variable_list_fx = [ "areacella", "mrsofc", "orog", "rootd", "sftgif","sftlf" ] """ Explanation: Variable list list of variables submitted -- please remove the ones you do not provide: End of explanation """ # simple consistency check report for your submission form report = checks.check_report(sf,"sub") checks.display_report(report) """ Explanation: Check your submission form Please evaluate the following cell to check your submission form. In case of errors, please go up to the corresponden information cells and update your information accordingly. End of explanation """ form_handler.save_form(sf,"..my comment..") # edit my comment info #evaluate this cell if you want a reference to the saved form emailed to you # (only available if you access this form via the DKRZ form hosting service) form_handler.email_form_info() """ Explanation: Save your form your form will be stored (the form name consists of your last name plut your keyword) End of explanation """ form_handler.form_submission(sf) """ Explanation: officially submit your form the form will be submitted to the DKRZ team to process you also receive a confirmation email with a reference to your online form for future modifications End of explanation """
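As an unofficial convenience (not part of the submission procedure, and not a replacement for the QC1/QC2 checkers mentioned above), a light-weight regular-expression test of the CORDEX file-name pattern described at the top of this form could look like the sketch below; the character classes and the example time range are my own assumptions, and the CORDEX controlled vocabularies remain the authoritative reference.

```python
import re

# VariableName_Domain_GCMModelName_ExperimentName_EnsembleMember_RCMModelName_
# RCMVersionID_Frequency[_StartTime-EndTime].nc
CORDEX_PATTERN = re.compile(
    r'^(?P<variable>[A-Za-z0-9]+)_'
    r'(?P<domain>[A-Za-z]+-\d+i?)_'
    r'(?P<gcm>[A-Za-z0-9-]+)_'
    r'(?P<experiment>[A-Za-z0-9]+)_'
    r'(?P<ensemble>r\d+i\d+p\d+)_'
    r'(?P<rcm>[A-Za-z0-9-]+)_'
    r'(?P<version>v[A-Za-z0-9]+)_'
    r'(?P<frequency>[A-Za-z0-9]+)'
    r'(?:_(?P<start>\d+)-(?P<end>\d+))?\.nc$'
)

def check_cordex_filename(filename):
    match = CORDEX_PATTERN.match(filename)
    return match.groupdict() if match else None

print(check_cordex_filename(
    'tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_200101-200512.nc'))
```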
spulido99/Programacion
Margarita/.ipynb_checkpoints/Taller 2 - Archivos y Bases de Datos-checkpoint.ipynb
mit
import pandas as pd df = pd.read_csv('datoss.tsv', sep='\t') df [1:1] """ Explanation: Archivos y Bases de datos La idea de este taller es manipular archivos (leerlos, parsearlos y escribirlos) y hacer lo mismo con bases de datos estructuradas. Ejercicio 1 Baje el archivo de "All associations with added ontology annotations" del GWAS Catalog. + https://www.ebi.ac.uk/gwas/docs/file-downloads Describa las columnas del archivo (que información estamos mirando? Para qué sirve? Por qué la hicieron?) End of explanation """ CREATE TABLE journal ( id_JOURNAL int auto_increment PRIMARY KEY, namejournal varchar (300) ); CREATE TABLE platform ( id_platform INT auto_increment NOT NULL PRIMARY KEY, PLATFORMname VARCHAR(300) ); CREATE TABLE study ( id_STUDY int auto_increment PRIMARY KEY, STUDY text, INITIAL_SAMPLE_SIZE int, REPLICATION_SAMPLE_SIZE int, id_platform int, foreign key (id_platform) references platform(id_platform), P_VALUE int, PVALUE_MLOG int, PVALUE_TEXT varchar (300), CI_text varchar (300), OR_BETA int, MAPPED_TRAIT varchar (300), MAPPED_TRAIT_URI varchar (300) ); CREATE TABLE publicacion ( id_publicacion int auto_increment PRIMARY KEY, PUBMEDID varchar (300), FIRSTAUTHOR varchar (300), id_journal int, foreign key (id_JOURNAL) references journal(id_JOURNAL), LINK varchar (300), STUDY_ACCESSION varchar (300), id_STUDY int, foreign key (id_STUDY) references study(id_STUDY) ); CREATE TABLE enfermedad ( id_enfermedad int auto_increment PRIMARY KEY, DISEASETRAITenfermedad VARCHAR(300) ); CREATE TABLE loci ( id_loci int auto_increment PRIMARY KEY, REGION text, CHR_ID text, CHR_POS text, REPORTED_GENE text, MAPPED_GENE text, UPSTREAM_GENE_ID text, DOWNSTREAM_GENE_ID text, SNP_GENE_IDS text, UPSTREAM_GENE_DISTANCE text, DOWNSTREAM_GENE_DISTANCE text, STRONGEST_SNP_RISK_ALLELE text, SNPS text, MERGED text, SNP_ID_CURRENT text, CONTEXT_ text, INTERGENIC text ); CREATE TABLE enfermedad_loci ( id_enfermedad int, id_loci int, PRIMARY KEY (id_enfermedad, id_loci), foreign key (id_enfermedad) references enfermedad(id_enfermedad), foreign key (id_loci) references loci(id_loci) ); """ Explanation: La base de datos se compone de 37 columnas (Variables). En términos generales los datos exponen la asociación genética de un tipo de patología (eg. Cáncer, Asma) y la información "detallada" sobre el estudio que determinó dicha relación. Esta base de datos permite almacenar de forma ordenada la relación entre rasgos genéticos y fenotípicos. Los estudios contenidos en esta base de datos tienen como intención descubrir la clave para prevenir, diagnosticar y tratar una enfermedad. Esto, también es conocido como estudio de asociación de genoma completo. Qué Entidades (tablas) puede definir? 
Journal Platform Study Publicacion Enfermedad Loci Enfermedad_Loci (Tabala intermedia) Cree la base de datos (copie el código SQL que se usó) End of explanation """ #Leer el archivo df.head(1) import mysql.connector cnx = mysql.connector.connect(user='root', password='fnsQFJ14', host='127.0.0.1', database='new_schema') hostname = '127.0.0.1' username = 'root' password = 'fnsQFJ14' database = 'new_schema' def doQuery( conn ) : cur = conn.cursor() cur.execute( "select * from platform" ) for id_nombre, nombre_plat in cur.fetchall() : print (id_nombre, nombre_plat) myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database ) doQuery( myConnection ) myConnection.close() # Guardar informacion de plataforma def get_platformId(names_platf): cur = myConnection.cursor() cur.execute( """select * from platform where PLATFORM_SNPS_PASSplatformING_QC = "%s" """ % (names_platf) ) id_plat = None for id_, nombre_plat in cur.fetchall() : id_plat = id_ if not id_plat: print("""insert into platform values (NULL, "%s" )""" % (names_platf)) cur.execute("""insert into platform values (NULL, "%s" )""" % (names_platf)) cur.execute("SELECT LAST_INSERT_ID()") id_plat = cur.fetchall()[0][0] myConnection.commit() return id_plat hostname = '127.0.0.1' username = 'root' password = 'fnsQFJ14' database = 'new_schema' myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database ) for index, row in df.iterrows(): plat_name = row['PLATFORM [SNPS PASSING QC]'] platform_id = get_platformId(plat_name) print() myConnection.close() # Guardar informacion de journal def get_journalId(names_journal): cur = myConnection.cursor() cur.execute( """select * from journal where namejournal = "%s" """ % (names_journal) ) id_jour = None for id_, nombre_journ in cur.fetchall() : id_jour = id_ if not id_jour: print("""insert into journal values (NULL, "%s" )""" % (names_journal)) cur.execute("""insert into journal values (NULL, "%s" )""" % (names_journal)) cur.execute("SELECT LAST_INSERT_ID()") id_jour = cur.fetchall()[0][0] myConnection.commit() return id_jour hostname = '127.0.0.1' username = 'root' password = 'fnsQFJ14' database = 'new_schema' myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database ) for index, row in df.iterrows(): journal_name = row['JOURNAL'] journal_id = get_journalId(journal_name) print() myConnection.close() # Guardar información de enferemdad def get_enfermedadlId(names_enferm): cur = myConnection.cursor() cur.execute( """select * from enfermedad where DISEASETRAIT = "%s" """ % (names_enferm) ) id_enfer = None for id_, id_platform in cur.fetchall() : id_enfer = id_ if not id_enfer: print("""insert into enfermedad values (NULL, "%s" )""" % (names_enferm)) cur.execute("""insert into enfermedad values (NULL, "%s" )""" % (names_enferm)) cur.execute("SELECT LAST_INSERT_ID()") id_enfer = cur.fetchall()[0][0] myConnection.commit() return id_enfer hostname = '127.0.0.1' username = 'root' password = 'fnsQFJ14' database = 'new_schema' myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database ) for index, row in df.iterrows(): enf_name = row['DISEASE/TRAIT'] enferm_id = get_enfermedadlId(enf_name) print() myConnection.close() #Guardar información loci def get_lociId(regionloci, chro, chrpos, repor, mappede, geneups, genedows, snp, upstr, downs, riskalle, snps, merged, snpid, contexts, intergenic): cur = myConnection.cursor() cur.execute( """select id_loci, CHR_ID, 
CHR_POS from loci where CHR_ID = "%s" and CHR_POS = "%s" """ %(chro, chrpos)) id_loci = None for id_, chrm, pos in cur.fetchall() : print(id_) id_loci = id_ if not id_loci: #print("""insert into loci values (NULL, "%s", "%s", "%s", "%s", "%s","%s", "%s", "%s", "%s", "%s", "%s","%s", #"%s", "%s", "%s", "%s")""" % (regionloci, chro, chrpos, repor, mappede, geneups, genedows, snp, upstr, downs, #riskalle, snps, merged, snpid, contexts, intergenic)) cur.execute("""insert into loci values (NULL, "%s", "%s", "%s", "%s", "%s","%s", "%s", "%s", "%s", "%s", "%s","%s", "%s", "%s", "%s", "%s")""" % (regionloci, chro, chrpos, repor, mappede, geneups, genedows, snp, upstr, downs, riskalle, snps, merged, snpid, contexts, intergenic)) cur.execute("SELECT LAST_INSERT_ID()") id_loci = cur.fetchall()[0][0] myConnection.commit() return id_loci hostname = '127.0.0.1' username = 'root' password = 'fnsQFJ14' database = 'new_schema' myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database ) for index, row in df.iterrows(): #print(type(loci[0]), type(loci[1]), type(loci[2]), type(loci[3]), type(loci[4]), type(loci[5]), type(loci[6]), #type(loci[7]),type(loci[8]), type(loci[9]), type(loci[10]), type(loci[11]), type(loci[12]), #type(loci[13]), type(loci[14]),type(loci[15])) loci = [row['REGION'], row['CHR_ID'], row['CHR_POS'],row['REPORTED GENE(S)'],row['MAPPED_GENE'],row['UPSTREAM_GENE_ID'], row['DOWNSTREAM_GENE_ID'], row['SNP_GENE_IDS'],row['UPSTREAM_GENE_DISTANCE'],row['DOWNSTREAM_GENE_DISTANCE'], row['STRONGEST SNP-RISK ALLELE'],row['SNPS'], row['MERGED'], row['SNP_ID_CURRENT'], row['CONTEXT'], row['INTERGENIC']] loci_id = get_lociId(loci[0], loci[1], loci[2], loci[3], loci[4], loci[5], loci[6], loci[7], loci[8], loci[9], loci[10], loci[11], loci[12], loci[13], loci[14], loci[15]) print() myConnection.close() #Tabla intermedia: Enfermedad_loci hostname = '127.0.0.1' username = 'root' password = 'fnsQFJ14' database = 'new_schema' myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database ) cur = myConnection.cursor() for index, row in df.iterrows(): enfermedadAux = row['DISEASE/TRAIT'] cur.execute("""select id_enfermedad from enfermedad where DISEASETRAIT = "%s" """ % (enfermedadAux) ) idenferAux=cur.fetchall() cur.execute("""select id_loci from loci where CHR_ID = "%s" and CHR_POS = "%s" """ % (row.CHR_ID, row.CHR_POS)) idlociAux=cur.fetchall() #print(idenferAux[0][0],idlociAux[0][0]) cur.execute("""select * from enfermedad_loci where id_loci = "%d" and id_enfermedad = "%d" """ %(idlociAux[0][0], idenferAux[0][0])) idiguales = cur.fetchall() #print(idiguales) if not idiguales: cur.execute("""insert into enfermedad_loci (id_enfermedad, id_loci) values ("%d", "%d")""" %(idenferAux[0][0], idlociAux[0][0])) myConnection.commit() myConnection.close() """ Explanation: Ejercicio 2 Lea el archivo y guarde la infomación en la base de datos en las tablas que se definidieron en el Ejercicio 1. End of explanation """ #¿Cuáles genes se encuentran relacionados con el cáncer de pulmón? #Se responde a la anterior pregunta y se expone el cromosoma en el cuál se encuentra dicho gen. 
hostname = '127.0.0.1' username = 'root' password = 'fnsQFJ14' database = 'new_schema' def doQuery( conn ) : cur = conn.cursor() myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database ) doQuery( myConnection ) #myConnection.close() cur = myConnection.cursor() cur.execute("""SELECT r.CHR_ID, r.REPORTED_GENE FROM loci r WHERE NOT EXISTS (SELECT * FROM enfermedad i WHERE DISEASETRAIT IN ('Lung cancer') AND NOT EXISTS (SELECT * FROM enfermedad_loci ri WHERE ri.id_loci = r.id_loci AND ri.id_enfermedad = i.id_enfermedad)) """) enferme = cur.fetchall() print(enferme) """ Explanation: Ejercicio 3 Realice de la base de datos una consulta que le responda una pregunta biológica (e.g. qué genes estan relacionados con cuales enfermedades) End of explanation """ import pandas as pd my_df = pd.DataFrame(enferme) my_df.to_csv('output.csv', index=False, header=False) """ Explanation: Ejercicio 4 Guarde el resultado de la consulta anterior en un archivo csv End of explanation """
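"""
Explanation: Optional: the queries above build SQL by string formatting, which breaks on values containing quotes and is vulnerable to SQL injection. Below is a minimal sketch of the same look-up-or-insert pattern using mysql.connector's parameterized queries; the table and column names follow the schema defined above, the credentials are placeholders, and 'Nature Genetics' is only an illustrative value.
End of explanation
"""
import mysql.connector

# Placeholder credentials -- replace with your own MySQL settings
conn = mysql.connector.connect(host='127.0.0.1', user='root',
                               passwd='********', db='new_schema')
cur = conn.cursor()

def get_or_create_journal(cur, name):
    # Parameterized query: the driver handles quoting, no manual string formatting
    cur.execute("SELECT id_JOURNAL FROM journal WHERE namejournal = %s", (name,))
    row = cur.fetchone()
    if row:
        return row[0]
    cur.execute("INSERT INTO journal (namejournal) VALUES (%s)", (name,))
    return cur.lastrowid

journal_id = get_or_create_journal(cur, 'Nature Genetics')
conn.commit()
conn.close()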
pysal/pysal
notebooks/viz/splot/libpysal_non_planar_joins_viz.ipynb
bsd-3-clause
from pysal.lib.weights.contiguity import Queen
import pysal.lib
from pysal.lib import examples
import matplotlib.pyplot as plt
import geopandas as gpd
%matplotlib inline

from pysal.viz.splot.pysal.lib import plot_spatial_weights
"""
Explanation: splot.pysal.lib: assessing neighbors & spatial weights
In spatial analysis workflows it is often important and necessary to assess the relationships of neighbouring polygons. pysal.lib and splot can help you to inspect whether two neighbouring polygons share an edge or not.
Content:
* Imports
* Data Preparation
* Plotting
Imports
End of explanation
"""
examples.explain('rio_grande_do_sul')
"""
Explanation: Data Preparation
Let's first have a look at the dataset with pysal.lib.examples.explain
End of explanation
"""
gdf = gpd.read_file(examples.get_path('map_RS_BR.shp'))
gdf.head()

weights = Queen.from_dataframe(gdf)
"""
Explanation: Load data into a geopandas geodataframe
End of explanation
"""
plot_spatial_weights(weights, gdf)
plt.show()
"""
Explanation: This warning tells us that our dataset contains islands. Islands are polygons that do not share edges and nodes with adjacent polygons. This can, for example, be the case if polygons are truly not neighbouring, e.g. when two land parcels are separated by a river. However, these islands often stem from human error when digitizing features into polygons. This unwanted error can be assessed using the splot.pysal.lib plot_spatial_weights functionality:
Plotting
End of explanation
"""
wnp = pysal.lib.weights.util.nonplanar_neighbors(weights, gdf)
"""
Explanation: This visualization depicts the spatial weights network, a network of connections from the centroid of each polygon to the centroid of its neighbour. As we can see, there are many polygons in the south and west of this map that are not connected to their neighbors. This stems from digitization errors and needs to be corrected before we can start our statistical analysis. pysal.lib offers a tool to correct this error by 'snapping' incorrectly separated neighbours back together:
End of explanation
"""
plot_spatial_weights(wnp, gdf)
plt.show()
"""
Explanation: We can now visualize whether the nonplanar_neighbors tool adjusted all errors correctly:
End of explanation
"""
plot_spatial_weights(wnp, gdf, nonplanar_edge_kws=dict(color='#4393c3'))
plt.show()
"""
Explanation: The visualization shows that all erroneous islands are now stored as neighbors in our new weights object, depicted by the new joins displayed in orange. We can now adapt our visualization to show all joins in the same color, by using the nonplanar_edge_kws argument in plot_spatial_weights:
End of explanation
"""
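"""
Explanation: As an additional, purely numerical check of the correction, the islands attribute of the two weights objects can be compared directly; this short sketch assumes the original Queen weights (weights) and the corrected weights (wnp) from the cells above are still in scope.
End of explanation
"""
# Indices of observations with no neighbors, before and after the correction
print("islands before:", weights.islands)
print("islands after: ", wnp.islands)

# The corrected object should report no islands and at least as many joins
print("total joins before:", sum(len(n) for n in weights.neighbors.values()))
print("total joins after: ", sum(len(n) for n in wnp.neighbors.values()))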
rasilab/ferrin_elife_2017
scripts/run_simulations_based_on_experiment_fits.ipynb
gpl-3.0
%%writefile simulation_run_3.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run3/')) mrnafiles = ['../annotations/simulations/run3/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/run3_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/run3_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/run3_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run3/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(40): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '30', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_3.py', str(index) ]) """ Explanation: Simulation Runs 3 – 16 based on experiment fits <div id="toc-wrapper"><h3> Table of Contents </h3><div id="toc" style="max-height: 787px;"><ol class="toc-item"><li><a href="#Run-3:-Predict-YFP-synthesis-rate-of-initiation-mutants-based-on-fit-of-stall-strengths-to-single-mutant-data-(for-Fig-4,-Fig.-4-supplement-1A–G)">Run 3: Predict YFP synthesis rate of initiation mutants based on fit of stall strengths to single mutant data (for Fig 4, Fig. 4 supplement 1A–G)</a></li><li><a href="#Run-4:-Predict-YFP-synthesis-rate-of-CTC,-CTT-double-mutants-based-on-fit-of-stall-strengths-to-single-mutant-data-(for-Fig.-5-figure-supplement-1A,-1B)">Run 4: Predict YFP synthesis rate of CTC, CTT double mutants based on fit of stall strengths to single mutant data (for Fig. 5 figure supplement 1A, 1B)</a></li><li><a href="#Run-5:-Predict-YFP-synthesis-rate-of-CTC-distance-mutants-based-on-fit-of-stall-strengths-to-single-mutant-data-(for-Fig.-6-figure-supplement-1)">Run 5: Predict YFP synthesis rate of CTC distance mutants based on fit of stall strengths to single mutant data (for Fig. 6 figure supplement 1)</a></li><li><a href="#Run-14:-Predict-YFP-synthesis-rate-of-serine-initiation-mutants-based-on-fit-of-stall-strengths-to-single-mutant-data-(for-Fig.-4-supplement-1H)">Run 14: Predict YFP synthesis rate of serine initiation mutants based on fit of stall strengths to single mutant data (for Fig. 4 supplement 1H)</a></li><li><a href="#Run-15:-Predict-YFP-synthesis-rate-of-serine-double-mutants-based-on-fit-of-stall-strengths-to-single-mutant-data-(for-Fig.-5-figure-supplement-1C)">Run 15: Predict YFP synthesis rate of serine double mutants based on fit of stall strengths to single mutant data (for Fig. 
5 figure supplement 1C)</a></li><li><a href="#Run-16:-Predict-YFP-synthesis-rate-of-CTA-multiple-mutants-based-on-fit-of-stall-strengths-to-single-mutant-data-(for-Fig.-5)">Run 16: Predict YFP synthesis rate of CTA multiple mutants based on fit of stall strengths to single mutant data (for Fig. 5)</a></li><li><a href="#Run-6:-Vary-initiation-rate-systematically-for-3-different-models-(for-Fig.-3A)">Run 6: Vary initiation rate systematically for 3 different models (for Fig. 3A)</a></li><li><a href="#Run-7:-Vary-number-of-stall-sites-systematically-for-3-different-models-(for-Fig.-3B)">Run 7: Vary number of stall sites systematically for 3 different models (for Fig. 3B)</a></li><li><a href="#Run-8:-Vary-distance-between-stall-sites-systematically-for-3-different-models-(for-Fig.-3C)">Run 8: Vary distance between stall sites systematically for 3 different models (for Fig. 3C)</a></li><li><a href="#Run-9-:-Vary-abortive-termination-rate-systematically-for-3-different-models-(for-Fig.-7)">Run 9 : Vary abortive termination rate systematically for 3 different models (for Fig. 7)</a></li><li><a href="#Run-11:-Predict-YFP-synthesis-rate-of-CTA-distance-mutants-based-on-fit-of-stall-strengths-to-single-mutant-data-(for-Fig.-6)">Run 11: Predict YFP synthesis rate of CTA distance mutants based on fit of stall strengths to single mutant data (for Fig. 6)</a></li></ol></div></div> Run 3: Predict YFP synthesis rate of initiation mutants based on fit of stall strengths to single mutant data (for Fig 4, Fig. 4 supplement 1A–G) End of explanation """ %%writefile simulation_run_4.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run4/')) mrnafiles = ['../annotations/simulations/run4/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/run4_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/run4_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/run4_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run4/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(30): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '30', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_4.py', str(index) ]) """ Explanation: Run 4: Predict YFP synthesis rate of CTC, CTT double mutants based on fit of stall strengths to single mutant data (for Fig. 
5 figure supplement 1A, 1B) End of explanation """ %%writefile simulation_run_5.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run5/')) mrnafiles = ['../annotations/simulations/run5/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/run5_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/run5_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/run5_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run5/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(20): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '30', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_5.py', str(index) ]) """ Explanation: Run 5: Predict YFP synthesis rate of CTC distance mutants based on fit of stall strengths to single mutant data (for Fig. 
6 figure supplement 1) End of explanation """ %%writefile simulation_run_14.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run14/')) mrnafiles = ['../annotations/simulations/run14/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/run14_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/run14_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/run14_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/serine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run14/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(15): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '30', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_14.py', str(index) ]) """ Explanation: Run 14: Predict YFP synthesis rate of serine initiation mutants based on fit of stall strengths to single mutant data (for Fig. 
4 supplement 1H) End of explanation """ %%writefile simulation_run_15.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run15/')) mrnafiles = ['../annotations/simulations/run15/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/run15_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/run15_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/run15_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/serine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run15/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(15): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '30', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_15.py', str(index) ]) """ Explanation: Run 15: Predict YFP synthesis rate of serine double mutants based on fit of stall strengths to single mutant data (for Fig. 
5 figure supplement 1C) End of explanation """ %%writefile simulation_run_16.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run16/')) mrnafiles = ['../annotations/simulations/run16/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/run16_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/run16_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/run16_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run16/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(18): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '30', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_16.py', str(index) ]) """ Explanation: Run 16: Predict YFP synthesis rate of CTA multiple mutants based on fit of stall strengths to single mutant data (for Fig. 
5) End of explanation """ %%writefile simulation_run_6.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run6/')) mrnafiles = ['../annotations/simulations/run6/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/runs678_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run6/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(8): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '10', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_6.py', str(index) ]) """ Explanation: Run 6: Vary initiation rate systematically for 3 different models (for Fig. 3A) End of explanation """ %%writefile simulation_run_7.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run7/')) mrnafiles = ['../annotations/simulations/run7/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/runs678_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run7/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(9): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '30', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_7.py', str(index) ]) """ Explanation: Run 7: Vary number of stall sites systematically for 3 different models (for Fig. 
3B) End of explanation """ %%writefile simulation_run_8.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run8/')) mrnafiles = ['../annotations/simulations/run8/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/runs678_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/runs678_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run8/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(238): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '10', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_8.py', str(index) ]) """ Explanation: Run 8: Vary distance between stall sites systematically for 3 different models (for Fig. 
3C) End of explanation """ %%writefile simulation_run_9.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys import numpy as np jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = ['../annotations/simulations/run4/yfp_cta18_initiationrate_0.3.csv'] # use experimental fits for stall strengths from run 4 terminationandStallStrengths = [ ('--5prime-preterm-rate','../processeddata/simulations/run4_stallstrengthfits_5primepreterm.tsv'), ('--background-preterm-rate','../processeddata/simulations/run4_stallstrengthfits_selpreterm.tsv'), ('--selective-preterm-rate','../processeddata/simulations/run4_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: for typeOfTermination, stallstrengthfile in terminationandStallStrengths: for terminationRate in [0] + list(10.0**np.arange(-2,1.01,0.05)): currentindex += 1 if currentindex != jobindex: continue cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.4g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run9/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(200): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '20', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_9.py', str(index) ]) """ Explanation: Run 9 : Vary abortive termination rate systematically for 3 different models (for Fig. 
7) End of explanation """ %%writefile simulation_run_11.py #!/usr/bin/env python #SBATCH --mem=8000 import subprocess as sp import os import sys jobindex = int(sys.argv[1]) currentindex = -1 mrnafiles = list(filter(lambda x: x.startswith('yfp'), os.listdir('../annotations/simulations/run11/'))) mrnafiles = ['../annotations/simulations/run11/' + File for File in mrnafiles] terminationandStallStrengths = [ ('--5prime-preterm-rate',0,'../processeddata/simulations/run11_stallstrengthfits_trafficjam.tsv'), ('--5prime-preterm-rate',1,'../processeddata/simulations/run11_stallstrengthfits_5primepreterm.tsv'), ('--selective-preterm-rate',1,'../processeddata/simulations/run11_stallstrengthfits_selpreterm.tsv'), ] for mrnafile in mrnafiles: currentindex += 1 if currentindex != jobindex: continue for typeOfTermination, terminationRate, stallstrengthfile in terminationandStallStrengths: cmd = ' '.join([ './reporter_simulation', '--trna-concn', '../annotations/simulations/leucine.starvation.average.trna.concentrations.tsv', typeOfTermination, '%0.2g'%terminationRate, '--threshold-accommodation-rate', '22', '--output-prefix','../rawdata/simulations/run11/', '--stall-strength-file', stallstrengthfile, '--input-genes', mrnafile ]) sp.check_output(cmd, shell=True) import subprocess as sp # loop submits each simulation to a different node of the cluster for index in range(20): sp.check_output([ 'sbatch', # for SLURM cluster; this line can be commented out if running locally '-t', '30', # for SLURM cluster; this line can be commented out if running locally '-n', '1', # for SLURM cluster; this line can be commented out if running locally 'simulation_run_11.py', str(index) ]) """ Explanation: Run 11: Predict YFP synthesis rate of CTA distance mutants based on fit of stall strengths to single mutant data (for Fig. 6) End of explanation """
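"""
Explanation: The submission loops above assume a SLURM cluster. If no scheduler is available, a rough serial alternative is to call each job script directly with its job index, as sketched below for run 3 (40 indices); the same pattern applies to the other runs with their own script names and index ranges, at the cost of a much longer runtime.
End of explanation
"""
import subprocess as sp

# Serial local alternative to the sbatch loop for run 3
for index in range(40):
    sp.check_output(['python', 'simulation_run_3.py', str(index)])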
mne-tools/mne-tools.github.io
0.20/_downloads/90c71f0d36d740bc290fd9fa30bddd8c/plot_compute_covariance.ipynb
bsd-3-clause
import os.path as op import mne from mne.datasets import sample """ Explanation: Computing a covariance matrix Many methods in MNE, including source estimation and some classification algorithms, require covariance estimations from the recordings. In this tutorial we cover the basics of sensor covariance computations and construct a noise covariance matrix that can be used when computing the minimum-norm inverse solution. For more information, see minimum_norm_estimates. End of explanation """ data_path = sample.data_path() raw_empty_room_fname = op.join( data_path, 'MEG', 'sample', 'ernoise_raw.fif') raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname) raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif') raw = mne.io.read_raw_fif(raw_fname) raw.set_eeg_reference('average', projection=True) raw.info['bads'] += ['EEG 053'] # bads + 1 more """ Explanation: Source estimation method such as MNE require a noise estimations from the recordings. In this tutorial we cover the basics of noise covariance and construct a noise covariance matrix that can be used when computing the inverse solution. For more information, see minimum_norm_estimates. End of explanation """ raw_empty_room.info['bads'] = [ bb for bb in raw.info['bads'] if 'EEG' not in bb] raw_empty_room.add_proj( [pp.copy() for pp in raw.info['projs'] if 'EEG' not in pp['desc']]) noise_cov = mne.compute_raw_covariance( raw_empty_room, tmin=0, tmax=None) """ Explanation: The definition of noise depends on the paradigm. In MEG it is quite common to use empty room measurements for the estimation of sensor noise. However if you are dealing with evoked responses, you might want to also consider resting state brain activity as noise. First we compute the noise using empty room recording. Note that you can also use only a part of the recording with tmin and tmax arguments. That can be useful if you use resting state as a noise baseline. Here we use the whole empty room recording to compute the noise covariance (tmax=None is the same as the end of the recording, see :func:mne.compute_raw_covariance). Keep in mind that you want to match your empty room dataset to your actual MEG data, processing-wise. Ensure that filters are all the same and if you use ICA, apply it to your empty-room and subject data equivalently. In this case we did not filter the data and we don't use ICA. However, we do have bad channels and projections in the MEG data, and, hence, we want to make sure they get stored in the covariance object. End of explanation """ events = mne.find_events(raw) epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5, baseline=(-0.2, 0.0), decim=3, # we'll decimate for speed verbose='error') # and ignore the warning about aliasing """ Explanation: Now that you have the covariance matrix in an MNE-Python object you can save it to a file with :func:mne.write_cov. Later you can read it back using :func:mne.read_cov. You can also use the pre-stimulus baseline to estimate the noise covariance. First we have to construct the epochs. When computing the covariance, you should use baseline correction when constructing the epochs. Otherwise the covariance matrix will be inaccurate. In MNE this is done by default, but just to be sure, we define it here manually. End of explanation """ noise_cov_baseline = mne.compute_covariance(epochs, tmax=0) """ Explanation: Note that this method also attenuates any activity in your source estimates that resemble the baseline, if you like it or not. 
End of explanation """ noise_cov.plot(raw_empty_room.info, proj=True) noise_cov_baseline.plot(epochs.info, proj=True) """ Explanation: Plot the covariance matrices Try setting proj to False to see the effect. Notice that the projectors in epochs are already applied, so proj parameter has no effect. End of explanation """ noise_cov_reg = mne.compute_covariance(epochs, tmax=0., method='auto', rank=None) """ Explanation: How should I regularize the covariance matrix? The estimated covariance can be numerically unstable and tends to induce correlations between estimated source amplitudes and the number of samples available. The MNE manual therefore suggests to regularize the noise covariance matrix (see cov_regularization_math), especially if only few samples are available. Unfortunately it is not easy to tell the effective number of samples, hence, to choose the appropriate regularization. In MNE-Python, regularization is done using advanced regularization methods described in [1]_. For this the 'auto' option can be used. With this option cross-validation will be used to learn the optimal regularization: End of explanation """ evoked = epochs.average() evoked.plot_white(noise_cov_reg, time_unit='s') """ Explanation: This procedure evaluates the noise covariance quantitatively by how well it whitens the data using the negative log-likelihood of unseen data. The final result can also be visually inspected. Under the assumption that the baseline does not contain a systematic signal (time-locked to the event of interest), the whitened baseline signal should be follow a multivariate Gaussian distribution, i.e., whitened baseline signals should be between -1.96 and 1.96 at a given time sample. Based on the same reasoning, the expected value for the :term:global field power (GFP) &lt;GFP&gt; is 1 (calculation of the GFP should take into account the true degrees of freedom, e.g. ddof=3 with 2 active SSP vectors): End of explanation """ noise_covs = mne.compute_covariance( epochs, tmax=0., method=('empirical', 'shrunk'), return_estimators=True, rank=None) evoked.plot_white(noise_covs, time_unit='s') """ Explanation: This plot displays both, the whitened evoked signals for each channels and the whitened :term:GFP. The numbers in the GFP panel represent the estimated rank of the data, which amounts to the effective degrees of freedom by which the squared sum across sensors is divided when computing the whitened :term:GFP. The whitened :term:GFP also helps detecting spurious late evoked components which can be the consequence of over- or under-regularization. Note that if data have been processed using signal space separation (SSS) [2], gradiometers and magnetometers will be displayed jointly because both are reconstructed from the same SSS basis vectors with the same numerical rank. This also implies that both sensor types are not any longer statistically independent. These methods for evaluation can be used to assess model violations. Additional introductory materials can be found here &lt;https://goo.gl/ElWrxe&gt;. 
For expert use cases or debugging, the alternative estimators can also be compared (see sphx_glr_auto_examples_visualization_plot_evoked_whitening.py and sphx_glr_auto_examples_inverse_plot_covariance_whitening_dspm.py):
End of explanation
"""
noise_covs = mne.compute_covariance(
    epochs, tmax=0., method=('empirical', 'shrunk'), return_estimators=True,
    rank=None)
evoked.plot_white(noise_covs, time_unit='s')
"""
Explanation: This will plot the whitened evoked for the optimal estimator and display the :term:GFPs <GFP> for all estimators as separate lines in the related panel. Finally, let's have a look at the difference between empty-room and event-related covariance, hacking the "method" option so that their types are shown in the legend of the plot.
End of explanation
"""
evoked_meg = evoked.copy().pick('meg')
noise_cov['method'] = 'empty_room'
noise_cov_baseline['method'] = 'baseline'
evoked_meg.plot_white([noise_cov_baseline, noise_cov], time_unit='s')
"""
Explanation: End of explanation
"""
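"""
Explanation: For completeness, a covariance object can be saved to disk with mne.write_cov and read back with mne.read_cov. A minimal sketch is shown below; the file name is arbitrary, but MNE expects it to end in -cov.fif.
End of explanation
"""
# Save the baseline noise covariance to disk and read it back
fname_cov = 'sample_audvis-cov.fif'  # arbitrary output name ending in -cov.fif
mne.write_cov(fname_cov, noise_cov_baseline)
noise_cov_loaded = mne.read_cov(fname_cov)
print(noise_cov_loaded)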
PyDataMadrid2016/Conference-Info
talks_materials/20160409_1815_Lightning_talks/02_poliastro/Going to Mars with Python in 5 minutes.ipynb
mit
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

import astropy.units as u
from astropy import time

from poliastro import iod
from poliastro.plotting import plot
from poliastro.bodies import Sun, Earth
from poliastro.twobody import State
from poliastro import ephem

from jplephem.spk import SPK

ephem.download_kernel("de421")
"""
Explanation: To Mars with Python using poliastro
<img src="http://poliastro.github.io/_images/logo_text.svg" width="70%" />
Juan Luis Cano Rodríguez &#106;&#117;&#97;&#110;&#108;&#117;&#64;&#112;&#121;&#98;&#111;&#110;&#97;&#99;&#99;&#105;&#46;&#111;&#114;&#103;
2016-04-09 PyData Madrid 2016
...in 5 minutes :)
Warning: This is rocket science!
What is Astrodynamics? A branch of Mechanics (itself a branch of Physics) that studies practical problems concerning the motion of rockets and other vehicles in space.
What is poliastro? A pure Python library for Astrodynamics: http://poliastro.github.io/
Let's go to Mars!
<img src="tgo.jpg" style="margin: 0 auto;" width="60%" />
End of explanation
"""
""" return ax.plot(*r[:, None], marker='o', color=color, ms=size, mew=int(border), **kwargs) # I like color color_earth0 = '#3d4cd5' color_earthf = '#525fd5' color_mars0 = '#ec3941' color_marsf = '#ec1f28' color_sun = '#ffcc00' color_orbit = '#888888' color_trans = '#444444' # Plotting orbits is easy! ax.plot(*rr_earth.to(u.km).value, color=color_earth0) ax.plot(*rr_mars.to(u.km).value, color=color_mars0) ax.plot(*rr_trans.to(u.km).value, color=color_trans) ax.plot(*rr_earth_rest.to(u.km).value, ls='--', color=color_orbit) ax.plot(*rr_mars_rest.to(u.km).value, ls='--', color=color_orbit) # But plotting planets feels even magical! plot_body(ax, np.zeros(3), color_sun, 16) plot_body(ax, r0.to(u.km).value, color_earth0, 8) plot_body(ax, rr_earth[:, -1].to(u.km).value, color_earthf, 8) plot_body(ax, rr_mars[:, 0].to(u.km).value, color_mars0, 8) plot_body(ax, rf.to(u.km).value, color_marsf, 8) # Add some text ax.text(-0.75e8, -3.5e8, -1.5e8, "ExoMars mission:\nfrom Earth to Mars", size=20, ha='center', va='center', bbox={"pad": 30, "lw": 0, "fc": "w"}) ax.text(r0[0].to(u.km).value * 2.4, r0[1].to(u.km).value * 0.4, r0[2].to(u.km).value * 1.25, "Earth at launch\n({})".format(date_launch.to_datetime().strftime("%d %b")), ha="left", va="bottom", backgroundcolor='#ffffff') ax.text(rf[0].to(u.km).value * 1.1, rf[1].to(u.km).value * 1.1, rf[2].to(u.km).value, "Mars at arrival\n({})".format(date_arrival.to_datetime().strftime("%d %b")), ha="left", va="top", backgroundcolor='#ffffff') ax.text(-1.9e8, 8e7, 1e8, "Transfer\norbit", ha="right", va="center", backgroundcolor='#ffffff') # Tune axes ax.set_xlim(-3e8, 3e8) ax.set_ylim(-3e8, 3e8) ax.set_zlim(-3e8, 3e8) # And finally! ax.view_init(30, 260) plt.show() #fig.savefig("trans_30_260.png", bbox_inches='tight') #return fig, ax go_to_mars() """ Explanation: ...y es Python puro! Truco: numba Cuarto: ¡vamos a Marte! End of explanation """ %matplotlib inline from ipywidgets import interactive from IPython.display import display w = interactive(go_to_mars, offset=(0., 1000.), tof_=(100., 12000.)) display(w) """ Explanation: Quinto: ¡¡Hagámoslo interactivo!!!1! End of explanation """
antoniomezzacapo/qiskit-tutorial
qiskit/aqua/artificial_intelligence/qsvm_kernel_classification.ipynb
apache-2.0
from qsvm_datasets import * from qiskit_aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name from qiskit_aqua.input import get_input_instance from qiskit_aqua import run_algorithm # setup aqua logging import logging from qiskit_aqua._logging import set_logging_config, build_logging_config # set_logging_config(build_logging_config(logging.DEBUG)) # choose INFO, DEBUG to see the log """ Explanation: <img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> Qiskit Aqua: Experiment with classification problem with quantum-enhanced support vector machines The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial. Contributors Vojtech Havlicek<sup>[1]</sup>, Kristan Temme<sup>[1]</sup>, Antonio Córcoles<sup>[1]</sup>, Peng Liu<sup>[1]</sup>, Richard Chen<sup>[1]</sup>, Marco Pistoia<sup>[1]</sup> and Jay Gambetta<sup>[1]</sup> Affiliation <sup>[1]</sup>IBMQ Introduction Classification algorithms and methods for machine learning are essential for pattern recognition and data mining applications. Well known techniques such as support vector machines and neural networks have blossomed over the last two decades as a result of the spectacular advances in classical hardware computational capabilities and speed. This progress in computer power made it possible to apply techniques, that were theoretically developed towards the middle of the 20th century, on classification problems that were becoming increasingly challenging. A key concept in classification methods is that of a kernel. Data cannot typically be separated by a hyperplane in its original space. A common technique used to find such a hyperplane consists on applying a non-linear transformation function to the data. This function is called a feature map, as it transforms the raw features, or measurable properties, of the phenomenon or subject under study. Classifying in this new feature space -and, as a matter of fact, also in any other space, including the raw original one- is nothing more than seeing how close data points are to each other. This is the same as computing the inner product for each pair of data in the set. So, in fact we do not need to compute the non-linear feature map for each datum, but only the inner product of each pair of data points in the new feature space. This collection of inner products is called the kernel and it is perfectly possible to have feature maps that are hard to compute but whose kernels are not. In this notebook we provide an example of a classification problem that requires a feature map for which computing the kernel is not efficient classically -this means that the required computational resources are expected to scale exponentially with the size of the problem. We show how this can be solved in a quantum processor by a direct estimation of the kernel in the feature space. The method we used falls in the category of what is called supervised learning, consisting of a training phase (where the kernel is calculated and the support vectors obtained) and a test or classification phase (where new unlabelled data is classified according to the solution found in the training phase). References and additional details: [1] Vojtech Havlicek, Antonio D. C´orcoles, Kristan Temme, Aram W. Harrow, Abhinav Kandala, Jerry M. Chow, and Jay M. 
Gambetta1, "Supervised learning with quantum enhanced feature spaces," arXiv: 1804.11326 End of explanation """ from qiskit import IBMQ IBMQ.load_accounts() """ Explanation: [Optional] Setup token to run the experiment on a real device If you would like to run the experiement on a real device, you need to setup your account first. Note: If you do not store your token yet, use IBMQ.save_accounts() to store it first. End of explanation """ feature_dim=2 # we support feature_dim 2 or 3 sample_Total, training_input, test_input, class_labels = ad_hoc_data(training_size=20, test_size=10, n=feature_dim, gap=0.3, PLOT_DATA=True) datapoints, class_to_label = split_dataset_to_data_and_labels(test_input) print(class_to_label) """ Explanation: First we prepare the dataset, which is used for training, testing and the finally prediction. Note: You can easily switch to a different dataset, such as the Breast Cancer dataset, by replacing 'ad_hoc_data' to 'Breast_cancer' below. End of explanation """ params = { 'problem': {'name': 'svm_classification', 'random_seed': 10598}, 'algorithm': { 'name': 'QSVM.Kernel' }, 'backend': {'name': 'qasm_simulator', 'shots': 1024}, 'feature_map': {'name': 'SecondOrderExpansion', 'depth': 2, 'entanglement': 'linear'} } algo_input = get_input_instance('SVMInput') algo_input.training_dataset = training_input algo_input.test_dataset = test_input algo_input.datapoints = datapoints[0] # 0 is data, 1 is labels """ Explanation: With the dataset ready we initialize the necessary inputs for the algorithm: - the input dictionary (params) - the input object containing the dataset info (algo_input). End of explanation """ result = run_algorithm(params, algo_input) print("testing success ratio: ", result['testing_accuracy']) print("predicted classes:", result['predicted_classes']) print("kernel matrix during the training:") kernel_matrix = result['kernel_matrix_training'] img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r') plt.show() """ Explanation: With everything setup, we can now run the algorithm. For the testing, the result includes the details and the success ratio. For the prediction, the result includes the predicted labels. End of explanation """ sample_Total, training_input, test_input, class_labels = Breast_cancer(training_size=20, test_size=10, n=2, PLOT_DATA=True) # n =2 is the dimension of each data point datapoints, class_to_label = split_dataset_to_data_and_labels(test_input) label_to_class = {label:class_name for class_name, label in class_to_label.items()} print(class_to_label, label_to_class) algo_input = get_input_instance('SVMInput') algo_input.training_dataset = training_input algo_input.test_dataset = test_input algo_input.datapoints = datapoints[0] result = run_algorithm(params, algo_input) print("testing success ratio: ", result['testing_accuracy']) print("ground truth: {}".format(map_label_to_class_name(datapoints[1], label_to_class))) print("predicted: {}".format(result['predicted_classes'])) print("kernel matrix during the training:") kernel_matrix = result['kernel_matrix_training'] img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r') plt.show() """ Explanation: The breast cancer dataset Now we run our algorithm with the real-world dataset: the breast cancer dataset, we use the first two principal components as features. End of explanation """
eaton-lab/eaton-lab.github.io
slides/ipyrad-analysis/ipyrad-analysis.ipynb
mit
# conda install -c ipyrad ipyrad structure clumpp bpp # conda install -c eaton-lab toytree toyplot # conda install -c bioconda raxml """ Explanation: The ipyrad.analysis tool kit Deren Eaton Install software All required software for this walkthrough is available on conda. End of explanation """ # ipcluster start --n=4 """ Explanation: Start an ipyparallel cluster In a separate terminal run the following command to start a cluster of engines. If working on a notebook running remotely, use the dashboard to open a new terminal. End of explanation """ ## connect to the cluster import ipyparallel as ipp ipyclient = ipp.Client() ## print number of engines print len(ipyclient), "connected engines" """ Explanation: You should then be able to connect to the engines in your notebook: End of explanation """ ## import ipyrad import ipyrad as ip """ Explanation: Assemble a RAD data set The code here is to assemble the example empirical data set from the ipyrad tutotial. End of explanation """ ## create an Assembly object data = ip.Assembly("simdata") ## set I/O paths for the data data.set_params("project_dir", "~/workshop") data.set_params("raw_fastq_path", "ipsimdata/rad_example_R1_.fastq.gz") data.set_params("barcodes_path", "ipsimdata/rad_example_barcodes.txt") ## run all steps of the Assembly data.run("1234567") """ Explanation: Minimal workflow: scroll down for details. End of explanation """ ## set params data.set_params("filter_adapters", 2) data.set_params("output_formats", "lpask") ## show params data.get_params() """ Explanation: Modify more parameters End of explanation """ ## run all steps of assembly data.run("1234567") """ Explanation: Assemble the data set You can run one or more steps just like in the CLI. End of explanation """ ## summary stats data.stats """ Explanation: Access assembly results You can easily access summary stats for the assembly as a data frame. End of explanation """ import toyplot ## plot barplot c, a, m = toyplot.bars( data.stats.hetero_est, height=250, width=500, ) ## style the axes a.x.ticks.locator = toyplot.locator.Explicit( locations=range(len(data.stats)), labels=data.stats.index) a.y.label.text = "Heterozygosity" a.y.ticks.show = True """ Explanation: Plot statistics End of explanation """ ## s2 stats file print data.stats_files.s2 ## the .loci file location print data.outfiles.loci """ Explanation: Access result files You can also access the stats files for each step, and the output files for downstream analyses. End of explanation """ ## import the toolkit import ipyrad.analysis as ipa """ Explanation: ipyrad.analysis tools The ipyrad.analysis module includes many wrapper tools that can be used to efficiently run evolutionary analysis tools in a notebook. End of explanation """ import ipyrad as ip import ipyparallel as ipp data = ip.load_json("/home/deren/workshop/simdata.json") ipyclient = ipp.Client() """ Explanation: RAxML analysis Simply enter the location of the phylip file, which can be accessed from the .outfiles attribute of the Assembly object. You can also provide a name and output directory, and set many other optional parameters. End of explanation """ ## create a raxml object s = ipa.raxml( name=data.name, phyfile=data.outfiles.phy, workdir="~/workshop/analysis-raxml"); ## run the analysis s.run() """ Explanation: Minimal workflow: scroll down for details. 
End of explanation """ ## modify params s.params.T = 4 s.params.N = 100 ## print the raxml command as a string print s.command ## overwrite existing result with this 'name' s.run(force=True) """ Explanation: Modify parameters and other functions End of explanation """ print s.trees import toytree tre = toytree.tree(s.trees.bipartitions) tre.root(wildcard='3') tre.draw( width=300, node_labels=tre.get_node_values("support"), node_size=20, ); """ Explanation: Access the tree files and plot End of explanation """ ## create a baba object b = ipa.baba(data=data.outfiles.loci) ## generate tests given the rooted tree b.tests = [ {"p4":["3L_0"], "p3":["2F_0"], "p2":["1D_0"], "p1":["1A_0"]}] ## run jobs distributed across the cluster b.run(ipyclient) b.results_table """ Explanation: introgression (abba-baba) analysis The baba object can be used to set up abba-baba tests, to calculate results, and to generate plots to visualize them. Minimal example, scroll down for details. End of explanation """ ## init baba object b = ipa.baba(data=data.outfiles.loci, newick=tre) ## generate all possible tests on this tree b.generate_tests_from_tree() ## set constraints on tests cdict = {"p4": ["3L_0"], "p3": ["2E_0", "2F_0"], "p2": ["1D_0"]} ## generate constrainted number of tests b.generate_tests_from_tree( constraint_dict=cdict, constraint_exact=False, ) """ Explanation: Auto-generate tests Instead of writing out many tests explicitly, you can instead enter a rooted tree to the baba object and use this function to auto-generate four-taxon test fitting the tree and constraints. End of explanation """ ## run the tests (in this case 4) linked to the baba object b.run(ipyclient) ## show results table b.results_table """ Explanation: Run all tests linked to a baba object End of explanation """ b.plot( height=350, pct_tree_x = 0.4, pct_tree_y = 0.2, ); ### Save the plot import toyplot.pdf canvas, axes, mark = b.plot(height=350, pct_tree_x=0.4, pct_tree_y=0.2) toyplot.pdf.render(canvas, "/home/deren/workshop/abba-baba.pdf") ## save the results table b.results_table.to_csv("~/workshop/abba-baba.csv", sep="\t") """ Explanation: Plot results End of explanation """ ## create a tetrad class object tet = ipa.tetrad( name=data.name, seqfile=data.outfiles.snpsphy, mapfile=data.outfiles.snpsmap, workdir="~/workshop/analysis-tetrad", nboots=100 ) ## run the analysis tet.run(ipyclient) """ Explanation: Species tree inference by phylogenetic invariants The program tetrad follows the algorithm of SVDquartets by inferring all possible quartet trees from a large SNP alignment and uses the program quartet maxcut (Snir et al. 2012) to infer a species tree by quartet joining. 
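Because every set of four samples defines one quartet, the number of quartets grows very quickly with the number of samples in the alignment (it is "n choose 4"). A quick illustration of that scaling (a sketch, not part of the analysis itself):
import itertools
for nsamples in [12, 24, 48]:
    print nsamples, "samples ->", len(list(itertools.combinations(range(nsamples), 4))), "quartets"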
End of explanation """ tet.trees ## load unrooted result tree with toytree and draw tre = toytree.tree(tet.trees.cons) tre.draw( node_labels=tre.get_node_values("support"), node_size=20, ); """ Explanation: Access result tetrad trees and draw End of explanation """ # conda install bpp -c ipyrad ## setup: define how samples group into 'species' IMAP = { "1": ["1A_0", "1B_0", "1C_0"], "D": ["1D_0"], "2": ["2F_0", "2E_0", "2G_0"], "H": ["2H_0"], "3": ["3J_0", "3I_0", "3K_0"], "L": ["3L_0"], } ## setup: define a guidetree GUIDE = "(((1,D),(2,H)),(3,L));" ## init a bpp object bpp = ipa.bpp( locifile=data.outfiles.loci, imap=IMAP, guidetree=GUIDE, workdir="~/workshop/analysis-bpp" ); ## submit jobs to run on the cluster bpp.submit_bpp_jobs("A00", nreps=2, ipyclient=ipyclient) """ Explanation: Infer a species tree with BPP End of explanation """ ## set some parameters bpp.params.burnin = 1000 bpp.params.nsample = 5000 bpp.params.infer_sptree = 1 bpp.params.infer_delimit = 0 ## set some filters bpp.filters.maxloci = 200 bpp.filters.minsnps = 2 ## submit jobs to run on the cluster bpp.submit_bpp_jobs("A00", nreps=2, ipyclient=ipyclient) """ Explanation: Set parameters and filters You can define all of the parameter settings that will be used in the BPP .ctl file by modifying the .params attributes. Similarly, you can modify which loci will be included in the analysis using the .filters attributes. End of explanation """ ## a list of submitted jobs print bpp.asyncs ## a list of result files produced by jobs print bpp.files """ Explanation: Track running jobs Unlike some of the other ipyrad.analysis tools, the bpp object does not "block" while the jobs are running. Meaning that after it sends jobs to run on the cluster you can continue to interact with the notebook. This is useful since BPP is not multi-threaded, so you will likely want to submit many different types of jobs. You can check on running jobs like below. End of explanation """ import ipyrad as ip import ipyrad.analysis as ipa import ipyparallel as ipp data = ip.load_json("/home/deren/workshop/simdata.json") ipyclient = ipp.Client() # conda install structure -c ipyrad # conda install clumpp -c ipyrad ## create a structure class object s = ipa.structure( name=data.name, strfile=data.outfiles.str, mapfile=data.outfiles.snpsmap, workdir="~/workshop/analysis-structure", ); s.mainparams.burnin = 100 s.mainparams.numreps = 1000 ## submit jobs to run on the cluster for kpop in [2, 3, 4, 5]: s.submit_structure_jobs(kpop=kpop, nreps=5, ipyclient=ipyclient) """ Explanation: Structure analyses End of explanation """ s.mainparams.burnin = 10000 s.mainparams.numreps = 100000 s.extraparams.usepopinfo = 0 """ Explanation: Modify parameters settings End of explanation """ ## get results for a single K value s.get_clumpp_table(3) ## make a dict for all results tables = {} for kpop in [2, 3, 4, 5]: tables[kpop] = s.get_clumpp_table(kpop) """ Explanation: Summarize results with CLUMPP End of explanation """
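## A possible next step (a sketch): visualize one of the CLUMPP-averaged tables as a
## stacked barplot with toyplot. This assumes get_clumpp_table() returns a table of
## admixture proportions with one row per sample and one column per cluster.
import numpy as np
import toyplot
canvas, axes, mark = toyplot.bars(np.array(tables[3]), width=500, height=250)
axes.y.label.text = "ancestry proportion (K=3)"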
bwgref/nustar_pysolar
notebooks/20210114/Planning 20210114.ipynb
mit
fname = io.download_occultation_times(outdir='../data/') print(fname) """ Explanation: Download the list of occultation periods from the MOC at Berkeley. Note that the occultation periods typically only are stored at Berkeley for the future and not for the past. So this is only really useful for observation planning. End of explanation """ tlefile = io.download_tle(outdir='../data') print(tlefile) times, line1, line2 = io.read_tle_file(tlefile) """ Explanation: Download the NuSTAR TLE archive. This contains every two-line element (TLE) that we've received for the whole mission. We'll expand on how to use this later. The times, line1, and line2 elements are now the TLE elements for each epoch. End of explanation """ tstart = '2021-01-14T09:00:00' tend = '2021-01-14T17:00:00' orbits = planning.sunlight_periods(fname, tstart, tend) orbits # Get the solar parameter from sunpy.coordinates import sun angular_size = sun.angular_radius(t='now') dx = angular_size.arcsec print(dx) pa = planning.get_nustar_roll(tstart, 0) print(tstart) print("NuSTAR Roll angle for Det0 in NE quadrant: {}".format(pa)) # Orbit 1 (AR) offset = [-250, -300.]*u.arcsec ind = 1 orbit = orbits[0] midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0]) sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True) print("Orbit: {}".format(ind)) print(f"Solar offset: {offset}") print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso)) print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}') print("") # Orbit 2a (AR) offset = [-250, -300.]*u.arcsec ind = 2 orbit = orbits[1].copy() print(orbit) oldend = orbit[1] newend = orbit[1] - TimeDelta(15*60., format = 'sec') # 15 minutes early orbit[1] = newend midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0]) sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True) print(f"Orbit: {ind}a") print(f"Solar offset: {offset}") print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso)) print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}') print("") # Orbit 2b (small thing) offset = [200, 300.]*u.arcsec orbit[0] = newend orbit[1] = oldend midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0]) sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True) print(f"Orbit: {ind}b") print(f"Solar offset: {offset}") print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso)) print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}') print("") # Orbit 3, 4 (PSP Footpoints) offset = [-500., -550.]*u.arcsec for oi in [2, 3]: ind = oi+1 orbit = orbits[oi] midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0]) sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True) print("Orbit: {}".format(ind)) print(f"Solar offset: {offset}") print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso)) print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}') print("") test1 = SkyCoord(289.3792274160115, -22.304595055979675, unit = 'deg') orb1 = SkyCoord(289.3855, -22.3051, unit = 'deg') orb1.separation(test1) orbit import sunpy sunpy.__version__ test1 = SkyCoord(289.898451566591, -22.158432904027155 , unit = 'deg') orb1 = SkyCoord(289.9047, -22.1589, unit = 'deg') orb1.separation(test1) """ Explanation: Here is where we define the observing window that we want to use. 
Note that tstart and tend must be in the future; otherwise you won't find any occultation times and sunlight_periods will return an error. End of explanation """
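# A quick sanity check on the occultation data (a sketch): list each sunlight window
# returned above along with its duration in minutes. Assumes `orbits` is a list of
# [start, end] astropy Time pairs, as used in the pointing calculations above.
import astropy.units as u
for ind, orbit in enumerate(orbits):
    duration = (orbit[1] - orbit[0]).to(u.min)
    print(f"Orbit {ind}: {orbit[0].iso} -> {orbit[1].iso} ({duration:.1f})")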
bio-guoda/guoda-examples
02_iDigBio_and_GBIF_Specimens_By_Continent_Over_Time.ipynb
mit
# col() selects columns from a data frame, year() works on dates, and udf() creates user # defined functions from pyspark.sql.functions import col, year, udf # Plotting library and configuration to show graphs in the notebook import matplotlib.pyplot as plt %matplotlib inline """ Explanation: Continuing Graphs of Specimens Over Time - Continents and GBIF This notebook continues the work done in 01_iDigBio_Specimens_Collected_Over_Time.ipynb, you may want to review that notebook before contiuing. If you are interested in seeing what kinds of outputs are possible, you are welcome to skip to the graphs at the end of this notebook. Our goal for this notebook is to see how many specimens were collected on each continent during each year from 1817 to 2017. A stacked bar graph with a different color for each continent is a good way to view this data. We will introduce some additional concepts such as user defined functions, crosstabs, and how to write SQL statements instead of using the native pyspark methods. We'll move quickly though concepts already covered in earlier notebooks. Setup Import needed functions and libraries. End of explanation """ df = sqlContext.read.load("/guoda/data/idigbio-20190612T171757.parquet") df.count() """ Explanation: Loading the data set End of explanation """ yc_sum = (df .groupBy(year("datecollected").cast("integer").alias("yearcollected"), "continent") .count() .orderBy("yearcollected") ) yc_sum.head(20) """ Explanation: Year collected by continent To get a feel for what a summary of year and continent look like, we will again group by the year column but also add the continent column. The result will be a row for each unique combination of year and continent with the count of how many times that occurs. End of explanation """ # allowed continents continents = set(["africa", "australia", "asia", "oceania", "europe", "north america", "south america"]) def fix_continent(c): """ If the given string contains one of the continents, return which one. If not then return "other". Lower case the string and also replace "_" with " " to be able to clean up GBIF's representation of continents that contain spaces. """ for continent in continents: if (c) and (continent in c.lower().replace("_", " ")): return continent return "other" # Some example calls to this function print(fix_continent(None)) print(fix_continent("europe")) print(fix_continent("oceania")) print(fix_continent("NORTH_AMERICA")) print(fix_continent("North America, Canada, Manitoba, Churchill")) print(fix_continent("East Indies, Indonesia: Pulo Pandjang, off Sumatra")) print(fix_continent("Asia; Thailand; Pathum Thani")) """ Explanation: There are a lot of things that are not continents there! While iDigBio cleans stuff up a bit before indexing it, not everything is obvious to do. For the purposes of our graph, we will make an "other" continent and group things that are not one of the 7 continents (plus "oceania" which GBIF uses and we'll look at GBIF data lower down) into that category. Fixing continent with a user defined function To create a new column that has a fixed continent field, we will write a user defined function in Python and apply that function to every row in the data frame. This is exactly how the year() function from the pyspark library works. But, since we are writing the function in Python, we need to convert it into a data frame aware function with the pyspark function udf(). Once we do that, we can use our custom function just the same way we used year() above. 
So let's write our Python function: End of explanation """ fix_continent_udf = udf(fix_continent) """ Explanation: That output looks right. So now let's make a user defined function for Spark from our Python function. Notice that we pass the Python function as an argument to udf() and udf() returns to us a new function. In Python, functions are what's called "first class objects" and we can pass them around and manipulate them just like a variable. End of explanation """ yc_sum = (df .withColumn("fixed", fix_continent_udf(df.continent)) .groupBy(year("datecollected").cast("integer").alias("yearcollected"), "fixed") .count() .orderBy("yearcollected") ) yc_sum.head(20) """ Explanation: Now re-run our grouping. This time use the fix_continent_udf() function to calculate a new column and add it to the dataframe with the withColumn() method. Then we can refer to that new column in the group by method. (Yes, we could have put the fix_continent_udf() call into the group by just like we put the year() call but this is an opportunity to introduce withColumn().) End of explanation """ pandas_yc_cross = (df .select(year("datecollected").cast("integer").alias("yearcollected"), df.continent) .filter(col("yearcollected") >= 1817) .filter(col("yearcollected") <= 2017) .withColumn("fixed", fix_continent_udf(df.continent)) .crosstab("yearcollected", "fixed") .orderBy(col("yearcollected_fixed")) .toPandas() ) pandas_yc_cross.head(5) """ Explanation: That looks much better. Notice now that we can actually see all of the yearcollected=None values. Before there were many continents so they went well past the top 20 results we were looking at. Now we can see all 8 counts, one for each continent. Cross tabs to make plottable data What we have created at this point is a long list of year, continent, and count combinations. This representation of the data is normalized and can be thought of as the "long" form. Plotting wants to see each variable in one column though so the better represenation would the de-normalized or "wide" form. We can do this in Pandas with piviot() or in R you can use one of the reformating functions. We can also do this in Spark directly. We will also take this opportunity to show how more typically a full data pipeline is shown in Spark. We will calculate the Pandas data frame all at once starting from the original df we loaded, filtering on years, creating the new fixed continents, and summarizing with group by. Each line in the pipeline below becomes the input for the next lower line. Notice that we first select only the columns of interest to let Spark know we don't need the other 200+ fields in iDigBio. Then we filter on year to cut down on the rows we need to work with. Only then do we run our udf which only runs on the filtered rows. The new step in here is the crosstab() method. This is the method that takes all values in the column that is the second argument and makes them in to columns. You'll see the output after. End of explanation """ import pandas as pd import numpy as np def stack_bar_plot(pdf, columns, colors): """ Matplotlib needs to have the bars stacked on top of each other by hand. First, we plot one column of data, then we make a ... 
""" plots = [] bottoms = pd.DataFrame(np.zeros((len(pdf['yearcollected_fixed']), 1))) for c in columns: plots.append( plt.bar(pdf['yearcollected_fixed'], pdf[c], color=colors[len(plots)], edgecolor='none', width=1.0, bottom=bottoms[0]) ) bottoms[0] += pdf[c] return plots """ Explanation: That looks pretty intuitive and we made the whole thing with one Spark statement. You can tell it's a Pandas dataframe now because the display is nice and tabular. Making the graph Plotting isn't the focus of this notebook. Briefly, we'll make a function that stacks up multiple variables (continent counts) on top of each other and shades them different colors. End of explanation """ # set up ordering of continents and their colors continents_list = sorted(continents) continents_list.insert(0, "other") colors = ["#888888", "#a75902", "#00e5c8", "#1bcda7", "#37b686", "#539f65", "#51827c", "#bf6603"] plots = stack_bar_plot(pandas_yc_cross, continents_list, colors) # Start of WWI plt.axvline(x=1914) # Start of WWII plt.axvline(x=1939) # "1988 - October 31: President Reagan signs the NSF Authorization Act of 1988, # thereby authorizing the doubling of the NSF budget over the next five years." plt.axvline(x=1988) plt.legend(plots, continents_list, loc=2) plt.title("Specimens in iDigBio by Collection Year and Continent") plt.ylabel("Number of Specimen Records") plt.xlabel("Year") axes = plt.gca() axes.set_xlim([1815, 2020]) axes.set_ylim([0, 1200000]) fig = plt.gcf() fig.set_size_inches(12, 4) """ Explanation: and then we'll call our function to make the graph and add some axes. End of explanation """ gbif_df = sqlContext.read.parquet("/guoda/data/gbif-idigbio.parquet/source=gbif/date=20160825") """ Explanation: There it is, the number of specimen records in iDigBio by collection date and continent. We chose to add some vertical lines to the graph at particularly interesting dates. You can read the code comments to see what those are. Doing the same with GBIF data GBIF periocdically produces a similar graph to the above on their analytics page titled "Records by year of occurrence" but it's not very comparable because it include observation records as well. It would be great if we could run the same process on their data after filtering it for only specimens so we can look at how the contents of different occurrance record aggregators differs. (This is an expected and healthy thing. One of the goals of the GUODA collaboration is to begin to characterize the relationships between sets of aggregated data across domains without that work being done by one entity. Notice too that this copy of GBIF is pretty old. Again this is GUODA's fault as we have not been running our import regularly while we worked on improving other parts of our system.) We can start by reading in the GBIF data that is stored in GUODA. End of explanation """ gbif_df.createOrReplaceTempView("gbif_df") renamed_df = sqlContext.sql(""" SELECT `http://rs.tdwg.org/dwc/terms/eventDate` as eventDate, `http://rs.tdwg.org/dwc/terms/continent` as continent FROM gbif_df WHERE `http://rs.tdwg.org/dwc/terms/basisOfRecord` LIKE "%SPECIMEN%" """).persist() renamed_df.count() """ Explanation: We use a different process for formating GBIF data and as part of GUODA's process, the columns are named with a syntax that can be difficult to work with in Python. First let's use the SQL "as" syntax to rename the columns. Spark data frames can also be manupulated with SQL statements instead of the pyspark functions. 
Note that Spark does not support the full SQL standard, it only supports syntx similar to the native functions. The first step is to tell Spark what name we want to use to refer to our data frame with in the SQL. That is the createOrReplaceTempView() method. After than we can send our SQL as a string to the SQL Context associated with our Spark Context and we will get back a data frame. Because we'll use this data frame more than once, we'll persist it so it will not be recalculated. As part of our SQL statement we will also filter for only those records that contain the string "SPECIMEN" in the basisOfRecord field. This may or may not be the best way to select only specimens from GBIF but for our graph it's good enough. End of explanation """ pandas_yc_cross_gbif = (renamed_df .select(year("eventDate").cast("integer").alias("yearcollected"), renamed_df.continent) .filter(col("yearcollected") >= 1817) .filter(col("yearcollected") <= 2017) .withColumn("fixed", fix_continent_udf(renamed_df.continent)) .crosstab("yearcollected", "fixed") .orderBy(col("yearcollected_fixed")) .toPandas() ) pandas_yc_cross_gbif.head(5) """ Explanation: We will again run the crosstab all at once on this data frame with renamed columns. This is exactly the same summarization we ran on the iDigBio data frame. End of explanation """ # GBIF doesn't have "australia" try: continents_list.remove("australia") colors.remove("#1bcda7") except: pass plots = stack_bar_plot(pandas_yc_cross_gbif, continents_list, colors) # Start of WWI plt.axvline(x=1914) # Start of WWII plt.axvline(x=1939) # "1988 - October 31: President Reagan signs the NSF Authorization Act of 1988, thereby authorizing the doubling of the NSF budget over the next five years." plt.axvline(x=1988) plt.legend(plots, continents_list, loc=2) plt.title("Specimens in GBIF by Collection Year and Continent") plt.ylabel("Number of Specimen Records") plt.xlabel("Year") axes = plt.gca() axes.set_xlim([1815, 2020]) axes.set_ylim([0, 1200000]) fig = plt.gcf() fig.set_size_inches(12, 4) """ Explanation: And we'll use the exact same code for graphing as well. We just need to remove the "australia" and it's color from our continent list because GBIF doesn't use it. End of explanation """
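# A small follow-up comparison (a sketch): total records per continent in each aggregator,
# summed from the two pandas crosstabs built above. All columns except yearcollected_fixed
# hold the per-continent counts; GBIF has no "australia" column, so missing values are
# filled with zero.
idigbio_totals = pandas_yc_cross.drop("yearcollected_fixed", axis=1).sum()
gbif_totals = pandas_yc_cross_gbif.drop("yearcollected_fixed", axis=1).sum()
comparison = pd.DataFrame({"idigbio": idigbio_totals, "gbif": gbif_totals}).fillna(0)
comparison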
bengranett/minimask
demos/mosaic.ipynb
mit
%matplotlib notebook import os import numpy as np import tempfile import matplotlib.pyplot as pyplot import logging logging.basicConfig(level=logging.INFO) import minimask.mask as mask import minimask.healpix_projection as hp import minimask.io.mosaic as mosaic """ Explanation: minimask mosaic example Construct a mosaic of squares over the sky End of explanation """ filename = "masks/mosaic.txt" try: os.mkdir(os.path.dirname(filename)) except: pass """ Explanation: Specify the location of the mask file to write End of explanation """ tile = np.array([[[-0.5, -0.5],[0.5, -0.5],[0.5,0.5],[-0.5,0.5]]])*8 grid = hp.HealpixProjector(nside=4) lon, lat = grid.pix2ang(np.arange(grid.npix)) centers = np.transpose([lon, lat]) mosaic.Mosaic(tile, centers).write(filename) """ Explanation: Construct a mask using a tile pattern with centers specified by the healpix grid. End of explanation """ M = mask.Mask(filename) print "The number of polygons in the mask is {}.".format(len(M)) """ Explanation: Load the file as a mask object End of explanation """ import healpy healpy.mollview(title="") for x,y in M.render(1): healpy.projplot(x,y,lonlat=True) """ Explanation: Plot the mask on a mollweide projection using healpy. End of explanation """ map = M.pixelize(nside=64, n=10, weight=False) healpy.mollview(map, title="") """ Explanation: Pixelize the mask onto the healpix grid End of explanation """
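# A quick summary number from the pixelized map (a sketch): estimate the fraction of the
# sky covered by the mosaic, assuming `map` holds the fractional coverage of each
# (equal-area) healpix cell as returned by M.pixelize() above. The full sky is ~41253 sq. deg.
import numpy as np
fsky = np.mean(map)
print "approximate sky fraction covered: {:.3f} (about {:.0f} sq. deg.)".format(fsky, fsky * 41253.)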
zerothi/sisl
docs/visualization/viz_module/basic-tutorials/Demo.ipynb
mpl-2.0
import sisl # We define the root directory where our files are siesta_files = sisl._environ.get_environ_variable("SISL_FILES_TESTS") / "sisl" / "io" / "siesta" """ Explanation: First steps End of explanation """ import sisl.viz from sisl.viz import Plot """ Explanation: Activating the viz framework The first thing you will need to do in order to use plots is to import sisl.viz: End of explanation """ from sisl.viz import Plot Plot(siesta_files / "SrTiO3.bands") """ Explanation: This will load the appropiate things into sisl to use the visualization tools. You can also control the loading of the framework with an environment variable: SISL_VIZ_AUTOLOAD=True will load the framework on import sisl, so you won't need to explicitly import it. <div class="alert alert-info"> Note If you use sisl to run high performance calculations where you initialize sisl frequently it's better to have the autoloading turned off (default), as it might introduce an overhead of about a second. </div> Now that the framework has been loaded, we can start plotting! My first plots The most straightforward way to plot things in sisl is to call the Plot class, which you can import as shown in the next cell: End of explanation """ Plot(siesta_files / "SrTiO3.RHO", axes="xy", nsc=[2,1,1], zsmooth='best') """ Explanation: Note how we just passed the path to our bands file and sisl recognized what was the plot that we wanted to generate. Let's try now passing a .RHO file to check the electronic density: End of explanation """ rho_plot = Plot(siesta_files / "SrTiO3.RHO", axes="xy", nsc=[2,1,1], zsmooth="best") """ Explanation: You probably noticed that we used some extra arguments (axes, nsc, zsmooth) to get the exact plot that we wanted. These arguments are called settings. Settings define how the plot will process and show your plot. You can provide settings on initialization or update them later. Plot() returns a plot object. If you want to keep that plot object for later (to do any modification on it) you will have to, of course, store it in a variable. Let's do that: End of explanation """ print(rho_plot.__class__) print(rho_plot.__doc__) """ Explanation: And now that we have it, let's try to get some help from it to understand the plot object better. End of explanation """ rho_plot.settings """ Explanation: We can see two interesting things: Our plot is a GridPlot, not simply a Plot. This means that it knows you are dealing with a grid and consequently it will help you by providing useful methods and settings. On the documentation, under Parameters, you can see the arguments that this plot understands. If you've guessed these are the so-called settings, then you've guessed right! A way to know the current settings of your plot is to check the settings attribute: End of explanation """ rho_plot.scan(along="z", num=15) """ Explanation: The names might already give you a quick intuition of what each setting does, but for more detail you can go to the documentation. The showcase notebooks show examples and are designed to help you understand what each setting does in a visual way. It is always worth checking them out if you are dealing with a new plot type! One of the interesting methods that grid plots have is the scan method. 
Here we use it to do a simple scan of 15 steps with the default settings, but you can play with it: End of explanation """ bands_sile = sisl.get_sile(siesta_files/"SrTiO3.bands") bands_sile.plot() """ Explanation: Plotable objects In this section we'd like to point out that using the Plot class is not the most convenient thing for day to day usage. Instead, everything that is plotable in sisl will receive a plot method that you can use. One example of a plotable object is the bandsSileSiesta: End of explanation """ bands_sile.plot.bands(bands_color="red") """ Explanation: Although the plot attribute is not exactly a method, but a manager that organizes all the plotting possibilities for an object. If you call it, as we did, you get the default plot, but you can specify which plot type you want specifically: End of explanation """ dir(bands_sile.plot) """ Explanation: One can quickly check what are the options: End of explanation """ rho_plot.update_settings(z_range=[1, 3], axes="xyz", isos=[{"frac": 0.05, "color":"lightgreen", "opacity": 0.3}]) """ Explanation: And you then see that there's the option to plot the fatbands from this object. We won't do it here because it needs the .WFSX file, which we don't have. Updating your plots As we mentioned earlier, plots have settings and they can be updated. This stems from the fact that the framework is designed with GUIs in mind, where the user will have visual input fields that they may tweak to see how the plot changes. So you might do as if you were interacting from a GUI and update the settings: End of explanation """ rho_plot.save("rho_plot.plot") """ Explanation: The most important thing is that, by the time we do this update, the .RHO file could have changed its location or even disappeared and it wouldn't matter. When you update a setting, the plot reruns only from the point where that setting is used. This avoids rerunning time-consuming initializations like reading a very big file or diagonalizing a hamiltonian. However, this is not the only useful point. Since plots are self-contained, you can share this plot with someone else and they will be able to tweak all the settings that they wish if they don't involve reading data again. Isn't this nice? This brings us to the next section. Storing and loading plots After a time-consuming calculation or data reading, you clearly want your results to be saved. Plots provide a save method: End of explanation """ rho_plot_from_colleague = sisl.viz.load("rho_plot.plot") """ Explanation: As discussed in the last paragraph of the previous section this stores not only the current visualization, but the full self contained plot that you can tweak as you wish when you load it again: End of explanation """ rho_plot_from_colleague.update_settings(axes="x") # Let's clean the working directory. import os os.remove("rho_plot.plot") """ Explanation: And do whatever you want with it: End of explanation """ rho_plot_from_colleague.update_settings(backend="matplotlib", axes="x") rho_plot_from_colleague.update_settings(backend="plotly", axes="xy") rho_plot_from_colleague.update_settings(backend="matplotlib") """ Explanation: You might ask yourself now what happens if you just want to store the representation, not the full self-contained plot. For this, we first need to discuss the next section. Different plotting backends Hidden between all the settings, you can find a very special setting: backend. Initially, the visualization framework was written to plot things using plotly. 
However, we noticed that this might not be the appropiate choice for everyone. Therefore, we changed the design to make it very modular and allow rendering the plot with any framework you like. There's a dedicated notebook on how to register your own backends. Here however we just want to show you how you can switch between the sisl-provided backends. It is very simple: End of explanation """ rho_plot_from_colleague.get_param("backend").options """ Explanation: Note that you can always know what backends are available for the plot by checking the options of the backend parameter: End of explanation """ rho_plot_from_colleague.plot([1,2,3,4], [0,0,1,2]) rho_plot_from_colleague """ Explanation: Note that the options here will only show the backends that have been loaded. sisl only loads backends if the required python packages are present. Currently, sisl provides backends for three frameworks: plotly, matplotlib and blender. If you have one backend selected, you will have available all the methods that the framework provides. For example, if you are using the matplotlib backend, you can use all the methods that matplotlib implements for the Axes object directly on the plot. You also have the figure (axes) under the figure (axes) attribute, for whatever you want to do. Let's for example draw a line, using Axes.plot: End of explanation """ rho_plot_from_colleague.update_settings(backend="plotly") rho_plot_from_colleague.add_scatter(x=[1,2,3,4], y=[0,0,1,2]) """ Explanation: And now let's do the same with plotly. In this case, all methods are looked for in the Figure object that is stored under the figure attribute. End of explanation """ thumbnail_plot = rho_plot_from_colleague if thumbnail_plot: thumbnail_plot.show("png") """ Explanation: At this point, you probably already know how you will be able to save these plots to images, html or whatever other format. Use the methods that each framework provides! Also, this will also allow you to modify the plot as you wish (adding lines, changing titles, showing legends...) once sisl has render it. Again, you just have to use the methods that the framework provides to do so :) Discover more Until here, we have covered the most basic concepts of the framework. If you enjoyed it, we encourage you to check the rest of notebooks to find out about more specific and complex aspects of it. This next cell is just to create the thumbnail for the notebook in the docs End of explanation """
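# One more convenience that follows from the backend design (a sketch): because the
# rendered figure is exposed on the `figure` attribute, it can be exported with the
# active framework's own tools. `write_html` below is plotly's API, not sisl's.
rho_plot_from_colleague.update_settings(backend="plotly")
rho_plot_from_colleague.figure.write_html("rho_plot.html")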
diegocavalca/Studies
deep-learnining-specialization/1. neural nets and deep learning/resources/Building your Deep Neural Network - Step by Step v3.ipynb
cc0-1.0
import numpy as np import h5py import matplotlib.pyplot as plt from testCases_v2 import * from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward %matplotlib inline plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' %load_ext autoreload %autoreload 2 np.random.seed(1) """ Explanation: Building your Deep Neural Network: Step by Step Welcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want! In this notebook, you will implement all the functions required to build a deep neural network. In the next assignment, you will use these functions to build a deep neural network for image classification. After this assignment you will be able to: - Use non-linear units like ReLU to improve your model - Build a deeper neural network (with more than 1 hidden layer) - Implement an easy-to-use neural network class Notation: - Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters. - Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example. - Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations). Let's get started! 1 - Packages Let's first import all the packages that you will need during this assignment. - numpy is the main package for scientific computing with Python. - matplotlib is a library to plot graphs in Python. - dnn_utils provides some necessary functions for this notebook. - testCases provides some test cases to assess the correctness of your functions - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. End of explanation """ # GRADED FUNCTION: initialize_parameters def initialize_parameters(n_x, n_h, n_y): """ Argument: n_x -- size of the input layer n_h -- size of the hidden layer n_y -- size of the output layer Returns: parameters -- python dictionary containing your parameters: W1 -- weight matrix of shape (n_h, n_x) b1 -- bias vector of shape (n_h, 1) W2 -- weight matrix of shape (n_y, n_h) b2 -- bias vector of shape (n_y, 1) """ np.random.seed(1) ### START CODE HERE ### (≈ 4 lines of code) W1 = np.random.randn(n_h, n_x)*0.01 b1 = np.zeros((n_h, 1)) W2 = np.random.randn(n_y, n_h)*0.01 b2 = np.zeros((n_y, 1)) ### END CODE HERE ### assert(W1.shape == (n_h, n_x)) assert(b1.shape == (n_h, 1)) assert(W2.shape == (n_y, n_h)) assert(b2.shape == (n_y, 1)) parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = initialize_parameters(2,2,1) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) """ Explanation: 2 - Outline of the Assignment To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. 
Here is an outline of this assignment, you will: Initialize the parameters for a two-layer network and for an $L$-layer neural network. Implement the forward propagation module (shown in purple in the figure below). Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$). We give you the ACTIVATION function (relu/sigmoid). Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function. Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function. Compute the loss. Implement the backward propagation module (denoted in red in the figure below). Complete the LINEAR part of a layer's backward propagation step. We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function. Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function Finally update the parameters. <img src="images/final outline.png" style="width:800px;height:500px;"> <caption><center> Figure 1</center></caption><br> Note that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. 3 - Initialization You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers. 3.1 - 2-layer Neural Network Exercise: Create and initialize the parameters of the 2-layer neural network. Instructions: - The model's structure is: LINEAR -> RELU -> LINEAR -> SIGMOID. - Use random initialization for the weight matrices. Use np.random.randn(shape)*0.01 with the correct shape. - Use zero initialization for the biases. Use np.zeros(shape). End of explanation """ # GRADED FUNCTION: initialize_parameters_deep def initialize_parameters_deep(layer_dims): """ Arguments: layer_dims -- python array (list) containing the dimensions of each layer in our network Returns: parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL": Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1]) bl -- bias vector of shape (layer_dims[l], 1) """ np.random.seed(3) parameters = {} L = len(layer_dims) # number of layers in the network for l in range(1, L): ### START CODE HERE ### (≈ 2 lines of code) parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01 parameters['b' + str(l)] = np.zeros((layer_dims[l], 1)) ### END CODE HERE ### assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])) assert(parameters['b' + str(l)].shape == (layer_dims[l], 1)) return parameters parameters = initialize_parameters_deep([5,4,3]) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) """ Explanation: Expected output: <table style="width:80%"> <tr> <td> **W1** </td> <td> [[ 0.01624345 -0.00611756] [-0.00528172 -0.01072969]] </td> </tr> <tr> <td> **b1**</td> <td>[[ 0.] 
[ 0.]]</td> </tr> <tr> <td>**W2**</td> <td> [[ 0.00865408 -0.02301539]]</td> </tr> <tr> <td> **b2** </td> <td> [[ 0.]] </td> </tr> </table> 3.2 - L-layer Neural Network The initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the initialize_parameters_deep, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then: <table style="width:100%"> <tr> <td> </td> <td> **Shape of W** </td> <td> **Shape of b** </td> <td> **Activation** </td> <td> **Shape of Activation** </td> <tr> <tr> <td> **Layer 1** </td> <td> $(n^{[1]},12288)$ </td> <td> $(n^{[1]},1)$ </td> <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> <td> $(n^{[1]},209)$ </td> <tr> <tr> <td> **Layer 2** </td> <td> $(n^{[2]}, n^{[1]})$ </td> <td> $(n^{[2]},1)$ </td> <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> <td> $(n^{[2]}, 209)$ </td> <tr> <tr> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$</td> <td> $\vdots$ </td> <tr> <tr> <td> **Layer L-1** </td> <td> $(n^{[L-1]}, n^{[L-2]})$ </td> <td> $(n^{[L-1]}, 1)$ </td> <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> <td> $(n^{[L-1]}, 209)$ </td> <tr> <tr> <td> **Layer L** </td> <td> $(n^{[L]}, n^{[L-1]})$ </td> <td> $(n^{[L]}, 1)$ </td> <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td> <td> $(n^{[L]}, 209)$ </td> <tr> </table> Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: $$ W = \begin{bmatrix} j & k & l\ m & n & o \ p & q & r \end{bmatrix}\;\;\; X = \begin{bmatrix} a & b & c\ d & e & f \ g & h & i \end{bmatrix} \;\;\; b =\begin{bmatrix} s \ t \ u \end{bmatrix}\tag{2}$$ Then $WX + b$ will be: $$ WX + b = \begin{bmatrix} (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\ (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\ (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u \end{bmatrix}\tag{3} $$ Exercise: Implement initialization for an L-layer Neural Network. Instructions: - The model's structure is [LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function. - Use random initialization for the weight matrices. Use np.random.rand(shape) * 0.01. - Use zeros initialization for the biases. Use np.zeros(shape). - We will store $n^{[l]}$, the number of units in different layers, in a variable layer_dims. For example, the layer_dims for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means W1's shape was (4,2), b1 was (4,1), W2 was (1,4) and b2 was (1,1). Now you will generalize this to $L$ layers! - Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network). python if L == 1: parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01 parameters["b" + str(L)] = np.zeros((layer_dims[1], 1)) End of explanation """ # GRADED FUNCTION: linear_forward def linear_forward(A, W, b): """ Implement the linear part of a layer's forward propagation. 
Arguments: A -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) Returns: Z -- the input of the activation function, also called pre-activation parameter cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently """ ### START CODE HERE ### (≈ 1 line of code) Z = np.dot(W, A) + b ### END CODE HERE ### assert(Z.shape == (W.shape[0], A.shape[1])) cache = (A, W, b) return Z, cache A, W, b = linear_forward_test_case() Z, linear_cache = linear_forward(A, W, b) print("Z = " + str(Z)) """ Explanation: Expected output: <table style="width:80%"> <tr> <td> **W1** </td> <td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td> </tr> <tr> <td>**b1** </td> <td>[[ 0.] [ 0.] [ 0.] [ 0.]]</td> </tr> <tr> <td>**W2** </td> <td>[[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]]</td> </tr> <tr> <td>**b2** </td> <td>[[ 0.] [ 0.] [ 0.]]</td> </tr> </table> 4 - Forward propagation module 4.1 - Linear Forward Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order: LINEAR LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model) The linear forward module (vectorized over all the examples) computes the following equations: $$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$ where $A^{[0]} = X$. Exercise: Build the linear part of forward propagation. Reminder: The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find np.dot() useful. If your dimensions don't match, printing W.shape may help. End of explanation """ # GRADED FUNCTION: linear_activation_forward def linear_activation_forward(A_prev, W, b, activation): """ Implement the forward propagation for the LINEAR->ACTIVATION layer Arguments: A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: A -- the output of the activation function, also called the post-activation value cache -- a python dictionary containing "linear_cache" and "activation_cache"; stored for computing the backward pass efficiently """ if activation == "sigmoid": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". ### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev, W, b) A, activation_cache = sigmoid(Z) ### END CODE HERE ### elif activation == "relu": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". 
### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev, W, b) A, activation_cache = relu(Z) ### END CODE HERE ### assert (A.shape == (W.shape[0], A_prev.shape[1])) cache = (linear_cache, activation_cache) return A, cache A_prev, W, b = linear_activation_forward_test_case() A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid") print("With sigmoid: A = " + str(A)) A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu") print("With ReLU: A = " + str(A)) """ Explanation: Expected output: <table style="width:35%"> <tr> <td> **Z** </td> <td> [[ 3.26295337 -1.23429987]] </td> </tr> </table> 4.2 - Linear-Activation Forward In this notebook, you will use two activation functions: Sigmoid: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the sigmoid function. This function returns two items: the activation value "a" and a "cache" that contains "Z" (it's what we will feed in to the corresponding backward function). To use it you could just call: python A, activation_cache = sigmoid(Z) ReLU: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the relu function. This function returns two items: the activation value "A" and a "cache" that contains "Z" (it's what we will feed in to the corresponding backward function). To use it you could just call: python A, activation_cache = relu(Z) For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step. Exercise: Implement the forward propagation of the LINEAR->ACTIVATION layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function. End of explanation """ # GRADED FUNCTION: L_model_forward def L_model_forward(X, parameters): """ Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation Arguments: X -- data, numpy array of shape (input size, number of examples) parameters -- output of initialize_parameters_deep() Returns: AL -- last post-activation value caches -- list of caches containing: every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2) the cache of linear_sigmoid_forward() (there is one, indexed L-1) """ caches = [] A = X L = len(parameters) // 2 # number of layers in the neural network # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list. for l in range(1, L): A_prev = A ### START CODE HERE ### (≈ 2 lines of code) A, cache = linear_activation_forward(A_prev, parameters['W'+str(l)], parameters['b'+str(l)], activation = "relu") caches.append(cache) ### END CODE HERE ### # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list. 
### START CODE HERE ### (≈ 2 lines of code) AL, cache = linear_activation_forward(A, parameters['W'+str(L)], parameters['b'+str(L)], activation = "sigmoid") caches.append(cache) ### END CODE HERE ### assert(AL.shape == (1,X.shape[1])) return AL, caches X, parameters = L_model_forward_test_case() AL, caches = L_model_forward(X, parameters) print("AL = " + str(AL)) print("Length of caches list = " + str(len(caches))) """ Explanation: Expected output: <table style="width:35%"> <tr> <td> **With sigmoid: A ** </td> <td > [[ 0.96890023 0.11013289]]</td> </tr> <tr> <td> **With ReLU: A ** </td> <td > [[ 3.43896131 0. ]]</td> </tr> </table> Note: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers. d) L-Layer Model For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (linear_activation_forward with RELU) $L-1$ times, then follows that with one linear_activation_forward with SIGMOID. <img src="images/model_architecture_kiank.png" style="width:600px;height:300px;"> <caption><center> Figure 2 : [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID model</center></caption><br> Exercise: Implement the forward propagation of the above model. Instruction: In the code below, the variable AL will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called Yhat, i.e., this is $\hat{Y}$.) Tips: - Use the functions you had previously written - Use a for loop to replicate [LINEAR->RELU] (L-1) times - Don't forget to keep track of the caches in the "caches" list. To add a new value c to a list, you can use list.append(c). End of explanation """ # GRADED FUNCTION: compute_cost def compute_cost(AL, Y): """ Implement the cost function defined by equation (7). Arguments: AL -- probability vector corresponding to your label predictions, shape (1, number of examples) Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples) Returns: cost -- cross-entropy cost """ m = Y.shape[1] # Compute loss from aL and y. ### START CODE HERE ### (≈ 1 lines of code) cost = -1./m * np.sum(Y*np.log(AL) + (1-Y)*np.log(1-AL)) ### END CODE HERE ### cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17). assert(cost.shape == ()) return cost Y, AL = compute_cost_test_case() print("cost = " + str(compute_cost(AL, Y))) """ Explanation: <table style="width:40%"> <tr> <td> **AL** </td> <td > [[ 0.17007265 0.2524272 ]]</td> </tr> <tr> <td> **Length of caches list ** </td> <td > 2</td> </tr> </table> Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions. 5 - Cost function Now you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning. 
Exercise: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{L}\right)) \tag{7}$$ End of explanation """ # GRADED FUNCTION: linear_backward def linear_backward(dZ, cache): """ Implement the linear portion of backward propagation for a single layer (layer l) Arguments: dZ -- Gradient of the cost with respect to the linear output (of current layer l) cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ A_prev, W, b = cache m = A_prev.shape[1] ### START CODE HERE ### (≈ 3 lines of code) dW = 1./m * np.dot(dZ, A_prev.T) db = 1./m * np.sum(dZ, axis=1, keepdims=True) dA_prev = np.dot(W.T, dZ) ### END CODE HERE ### assert (dA_prev.shape == A_prev.shape) assert (dW.shape == W.shape) assert (db.shape == b.shape) return dA_prev, dW, db # Set up some test inputs dZ, linear_cache = linear_backward_test_case() dA_prev, dW, db = linear_backward(dZ, linear_cache) print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) """ Explanation: Expected Output: <table> <tr> <td>**cost** </td> <td> 0.41493159961539694</td> </tr> </table> 6 - Backward propagation module Just like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters. Reminder: <img src="images/backprop_kiank.png" style="width:650px;height:250px;"> <caption><center> Figure 3 : Forward and Backward propagation for LINEAR->RELU->LINEAR->SIGMOID <br> The purple blocks represent the forward propagation, and the red blocks represent the backward propagation. </center></caption> <!-- For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows: $$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$ In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted. Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$. This is why we talk about **backpropagation**. 
!--> Now, similar to forward propagation, you are going to build the backward propagation in three steps: - LINEAR backward - LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model) 6.1 - Linear backward For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation). Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]} dA^{[l-1]})$. <img src="images/linearback_kiank.png" style="width:250px;height:300px;"> <caption><center> Figure 4 </center></caption> The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need: $$ dW^{[l]} = \frac{\partial \mathcal{L} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$ $$ db^{[l]} = \frac{\partial \mathcal{L} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{l}\tag{9}$$ $$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$ Exercise: Use the 3 formulas above to implement linear_backward(). End of explanation """ # GRADED FUNCTION: linear_activation_backward def linear_activation_backward(dA, cache, activation): """ Implement the backward propagation for the LINEAR->ACTIVATION layer. Arguments: dA -- post-activation gradient for current layer l cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ linear_cache, activation_cache = cache if activation == "relu": ### START CODE HERE ### (≈ 2 lines of code) dZ = relu_backward(dA, activation_cache) dA_prev, dW, db = linear_backward(dZ, linear_cache) ### END CODE HERE ### elif activation == "sigmoid": ### START CODE HERE ### (≈ 2 lines of code) dZ = sigmoid_backward(dA, activation_cache) dA_prev, dW, db = linear_backward(dZ, linear_cache) ### END CODE HERE ### return dA_prev, dW, db AL, linear_activation_cache = linear_activation_backward_test_case() dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid") print ("sigmoid:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db) + "\n") dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu") print ("relu:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) """ Explanation: Expected Output: <table style="width:90%"> <tr> <td> **dA_prev** </td> <td > [[ 0.51822968 -0.19517421] [-0.40506361 0.15255393] [ 2.37496825 -0.89445391]] </td> </tr> <tr> <td> **dW** </td> <td > [[-0.10076895 1.40685096 1.64992505]] </td> </tr> <tr> <td> **db** </td> <td> [[ 0.50629448]] </td> </tr> </table> 6.2 - Linear-Activation backward Next, you will create a function that merges the two helper functions: linear_backward and the backward step for the activation linear_activation_backward. 
To help you implement linear_activation_backward, we provided two backward functions: - sigmoid_backward: Implements the backward propagation for SIGMOID unit. You can call it as follows: python dZ = sigmoid_backward(dA, activation_cache) relu_backward: Implements the backward propagation for RELU unit. You can call it as follows: python dZ = relu_backward(dA, activation_cache) If $g(.)$ is the activation function, sigmoid_backward and relu_backward compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$. Exercise: Implement the backpropagation for the LINEAR->ACTIVATION layer. End of explanation """ # GRADED FUNCTION: L_model_backward def L_model_backward(AL, Y, caches): """ Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group Arguments: AL -- probability vector, output of the forward propagation (L_model_forward()) Y -- true "label" vector (containing 0 if non-cat, 1 if cat) caches -- list of caches containing: every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2) the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1]) Returns: grads -- A dictionary with the gradients grads["dA" + str(l)] = ... grads["dW" + str(l)] = ... grads["db" + str(l)] = ... """ grads = {} L = len(caches) # the number of layers m = AL.shape[1] Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL # Initializing the backpropagation ### START CODE HERE ### (1 line of code) dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) ### END CODE HERE ### # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"] ### START CODE HERE ### (approx. 2 lines) current_cache = caches[L-1] grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid") ### END CODE HERE ### for l in reversed(range(L-1)): # lth layer: (RELU -> LINEAR) gradients. # Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)] ### START CODE HERE ### (approx. 5 lines) current_cache = caches[l] dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads['dA' + str(l + 2)], current_cache, activation = "relu") grads["dA" + str(l + 1)] = dA_prev_temp grads["dW" + str(l + 1)] = dW_temp grads["db" + str(l + 1)] = db_temp ### END CODE HERE ### return grads AL, Y_assess, caches = L_model_backward_test_case() grads = L_model_backward(AL, Y_assess, caches) print ("dW1 = "+ str(grads["dW1"])) print ("db1 = "+ str(grads["db1"])) print ("dA1 = "+ str(grads["dA1"])) """ Explanation: Expected output with sigmoid: <table style="width:100%"> <tr> <td > dA_prev </td> <td >[[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.10266786 0.09778551 -0.01968084]] </td> </tr> <tr> <td > db </td> <td > [[-0.05729622]] </td> </tr> </table> Expected output with relu <table style="width:100%"> <tr> <td > dA_prev </td> <td > [[ 0.44090989 0. ] [ 0.37883606 0. ] [-0.2298228 0. ]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.44513824 0.37371418 -0.10478989]] </td> </tr> <tr> <td > db </td> <td > [[-0.20837892]] </td> </tr> </table> 6.3 - L-Model Backward Now you will implement the backward function for the whole network. Recall that when you implemented the L_model_forward function, at each iteration, you stored a cache which contains (X,W,b, and z). 
In the back propagation module, you will use those variables to compute the gradients. Therefore, in the L_model_backward function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. <img src="images/mn_backward.png" style="width:450px;height:300px;"> <caption><center> Figure 5 : Backward pass </center></caption> Initializing backpropagation: To backpropagate through this network, we know that the output is, $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute dAL $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$. To do so, use this formula (derived using calculus which you don't need in-depth knowledge of): python dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL You can then use this post-activation gradient dAL to keep going backward. As seen in Figure 5, you can now feed in dAL into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a for loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : $$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$ For example, for $l=3$ this would store $dW^{[l]}$ in grads["dW3"]. Exercise: Implement backpropagation for the [LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID model. End of explanation """ # GRADED FUNCTION: update_parameters def update_parameters(parameters, grads, learning_rate): """ Update parameters using gradient descent Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients, output of L_model_backward Returns: parameters -- python dictionary containing your updated parameters parameters["W" + str(l)] = ... parameters["b" + str(l)] = ... """ L = len(parameters) // 2 # number of layers in the neural network # Update rule for each parameter. Use a for loop. ### START CODE HERE ### (≈ 3 lines of code) for l in range(L): parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate*grads["dW" + str(l+1)] parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate*grads["db" + str(l+1)] ### END CODE HERE ### return parameters parameters, grads = update_parameters_test_case() parameters = update_parameters(parameters, grads, 0.1) print ("W1 = "+ str(parameters["W1"])) print ("b1 = "+ str(parameters["b1"])) print ("W2 = "+ str(parameters["W2"])) print ("b2 = "+ str(parameters["b2"])) """ Explanation: Expected Output <table style="width:60%"> <tr> <td > dW1 </td> <td > [[ 0.41010002 0.07807203 0.13798444 0.10502167] [ 0. 0. 0. 0. ] [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] </td> </tr> <tr> <td > db1 </td> <td > [[-0.22007063] [ 0. ] [-0.02835349]] </td> </tr> <tr> <td > dA1 </td> <td > [[ 0. 0.52257901] [ 0. -0.3269206 ] [ 0. -0.32070404] [ 0. -0.74079187]] </td> </tr> </table> 6.4 - Update Parameters In this section you will update the parameters of the model, using gradient descent: $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$ $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$ where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary. Exercise: Implement update_parameters() to update your parameters using gradient descent. 
Instructions: Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$. End of explanation """
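Putting these pieces together, one full iteration of training simply chains the helpers built in this notebook. The sketch below is illustrative only: it assumes the forward-pass and cost helpers from the earlier sections are named L_model_forward and compute_cost (your notebook's exact names may differ), and it leaves out data loading and parameter initialization.

def train_step(X, Y, parameters, learning_rate=0.0075):
    # Forward pass through [LINEAR->RELU]*(L-1) -> LINEAR->SIGMOID
    AL, caches = L_model_forward(X, parameters)   # assumed helper name
    cost = compute_cost(AL, Y)                    # assumed helper name
    # Backward pass and update, using the functions implemented above
    grads = L_model_backward(AL, Y, caches)
    parameters = update_parameters(parameters, grads, learning_rate)
    return parameters, cost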
tensorflow/docs
site/en/r1/guide/keras.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2018 The TensorFlow Authors. End of explanation """ !pip install pyyaml # Required to save models in YAML format import tensorflow.compat.v1 as tf from tensorflow.keras import layers print(tf.version.VERSION) print(tf.keras.__version__) """ Explanation: Keras <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r1/guide/keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r1/guide/keras.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> Note: This is an archived TF1 notebook. These are configured to run in TF2's compatibility mode but will run in TF1 as well. To use TF1 in Colab, use the %tensorflow_version 1.x magic. Keras is a high-level API to build and train deep learning models. It's used for fast prototyping, advanced research, and production, with three key advantages: User friendly<br> Keras has a simple, consistent interface optimized for common use cases. It provides clear and actionable feedback for user errors. Modular and composable<br> Keras models are made by connecting configurable building blocks together, with few restrictions. Easy to extend<br> Write custom building blocks to express new ideas for research. Create new layers, loss functions, and develop state-of-the-art models. Import tf.keras tf.keras is TensorFlow's implementation of the Keras API specification. This is a high-level API to build and train models that includes first-class support for TensorFlow-specific functionality, such as eager execution, tf.data pipelines, and Estimators. tf.keras makes TensorFlow easier to use without sacrificing flexibility and performance. To get started, import tf.keras as part of your TensorFlow program setup: End of explanation """ model = tf.keras.Sequential() # Adds a densely-connected layer with 64 units to the model: model.add(layers.Dense(64, activation='relu')) # Add another: model.add(layers.Dense(64, activation='relu')) # Add a softmax layer with 10 output units: model.add(layers.Dense(10, activation='softmax')) """ Explanation: tf.keras can run any Keras-compatible code, but keep in mind: The tf.keras version in the latest TensorFlow release might not be the same as the latest keras version from PyPI. Check tf.keras.__version__. When saving a model's weights, tf.keras defaults to the checkpoint format. Pass save_format='h5' to use HDF5. Build a simple model Sequential model In Keras, you assemble layers to build models. A model is (usually) a graph of layers. The most common type of model is a stack of layers: the tf.keras.Sequential model. To build a simple, fully-connected network (i.e. 
multi-layer perceptron): End of explanation """ # Create a sigmoid layer: layers.Dense(64, activation='sigmoid') # Or: layers.Dense(64, activation=tf.sigmoid) # A linear layer with L1 regularization of factor 0.01 applied to the kernel matrix: layers.Dense(64, kernel_regularizer=tf.keras.regularizers.l1(0.01)) # A linear layer with L2 regularization of factor 0.01 applied to the bias vector: layers.Dense(64, bias_regularizer=tf.keras.regularizers.l2(0.01)) # A linear layer with a kernel initialized to a random orthogonal matrix: layers.Dense(64, kernel_initializer='orthogonal') # A linear layer with a bias vector initialized to 2.0s: layers.Dense(64, bias_initializer=tf.keras.initializers.constant(2.0)) """ Explanation: Configure the layers There are many tf.keras.layers available with some common constructor parameters: activation: Set the activation function for the layer. This parameter is specified by the name of a built-in function or as a callable object. By default, no activation is applied. kernel_initializer and bias_initializer: The initialization schemes that create the layer's weights (kernel and bias). This parameter is a name or a callable object. The kernel defaults to the "Glorot uniform" initializer, and the bias defaults to zeros. kernel_regularizer and bias_regularizer: The regularization schemes that apply the layer's weights (kernel and bias), such as L1 or L2 regularization. By default, no regularization is applied. The following instantiates tf.keras.layers.Dense layers using constructor arguments: End of explanation """ model = tf.keras.Sequential([ # Adds a densely-connected layer with 64 units to the model: layers.Dense(64, activation='relu', input_shape=(32,)), # Add another: layers.Dense(64, activation='relu'), # Add a softmax layer with 10 output units: layers.Dense(10, activation='softmax')]) model.compile(optimizer=tf.train.AdamOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) """ Explanation: Train and evaluate Set up training After the model is constructed, configure its learning process by calling the compile method: End of explanation """ # Configure a model for mean-squared error regression. model.compile(optimizer=tf.train.AdamOptimizer(0.01), loss='mse', # mean squared error metrics=['mae']) # mean absolute error # Configure a model for categorical classification. model.compile(optimizer=tf.train.RMSPropOptimizer(0.01), loss=tf.keras.losses.categorical_crossentropy, metrics=[tf.keras.metrics.categorical_accuracy]) """ Explanation: tf.keras.Model.compile takes three important arguments: optimizer: This object specifies the training procedure. Pass it optimizer instances from the tf.train module, such as tf.train.AdamOptimizer, tf.train.RMSPropOptimizer, or tf.train.GradientDescentOptimizer. loss: The function to minimize during optimization. Common choices include mean square error (mse), categorical_crossentropy, and binary_crossentropy. Loss functions are specified by name or by passing a callable object from the tf.keras.losses module. metrics: Used to monitor training. These are string names or callables from the tf.keras.metrics module. 
The following shows a few examples of configuring a model for training: End of explanation """ import numpy as np def random_one_hot_labels(shape): n, n_class = shape classes = np.random.randint(0, n_class, n) labels = np.zeros((n, n_class)) labels[np.arange(n), classes] = 1 return labels data = np.random.random((1000, 32)) labels = random_one_hot_labels((1000, 10)) model.fit(data, labels, epochs=10, batch_size=32) """ Explanation: Input NumPy data For small datasets, use in-memory NumPy arrays to train and evaluate a model. The model is "fit" to the training data using the fit method: End of explanation """ import numpy as np data = np.random.random((1000, 32)) labels = random_one_hot_labels((1000, 10)) val_data = np.random.random((100, 32)) val_labels = random_one_hot_labels((100, 10)) model.fit(data, labels, epochs=10, batch_size=32, validation_data=(val_data, val_labels)) """ Explanation: tf.keras.Model.fit takes three important arguments: epochs: Training is structured into epochs. An epoch is one iteration over the entire input data (this is done in smaller batches). batch_size: When passed NumPy data, the model slices the data into smaller batches and iterates over these batches during training. This integer specifies the size of each batch. Be aware that the last batch may be smaller if the total number of samples is not divisible by the batch size. validation_data: When prototyping a model, you want to easily monitor its performance on some validation data. Passing this argument—a tuple of inputs and labels—allows the model to display the loss and metrics in inference mode for the passed data, at the end of each epoch. Here's an example using validation_data: End of explanation """ # Instantiates a toy dataset instance: dataset = tf.data.Dataset.from_tensor_slices((data, labels)) dataset = dataset.batch(32) dataset = dataset.repeat() # Don't forget to specify `steps_per_epoch` when calling `fit` on a dataset. model.fit(dataset, epochs=10, steps_per_epoch=30) """ Explanation: Input tf.data datasets Use the Datasets API to scale to large datasets or multi-device training. Pass a tf.data.Dataset instance to the fit method: End of explanation """ dataset = tf.data.Dataset.from_tensor_slices((data, labels)) dataset = dataset.batch(32).repeat() val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels)) val_dataset = val_dataset.batch(32).repeat() model.fit(dataset, epochs=10, steps_per_epoch=30, validation_data=val_dataset, validation_steps=3) """ Explanation: Here, the fit method uses the steps_per_epoch argument—this is the number of training steps the model runs before it moves to the next epoch. Since the Dataset yields batches of data, this snippet does not require a batch_size. Datasets can also be used for validation: End of explanation """ data = np.random.random((1000, 32)) labels = random_one_hot_labels((1000, 10)) model.evaluate(data, labels, batch_size=32) model.evaluate(dataset, steps=30) """ Explanation: Evaluate and predict The tf.keras.Model.evaluate and tf.keras.Model.predict methods can use NumPy data and a tf.data.Dataset. To evaluate the inference-mode loss and metrics for the data provided: End of explanation """ result = model.predict(data, batch_size=32) print(result.shape) """ Explanation: And to predict the output of the last layer in inference for the data provided, as a NumPy array: End of explanation """ inputs = tf.keras.Input(shape=(32,)) # Returns a placeholder tensor # A layer instance is callable on a tensor, and returns a tensor. 
x = layers.Dense(64, activation='relu')(inputs) x = layers.Dense(64, activation='relu')(x) predictions = layers.Dense(10, activation='softmax')(x) """ Explanation: Build advanced models Functional API The tf.keras.Sequential model is a simple stack of layers that cannot represent arbitrary models. Use the Keras functional API to build complex model topologies such as: Multi-input models, Multi-output models, Models with shared layers (the same layer called several times), Models with non-sequential data flows (e.g. residual connections). Building a model with the functional API works like this: A layer instance is callable and returns a tensor. Input tensors and output tensors are used to define a tf.keras.Model instance. This model is trained just like the Sequential model. The following example uses the functional API to build a simple, fully-connected network: End of explanation """ model = tf.keras.Model(inputs=inputs, outputs=predictions) # The compile step specifies the training configuration. model.compile(optimizer=tf.train.RMSPropOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) # Trains for 5 epochs model.fit(data, labels, batch_size=32, epochs=5) """ Explanation: Instantiate the model given inputs and outputs. End of explanation """ class MyModel(tf.keras.Model): def __init__(self, num_classes=10): super(MyModel, self).__init__(name='my_model') self.num_classes = num_classes # Define your layers here. self.dense_1 = layers.Dense(32, activation='relu') self.dense_2 = layers.Dense(num_classes, activation='sigmoid') def call(self, inputs): # Define your forward pass here, # using layers you previously defined (in `__init__`). x = self.dense_1(inputs) return self.dense_2(x) def compute_output_shape(self, input_shape): # You need to override this function if you want to use the subclassed model # as part of a functional-style model. # Otherwise, this method is optional. shape = tf.TensorShape(input_shape).as_list() shape[-1] = self.num_classes return tf.TensorShape(shape) """ Explanation: Model subclassing Build a fully-customizable model by subclassing tf.keras.Model and defining your own forward pass. Create layers in the __init__ method and set them as attributes of the class instance. Define the forward pass in the call method. Model subclassing is particularly useful when eager execution is enabled since the forward pass can be written imperatively. Key Point: Use the right API for the job. While model subclassing offers flexibility, it comes at a cost of greater complexity and more opportunities for user errors. If possible, prefer the functional API. The following example shows a subclassed tf.keras.Model using a custom forward pass: End of explanation """ model = MyModel(num_classes=10) # The compile step specifies the training configuration. model.compile(optimizer=tf.train.RMSPropOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) # Trains for 5 epochs. model.fit(data, labels, batch_size=32, epochs=5) """ Explanation: Instantiate the new model class: End of explanation """ class MyLayer(layers.Layer): def __init__(self, output_dim, **kwargs): self.output_dim = output_dim super(MyLayer, self).__init__(**kwargs) def build(self, input_shape): shape = tf.TensorShape((input_shape[1], self.output_dim)) # Create a trainable weight variable for this layer. 
self.kernel = self.add_weight(name='kernel', shape=shape, initializer='uniform', trainable=True) # Make sure to call the `build` method at the end super(MyLayer, self).build(input_shape) def call(self, inputs): return tf.matmul(inputs, self.kernel) def compute_output_shape(self, input_shape): shape = tf.TensorShape(input_shape).as_list() shape[-1] = self.output_dim return tf.TensorShape(shape) def get_config(self): base_config = super(MyLayer, self).get_config() base_config['output_dim'] = self.output_dim return base_config @classmethod def from_config(cls, config): return cls(**config) """ Explanation: Custom layers Create a custom layer by subclassing tf.keras.layers.Layer and implementing the following methods: build: Create the weights of the layer. Add weights with the add_weight method. call: Define the forward pass. compute_output_shape: Specify how to compute the output shape of the layer given the input shape. Optionally, a layer can be serialized by implementing the get_config method and the from_config class method. Here's an example of a custom layer that implements a matmul of an input with a kernel matrix: End of explanation """ model = tf.keras.Sequential([ MyLayer(10), layers.Activation('softmax')]) # The compile step specifies the training configuration model.compile(optimizer=tf.train.RMSPropOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) # Trains for 5 epochs. model.fit(data, labels, batch_size=32, epochs=5) """ Explanation: Create a model using your custom layer: End of explanation """ callbacks = [ # Interrupt training if `val_loss` stops improving for over 2 epochs tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'), # Write TensorBoard logs to `./logs` directory tf.keras.callbacks.TensorBoard(log_dir='./logs') ] model.fit(data, labels, batch_size=32, epochs=5, callbacks=callbacks, validation_data=(val_data, val_labels)) """ Explanation: Callbacks A callback is an object passed to a model to customize and extend its behavior during training. You can write your own custom callback, or use the built-in tf.keras.callbacks that include: tf.keras.callbacks.ModelCheckpoint: Save checkpoints of your model at regular intervals. tf.keras.callbacks.LearningRateScheduler: Dynamically change the learning rate. tf.keras.callbacks.EarlyStopping: Interrupt training when validation performance has stopped improving. tf.keras.callbacks.TensorBoard: Monitor the model's behavior using TensorBoard. To use a tf.keras.callbacks.Callback, pass it to the model's fit method: End of explanation """ model = tf.keras.Sequential([ layers.Dense(64, activation='relu', input_shape=(32,)), layers.Dense(10, activation='softmax')]) model.compile(optimizer=tf.train.AdamOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) # Save weights to a TensorFlow Checkpoint file model.save_weights('./weights/my_model') # Restore the model's state, # this requires a model with the same architecture. model.load_weights('./weights/my_model') """ Explanation: <a name='save_and_restore'></a> Save and restore <a name='weights_only'></a> Weights only Save and load the weights of a model using tf.keras.Model.save_weights: End of explanation """ # Save weights to a HDF5 file model.save_weights('my_model.h5', save_format='h5') # Restore the model's state model.load_weights('my_model.h5') """ Explanation: By default, this saves the model's weights in the TensorFlow checkpoint file format. 
Weights can also be saved to the Keras HDF5 format (the default for the multi-backend implementation of Keras): End of explanation """ # Serialize a model to JSON format json_string = model.to_json() json_string import json import pprint pprint.pprint(json.loads(json_string)) """ Explanation: Configuration only A model's configuration can be saved—this serializes the model architecture without any weights. A saved configuration can recreate and initialize the same model, even without the code that defined the original model. Keras supports the JSON serialization format: End of explanation """ fresh_model = tf.keras.models.model_from_json(json_string) """ Explanation: Recreate the model (newly initialized) from the JSON: End of explanation """ # Create a trivial model model = tf.keras.Sequential([ layers.Dense(64, activation='relu', input_shape=(32,)), layers.Dense(10, activation='softmax') ]) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels, batch_size=32, epochs=5) # Save entire model to a HDF5 file model.save('my_model.h5') # Recreate the exact same model, including weights and optimizer. model = tf.keras.models.load_model('my_model.h5') """ Explanation: Caution: Subclassed models are not serializable because their architecture is defined by the Python code in the body of the call method. Entire model The entire model can be saved to a file that contains the weight values, the model's configuration, and even the optimizer's configuration. This allows you to checkpoint a model and resume training later—from the exact same state—without access to the original code. End of explanation """ model = tf.keras.Sequential([layers.Dense(64, activation='relu', input_shape=(32,)), layers.Dense(10,activation='softmax')]) model.compile(optimizer=tf.train.RMSPropOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) estimator = tf.keras.estimator.model_to_estimator(model) """ Explanation: <a name='eager_execution'></a> Eager execution Eager execution is an imperative programming environment that evaluates operations immediately. This is not required for Keras, but is supported by tf.keras and useful for inspecting your program and debugging. All of the tf.keras model-building APIs are compatible with eager execution. And while the Sequential and functional APIs can be used, eager execution especially benefits model subclassing and building custom layers—the APIs that require you to write the forward pass as code (instead of the APIs that create models by assembling existing layers). See the eager execution guide for examples of using Keras models with custom training loops and tf.GradientTape. Distribution Estimators The Estimators API is used for training models for distributed environments. This targets industry use cases such as distributed training on large datasets that can export a model for production. A tf.keras.Model can be trained with the tf.estimator API by converting the model to an tf.estimator.Estimator object with tf.keras.estimator.model_to_estimator. See Creating Estimators from Keras models. End of explanation """ model = tf.keras.Sequential() model.add(layers.Dense(16, activation='relu', input_shape=(10,))) model.add(layers.Dense(1, activation='sigmoid')) optimizer = tf.train.GradientDescentOptimizer(0.2) model.compile(loss='binary_crossentropy', optimizer=optimizer) model.summary() """ Explanation: Note: Enable eager execution for debugging Estimator input functions and inspecting data. 
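For reference (and hedged, since the guide does not actually run it at this point), switching on eager mode in this TF1-compatibility setting is a single call that must happen at program startup, before any graph has been built:

tf.enable_eager_execution()  # call once, immediately after importing TensorFlow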
Multiple GPUs tf.keras models can run on multiple GPUs using tf.distribute.DistributionStrategy. This API provides distributed training on multiple GPUs with almost no changes to existing code. Currently, tf.distribute.MirroredStrategy is the only supported distribution strategy. MirroredStrategy does in-graph replication with synchronous training using all-reduce on a single machine. To use DistributionStrategy with Keras, convert the tf.keras.Model to a tf.estimator.Estimator with tf.keras.estimator.model_to_estimator, then train the estimator The following example distributes a tf.keras.Model across multiple GPUs on a single machine. First, define a simple model: End of explanation """ def input_fn(): x = np.random.random((1024, 10)) y = np.random.randint(2, size=(1024, 1)) x = tf.cast(x, tf.float32) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(10) dataset = dataset.batch(32) return dataset """ Explanation: Define an input pipeline. The input_fn returns a tf.data.Dataset object used to distribute the data across multiple devices—with each device processing a slice of the input batch. End of explanation """ strategy = tf.distribute.MirroredStrategy() config = tf.estimator.RunConfig(train_distribute=strategy) """ Explanation: Next, create a tf.estimator.RunConfig and set the train_distribute argument to the tf.distribute.MirroredStrategy instance. When creating MirroredStrategy, you can specify a list of devices or set the num_gpus argument. The default uses all available GPUs, like the following: End of explanation """ keras_estimator = tf.keras.estimator.model_to_estimator( keras_model=model, config=config, model_dir='/tmp/model_dir') """ Explanation: Convert the Keras model to a tf.estimator.Estimator instance: End of explanation """ keras_estimator.train(input_fn=input_fn, steps=10) """ Explanation: Finally, train the Estimator instance by providing the input_fn and steps arguments: End of explanation """
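As a hedged follow-up sketch, not part of the original guide: the same input_fn can be reused to score the trained estimator, since tf.estimator.Estimator also exposes an evaluate method that returns a dictionary of metrics.

eval_results = keras_estimator.evaluate(input_fn=input_fn, steps=10)
print(eval_results)  # typically contains 'loss' plus the compiled metrics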
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/structured/solutions/4b_keras_dnn_babyweight.ipynb
apache-2.0
import datetime import os import shutil import matplotlib.pyplot as plt import tensorflow as tf print(tf.__version__) """ Explanation: LAB 4b: Create Keras DNN model. Learning Objectives Set CSV Columns, label column, and column defaults Make dataset of features and label from CSV files Create input layers for raw features Create feature columns for inputs Create DNN dense hidden layers and output layer Create custom evaluation metric Build DNN model tying all of the pieces together Train and evaluate Introduction In this notebook, we'll be using Keras to create a DNN model to predict the weight of a baby before it is born. We'll start by defining the CSV column names, label column, and column defaults for our data inputs. Then, we'll construct a tf.data Dataset of features and the label from the CSV files and create inputs layers for the raw features. Next, we'll set up feature columns for the model inputs and build a deep neural network in Keras. We'll create a custom evaluation metric and build our DNN model. Finally, we'll train and evaluate our model. Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook. Load necessary libraries End of explanation """ BUCKET = # REPLACE BY YOUR BUCKET os.environ['BUCKET'] = BUCKET """ Explanation: Set your bucket: End of explanation """ TRAIN_DATA_PATH = "gs://{bucket}/babyweight/data/train*.csv".format(bucket=BUCKET) EVAL_DATA_PATH = "gs://{bucket}/babyweight/data/eval*.csv".format(bucket=BUCKET) !gsutil ls $TRAIN_DATA_PATH !gsutil ls $EVAL_DATA_PATH """ Explanation: Verify CSV files exist In the seventh lab of this series 1b_prepare_data_babyweight, we sampled from BigQuery our train, eval, and test CSV files. Verify that they exist, otherwise go back to that lab and create them. End of explanation """ # Determine CSV, label, and key columns # Create list of string column headers, make sure order matches. CSV_COLUMNS = ["weight_pounds", "is_male", "mother_age", "plurality", "gestation_weeks"] # Add string name for label column LABEL_COLUMN = "weight_pounds" # Set default values for each CSV column as a list of lists. # Treat is_male and plurality as strings. DEFAULTS = [[0.0], ["null"], [0.0], ["null"], [0.0]] """ Explanation: Create Keras model Set CSV Columns, label column, and column defaults. Now that we have verified that our CSV files exist, we need to set a few things that we will be using in our input function. * CSV_COLUMNS are going to be our header names of our columns. Make sure that they are in the same order as in the CSV files * LABEL_COLUMN is the header name of the column that is our label. We will need to know this to pop it from our features dictionary. * DEFAULTS is a list with the same length as CSV_COLUMNS, i.e. there is a default for each column in our CSVs. Each element is a list itself with the default value for that CSV column. End of explanation """ def features_and_labels(row_data): """Splits features and labels from feature dictionary. Args: row_data: Dictionary of CSV column names and tensor values. Returns: Dictionary of feature tensors and label tensor. """ label = row_data.pop(LABEL_COLUMN) return row_data, label # features, label def load_dataset(pattern, batch_size=1, mode='eval'): """Loads dataset using the tf.data API from CSV files. Args: pattern: str, file pattern to glob into list of files. batch_size: int, the number of examples per batch. mode: 'train' | 'eval' to determine if training or evaluating. 
Returns: `Dataset` object. """ # Make a CSV dataset dataset = tf.data.experimental.make_csv_dataset( file_pattern=pattern, batch_size=batch_size, column_names=CSV_COLUMNS, column_defaults=DEFAULTS) # Map dataset to features and label dataset = dataset.map(map_func=features_and_labels) # features, label # Shuffle and repeat for training if mode == 'train': dataset = dataset.shuffle(buffer_size=1000).repeat() # Take advantage of multi-threading; 1=AUTOTUNE dataset = dataset.prefetch(buffer_size=1) return dataset """ Explanation: Make dataset of features and label from CSV files. Next, we will write an input_fn to read the data. Since we are reading from CSV files we can save ourself from trying to recreate the wheel and can use tf.data.experimental.make_csv_dataset. This will create a CSV dataset object. However we will need to divide the columns up into features and a label. We can do this by applying the map method to our dataset and popping our label column off of our dictionary of feature tensors. End of explanation """ def create_input_layers(): """Creates dictionary of input layers for each feature. Returns: Dictionary of `tf.Keras.layers.Input` layers for each feature. """ inputs = { colname: tf.keras.layers.Input( name=colname, shape=(), dtype="float32") for colname in ["mother_age", "gestation_weeks"]} inputs.update({ colname: tf.keras.layers.Input( name=colname, shape=(), dtype="string") for colname in ["is_male", "plurality"]}) return inputs """ Explanation: Create input layers for raw features. We'll need to get the data read in by our input function to our model function, but just how do we go about connecting the dots? We can use Keras input layers (tf.Keras.layers.Input) by defining: * shape: A shape tuple (integers), not including the batch size. For instance, shape=(32,) indicates that the expected input will be batches of 32-dimensional vectors. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known. * name: An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided. * dtype: The data type expected by the input, as a string (float32, float64, int32...) End of explanation """ def categorical_fc(name, values): """Helper function to wrap categorical feature by indicator column. Args: name: str, name of feature. values: list, list of strings of categorical values. Returns: Indicator column of categorical feature. """ cat_column = tf.feature_column.categorical_column_with_vocabulary_list( key=name, vocabulary_list=values) return tf.feature_column.indicator_column(categorical_column=cat_column) def create_feature_columns(): """Creates dictionary of feature columns from inputs. Returns: Dictionary of feature columns. """ feature_columns = { colname : tf.feature_column.numeric_column(key=colname) for colname in ["mother_age", "gestation_weeks"] } feature_columns["is_male"] = categorical_fc( "is_male", ["True", "False", "Unknown"]) feature_columns["plurality"] = categorical_fc( "plurality", ["Single(1)", "Twins(2)", "Triplets(3)", "Quadruplets(4)", "Quintuplets(5)", "Multiple(2+)"]) return feature_columns """ Explanation: Create feature columns for inputs. Next, define the feature columns. mother_age and gestation_weeks should be numeric. The others, is_male and plurality, should be categorical. Remember, only dense feature columns can be inputs to a DNN. 
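As an optional sanity check, not part of the lab and with made-up feature values, you can push a single fake example through tf.keras.layers.DenseFeatures to see the dense tensor these columns produce:

fc = create_feature_columns()
sample = {
    "mother_age": tf.constant([28.0]),
    "gestation_weeks": tf.constant([39.0]),
    "is_male": tf.constant(["True"]),
    "plurality": tf.constant(["Single(1)"])}
dense = tf.keras.layers.DenseFeatures(feature_columns=fc.values())(sample)
print(dense)  # two numeric values plus one-hot indicators for the two categorical features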
End of explanation """ def get_model_outputs(inputs): """Creates model architecture and returns outputs. Args: inputs: Dense tensor used as inputs to model. Returns: Dense tensor output from the model. """ # Create two hidden layers of [64, 32] just in like the BQML DNN h1 = tf.keras.layers.Dense(64, activation="relu", name="h1")(inputs) h2 = tf.keras.layers.Dense(32, activation="relu", name="h2")(h1) # Final output is a linear activation because this is regression output = tf.keras.layers.Dense( units=1, activation="linear", name="weight")(h2) return output """ Explanation: Create DNN dense hidden layers and output layer. So we've figured out how to get our inputs ready for machine learning but now we need to connect them to our desired output. Our model architecture is what links the two together. Let's create some hidden dense layers beginning with our inputs and end with a dense output layer. This is regression so make sure the output layer activation is correct and that the shape is right. End of explanation """ def rmse(y_true, y_pred): """Calculates RMSE evaluation metric. Args: y_true: tensor, true labels. y_pred: tensor, predicted labels. Returns: Tensor with value of RMSE between true and predicted labels. """ return tf.sqrt(tf.reduce_mean((y_pred - y_true) ** 2)) """ Explanation: Create custom evaluation metric. We want to make sure that we have some useful way to measure model performance for us. Since this is regression, we would like to know the RMSE of the model on our evaluation dataset, however, this does not exist as a standard evaluation metric, so we'll have to create our own by using the true and predicted labels. End of explanation """ def build_dnn_model(): """Builds simple DNN using Keras Functional API. Returns: `tf.keras.models.Model` object. """ # Create input layer inputs = create_input_layers() # Create feature columns feature_columns = create_feature_columns() # The constructor for DenseFeatures takes a list of numeric columns # The Functional API in Keras requires: LayerConstructor()(inputs) dnn_inputs = tf.keras.layers.DenseFeatures( feature_columns=feature_columns.values())(inputs) # Get output of model given inputs output = get_model_outputs(dnn_inputs) # Build model and compile it all together model = tf.keras.models.Model(inputs=inputs, outputs=output) model.compile(optimizer="adam", loss="mse", metrics=[rmse, "mse"]) return model print("Here is our DNN architecture so far:\n") model = build_dnn_model() print(model.summary()) """ Explanation: Build DNN model tying all of the pieces together. Excellent! We've assembled all of the pieces, now we just need to tie them all together into a Keras Model. This is a simple feedforward model with no branching, side inputs, etc. so we could have used Keras' Sequential Model API but just for fun we're going to use Keras' Functional Model API. Here we will build the model using tf.keras.models.Model giving our inputs and outputs and then compile our model with an optimizer, a loss function, and evaluation metrics. End of explanation """ tf.keras.utils.plot_model( model=model, to_file="dnn_model.png", show_shapes=False, rankdir="LR") """ Explanation: We can visualize the DNN using the Keras plot_model utility. 
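If the rendered graph is hard to read, an optional variant of the same call (with a hypothetical output filename) adds tensor shapes to each node, which helps confirm the DenseFeatures wiring:

tf.keras.utils.plot_model(
    model=model, to_file="dnn_model_shapes.png", show_shapes=True, rankdir="LR")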
End of explanation """ TRAIN_BATCH_SIZE = 32 NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset repeats, it'll wrap around NUM_EVALS = 5 # how many times to evaluate # Enough to get a reasonable sample, but not so much that it slows down NUM_EVAL_EXAMPLES = 10000 trainds = load_dataset( pattern=TRAIN_DATA_PATH, batch_size=TRAIN_BATCH_SIZE, mode='train') evalds = load_dataset( pattern=EVAL_DATA_PATH, batch_size=1000, mode='eval').take(count=NUM_EVAL_EXAMPLES // 1000) steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS) logdir = os.path.join( "logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S")) tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=logdir, histogram_freq=1) history = model.fit( trainds, validation_data=evalds, epochs=NUM_EVALS, steps_per_epoch=steps_per_epoch, callbacks=[tensorboard_callback]) """ Explanation: Run and evaluate model Train and evaluate. We've built our Keras model using our inputs from our CSV files and the architecture we designed. Let's now run our model by training our model parameters and periodically running an evaluation to track how well we are doing on outside data as training goes on. We'll need to load both our train and eval datasets and send those to our model through the fit method. Make sure you have the right pattern, batch size, and mode when loading the data. Also, don't forget to add the callback to TensorBoard. End of explanation """ # Plot import matplotlib.pyplot as plt nrows = 1 ncols = 2 fig = plt.figure(figsize=(10, 5)) for idx, key in enumerate(["loss", "rmse"]): ax = fig.add_subplot(nrows, ncols, idx+1) plt.plot(history.history[key]) plt.plot(history.history["val_{}".format(key)]) plt.title("model {}".format(key)) plt.ylabel(key) plt.xlabel("epoch") plt.legend(["train", "validation"], loc="upper left"); """ Explanation: Visualize loss curve End of explanation """ OUTPUT_DIR = "babyweight_trained" shutil.rmtree(OUTPUT_DIR, ignore_errors=True) EXPORT_PATH = os.path.join( OUTPUT_DIR, datetime.datetime.now().strftime("%Y%m%d%H%M%S")) tf.saved_model.save( obj=model, export_dir=EXPORT_PATH) # with default serving function print("Exported trained model to {}".format(EXPORT_PATH)) !ls $EXPORT_PATH """ Explanation: Save the model End of explanation """
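As a hedged follow-up sketch that is not in the original lab: the export can be reloaded and queried through its default serving signature. The "serving_default" key is the name TensorFlow assigns when no explicit signature is provided, the keyword arguments are assumed to match the Input layer names defined earlier, and the feature values are made up.

loaded = tf.saved_model.load(EXPORT_PATH)
infer = loaded.signatures["serving_default"]
prediction = infer(
    mother_age=tf.constant([28.0]),
    gestation_weeks=tf.constant([39.0]),
    is_male=tf.constant(["True"]),
    plurality=tf.constant(["Single(1)"]))
print(prediction)  # dict keyed by the output layer name, e.g. "weight"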
d-k-b/udacity-deep-learning
batch-norm/Batch_Normalization_Exercises.ipynb
mit
import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False) """ Explanation: Batch Normalization – Practice Batch normalization is most useful when building deep neural networks. To demonstrate this, we'll create a convolutional neural network with 20 convolutional layers, followed by a fully connected layer. We'll use it to classify handwritten digits in the MNIST dataset, which should be familiar to you by now. This is not a good network for classfying MNIST digits. You could create a much simpler network and get better results. However, to give you hands-on experience with batch normalization, we had to make an example that was: 1. Complicated enough that training would benefit from batch normalization. 2. Simple enough that it would train quickly, since this is meant to be a short exercise just to give you some practice adding batch normalization. 3. Simple enough that the architecture would be easy to understand without additional resources. This notebook includes two versions of the network that you can edit. The first uses higher level functions from the tf.layers package. The second is the same network, but uses only lower level functions in the tf.nn package. Batch Normalization with tf.layers.batch_normalization Batch Normalization with tf.nn.batch_normalization The following cell loads TensorFlow, downloads the MNIST dataset if necessary, and loads it into an object named mnist. You'll need to run this cell before running anything else in the notebook. End of explanation """ """ DO NOT MODIFY THIS CELL """ def fully_connected(prev_layer, num_units): """ Create a fully connectd layer with the given layer as input and the given number of neurons. :param prev_layer: Tensor The Tensor that acts as input into this layer :param num_units: int The size of the layer. That is, the number of units, nodes, or neurons. :returns Tensor A new fully connected layer """ layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu) return layer """ Explanation: Batch Normalization using tf.layers.batch_normalization<a id="example_1"></a> This version of the network uses tf.layers for almost everything, and expects you to implement batch normalization using tf.layers.batch_normalization We'll use the following function to create fully connected layers in our network. We'll create them with the specified number of neurons and a ReLU activation function. This version of the function does not include batch normalization. End of explanation """ """ DO NOT MODIFY THIS CELL """ def conv_layer(prev_layer, layer_depth): """ Create a convolutional layer with the given layer as input. :param prev_layer: Tensor The Tensor that acts as input into this layer :param layer_depth: int We'll set the strides and number of feature maps based on the layer's depth in the network. This is *not* a good way to make a CNN, but it helps us create this example with very little code. :returns Tensor A new convolutional layer """ strides = 2 if layer_depth % 3 == 0 else 1 conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu) return conv_layer """ Explanation: We'll use the following function to create convolutional layers in our network. They are very basic: we're always using a 3x3 kernel, ReLU activation functions, strides of 1x1 on layers with odd depths, and strides of 2x2 on layers with even depths. We aren't bothering with pooling layers at all in this network. 
This version of the function does not include batch normalization. End of explanation """ """ DO NOT MODIFY THIS CELL """ def train(num_batches, batch_size, learning_rate): # Build placeholders for the input samples and labels inputs = tf.placeholder(tf.float32, [None, 28, 28, 1]) labels = tf.placeholder(tf.float32, [None, 10]) # Feed the inputs into a series of 20 convolutional layers layer = inputs for layer_i in range(1, 20): layer = conv_layer(layer, layer_i) # Flatten the output from the convolutional layers orig_shape = layer.get_shape().as_list() layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]]) # Add one fully connected layer layer = fully_connected(layer, 100) # Create the output layer with 1 node for each logits = tf.layers.dense(layer, 10) # Define loss and training operations model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss) # Create operations to test accuracy correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Train and test the network with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for batch_i in range(num_batches): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # train this batch sess.run(train_opt, {inputs: batch_xs, labels: batch_ys}) # Periodically check the validation or training loss and accuracy if batch_i % 100 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc)) elif batch_i % 25 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys}) print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc)) # At the end, score the final accuracy for both the validation and test sets acc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Final validation accuracy: {:>3.5f}'.format(acc)) acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels}) print('Final test accuracy: {:>3.5f}'.format(acc)) # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly. correct = 0 for i in range(100): correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]], labels: [mnist.test.labels[i]]}) print("Accuracy on 100 samples:", correct/100) num_batches = 800 batch_size = 64 learning_rate = 0.002 tf.reset_default_graph() with tf.Graph().as_default(): train(num_batches, batch_size, learning_rate) """ Explanation: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions). This cell builds the network without batch normalization, then trains it on the MNIST dataset. It displays loss and accuracy data periodically while training. End of explanation """ def fully_connected(prev_layer, num_units): """ Create a fully connectd layer with the given layer as input and the given number of neurons. :param prev_layer: Tensor The Tensor that acts as input into this layer :param num_units: int The size of the layer. That is, the number of units, nodes, or neurons. 
:returns Tensor A new fully connected layer """ layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu) return layer """ Explanation: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.) Using batch normalization, you'll be able to train this same network to over 90% in that same number of batches. Add batch normalization We've copied the previous three cells to get you started. Edit these cells to add batch normalization to the network. For this exercise, you should use tf.layers.batch_normalization to handle most of the math, but you'll need to make a few other changes to your network to integrate batch normalization. You may want to refer back to the lesson notebook to remind yourself of important things, like how your graph operations need to know whether or not you are performing training or inference. If you get stuck, you can check out the Batch_Normalization_Solutions notebook to see how we did things. TODO: Modify fully_connected to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps. End of explanation """ def conv_layer(prev_layer, layer_depth): """ Create a convolutional layer with the given layer as input. :param prev_layer: Tensor The Tensor that acts as input into this layer :param layer_depth: int We'll set the strides and number of feature maps based on the layer's depth in the network. This is *not* a good way to make a CNN, but it helps us create this example with very little code. :returns Tensor A new convolutional layer """ strides = 2 if layer_depth % 3 == 0 else 1 conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu) return conv_layer """ Explanation: TODO: Modify conv_layer to add batch normalization to the convolutional layers it creates. Feel free to change the function's parameters if it helps. 
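If it helps to see the shape of one possible answer, here is a hedged pattern (a hint only; the reference version is in the Batch_Normalization_Solutions notebook): build the convolution with no activation, normalize it with tf.layers.batch_normalization, then apply the ReLU, threading a boolean is_training tensor in from train().

def conv_layer(prev_layer, layer_depth, is_training):
    strides = 2 if layer_depth % 3 == 0 else 1
    conv = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same',
                            activation=None, use_bias=False)  # bias is redundant once batch norm adds beta
    conv = tf.layers.batch_normalization(conv, training=is_training)
    return tf.nn.relu(conv)

When you reach the train() TODO below, remember that tf.layers.batch_normalization registers its moving-average updates in the tf.GraphKeys.UPDATE_OPS collection, so the training op is usually created under tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)).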
End of explanation """ def train(num_batches, batch_size, learning_rate): # Build placeholders for the input samples and labels inputs = tf.placeholder(tf.float32, [None, 28, 28, 1]) labels = tf.placeholder(tf.float32, [None, 10]) # Feed the inputs into a series of 20 convolutional layers layer = inputs for layer_i in range(1, 20): layer = conv_layer(layer, layer_i) # Flatten the output from the convolutional layers orig_shape = layer.get_shape().as_list() layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]]) # Add one fully connected layer layer = fully_connected(layer, 100) # Create the output layer with 1 node for each logits = tf.layers.dense(layer, 10) # Define loss and training operations model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss) # Create operations to test accuracy correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Train and test the network with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for batch_i in range(num_batches): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # train this batch sess.run(train_opt, {inputs: batch_xs, labels: batch_ys}) # Periodically check the validation or training loss and accuracy if batch_i % 100 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc)) elif batch_i % 25 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys}) print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc)) # At the end, score the final accuracy for both the validation and test sets acc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Final validation accuracy: {:>3.5f}'.format(acc)) acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels}) print('Final test accuracy: {:>3.5f}'.format(acc)) # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly. correct = 0 for i in range(100): correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]], labels: [mnist.test.labels[i]]}) print("Accuracy on 100 samples:", correct/100) num_batches = 800 batch_size = 64 learning_rate = 0.002 tf.reset_default_graph() with tf.Graph().as_default(): train(num_batches, batch_size, learning_rate) """ Explanation: TODO: Edit the train function to support batch normalization. You'll need to make sure the network knows whether or not it is training, and you'll need to make sure it updates and uses its population statistics correctly. End of explanation """ def fully_connected(prev_layer, num_units): """ Create a fully connectd layer with the given layer as input and the given number of neurons. :param prev_layer: Tensor The Tensor that acts as input into this layer :param num_units: int The size of the layer. That is, the number of units, nodes, or neurons. :returns Tensor A new fully connected layer """ layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu) return layer """ Explanation: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output: Accuracy on 100 samples. 
If this value is low while everything else looks good, that means you did not implement batch normalization correctly. Specifically, it means you either did not calculate the population mean and variance while training, or you are not using those values during inference. Batch Normalization using tf.nn.batch_normalization<a id="example_2"></a> Most of the time you will be able to use higher level functions exclusively, but sometimes you may want to work at a lower level. For example, if you ever want to implement a new feature – something new enough that TensorFlow does not already include a high-level implementation of it, like batch normalization in an LSTM – then you may need to know these sorts of things. This version of the network uses tf.nn for almost everything, and expects you to implement batch normalization using tf.nn.batch_normalization. Optional TODO: You can run the next three cells before you edit them just to see how the network performs without batch normalization. However, the results should be pretty much the same as you saw with the previous example before you added batch normalization. TODO: Modify fully_connected to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps. Note: For convenience, we continue to use tf.layers.dense for the fully_connected layer. By this point in the class, you should have no problem replacing that with matrix operations between the prev_layer and explicit weights and biases variables. End of explanation """ def conv_layer(prev_layer, layer_depth): """ Create a convolutional layer with the given layer as input. :param prev_layer: Tensor The Tensor that acts as input into this layer :param layer_depth: int We'll set the strides and number of feature maps based on the layer's depth in the network. This is *not* a good way to make a CNN, but it helps us create this example with very little code. :returns Tensor A new convolutional layer """ strides = 2 if layer_depth % 3 == 0 else 1 in_channels = prev_layer.get_shape().as_list()[3] out_channels = layer_depth*4 weights = tf.Variable( tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05)) bias = tf.Variable(tf.zeros(out_channels)) conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME') conv_layer = tf.nn.bias_add(conv_layer, bias) conv_layer = tf.nn.relu(conv_layer) return conv_layer """ Explanation: TODO: Modify conv_layer to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps. Note: Unlike in the previous example that used tf.layers, adding batch normalization to these convolutional layers does require some slight differences to what you did in fully_connected. 
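To make that difference concrete, here is a hedged fragment of what the normalization step might look like inside this lower-level conv_layer. It would take the place of the bias_add and relu lines above (beta plays the role of the bias, and the ReLU comes after normalization), and it shows batch statistics only; tracking population statistics for inference is exactly the part the train() TODO leaves to you.

gamma = tf.Variable(tf.ones([out_channels]))
beta = tf.Variable(tf.zeros([out_channels]))
# For conv layers the statistics are taken over the batch *and* spatial axes,
# giving one mean/variance per output channel.
batch_mean, batch_variance = tf.nn.moments(conv_layer, axes=[0, 1, 2])
conv_layer = tf.nn.batch_normalization(conv_layer, batch_mean, batch_variance,
                                       beta, gamma, 1e-3)
conv_layer = tf.nn.relu(conv_layer)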
End of explanation """ def train(num_batches, batch_size, learning_rate): # Build placeholders for the input samples and labels inputs = tf.placeholder(tf.float32, [None, 28, 28, 1]) labels = tf.placeholder(tf.float32, [None, 10]) # Feed the inputs into a series of 20 convolutional layers layer = inputs for layer_i in range(1, 20): layer = conv_layer(layer, layer_i) # Flatten the output from the convolutional layers orig_shape = layer.get_shape().as_list() layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]]) # Add one fully connected layer layer = fully_connected(layer, 100) # Create the output layer with 1 node for each logits = tf.layers.dense(layer, 10) # Define loss and training operations model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss) # Create operations to test accuracy correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Train and test the network with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for batch_i in range(num_batches): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # train this batch sess.run(train_opt, {inputs: batch_xs, labels: batch_ys}) # Periodically check the validation or training loss and accuracy if batch_i % 100 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc)) elif batch_i % 25 == 0: loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys}) print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc)) # At the end, score the final accuracy for both the validation and test sets acc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels}) print('Final validation accuracy: {:>3.5f}'.format(acc)) acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels}) print('Final test accuracy: {:>3.5f}'.format(acc)) # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly. correct = 0 for i in range(100): correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]], labels: [mnist.test.labels[i]]}) print("Accuracy on 100 samples:", correct/100) num_batches = 800 batch_size = 64 learning_rate = 0.002 tf.reset_default_graph() with tf.Graph().as_default(): train(num_batches, batch_size, learning_rate) """ Explanation: TODO: Edit the train function to support batch normalization. You'll need to make sure the network knows whether or not it is training. End of explanation """
noammor/coursera-machinelearning-python
ex7/ml-ex7-pca.ipynb
mit
import numpy as np import matplotlib.pyplot as plt import scipy.io import itertools %matplotlib inline """ Explanation: # Exercise 7 | Principle Component Analysis End of explanation """ ex7data1 = scipy.io.loadmat('ex7data1.mat') X = ex7data1['X'] def plot_data(X, ax): ax.plot(X[:,0], X[:,1], 'bo') fig, ax = plt.subplots() plot_data(X, ax) def normalize_features(X): mu = np.mean(X, 0) X_norm = X - mu sigma = np.std(X_norm, 0) X_norm = X_norm / sigma return X_norm, mu, sigma X_norm, mu, sigma = normalize_features(X) """ Explanation: Part 1: Load Example Dataset We start this exercise by using a small dataset that is easily to visualize. End of explanation """ def pca(X): #PCA Run principal component analysis on the dataset X # [U, S] = pca(X) computes eigenvectors of the covariance matrix of X # Returns the eigenvectors U, the eigenvalues in S # m, n = X.shape # You need to return the following variables correctly. U = np.zeros((n, n)) S = np.zeros(n) # ====================== YOUR CODE HERE ====================== # Instructions: You should first compute the covariance matrix. Then, you # should use the "scipy.linalg.svd" function to compute the eigenvectors # and eigenvalues of the covariance matrix. # # Note: When computing the covariance matrix, remember to divide by m (the # number of examples). # # ========================================================================= return U, S U, S = pca(X_norm) U """ Explanation: Part 2: Principal Component Analysis You should now implement PCA, a dimension reduction technique. You should complete the following code. End of explanation """ def draw_line(a, b, ax, *args): ax.plot([a[0], b[0]], [a[1], b[1]], *args) fig, ax = plt.subplots(figsize=(5,5)) ax.set_ylim(2, 8) ax.set_xlim(0.5, 6.5) ax.set_aspect('equal') plot_data(X, ax) ax.plot(mu[0], mu[1]) draw_line(mu, mu + 1.5 * S[0] * U[0, :], ax, '-k') draw_line(mu, mu + 1.5 * S[1] * U[1, :], ax, '-k') """ Explanation: Draw the eigenvectors centered at mean of data. These lines show the directions of maximum variations in the dataset. End of explanation """ U[0] """ Explanation: The top eigenvector should be [-0.707107, -0.707107]. End of explanation """ def project_data(X, U, K): #PROJECTDATA Computes the reduced data representation when projecting only #on to the top k eigenvectors # Z = projectData(X, U, K) computes the projection of # the normalized inputs X into the reduced dimensional space spanned by # the first K columns of U. It returns the projected examples in Z. # # You need to return the following variables correctly. Z = np.zeros((X.shape[0], K)) # ====================== YOUR CODE HERE ====================== # Instructions: Compute the projection of the data using only the top K # eigenvectors in U (first K columns). # For the i-th example X(i,:), the projection on to the k-th # eigenvector is given as follows: # x = X[i, :].T # projection_k = x.T.dot(U(:, k)); # # ============================================================= return Z """ Explanation: Part 3: Dimension Reduction You should now implement the projection step to map the data onto the first k eigenvectors. The code will then plot the data in this reduced dimensional space. This will show you what the data looks like when using only the corresponding eigenvectors to reconstruct it. You should complete the code in project_data. 
End of explanation """ K = 1 Z = project_data(X_norm, U, K) Z[0,0] def recover_data(Z, U, K): #RECOVERDATA Recovers an approximation of the original data when using the #projected data # X_rec = RECOVERDATA(Z, U, K) recovers an approximation the # original data that has been reduced to K dimensions. It returns the # approximate reconstruction in X_rec. # # You need to return the following variables correctly. X_rec = np.zeros((Z.shape[0], U.shape[0])) # ====================== YOUR CODE HERE ====================== # Instructions: Compute the approximation of the data by projecting back # onto the original space using the top K eigenvectors in U. # # For the i-th example Z(i,:), the (approximate) # recovered data for dimension j is given as follows: # v = Z(i, :)'; # recovered_j = v' * U(j, 1:K)'; # # Notice that U(j, 1:K) is a row vector. # # ============================================================= return X_rec """ Explanation: Projection of the first example: (should be about 1.49631261) End of explanation """ X_rec = recover_data(Z, U, K) X_rec[0] """ Explanation: Approximation of the first example: (should be about [-1.05805279, -1.05805279]) End of explanation """ fig, ax = plt.subplots(figsize=(5,5)) ax.set_ylim(-3, 3) ax.set_xlim(-3, 3) ax.set_aspect('equal') plot_data(X_norm, ax) ax.plot(X_rec[:,0], X_rec[:,1], 'ro') for x_norm, x_rec in zip(X_norm, X_rec): draw_line(x_norm, x_rec, ax, '--k') """ Explanation: Draw lines connecting the projected points to the original points End of explanation """ X = scipy.io.loadmat('ex7faces.mat')['X'] X.shape def display_faces(X, example_width=None): example_size = len(X[0]) if example_width is None: example_width = int(np.sqrt(example_size)) num_examples = len(X) figures_row_length = int(np.sqrt(num_examples)) fig, axes = plt.subplots(nrows=figures_row_length, ncols=figures_row_length, figsize=(6,6)) fig.subplots_adjust(wspace=0, hspace=0) for i, j in itertools.product(range(figures_row_length), range(figures_row_length)): ax = axes[i][j] ax.set_axis_off() ax.set_aspect('equal') example = X[i*figures_row_length + j].reshape(example_size//example_width, example_width).T ax.imshow(example, cmap='Greys_r') display_faces(X[:100]) """ Explanation: Part 4: Loading and Visualizing Face Data We start the exercise by first loading and visualizing the dataset. The following code will load the dataset into your environment, and later display the first 100 faces in the dataset. End of explanation """ X_norm, mu, sigma = normalize_features(X) U, S = pca(X_norm) display_faces(U[:, :64].T) """ Explanation: Part 5: PCA on Face Data: Eigenfaces Run PCA and visualize the eigenvectors which are in this case eigenfaces We display the first 64 eigenfaces. Before running PCA, it is important to first normalize X. End of explanation """ K = 100 Z = project_data(X_norm, U, K) Z.shape """ Explanation: Part 6: Dimension Reduction for Faces Project images to the eigen space using the top k eigenvectors End of explanation """ X_rec = recover_data(Z, U, K) X_rec.shape display_faces(X_rec[:100]) """ Explanation: Part 7: Visualization of Faces after PCA Dimension Reduction Project images to the eigen space using the top K eigen vectors and visualize only using those K dimensions. Compare to the original input. End of explanation """
vk3105/Data-Intensive-Programming-CSE-587
Lab5/Lab5-Task1.ipynb
apache-2.0
import pyspark import string import re import time import os from pylab import * from itertools import * from pyspark.sql import SQLContext, Row """ Explanation: LAB 5 : TASK 1 - GENERATING THE 2-GRAM & 3-GRAM Load the libraries End of explanation """ regex = re.compile('[%s]' % re.escape(string.punctuation)) """ Explanation: Create a regex for removing the punctuations from the text file End of explanation """ sc = pyspark.SparkContext() """ Explanation: Generate a pyspark Context End of explanation """ sqlContext = SQLContext(sc) """ Explanation: Generate a SQL Context for our files End of explanation """ def buildLemmaDict(x): lemmas = x.split(",") lemmaWords = [] for lemma in lemmas: if lemma!="": lemmaWords.append(lemma) lemmaDic = [(lemmas[0],list(set(lemmaWords)))] return(lemmaDic) """ Explanation: A function to read the string and generate the lemma dictionary End of explanation """ lemma_rdd = sc.textFile("./new_lemmatizer.csv") """ Explanation: Read the lemmatizer CSV file to build the lemma dictionary End of explanation """ lemmaDictionary_rdd = (lemma_rdd.flatMap(lambda x : buildLemmaDict(x))) """ Explanation: Create a rdd from the above file to apply a common function "buildLemmaDict" End of explanation """ lemmaDictionary = lemmaDictionary_rdd.collectAsMap() """ Explanation: Collect the rdd as a map to get the dictionary End of explanation """ def getLemmasList(word): cooccuranceList = [] wordLemmaList = [] if word in lemmaDictionary: wordLemmaList = wordLemmaList + lemmaDictionary.get(word) else : wordLemmaList = [word] return wordLemmaList """ Explanation: A function to provide the lemmas by lookup from the lemma dictionary End of explanation """ path = "./input/" subDirList = next(os.walk(path))[1] print(subDirList) subDirList = [int(x) for x in subDirList] subDirList.sort() subDirList = [path+str(x) for x in subDirList] print(subDirList) """ Explanation: Provide the location of the input file End of explanation """ for dirPath in subDirList: outputPath = dirPath.replace("input","output2") start_time = time.time() data_rdd = sc.textFile(dirPath) test = data_rdd.filter(lambda y : y.strip()!="")\ .map(lambda x : x.replace('\t','').lower().split(">"))\ .map(lambda (x,y): (x,regex.sub('',y).strip().replace("j","i").replace("v","u").split(" ")))\ .flatMap(lambda (x,y): [(pair,x[1:]+" |"+str(1+y.index(pair[0]))+"."+str(1+y.index(pair[1]))+"| ") for pair in combinations(y,2)])\ .filter(lambda (x,y): x[0]!="" and x[1]!="")\ .flatMap(lambda (x,y): [(lemma,y) for lemma in product(getLemmasList(x[0]),getLemmasList(x[1]))])\ .reduceByKey(lambda x,y : x + ", "+y).sortByKey(True) print("Input Directory Path :" + dirPath) print("Ouput Directory Path :" + outputPath) print("Time taken for "+ dirPath[-1:] +" files %s" % (time.time() - start_time)) test = test.map(lambda (x,y):("{"+x[0]+","+x[1]+"}",y)) test.take(5) df = sqlContext.createDataFrame(test, ['n-gram (n =2)', 'Location']) df.show() df.coalesce(1).write.option("header", "true").csv(outputPath+"/result.csv") """ Explanation: 2-Gram Generator A for loop to generate the time duration and set up the path for output We can specify a folder structure having different number of files. And similarly we can define the output folder based on the input folder Also showing the data in a tabular format. Following is just a test result we did using small amount of data. Running the same code with better RAM provides much better performance. 
End of explanation """ for dirPath in subDirList: outputPath = dirPath.replace("input","output3") start_time = time.time() data_rdd = sc.textFile(dirPath) test = data_rdd.filter(lambda y : y.strip()!="")\ .map(lambda x : x.replace('\t','').lower().split(">"))\ .map(lambda (x,y): (x,regex.sub('',y).strip().replace("j","i").replace("v","u").split(" ")))\ .flatMap(lambda (x,y): [(pair,x[1:]+" |"+str(1+y.index(pair[0]))+"."+str(1+y.index(pair[1]))+"."+str(1+y.index(pair[2]))+"| ") for pair in combinations(y,3)])\ .filter(lambda (x,y): x[0]!="" and x[1]!="" and x[2]!="")\ .flatMap(lambda (x,y): [(lemma,y) for lemma in product(getLemmasList(x[0]),getLemmasList(x[1]),getLemmasList(x[2]))])\ .reduceByKey(lambda x,y : x + ", "+y).sortByKey(True) print("Input Directory Path :" + dirPath) print("Ouput Directory Path :" + outputPath) print("Time taken for "+ dirPath[-1:] +" files %s" % (time.time() - start_time)) test = test.map(lambda (x,y):("{"+x[0]+","+x[1]+","+x[2]+"}",y)) test.take(5) df = sqlContext.createDataFrame(test, ['n-gram (n =3)', 'Location']) df.show() df.coalesce(1).write.option("header", "true").csv(outputPath+"/result.csv") """ Explanation: 3-Gram Generator A for loop to generate the time duration and set up the path for output We can specify a folder structure having different number of files. And similarly we can define the output folder based on the input folder Also showing the data in a tabular format. Following is just a test result we did using small amount of data. Running the same code with better RAM provides much better performance. End of explanation """ num_of_files = [1,2,5,10,20,40,80,90] linear_line = [1,2,5,10,20,40,80,90] execution_time_2_Gram = [14.7669999599,16.5650000572,19.5239999294,29.0959999561,47.1339998245,85.7990000248,167.852999926,174.265000105] execution_time_3_Gram = [15.2109999657,17.493999958,23.0810000896,32.367000103,54.4680001736,101.955999851,184.548999786,189.816999912] plot(num_of_files, execution_time_2_Gram, 'o-',num_of_files, execution_time_3_Gram, 'o-',num_of_files, linear_line, 'o-') xlabel('Number of files') ylabel('Execution time (secs)') title('Performace Analysis of 2-Gram and 3-Gram Generator with different number of files') legend(('2Gram','3Gram','Linear Line(y=x)'), loc='upper left') grid(True) savefig("performance.png") show() """ Explanation: Scale up Graph results for 2-Gram and 3-Gram The values used are the actual data we collected from various number of files End of explanation """
Linusp/panic-notebook
ipynb/stdlib.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt plt.style.use('ggplot') """ Explanation: Python 标准库 End of explanation """ import collections """ Explanation: collections End of explanation """ d = {'id': 1, 'content': 'hello world'} d['author'] """ Explanation: collections 中实现了一些高性能、易用的容器类型 defaultdict 继承了 dict 类型,当 key 不存在时,可以用指定的工厂方法来产生默认值。使用 dict 时,如果 key 不存在,是会抛出异常的 End of explanation """ dd = collections.defaultdict(int) dd.update(d) dd['author'] """ Explanation: 如果使用 defaultdict ,就不会有这个问题 End of explanation """ def Tree(): return collections.defaultdict(Tree) def tree_to_dict(tree): """将自定义的树状结构转换为 dict""" return {k: tree_to_dict(tree[k]) for k in tree} tree = Tree() tree['a']['b']['c']['d'] tree['a']['m']['n']['z'] tree_to_dict(tree) """ Explanation: 还可以用来定义树状结构 End of explanation """ Point = collections.namedtuple('Point', ['x', 'y']) Point(3, y=4) """ Explanation: namedtuple 用来创建带有命名字段的 tuple ,使代码更易读。 End of explanation """ cats = ['cat'] * 10 dogs = ['dog'] * 20 birds = ['bird'] * 5 dolphins = ['dolphin'] * 9 animal_counter = collections.Counter() for animal in cats + dogs + birds + dolphins: animal_counter[animal] += 1 mammals_counter = collections.Counter() for animal in cats + dogs + dolphins: mammals_counter[animal] += 1 print 'Top 2:', animal_counter.most_common(2) print 'Animal not mammals:', animal_counter - mammals_counter """ Explanation: Counter 顾名思义可用做计数器,不同的 Counter 类型值之间可以进行 +/- 运算,并且内部实现了 Top-K 功能,用来做简单的统计非常的方便。 End of explanation """ d = collections.deque([1, 2, 3, 4, 5]) print d d.pop() print 'After pop:', d d.popleft() print 'After popleft:', d d.append('a') print 'After append:', d d.appendleft('A') print 'After appendleft:', d d.extend(['x', 'y', 'z']) print 'After extend:', d d.extendleft(['X', 'Y', 'Z']) print 'After extendleft:', d """ Explanation: deque 实现了一个双向队列 End of explanation """ d1 = dict([ ('first', 1), ('second', 2), ('third', 3), ('fourth', 4) ]) print d1.keys() d2 = collections.OrderedDict([ ('first', 1), ('second', 2), ('third', 3), ('fourth', 4) ]) print d2.keys() """ Explanation: OrderedDict 同 defaultdict 一样继承自 dict ,可以使用 dict 的所有操作,在此基础上,OrderedDict 可以 记住每个 key 被插入的顺序 End of explanation """ import functools """ Explanation: functools End of explanation """ def search_job(query, redis_config, sql_config, index_schema): """docstring here""" # blablabla return query """ Explanation: functools 里包含一些高阶函数,我们用得比较多的大概是 partial, wraps 这两个。 partial 以一个函数对象作为参数,并为该函数的某些参数设置默认值,来得到一个新的函数。比如我们有一个函数叫 search_job ,如下: End of explanation """ REDIS_CONFIG = {'host': 'localhost', 'port': 6379, 'db': 0} SQL_CONFIG = {'host': 'localhost', 'port': 4000, 'name': 'test'} INDEX_SCHEMA = {'title': str, 'content': str} """ Explanation: 其中的 redis_config、sql_config 和 index_schema 在项目启动的时候通过读取配置文件已经确定了,假设它们的值分别如下: End of explanation """ search_job_with_default_args = functools.partial( search_job, redis_config=REDIS_CONFIG, sql_config=SQL_CONFIG, index_schema=INDEX_SCHEMA, ) search_job_with_default_args('hello world') """ Explanation: 那么可以用 partial 将这些配置设为 search_job 函数中相应参数的默认值,并产生一个新的函数 End of explanation """ search_job.__name__, search_job.__doc__ """ Explanation: wraps 用来使应用了装饰器(decorator)的函数保持其属性(如 __name__ 和 __doc__)。比如上面定义的 search_job 方法,其属性为: End of explanation """ def func_wrapper(func): def wrap_it(*args, **kwargs): """docstring of wrap_it""" return func(*args, **kwargs) return wrap_it @func_wrapper def search_job_wrapped(query, redis_config, sql_config, index_schema): """docstring here""" # blablabla return query search_job_wrapped.__name__, 
search_job_wrapped.__doc__ """ Explanation: 为其应用一个装饰器后 End of explanation """ def func_wrapper(func): @functools.wraps(func) def wrap_it(*args, **kwargs): """docstring of wrap_it""" return func(*args, **kwargs) return wrap_it @func_wrapper def search_job_wrapped(query, redis_config, sql_config, index_schema): """docstring here""" # blablabla return query search_job.__name__, search_job.__doc__ """ Explanation: 用 wraps 可以使被装饰方法的属性被正确输出: End of explanation """ import itertools """ Explanation: itertools End of explanation """ print list(itertools.chain('1234', 'abc')) print list(itertools.chain([1, 2, 3, 4], ['a', 'b', 'c'])) print list(itertools.chain([1, 2, 3, 4], 'abc')) """ Explanation: itertools 中实现了很多实用的迭代器(iterator),以及一些用来处理、操作可迭代(iterable)对象的方法。 chain 可以将多个可迭代对象连接起来得到一个新的迭代器 End of explanation """ print list(itertools.chain.from_iterable(['1234', 'abc'])) print list(itertools.chain.from_iterable([[1, 2, 3, 4], ['a', 'b', 'c']])) print list(itertools.chain.from_iterable([[1, 2, 3, 4], 'abc'])) """ Explanation: chain 还有一个类方法 from_iterable ,和 chain 的构造方法不一样,它要求参数只有一个,但这个参数是一个可迭代对象,其中每个元素又是一个可迭代对象。 End of explanation """ print list(itertools.combinations('abc', 2)) print list(itertools.combinations(xrange(4), 2)) """ Explanation: combinations 对给定的 长度有限的可迭代对象 生成指定长度的所有子序列(也就是数学里的 组合)。 End of explanation """ print list(itertools.permutations('abc', 2)) print list(itertools.permutations(xrange(4), 2)) """ Explanation: 对应的, permutations 生成指定长度的所有可能的排列。 End of explanation """ print list(itertools.compress('ABCDF', [1, 0, 1, 0, 1])) print list(itertools.compress('ABCDF', [True, False, True, False, True])) """ Explanation: compress 可以用给定的一个 mask 对象或说 selector ,从另一个给定的可迭代对象中选取对应的元素,然后返回一个新的可迭代对象。 End of explanation """ x = '0123hello 123 world' print ''.join(itertools.dropwhile(lambda x: x.isdigit(), x)) def is_vowel(ch): return ch in set('aeiou') def is_consonant(ch): return not is_vowel(ch) def pig_latin(word): if is_vowel(word[0]): return word + 'yay' else: remain = ''.join(itertools.dropwhile(is_consonant, word)) removed = word[:len(word)-len(remain)] return remain + removed + 'ay' print pig_latin('hello') print pig_latin('ok') """ Explanation: dropwhile 将一个可迭代对象中前几个满足条件的、连续的值删除,并返回一个新的可迭代对象 End of explanation """ def another_pig_latin(word): if is_vowel(word[0]): return word + 'yay' else: removed = ''.join(itertools.takewhile(is_consonant, word)) remain = word[len(word)-len(removed):] return remain + removed + 'ay' print pig_latin('hello') print pig_latin('ok') """ Explanation: 与 dropwhile 对应的,还有一个叫做 takewhile 的方法,它返回的是前几个满足条件的、连续的元素。上面的 pig_latin可以用这个方法进行改写: End of explanation """ def add(a, b): return a + b list(itertools.starmap(add, [(1, 2), (3, 4), (4, 5), (5, 6)])) """ Explanation: starmap 的功能和内建方法 map 类似,但是接受有多个参数的方法 End of explanation """ for key, group in itertools.groupby(xrange(12), lambda x: x / 5): print key, list(group) """ Explanation: groupby 顾名思义,可以按照特定条件将输入分组 End of explanation """ for key, group in itertools.groupby(xrange(5), lambda x: x % 2): print key, list(group) """ Explanation: 但需要注意的是,它只将连续的满足相同条件的元素分成同一组 End of explanation """ import random """ Explanation: 另外还有 ifiler, imap, islice, izip ,其功能与 filter, map, slice, zip 一样,但返回的结果都是 generator 。 而 count, cycle, repeat 可以用来产生长度无限的 generator。 random End of explanation """ samples = [random.random() for _ in range(10000)] plt.hist(samples) plt.title('Samples of random.random') plt.xlabel('sample') plt.ylabel('count') """ Explanation: random 模块可以用来生成随机数、在已有数据上进行随机采样,也是很实用的一个模块。 random 方法可以用来生成服从 [0, 1) 区间内均匀分布的随机数: End of 
explanation """ def my_random(): return random.uniform(0, 1) """ Explanation: uniform 返回服从给定区间内的均匀分布的随机数,用 uniform 可以实现和前面的 random 方法相同功能的方法 End of explanation """ [random.randint(0, 10) for _ in range(10)] """ Explanation: randint 则返回服从给定区间内 离散均匀分布 的随机整数 End of explanation """ seq = range(10) samples = [random.choice(seq) for _ in range(10000)] plt.hist(samples) plt.title('Samples of random.choice') plt.xlabel('sample') plt.ylabel('count') """ Explanation: choice 方法可以从一个给定序列中进行均匀随机采样 End of explanation """ seq = range(10) random.sample(seq, 3) """ Explanation: sample 方法可以从给定序列中随机采样得到 K 个元素,这 K 个元素任意两个在原序列中的位置都不同。 End of explanation """ seq = range(10) random.shuffle(seq) seq """ Explanation: shuffle 用来对一个 list 中的所有元素进行重排,即打乱其原有顺序,也是一个比较常用的方法 End of explanation """ seq = range(10) new_seq = random.sample(seq, len(seq)) print 'origin sequence:', seq print 'new sequence:', new_seq """ Explanation: shuffle 方法是有副作用的,会修改 list 本身,如果希望不修改原 list,而是将打乱后的结果返回,可以用之前提到的 sample 方法来达成目的 End of explanation """ plt.subplots_adjust(hspace=1.) plt.subplot(211) samples_by_gauss = [random.gauss(0, 1) for _ in range(10000)] plt.hist(samples_by_gauss) plt.title('Samples of random.gauss') plt.xlabel('sample') plt.ylabel('count') plt.subplot(212) samples_by_normalvariate = [random.normalvariate(0, 1) for _ in range(10000)] plt.hist(samples_by_normalvariate) plt.title('Samples of random.normalvariate') plt.xlabel('sample') plt.ylabel('count') """ Explanation: gauss 和 normalvariate 可以用来产生服从正态分布的随机数,不过需要注意的是,gauss 方法是线程不安全的 End of explanation """
benwaugh/NuffieldProject2016
notebooks/SimpleMassHistogram.ipynb
mit
import pylab import matplotlib.pyplot as plt %matplotlib inline pylab.rcParams['figure.figsize'] = 12,8 """ Explanation: These commands give us access to some tools for plotting histograms and other graphs: End of explanation """ data_file = open('Invariant_Masses.txt') masses = [] # Create an empty list, ready to store the invariant masses for line in data_file: # Loop over each line in the file mass, channel = line.split() # Each line contains a mass (in GeV) and a "channel" (m for mu+mu-, etc.) m = float(mass) # Mass is read in as a string, but we need to interpret it as a (floating point) number masses.append(m) # Add the mass from this line to the list of masses """ Explanation: This command opens the file. You may have to edit it to reflect the actual name of the file containing your results: End of explanation """ print(masses) """ Explanation: Print the list of masses, just to make sure it looks sensible: End of explanation """ plt.hist(masses, bins=100, range=(0,200)) plt.xlim(0,200) plt.xlabel('Mass [GeV]') """ Explanation: If it looks OK, we can try plotting the results as a histogram: End of explanation """
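""" Explanation: The loop above reads a channel code for every mass but then discards it. Assuming the file really has the two-column mass/channel layout described earlier, here is a sketch of keeping the masses grouped by channel so each decay channel can be histogrammed separately. End of explanation """
from collections import defaultdict
import matplotlib.pyplot as plt

masses_by_channel = defaultdict(list)   # e.g. 'm' -> masses from the mu+mu- channel
with open('Invariant_Masses.txt') as data_file:
    for line in data_file:
        mass, channel = line.split()
        masses_by_channel[channel].append(float(mass))

for channel, values in sorted(masses_by_channel.items()):
    plt.hist(values, bins=100, range=(0, 200), histtype='step', label=channel)
plt.xlabel('Mass [GeV]')
plt.legend()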
eriksalt/jupyter
Python Quick Reference/Functions.ipynb
mit
def print_text(): print('this is text') # call the function print_text() """ Explanation: Python Function Quick Reference Table of contents <a href="#1.-Declaring-Functions">Declaring Functions</a> <a href="#2.-Return-Values">Return Values</a> <a href="#3.-Parameters">Parameters</a> <a href="#4.-DocStrings">DocStrings</a> <a href="#5.-Parameter-Unpacking">Parameter Unpacking</a> <a href="#6.-Generator-Functions">Generator Functions</a> <a href="#7.-Lambas-Anonymous-Functions">Lambdas Anonymous Functions</a> <a href="#8.-Partial">Partial</a> <a href="#9.-Closures-Nested-Functions">Closures (Nested Functions)</a> 1. Declaring Functions Define a function with no arguments and no return values: End of explanation """ def stub(): pass """ Explanation: Use pass as a placeholder if you haven't written the function body: End of explanation """ def say_hello(): return 'hello' say_hello() """ Explanation: 2. Return Values End of explanation """ def min_max(nums): return min(nums), max(nums) # return values can be assigned into multiple variables using tuple unpacking nums = [3, 6, 5, 8, 2, 19, 7] min_num, max_num = min_max(nums) print(min_num) print(max_num) """ Explanation: Return two values from a single function: End of explanation """ def print_this(x): print (x) print_this(3) """ Explanation: 3. Parameters End of explanation """ def calc(a, b, op='add'): if op == 'add': return a+b elif op == 'sub': return a-b else: print('valid operations are add and sub') calc(10, 4) calc(10,4, op='add') # unnamed arguments are inferred by position calc(10, 4, 'add') x = 42 def spam(a, b=x): print(a, b) spam(1) x = 23 # Has no effect spam(1) """ Explanation: Define a function with a default value: End of explanation """ def spam(a, b=[]): # b escapes the function as a return variable, which can be altered! return b x = spam(1) x x = spam(1) x.append(99) x.append('Yow!') spam(1) # Modified list gets returned! """ Explanation: Default values should always be const values, or you can get in trouble End of explanation """ # arbitrary positional arguments def print_all(seperator, *args): print(seperator.join(args)) print_all(',', 'first','second','third') # arbitrary positional AND keyword arguments def anyargs(*args, **kwargs): print(args) # A tuple print(kwargs) # A dict anyargs(3, 'ddddd', 5.666, foo='bar', blah='zed') # keyword arguments have access to attribute name import html def make_element(name, value, **attrs): keyvals = [' %s="%s"' % item for item in attrs.items()] attr_str = ''.join(keyvals) element = '<{name}{attrs}>{value}</{name}>'.format( name=name, attrs=attr_str, value=html.escape(value)) return element # Example # Creates '<item size="large" quantity="6">Albatross</item>' make_element('item', 'Albatross', size='large', quantity=6) """ Explanation: Function taking an arbitrary number of arguments End of explanation """ def recv(maxsize, *, block): 'Receives a message' pass recv(1024, block=True) # Ok # the following will fail if uncommented #recv(1024, True) # TypeError """ Explanation: Define a function that only accepts keyword arguments End of explanation """ def dedupe(items): seen = set() for item in items: if item not in seen: yield item seen.add(item) a = [1, 5, 2, 1, 9, 1, 5, 10] list(dedupe(a)) """ Explanation: Define a function that take a callable (function) as a parameter): End of explanation """ def calc(a, b, op='add'): """ calculates the result of a simple math operation. 
:param a: the first parameter in the math operation :param b: the first parameter in the math operation :param op: which type of math operation (valid values are 'add', 'sub') :returns: this result of applying the math argument to the two parameters :raises keyError: raises an exception """ if op == 'add': return a+b elif op == 'sub': return a-b else: print('valid operations are add and sub') help(calc) """ Explanation: 4. DocStrings End of explanation """ # the compiler does not check any of this, it is just documentation! def add(x:int, y:int) -> int: return x + y add('hello', 'world') help(add) """ Explanation: Attaching additional metadata to a function definition End of explanation """ # range takes start and stop parameters list(range(3, 6)) # normal call with separate arguments [3, 4, 5] # can also pass start and stop arguments using argument unpacking args = [3,6] #can be used on tuple too list(range(*args)) """ Explanation: 5. Unpacking Parameters Unpacking iterables into positional function arguments (star operator) End of explanation """ # a dictionary can be unpacked into names arguments with the ** / double star operator from collections import namedtuple Stock = namedtuple('Stock', ['name', 'shares', 'price', 'date', 'time']) # Create a prototype instance stock_prototype = Stock('', 0, 0.0, None, None) # Function to convert a dictionary to a Stock def dict_to_stock(s): return stock_prototype._replace(**s) a = {'name': 'ACME', 'shares': 100, 'price': 123.45} dict_to_stock(a) """ Explanation: Unpacking dictionaries into named arguments (double-star operator) End of explanation """ def myrange(n): for i in range(n): yield i max(myrange(5)) """ Explanation: 6. Generator Functions Generator functions with yield End of explanation """ #using yield from def myrange(n): yield from range(n) print(max(myrange(5))) """ Explanation: Generator function that uses an internal iterator End of explanation """ squared = lambda x: x**2 squared(3) simpsons = ['bart', 'maggie', 'homer', 'lisa', 'marge'] sorted(simpsons, key = lambda word: word[-1]) # no parameter lambda say_hello = lambda : 'hello' say_hello() """ Explanation: 7. Lambas Anonymous Functions End of explanation """ talkback = lambda message='hello' : message talkback() talkback('hello world') """ Explanation: Default Parameters in Lambdas End of explanation """ test = 'hello world' talkback = lambda : test talkback() # parameters are resolved when the code runs, not when lambda is declared test = 'what???' talkback() # to prevent this, use a default parameter set to the local variable test = 'hello world' talkback = lambda message = test: message test='nope' talkback() """ Explanation: Capturing local variables in lambdas End of explanation """ def spam(a, b, c, d): print(a, b, c, d) from functools import partial s1 = partial(spam, 1) # a = 1 s1(2, 3, 4) s1(4, 5, 6) s2 = partial(spam, d=42) # d = 42 s2(1, 2, 3) s2(4, 5, 5) s3 = partial(spam, 1, 2, d=42) # a = 1, b = 2, d = 42 s3(3) s3(4) s3(5) """ Explanation: 8. Partial Partial allows you convert an n-parameter function into a function with less arguments End of explanation """ # this inner closure is used to carry state around def name_func_from_family(last_name): def print_name(first_name): print('{} {}'.format(first_name, last_name)) return print_name #the key here is that the outer function RETURNS the inner function / closure print_saltwell = name_func_from_family('saltwell') print_saltwell('erik') print_saltwell('kasia') print_saltwell('jacob') """ Explanation: 9. 
Closures Nested Functions End of explanation """ def outside(): msg = "Outside!" def inside(): msg = "Inside!" print(msg) inside() print(msg) # this prints 'Outside!' even though Inside() mosifies a variable called msg (its a local copy) outside() # to have a variable refer to something outside local scope use nonlocal def outside(): msg = "Outside!" def inside(): nonlocal msg msg = "Inside!" print(msg) inside() print(msg) outside() # the global keyword makes a variable reference a global variable rather then a copy msg = 'Global!!' def outside(): msg = "Outside!" def inside(): global msg msg = "Inside!" print(msg) inside() print(msg) # this prints 'Outside!' because the copy in Inside() references the global variable outside() msg """ Explanation: 10. Nonlocal and global nonlocal allows you to modify a variable outside your scope (but not global scope) End of explanation """
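""" Explanation: One more closure pattern worth knowing (an addition to the reference above): nonlocal lets an inner function keep mutable state between calls, which is a lightweight alternative to a class with a single method. End of explanation """
def make_counter():
    count = 0
    def increment():
        nonlocal count  # rebind the enclosing variable rather than creating a local copy
        count += 1
        return count
    return increment

counter = make_counter()
print(counter())  # 1
print(counter())  # 2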
enbanuel/phys202-2015-work
assignments/assignment12/FittingModelsEx01.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import scipy.optimize as opt """ Explanation: Fitting Models Exercise 1 Imports End of explanation """ a_true = 0.5 b_true = 2.0 c_true = -4.0 """ Explanation: Fitting a quadratic curve For this problem we are going to work with the following model: $$ y_{model}(x) = a x^2 + b x + c $$ The true values of the model parameters are as follows: End of explanation """ # YOUR CODE HERE N = 30 xdata = np.linspace(-5, 5, N) np.random.seed(0) dy = 2.0 ydata = c_true + b_true * xdata + a_true * xdata**2 + np.random.normal(0.0, dy, size=N) plt.errorbar(xdata, ydata, dy,fmt='og', ecolor='darkgray') plt.xlabel('x') plt.ylabel('y') plt.grid(); assert True # leave this cell for grading the raw data generation and plot """ Explanation: First, generate a dataset using this model using these parameters and the following characteristics: For your $x$ data use 30 uniformly spaced points between $[-5,5]$. Add a noise term to the $y$ value at each point that is drawn from a normal distribution with zero mean and standard deviation 2.0. Make sure you add a different random number to each point (see the size argument of np.random.normal). After you generate the data, make a plot of the raw data (use points). End of explanation """ # YOUR CODE HERE def chi2(theta, x, y, dy): # theta = [c, b, a] return np.sum(((y - theta[0] - theta[1] * x - theta[2] * x**2) / dy) ** 2) theta_guess = [0.0,1.0,2.0] result = opt.minimize(chi2, theta_guess, args=(xdata,ydata,dy)) theta_best = result.x print(theta_best) xfit = np.linspace(-5, 5) yfit = theta_best[2]*xfit**2 + theta_best[1]*xfit + theta_best[0] plt.figure(figsize=(7,5)) plt.plot(xfit, yfit) plt.errorbar(xdata, ydata, dy, fmt='og', ecolor='darkgray') plt.xlabel('x') plt.ylabel('y') plt.grid(); assert True # leave this cell for grading the fit; should include a plot and printout of the parameters+errors """ Explanation: Now fit the model to the dataset to recover estimates for the model's parameters: Print out the estimates and uncertainties of each parameter. Plot the raw data and best fit of the model. End of explanation """
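""" Explanation: The exercise also asks for the uncertainties of the fitted parameters, which the chi-squared minimization above does not report by itself. One way to get them (a sketch, not necessarily the intended course solution) is scipy.optimize.curve_fit, whose covariance matrix gives the 1-sigma errors. This reuses the xdata, ydata and dy generated above. End of explanation """
import numpy as np
import scipy.optimize as opt

def quadratic(x, a, b, c):
    return a * x**2 + b * x + c

# sigma/absolute_sigma tell curve_fit to treat dy = 2.0 as the true measurement error.
theta_opt, theta_cov = opt.curve_fit(quadratic, xdata, ydata,
                                     sigma=dy * np.ones_like(ydata), absolute_sigma=True)
theta_err = np.sqrt(np.diag(theta_cov))
for name, value, err in zip('abc', theta_opt, theta_err):
    print('{0} = {1:.3f} +/- {2:.3f}'.format(name, value, err))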
marcino239/notebooks
nlp_bag_of_words.ipynb
gpl-2.0
%matplotlib inline import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer from bs4 import BeautifulSoup from matplotlib import pyplot as plt from sklearn.ensemble import GradientBoostingClassifier from sklearn.feature_selection import SelectKBest, chi2 from sklearn.metrics import roc_curve, auc # load data df = pd.read_csv( 'labeledTrainData.tsv', sep='\t' ) # convert the data df.insert( 3, 'converted', df.iloc[ :, 2 ].apply( lambda x: BeautifulSoup( x ).get_text() ) ) print( 'available columns: {0}'.format( df.columns ) ) # train test / ratio of 0.66 tt_index = np.random.binomial( 1, 0.66, size=df.shape[0] ) train = df[ tt_index == 1 ] test = df[ tt_index == 0 ] vectorizer = TfidfVectorizer( encoding='latin1' ) vectorizer.fit_transform( train.iloc[ :, 3 ] ) # prepare data X_train = vectorizer.transform( train.iloc[ :, 3 ] ) y_train = train.iloc[ :, 1 ] X_test = vectorizer.transform( test.iloc[ :, 3 ] ) y_test = test.iloc[ :, 1 ] # let's take a look how input classes are distributed. # Having more or less equall frequency will help predictor training train.hist( column=(1) ) plt.show() ch2 = SelectKBest(chi2, k=100 ) X_train = ch2.fit_transform( X_train, y_train ).toarray() X_test = ch2.transform( X_test ).toarray() # we're going to use Gradient Boosted Tree classifier. These methods showed good performance on few Kaggle competitions clf = GradientBoostingClassifier( n_estimators=100, learning_rate=1.0, max_depth=5, random_state=0 ) clf.fit(X_train, y_train) y_score = clf.decision_function( X_test ) fpr, tpr, thresholds = roc_curve( y_test.ravel(), y_score.ravel() ) roc_auc = auc( fpr, tpr ) # Plot Precision-Recall curve plt.clf() plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.0]) plt.plot( fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc ) plt.legend(loc="lower right") plt.show() """ Explanation: In today's post we will take a look at the NLP classification task. One of the simpler algorithms is Bag-Of-Words. Each word is one-hot encoded, then the words of a document are averaged and put through the classifier. As a dataset we are going to use movie reviews which can be downloaded from Kaggle. A word of disclaimer: the code below is partially based on the sklearn tutorial as well as on very good NLP course CS224d from Stanford University End of explanation """ from sklearn.svm import SVC clf2 = SVC( kernel='linear' ) clf2.fit( X_train, y_train ) y_score = clf2.decision_function( X_test ) fpr, tpr, thresholds = roc_curve( y_test.ravel(), y_score.ravel() ) roc_auc = auc( fpr, tpr ) # Plot Precision-Recall curve plt.clf() plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.0]) plt.plot( fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc ) plt.legend(loc="lower right") plt.show() """ Explanation: AUC of Receiver Operating Characteristic curve shows 0.87 which is a decent margin vs random binary classification. Given we have two classes: good review or bad review, it make sense to try a linear hyperplane classifier: SVM. End of explanation """ y_pred = clf2.predict( X_test ) y_pred[ 0:10 ] y_test[ 0:10 ] test.iloc[ 1, 3 ] """ Explanation: A bit better, although I am not too happy with the 'scientifc' method here. Better choice will be to use a parameter grid search over dev set defined by cross validation method, but I'll reserve this for the next post. For a moment this should suffice. Let's take a look where the classifier fails on a random data sample. End of explanation """
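""" Explanation: The post defers a proper cross-validated parameter search to a later write-up; a minimal sketch of what that could look like is below. It assumes a scikit-learn release that ships sklearn.model_selection (older releases used sklearn.grid_search) and reuses the X_train/y_train arrays built above. End of explanation """
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

param_grid = {'kernel': ['linear'], 'C': [0.1, 1.0, 10.0]}
grid = GridSearchCV(SVC(), param_grid, scoring='roc_auc', cv=5)
grid.fit(X_train, y_train)
print('best params: {0}, best AUC: {1:.3f}'.format(grid.best_params_, grid.best_score_))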
mrGeen/metaseq
doc/source/example_session_2.ipynb
mit
# Enable in-line plots for this IPython Notebook %matplotlib inline """ Explanation: Example 2: Differential expression scatterplots This example looks more closely at using the results table part of :mod:metaseq, and highlights the flexibility in plotting afforded by :mod:metaseq. End of explanation """ import metaseq from metaseq import example_filename from metaseq.results_table import ResultsTable import pandas import numpy as np import matplotlib import pybedtools import gffutils from gffutils.helpers import asinterval import os """ Explanation: Setup In this section, we'll get the example data for control and knockdown samples, combine the data, and and create :class:ResultsTable object out of them. If you haven't already done so, run the download_metaseq_example_data.py script, which will download and prepare from public sources. Import what we'll be using: End of explanation """ %%bash example_dir="metaseq-example" if [ -e $example_dir ]; then echo "already exists"; else mkdir -p $example_dir (cd $example_dir \ && wget --progress=dot:giga https://raw.githubusercontent.com/daler/metaseq-example-data/master/metaseq-example-data.tar.gz \ && tar -xzf metaseq-example-data.tar.gz \ && rm metaseq-example-data.tar.gz) fi data_dir = 'metaseq-example/data' control_filename = os.path.join(data_dir, 'GSM847565_SL2585.table') knockdown_filename = os.path.join(data_dir, 'GSM847566_SL2592.table') """ Explanation: We'll be using tables prepared from Cufflinks GTF output from GEO entries GSM847565 and GSM847566. These represent results control and ATF3 knockdown experiments in the K562 human cell line. You can read more about the data on GEO; this example will be more about the features of :mod:metaseq than the biology. Let's get the example files: End of explanation """ # System call; IPython only! !head -n5 $control_filename """ Explanation: Let's take a quick peak to see what these files look like: End of explanation """ # Create two pandas.DataFrames control = pandas.read_table(control_filename, index_col=0) knockdown = pandas.read_table(knockdown_filename, index_col=0) """ Explanation: As documented at http://cufflinks.cbcb.umd.edu/manual.html#gtfout, the score field indicates relative expression of one isoform compared to other isoforms of the same gene, times 1000. The max score is 1000, and an isoform with this score is considered the major isoform. A score of 800 would mean an isoform's FPKM is 0.8 that of the major isoform. If you're working with DESeq results, the :mod:metaseq.results_table.DESeqResults class is a nice wrapper around those results with one-step import. But here, we'll construct a pandas.DataFrame first and then create a ResultsTable object out of it. End of explanation """ control.head() knockdown.head() """ Explanation: Here's what the first few entries look like: End of explanation """ # Merge control and knockdown into one DataFrame df = pandas.merge(control, knockdown, left_index=True, right_index=True, suffixes=('_ct', '_kd')) df.head() """ Explanation: These are two separate objects. It will be easier to work with the data if we first combine the data into a single dataframe. 
For this we will use standard pandas routines: End of explanation """ # Create a ResultsTable d = ResultsTable(df) """ Explanation: Now we'll create a :class:metaseq.results_table.ResultsTable out of it: End of explanation """ # DataFrame is always accessible via .data print type(d), type(d.data) """ Explanation: :class:ResultsTable objects are wrappers around pandas.DataFrame objects, and are useful for working with annotations and tablular data. You can always access the DataFrame with the .data attribute: End of explanation """ # Get gene annotations for chr17 gtf = os.path.join(data_dir, 'Homo_sapiens.GRCh37.66_chr17.gtf') print open(gtf).readline() """ Explanation: The metaseq example data includes a GFF file of the genes on chromosome 17 of the hg19 human genome assembly: End of explanation """ # Get a list of transcript IDs on chr17, and subset the dataframe. # Here we use pybedtools, but the list of names can come from anywhere names = list(set([i['transcript_id'] for i in pybedtools.BedTool(gtf)])) names.sort() # Make a copy of d d2 = d.copy() # And subset d2.data = d2.data.ix[names] # How many did we omit? print "original:", len(d.data) print "chr17 subset:", len(d2.data) """ Explanation: Subsetting data The data we loaded from the knockdown experiment contains genes from all chromosomes. For the sake of argument, let's say we're only interested in the expression data for these genes on chr17. We can simply use pandas.DataFrame.ix to subset dataframe by a list of genes. Note that for this to work, the items in the list need to be in the index of the dataframe. Since the data frame index consists of Ensembl transcript IDs, we'll need to create a list of Ensembl transcript IDs on chromosome 17: End of explanation """ # Scatterplot of control vs knockdown FPKM d2.scatter( x='fpkm_ct', y='fpkm_kd'); """ Explanation: Scatterplots Let's plot some data. The :meth:ResultsTable.scatter method helps with plotting genome-wide data, and offers lots of flexibility. For its most basic usage, we need to at least supply x and y. These are names of variables in the dataframe. We'll add more data later, but for now, let's plot the FPKM of control vs knockdown: End of explanation """ # arbitrary gene for demonstration purposes interesting_gene = np.argmax(d2.fpkm_ct) interesting_gene # What happens if you were to click on the points in an interactive session d2._default_callback(interesting_gene) """ Explanation: If you're following along in a terminal with interactive matplotlib plots, you can click on a point to see what gene it is. In this IPython Notebook (and the HTML documentation generated from it), we don't have that interactive ability. We can simulate it here by choosing a gene ID to show, and then manually call the _default_callback like this: End of explanation """ # Adding extra variables gets verbose and cluttered d2.data['log_fpkm_ct'] = np.log1p(d2.data.fpkm_ct) """ Explanation: Clicking around interactively on the points is a great way to get a feel for the data. OK, it looks like this plot could use log scaling. Recall though that the ResultsTable.scatter method needs to have x and y variables available in the dataframe. So one way to do this would be to do something like this: End of explanation """ # We'll use a better way, so delete it. del d2.data['log_fpkm_ct'] """ Explanation: But when playing around with different scales, this quickly pollutes the dataframe with extra columns. Let's delete that column . . . 
End of explanation """ # Scale x and y axes using log2(x + 1) def log2p1(x): return np.log2(x + 1) d2.scatter( x='fpkm_ct', y='fpkm_kd', #---------------- xfunc=log2p1, yfunc=log2p1, ); """ Explanation: . . . and show another way. You may find it more streamlined to use the xfunc and/or yfunc arguments. We can use any arbitrary function for these, and the axes labels will reflect that. Since we're about to start incrementally improving the figure by adding additional keyword arguments (kwargs), the stuff we've already talked about will be at the top, and a comment line like this will mark the start of new stuff to pay attention to: # ------------- (marks the start of new stuff) Here's the next version of the scatterplot: End of explanation """ # Manually specify x and y labels ax = d2.scatter( x='fpkm_ct', y='fpkm_kd', xfunc=log2p1, yfunc=log2p1, #----------------------------- # specify xlabel xlab='Control, log2(FPKM + 1)' ); # adjust the ylabel afterwards ax.set_ylabel('Knockdown, log2(FPKM + 1)'); """ Explanation: Of course, we can specify axes labels either directly in the method call with xlab or ylab, or after the fact using standard matplotlib functionality: End of explanation """ # Crude differential expression detection.... d2.data['foldchange'] = d2.fpkm_kd / d2.fpkm_ct up = (d2.foldchange > 2).values dn = (d2.foldchange < 0.5).values """ Explanation: Let's highlight some genes. How about those that change expression > 2 fold in upon knockdown in red, and < 2 fold in blue? While we're at it, let's add another variable to the dataframe. End of explanation """ # Use the genes_to_highlight argument to show up/downregulated genes # in different colors d2.scatter( x='fpkm_ct', y='fpkm_kd', xfunc=log2p1, yfunc=log2p1, xlab='Control, log2(FPKM + 1)', ylab='Knockdown, log2(FPKM + 1)', #------------------------------- genes_to_highlight=[ (up, dict(color='#da3b3a')), (dn, dict(color='#00748e'))] ); """ Explanation: The way to highlight genes is with the genes_to_highlight argument. OK, OK, it's a little bit of a misnomer here because we're actually working with transcripts. But the idea is the same. The genes_to_highlight argument takes a list of tuples. Each tuple consists of two items: an index (boolean or integer, doesn't matter) and a style dictionary. This dictionary is passed directly to matplotlib.scatter, so you can use any supported arguments here. Here's the plot with up/downregulated genes highlighted: End of explanation """ # Add a 1:1 line d2.scatter( x='fpkm_ct', y='fpkm_kd', xfunc=log2p1, yfunc=log2p1, xlab='Control, log2(FPKM + 1)', ylab='Knockdown, log2(FPKM + 1)', genes_to_highlight=[ (up, dict(color='#da3b3a')), (dn, dict(color='#00748e'))], #------------------------------------------ one_to_one=dict(color='r', linestyle='--'), ); """ Explanation: We can add a 1-to-1 line for reference: End of explanation """ # Style changes: # default gray small dots; make changed genes stand out more d2.scatter( x='fpkm_ct', y='fpkm_kd', xfunc=log2p1, yfunc=log2p1, xlab='Control, log2(FPKM + 1)', ylab='Knockdown, log2(FPKM + 1)', one_to_one=dict(color='k', linestyle=':'), #------------------------------------------------------ genes_to_highlight=[ (up, dict(color='#da3b3a', alpha=0.8)), (dn, dict(color='#00748e', alpha=0.8))], general_kwargs=dict(marker='.', color='0.5', alpha=0.2, s=5), ); """ Explanation: Let's change the plot style a bit. The general_kwargs argument determines the base style of all points. By default, it's dict(color='k', alpha=0.2, linewidths=0). 
Let's change the default style to smaller gray dots, and make the red and blue stand out more by adjusting their alpha: End of explanation """ # Add marginal histograms d2.scatter( x='fpkm_ct', y='fpkm_kd', xfunc=log2p1, yfunc=log2p1, xlab='Control, log2(FPKM + 1)', ylab='Knockdown, log2(FPKM + 1)', genes_to_highlight=[ (up, dict(color='#da3b3a', alpha=0.8)), (dn, dict(color='#00748e', alpha=0.8))], one_to_one=dict(color='k', linestyle=':'), general_kwargs=dict(marker='.', color='0.5', alpha=0.2, s=5), #------------------------------------------------------ marginal_histograms=True, ); """ Explanation: Marginal histograms :mod:metaseq also offers support for marginal histograms, which are stacked up on either axes for each set of genes that were plotted. There are lots of ways for configuring this. First, let's turn them on for everything: End of explanation """ # Tweak the marginal histograms: # 50 bins, don't show unchanged genes, and remove outlines d2.scatter( x='fpkm_ct', y='fpkm_kd', xfunc=log2p1, yfunc=log2p1, xlab='Control, log2(FPKM + 1)', ylab='Knockdown, log2(FPKM + 1)', one_to_one=dict(color='k', linestyle=':'), general_kwargs=dict(marker='.', color='0.5', alpha=0.2, s=5), #------------------------------------------------------ # Go back go disabling them globally... marginal_histograms=False, # ...and then turn them back on for each set of genes # to highlight. # # By the way, genes_to_highlight is indented to better show the # the structure. genes_to_highlight=[ ( up, dict( color='#da3b3a', alpha=0.8, marginal_histograms=True, xhist_kwargs=dict(bins=50, linewidth=0), yhist_kwargs=dict(bins=50, linewidth=0), ) ), ( dn, dict( color='#00748e', alpha=0.8, marginal_histograms=True, xhist_kwargs=dict(bins=50, linewidth=0), yhist_kwargs=dict(bins=50, linewidth=0), ) ) ], ); """ Explanation: As a contrived example to illustrate the flexibility for plotting marginal histograms, lets: only show histograms for up/down regulated change the number of bins to 50 remove the edge around each bar End of explanation """ matplotlib.rcParams['font.family'] = "Arial" ax = d2.scatter( x='fpkm_ct', y='fpkm_kd', xfunc=log2p1, yfunc=log2p1, xlab='Control, log2(FPKM + 1)', ylab='Knockdown, log2(FPKM + 1)', one_to_one=dict(color='k', linestyle=':'), marginal_histograms=False, #------------------------------------------------------ # add the "unchanged" label general_kwargs=dict(marker='.', color='0.5', alpha=0.2, s=5, label='unchanged'), genes_to_highlight=[ ( up, dict( color='#da3b3a', alpha=0.8, marginal_histograms=True, xhist_kwargs=dict(bins=50, linewidth=0), yhist_kwargs=dict(bins=50, linewidth=0), # add label label='upregulated', ) ), ( dn, dict( color='#00748e', alpha=0.8, marginal_histograms=True, xhist_kwargs=dict(bins=50, linewidth=0), yhist_kwargs=dict(bins=50, linewidth=0), # add label label='downregulated' ) ) ], ); # Get handles and labels, and then reverse their order handles, legend_labels = ax.get_legend_handles_labels() handles = handles[::-1] legend_labels = legend_labels[::-1] # Draw a legend using the flipped handles and labels. leg = ax.legend(handles, legend_labels, # These values may take some tweaking. # By default they are in axes coordinates, so this means # the legend is slightly outside the axes. loc=(1.01, 1.05), # Various style fixes to default legend. 
fontsize=9, scatterpoints=1, borderpad=0.1, handletextpad=0.05, frameon=False, title='chr17 transcripts', ); # Adjust the legend title after it's created leg.get_title().set_weight('bold') """ Explanation: Let's clean up the plot by adding a legend (using label in genes_to_highlight), and adding it outside the axes. While we're at it we'll add a title, too. There's a trick here -- for each set of genes, the histograms are incrementally added on top of each other but the legend, lists them going down. So we need to flip the order of legend entries to make it nicely match the order of the histograms. End of explanation """ # Another trick: every time `d2.scatter` is called, top_axes = d2.marginal.top_hists[-1] top_axes.set_title('Differential expression, ATF3 knockdown'); for ax in d2.marginal.top_hists: ax.set_ylabel('No.\ntranscripts', rotation=0, ha='right', va='center', size=8) for ax in d2.marginal.right_hists: ax.set_xlabel('No.\ntranscripts', rotation=-90, ha='left', va='top', size=8) fig = ax.figure fig.savefig('expression-demo.png') fig """ Explanation: We'd also like to add a title. But how to access the top-most axes? Whenever the scatter method is called, the MarginalHistograms object created as a by-product of the plotting is stored in the marginal attribute. This, in turn, has a top_hists attribute, and we can grab the last one created. While we're at it, let's histograms axes as well. End of explanation """
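""" Explanation: One caveat about the crude fold change used earlier in this session: d2.fpkm_kd / d2.fpkm_ct is undefined whenever the control FPKM is zero. A small sketch (an addition, not part of the original session) of a pseudocount-based log2 ratio that keeps such transcripts usable: End of explanation """
import numpy as np

pseudocount = 1.0
log2fc = np.log2(d2.fpkm_kd + pseudocount) - np.log2(d2.fpkm_ct + pseudocount)
up = (log2fc >= 1).values   # at least 2-fold up on the pseudocount-shifted scale
dn = (log2fc <= -1).values  # at least 2-fold down
print('up: {0}, down: {1}'.format(up.sum(), dn.sum()))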
jshudzina/keras-tutorial
notebooks/02-Yellowstone-visitors-part2.ipynb
apache-2.0
from pandas import DataFrame from pandas import Series from pandas import concat from pandas import read_csv from pandas import datetime from sklearn.metrics import mean_squared_error from sklearn.preprocessing import MinMaxScaler from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import LSTM from keras.layers import GRU from math import sqrt from matplotlib import pyplot import numpy """ Explanation: LSTM Time Series Example Before we get into the example, let's talk about old fashioned computer memory. Mercury delay lines are an early form of computer memory. They basically recycled electrical signals until they where needed. They also could replace or reshape the signal with new information (i.e. forgeting the old information). Image Source: Delay Line Memory Note: This tutorial is based on Time Series Forecasting with the Long Short-Term Memory Network in Python by Jason Brownlee. Part 2 - Train LSTM End of explanation """ def parser(x): return datetime.strptime(x, '%Y-%m-%d') dataset = read_csv('../data/yellowstone-visitors-ur-weather.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) dataset.head() """ Explanation: Load Data Unlike part 1, this example includes additional training features. These include the US unemployment rate and weather data for a yellowstone weather station. End of explanation """ def difference(dataset, interval=1): diff = list() for i in range(interval, len(dataset)): value = dataset[i] - dataset[i - interval] diff.append(value) return diff diff_values = difference(dataset.values) diff_df = DataFrame(diff_values, columns=dataset.columns.values) diff_df.head() """ Explanation: Convert to Stationary Data Like the visitors, the weather contains seasonal trends. We'll convert all the values to the changes over the last month. The differences remove the season trends from the dataset. This conversion from non-stationary to stationary helps model longer term changes. End of explanation """ labels = diff_df['visitors'].rename(index='label') shifted = diff_df.shift(1) supervised = concat([shifted, labels], axis=1) supervised.fillna(0, inplace=True) supervised.head() """ Explanation: Label Data The label is the proceeding month's change in visitors. This makes the training set a supervised training set. End of explanation """ supervised_values = supervised.values train, test = supervised_values[0:-12], supervised_values[-12:] scaler = MinMaxScaler(feature_range=(-1, 1)) scaler = scaler.fit(train) train = train.reshape(train.shape[0], train.shape[1]) train_scaled = scaler.transform(train) test = test.reshape(test.shape[0], test.shape[1]) test_scaled = scaler.transform(test) print('training set shape: {}'.format(train_scaled.shape)) print(train_scaled[0]) """ Explanation: Split Training & Test Datasets This step also scales the values between -1 and 1. End of explanation """ batch_size = 1 # required for stateful LSTM neurons = 20 features = 6 labels = 1 model = Sequential() model.add(LSTM(neurons, batch_input_shape=(batch_size, 1, features), stateful=True)) model.add(Dense(features)) model.add(Dropout(0.5)) model.add(Dense(1)) """ Explanation: Define the Model This network defines the following layers (excluding dropouts): Stateful Long Short Term Memory (LSTM) Hidden Dense Layer Output Dense Layer Long Short Term Memory The north remebers and so do LSTMs... Until they forget on purpose. LSTM maintain state over sequences. Unlike simple RNNs, LSTM can forget... 
Image Source: Christopher Olah, Understanding LSTMs Stateful vs Stateless LSTMs Stateless LSTMs reinitialize the state after each batch. By default, Keras uses stateless LSTMs. Stateful LSTMs retain memory across batches. model.reset_states() will clear out the state. If the model uses a larger batch than this example, the Stateless LSTM could update the gradients more efficiently because the state is only maintained over one batch. This example only uses a batch size of one, so a stateless LSTM will only remember one month. Not very helpful. Dense Layers The model adds a hidden layer because why not? The model also uses a single neuron for the layer output to predict next months visitors. End of explanation """ model.compile(loss='mean_squared_error', optimizer='adam') """ Explanation: Optimizer Unlike the first example, this model predicts a scalar value and not a class probability. Thus the loss function minimizes the mean square error. End of explanation """ nb_epoch = 300 X, y = train_scaled[:, 0:-1], train_scaled[:, -1] X = X.reshape(X.shape[0], 1, X.shape[1]) for i in range(nb_epoch): if(i % 50 == 0): print(i) model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False) model.reset_states() """ Explanation: Train the Model Note: Stateful models update gradients for previous batches. Get a cup of coffee... End of explanation """ # inverse scaling for a forecasted value def invert_scale(scaler, X, value): new_row = [x for x in X] + [value] array = numpy.array(new_row) array = array.reshape(1, len(array)) inverted = scaler.inverse_transform(array) return inverted[0, -1] visitor_history = dataset['visitors'].values predictions = list() for i in range(len(test_scaled)): X, y = test_scaled[i, 0:-1], test_scaled[i, -1] X = X.reshape(1, 1, len(X)) scaled_pred = model.predict(X, batch_size=batch_size) visitor_delta = invert_scale(scaler, X[0,0], scaled_pred[0,0]) prev_mon_vistitor = visitor_history[-len(test_scaled)+1-i] pred = prev_mon_vistitor + visitor_delta expected = visitor_history[len(test_scaled) + i + 1] print('Month=%d, Predicted=%f, Expected=%f' % (i+1, pred, expected)) predictions.append(pred) """ Explanation: Create Predictions To get the monthly visitors, We'll need to invert the scaling plus convert to absoult values. End of explanation """ pyplot.plot(visitor_history[-12:]) pyplot.plot(predictions) pyplot.show() """ Explanation: Let's plot the results... End of explanation """ rmse = sqrt(mean_squared_error(visitor_history[-12:], predictions)) print('Test RMSE: %.3f' % rmse) """ Explanation: Did the LSTM work any better than the simple monthly average? End of explanation """
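""" Explanation: As a rough answer to the question above, the sketch below scores two naive baselines on the same 12 held-out months so the LSTM's RMSE has something to be compared against. It is only a sketch: it assumes `dataset`, `sqrt`, and `mean_squared_error` are still in scope from the cells above and that the CSV's date index parsed as shown earlier. The "seasonal average" baseline predicts each held-out month with the mean of that calendar month over the earlier years, while the persistence baseline simply repeats the previous month's count. End of explanation """
visitors = dataset['visitors']
train_part, holdout = visitors[:-12], visitors[-12:]

# seasonal-average baseline: mean of each calendar month over the training years
monthly_means = train_part.groupby(train_part.index.month).mean()
seasonal_preds = [monthly_means[ts.month] for ts in holdout.index]

# persistence baseline: carry last month's count forward one step
persistence_preds = visitors.shift(1)[-12:].values

print('Seasonal-average baseline RMSE: %.3f' % sqrt(mean_squared_error(holdout.values, seasonal_preds)))
print('Persistence baseline RMSE: %.3f' % sqrt(mean_squared_error(holdout.values, persistence_preds)))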
gasabr/AtoD
examples/hero_example.ipynb
mit
am = Hero(1) # You can use attributes to get some hero properties which depend on lvl, examples: print('Anti-Mage stats on lvl {}'.format(am.lvl)) print('\tstrength = {}'.format(am.str)) print('\tagility = {}'.format(am.agi)) print('\tintellect = {}'.format(am.int)) print('\t...') """ Explanation: Hero creation Create a hero. The same can be done with Hero.from_name('Anti-Mage') End of explanation """ sf = Hero.from_name('Shadow Fiend') sf.in_game_name """ Explanation: Class attributes It can be very useful to have the hero's in-game name (how it is used in DotA files): End of explanation """ pprint(am.specs) """ Explanation: You can also get all the specs. This is a dictionary with a lot of parameters. End of explanation """ am.get_role() """ Explanation: Getting info This is the way to get info about a certain side of the hero. List of all 'categories': * type * role * laning * attributes Functions look like hero.get_category(), where category is one of the above words. End of explanation """ print(am.get_description(include=['laning', 'role', 'name'])) """ Explanation: If you want to combine a few descriptions -- Hero.get_description() is the way to go. Note that laning, roles, and hero type are read from the game files and can sometimes be strange. End of explanation """
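""" Explanation: A short usage sketch, reusing only calls already demonstrated above (Hero.from_name, in_game_name, get_role, get_description) and assuming Hero is imported as in the earlier cells. The hero names in the list are just examples -- substitute any names your local game data contains. End of explanation """
for name in ['Anti-Mage', 'Shadow Fiend']:
    hero = Hero.from_name(name)
    print('{} ({})'.format(name, hero.in_game_name))
    print('\trole: {}'.format(hero.get_role()))
    print(hero.get_description(include=['role', 'name']))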
ernestyalumni/Propulsion
Physique/Physique_jupyter.ipynb
gpl-2.0
import os, sys """ Explanation: Physique Mini Table of Contents Using Physique from a working directory not containing Physique itself NIST Fundamental Constants NIST Official Conversions (to metric) Webscraping example: JPL Solar System Dynamics (JPL SSD) - Planets and Pluto Using Physique from a working directory not containing Physique itself End of explanation """ currentdir = os.getcwd(); os.getcwd(); """ Explanation: Get the current directory End of explanation """ currentdir # I'm on a different computer now sys.path.append('/home/topolo/PropD/Propulsion/') import Physique """ Explanation: Then append the directory containing the Physique package/library (it's just a folder) with sys.path.append; the absolute path for where I placed it just happened to be "/Users/ernestyeung/Documents/TeslaModelSP85D": substitute that for the absolute path you find (look at your Finder or File organizing program) End of explanation """ from Physique import FundConst print Physique.FundConst.columns Physique.FundConst """ Explanation: Programming note: __init__.py in the main directory uses os.path.dirname(__file__) with __file__ (literally that, it's not a placeholder name) being the string with the absolute pathname of the "file from which the module was loaded, if it was loaded from a file" (cf. stackoverflow Python file attribute absolute or relative?), i.e. "When a module is loaded in Python, __file__ is set to its name. You can then use that with other functions to find the directory that the file is located in." (cf. stackoverflow what does the file wildcard mean/do?) NIST Fundamental Constants End of explanation """ g_0pd = FundConst[ FundConst["Quantity"].str.contains("gravity") ] # standard acceleration of gravity as a panda DataFrame g_0pd # access the values you're interested in print g_0pd.Quantity print g_0pd.Value.get_values()[0] print g_0pd.Unit.get_values()[0] # you can also grab just the 1 entry from this DataFrame using the .loc module FundConst[FundConst["Quantity"].str.contains("Boltzmann")].loc[49,:] g_0pd.loc[303,:] """ Explanation: Find a Fundamental Constant you are interested in using the usual panda modules End of explanation """ convDF = Physique.conv convDF.columns """ Explanation: NIST Official Conversions (to metric) This is the pandas DataFrame containing all the NIST Official Conversions to SI. End of explanation """ convDF[convDF['Toconvertfrom'].str.contains("pound-force ")] """ Explanation: From the list of columns, search for the quantity you desired by trying out different search terms: e.g. I'm reading Huzel and Huang's Modern Engineering for Design of Liquid-Propellant Rocket Engines and I want to know how to convert from * lb (pound or pound-force) for thrust into force in Newton (N) * psia (pounds per square inch absolute) for (chamber) pressure into pressure in Pascal (Pa) We can try to look up the U.S. or Imperial units from the Toconvertfrom column. End of explanation """ convDF[convDF['to'].str.contains("newton ")] """ Explanation: Or we can look up the SI unit we want to convert to. End of explanation """ lbf2N = convDF.loc[340,:]; lbf2N """ Explanation: Look at what you want and see the index; it happens to be 340 in this example. End of explanation """ print lbf2N.Toconvertfrom, lbf2N.to, lbf2N.Multiplyby """ Explanation: Then the attributes can accessed by the column names. 
End of explanation """ print 470000*lbf2N.Multiplyby, lbf2N.to """ Explanation: So for example, the reusable SSME delivers a vacuum thrust of 470000 lb or End of explanation """ convDF[convDF['Toconvertfrom'].str.match("psi")] """ Explanation: To obtain the conversion for pressure in psia, which we search for with "psi" End of explanation """ psi2Pa = convDF.loc[372,:] print 3028*psi2Pa.Multiplyby, psi2Pa.to """ Explanation: So for a chamber pressure of 3028 psia for the SSME, End of explanation """ convDF[convDF['Toconvertfrom'].str.match("atm")] atm2Pa = convDF.loc[15,:] print 3028*psi2Pa.Multiplyby/atm2Pa.Multiplyby, atm2Pa.Toconvertfrom """ Explanation: Also, get the conversion for atmospheres (atm): End of explanation """ JPL_SSD_URL = "http://ssd.jpl.nasa.gov/" # JPL NASA Solar System Dynamics webpage jpl_ssd_BS = Physique.scrape_BS.scraped_BS(JPL_SSD_URL) """ Explanation: Webscraping example: JPL Solar System Dynamics (JPL SSD) - Planets and Pluto Take a look at the file scrape_BS.py in this Physique folder. BS stands for the BeautifulSoup python module that's extensively used here. Start at the class called scraped_BS which will use the python module requests to put the html out from a url address into a BeautifulSoup object. End of explanation """ # for table in jpl_ssd_BS.soup.find_all("table"): # for subtable in table.find_all("table"): # print subtable.find("table") # uncomment this out and run it to see the whole darn thing """ Explanation: Take a look at it with the usual BeautifulSoup modules (i.e. functions). Now, as the Udacity Data Wrangling instructor said, Shannon Bradshaw, taught, we're going to need to use the Inspect Element (Firefox), or Develop -> Web Inspector (Mac OS X Safari) functions on your web browswer to see what the relevant html codes are. Now in this particular case (webpage formats are all different; assume the worst), there are no distinguishing classes for the tables (they're just nested tables on tables). cf. stackoverflow.com BeautifulSoup scraping nested tables I'm using the solution from this stackoverflow answer. End of explanation """ jpl_ssd_BS.soup.find('img',{"alt":"PHYSICAL DATA"}).parent['href'] JPL_SSD_PHYS_DATA_URL = JPL_SSD_URL + jpl_ssd_BS.soup.find('img',{"alt":"PHYSICAL DATA"}).parent['href'][1:] JPL_SSD_PHYS_DATA_URL jpl_ssd_phys_data_BS = Physique.scrape_BS.scraped_BS(JPL_SSD_PHYS_DATA_URL) """ Explanation: Let's just focus on the Physical Data subpage for today. This is the way to find a specific tag (in this case img) with a specific attribute (in this case alt="PHYSICAL DATA"), and then the parent module gets its parent. Then the href index in the square brackets [] gets the web address we desire. End of explanation """ jpl_ssd_phys_data_BS.soup.find('h2',text="Planets").find_next('a') JPL_SSD_PLANET_PHYS_PAR_URL = JPL_SSD_URL + jpl_ssd_phys_data_BS.soup.find('h2',text="Planets").find_next('a')['href'] jpl_ssd_planet_phys_par_BS = Physique.scrape_BS.scraped_BS(JPL_SSD_PLANET_PHYS_PAR_URL) jpl_ssd_planet_phys_parTBL = jpl_ssd_planet_phys_par_BS.soup.find("div", {"class":"page_title"}).find_next("table") """ Explanation: At this point, I wish there was a rational and civilized manner to scrape all the relevant quantitative data from here for all the links (using Scrapy?) but I need help at this point for that endeavor. Otherwise, I manually look at the webpage itself and manually use Inspect Element to find what I want and then use BeautifulSoup accordingly. 
End of explanation """ data = [] for row in jpl_ssd_planet_phys_parTBL.find_all('tr', recursive=False): cols = row.find_all('td', recursive=False) cols = [ele.text if ele.text != u'\xa0' else u'' for ele in cols] data.append(cols) hdrs = data[:2] # get the headers first jpl_ssd_planet_phys_parTBL.find_all('tr')[2].find_all('td')[18].text data = [[row[0].replace(u'\xa0',''),]+row[1:] for row in data[2:]] # remove the space, \xa0 from each of the planet's names data = [[row[0],]+[ele.replace('\n','') for ele in row[1:]] for row in data] # remove the '\n' strings data = [[row[0],]+[ele.split(u'\xb1')[0] for ele in row[1:]] for row in data] # let's just get the values data = [[row[0],]+[ele.split(u'\xa0')[0] for ele in row[1:]] for row in data] # let's just get the values """ Explanation: Time to scrape the actual html code for the table we desire: jpl_ssd_planet_phys_parTBL. Take a look at the function make_conv_lst in scrape_BS.py and take a look at that first for loop. That's the procedure we'll take (and I confirmed that this is in practice what's done on stackoverflow). But wait: the data values are themselves tables. So again, there is no rhyme or reason for the logic or rationale for the html tables for data, in general, for any websites. So I'll get the headers first (which makes sense) and do an ugly hack for the data values (also notice the recursive=False option). End of explanation """ data = [hdrs[1],] + data import pandas as pd data = pd.DataFrame( data ) data.columns = hdrs[0] data """ Explanation: I'll add back the units as part of the data (I don't know a sane and civilized way of attaching to each of the column names in pandas, a pandas DataFrame, the units, as extra information) End of explanation """ data.to_pickle('./rawdata/JPL_NASA_SSD_Planet_Phys_Par_values.pkl') # values only """ Explanation: Time to save our work as a "pickle'd" pandas DataFrame. End of explanation """ PlanetParDF = pd.read_pickle('./rawdata/JPL_NASA_SSD_Planet_Phys_Par_values.pkl') """ Explanation: And so to access this, to use in Python, do the following, using .read_pickle of pandas: End of explanation """
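""" Explanation: A small convenience sketch built on the lookup pattern used above: given a search string, pull the first matching row of the NIST conversion table and read off its multiplier. It assumes convDF is still in scope; the example search string is one of the rows shown earlier, so adjust it to whatever unit you actually need. End of explanation """
def find_conversion(search, table=convDF):
    # return the first row whose 'Toconvertfrom' entry contains `search`, or None if nothing matches
    matches = table[table['Toconvertfrom'].str.contains(search)]
    if len(matches) == 0:
        return None
    return matches.iloc[0]

row = find_conversion("pound-force ")
if row is not None:
    print('{0} -> {1}: multiply by {2}'.format(row.Toconvertfrom, row.to, row.Multiplyby))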
probprog/pyprob
examples/gaussian_unknown_mean_marsaglia.ipynb
bsd-2-clause
class GaussianUnknownMean(Model): def __init__(self): super().__init__(name='Gaussian with unknown mean (Marsaglia)') # give the model a name self.prior_mean = 1 self.prior_std = math.sqrt(5) self.likelihood_std = math.sqrt(2) def marsaglia(self, mean, stddev): uniform = Uniform(-1, 1) s = 1 while float(s) >= 1: x = pyprob.sample(uniform) y = pyprob.sample(uniform) s = x*x + y*y return mean + stddev * (x * torch.sqrt(-2 * torch.log(s) / s)) def forward(self): # Needed to specifcy how the generative model is run forward # sample the (latent) mean variable to be inferred: mu = self.marsaglia(self.prior_mean, self.prior_std) # define the likelihood likelihood = Normal(mu, self.likelihood_std) # Lets add two observed variables # -> the 'name' argument is used later to assignment values: pyprob.observe(likelihood, name='obs0') # NOTE: observe -> denotes observable variables pyprob.observe(likelihood, name='obs1') # return the latent quantity of interest return mu model = GaussianUnknownMean() """ Explanation: Defining the model First, we define the model as a probabilistic program inheriting from pyprob.Model. Models inherit from torch.nn.Module and can be potentially trained with gradient-based optimization (not covered in this example). The forward function can have any number and type of arguments as needed. End of explanation """ def plot_function(min_val, max_val, func, *args, **kwargs): x = np.linspace(min_val,max_val,int((max_val-min_val)*50)) plt.plot(x, np.vectorize(func)(x), *args, **kwargs) def get_dist_pdf(dist): return lambda x: math.exp(dist.log_prob(x)) class CorrectDistributions: def __init__(self, model): self.prior_mean = model.prior_mean self.prior_std = model.prior_std self.likelihood_std = model.likelihood_std self.prior_dist = Normal(self.prior_mean, self.prior_std) @property def observed_list(self): return self.__observed_list @observed_list.setter def observed_list(self, new_observed_list): self.__observed_list = new_observed_list self.construct_correct_posterior() def construct_correct_posterior(self): n = len(self.observed_list) posterior_var = 1/(n/self.likelihood_std**2 + 1/self.prior_std**2) posterior_mu = posterior_var * (self.prior_mean/self.prior_std**2 + n*np.mean(self.observed_list)/self.likelihood_std**2) self.posterior_dist = Normal(posterior_mu, math.sqrt(posterior_var)) def prior_pdf(self, model, x): p = Normal(model.prior_mean,model.prior_stdd) return math.exp(p.log_prob(x)) def plot_posterior(self, min_val, max_val): if not hasattr(self, 'posterior_dist'): raise AttributeError('observed values are not set yet, and posterior is not defined.') plot_function(min_val, max_val, get_dist_pdf(self.posterior_dist), label='correct posterior', color='orange') def plot_prior(self, min_val, max_val): plot_function(min_val, max_val, get_dist_pdf(self.prior_dist), label='prior', color='green') correct_dists = CorrectDistributions(model) """ Explanation: Finding the correct posterior analytically Altough this model seems to implement complicated distributions, the likelihood is the same as Gaussian unknown mean. Therefore, posterior is defined in the same way as normal Gaussian unknown mean distribution. 
Assuming that the prior and likelihood are $p(x) = \mathcal{N}(\mu_0, \sigma_0)$ and $p(y|x) = \mathcal{N}(x, \sigma)$ respectively and, $y_1, y_2, \ldots y_n$ are the observed values, the posterior would be $p(x|y) = \mathcal{N}(\mu_p, \sigma_p)$ where, $$ \begin{align} \sigma_{p}^{2} & = \frac{1}{\frac{n}{\sigma^2} + \frac{1}{\sigma_{0}^{2}}} \ \mu_p & = \sigma_{p}^{2} \left( \frac{\mu_0}{\sigma_{0}^{2}} + \frac{n\overline{y}}{\sigma^2} \right) \end{align} $$ The following class implements computing this posterior distribution. We also implement some helper functions and variables for plotting the correct posterior and prior. End of explanation """ prior = model.prior_results(num_traces=1000) """ Explanation: Prior distribution We inspect the prior distribution to see if it behaves in the way we intended. First we construct an Empirical distribution with forward samples from the model. Note: Extra arguments passed to prior_distribution will be forwarded to model's forward function. End of explanation """ prior.plot_histogram(show=False, alpha=0.75, label='emprical prior') correct_dists.plot_prior(min(prior.values_numpy()),max(prior.values_numpy())) plt.legend(); """ Explanation: We can plot a historgram of these samples that are held by the Empirical distribution. End of explanation """ correct_dists.observed_list = [8, 9] # Observations # sample from posterior (5000 samples) posterior = model.posterior_results( num_traces=5000, # the number of samples estimating the posterior inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING, # specify which inference engine to use observe={'obs0': correct_dists.observed_list[0], 'obs1': correct_dists.observed_list[1]} # assign values to the observed values ) """ Explanation: Posterior inference with importance sampling For a given set of observations, we can get samples from the posterior distribution. End of explanation """ posterior_unweighted = posterior.unweighted() posterior_unweighted.plot_histogram(show=False, alpha=0.75, label='empirical proposal') correct_dists.plot_prior(min(posterior_unweighted.values_numpy()), max(posterior_unweighted.values_numpy())) correct_dists.plot_posterior(min(posterior_unweighted.values_numpy()), max(posterior_unweighted.values_numpy())) plt.legend(); """ Explanation: Regular importance sampling uses proposals from the prior distribution. We can see this by plotting the histogram of the posterior distribution without using the importance weights. As expected, this is the same with the prior distribution. End of explanation """ posterior.plot_histogram(show=False, alpha=0.75, bins=50, label='inferred posterior') correct_dists.plot_posterior(min(posterior.values_numpy()), max(posterior_unweighted.values_numpy())) plt.legend(); """ Explanation: When we do use the weights, we end up with the correct posterior distribution. The following shows the sampled posterior with the correct posterior (orange curve). End of explanation """ print(posterior.sample()) print(posterior.mean) print(posterior.stddev) print(posterior.expectation(lambda x: torch.sin(x))) """ Explanation: In practice, it is advised to use methods of the Empirical posterior distribution instead of dealing with the weights directly, which ensures that the weights are used in the correct way. 
For instance, we can get samples from the posterior, compute its mean and standard deviation, and evaluate expectations of a function under the distribution: End of explanation """ model.learn_inference_network(num_traces=100000, observe_embeddings={'obs0' : {'dim' : 32}, 'obs1': {'dim' : 32}}, inference_network=pyprob.InferenceNetwork.LSTM) """ Explanation: Inference compilation Inference compilation is a technique where a deep neural network is used for parameterizing the proposal distribution in importance sampling (https://arxiv.org/abs/1610.09900). This neural network, which we call inference network, is automatically generated and trained with data sampled from the model. We can learn an inference network for our model. End of explanation """ # sample from posterior (500 samples) posterior = model.posterior_results( num_traces=500, # the number of samples estimating the posterior inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, # specify which inference engine to use observe={'obs0': correct_dists.observed_list[0], 'obs1': correct_dists.observed_list[1]} # assign values to the observed values ) posterior_unweighted = posterior.unweighted() posterior_unweighted.plot_histogram(show=False, bins=50, alpha=0.75, label='empirical proposal') correct_dists.plot_posterior(min(posterior.values_numpy()), max(posterior.values_numpy())) plt.legend(); posterior.plot_histogram(show=False, bins=50, alpha=0.75, label='inferred posterior') correct_dists.plot_posterior(min(posterior.values_numpy()), max(posterior.values_numpy())) plt.legend(); """ Explanation: We now construct the posterior distribution using samples from inference compilation, using the trained inference network. A much smaller number of samples are enough (500 vs. 5000) because the inference network provides good proposals based on the given observations. We can see that the proposal distribution given by the inference network is doing a job much better than the prior, by plotting the posterior samples without the importance weights, for a selection of observations. End of explanation """ correct_dists.observed_list = [12, 10] # New observations posterior = model.posterior_results( num_traces=500, inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, # specify which inference engine to use observe={'obs0': correct_dists.observed_list[0], 'obs1': correct_dists.observed_list[1]} ) posterior_unweighted = posterior.unweighted() posterior_unweighted.plot_histogram(show=False, bins=50, alpha=0.75, label='empirical proposal') correct_dists.plot_posterior(min(posterior.values_numpy()), max(posterior.values_numpy())) plt.legend(); posterior.plot_histogram(show=False, bins=50, alpha=0.75, label='inferred posterior') correct_dists.plot_posterior(min(posterior.values_numpy()), max(posterior.values_numpy())) plt.legend(); """ Explanation: Inference compilation performs amortized inferece which means, the same trained network provides proposal distributions for any observed values. We can try performing inference using the same trained network with different observed values. End of explanation """
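""" Explanation: A quick numeric sanity check, sketched using only quantities defined above: it recomputes the analytic posterior mean and standard deviation from the closed-form formulas in the markdown (via the values stored on correct_dists) and prints them next to the mean and stddev of the Empirical posterior returned by the last cell, which should roughly agree. End of explanation """
import math
import numpy as np

n = len(correct_dists.observed_list)
post_var = 1.0 / (n / correct_dists.likelihood_std**2 + 1.0 / correct_dists.prior_std**2)
post_mu = post_var * (correct_dists.prior_mean / correct_dists.prior_std**2
                      + n * np.mean(correct_dists.observed_list) / correct_dists.likelihood_std**2)

print('analytic posterior: mean = {}, std = {}'.format(post_mu, math.sqrt(post_var)))
print('inferred posterior: mean = {}, std = {}'.format(posterior.mean, posterior.stddev))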
agconti/kaggle-titanic
Titanic.ipynb
apache-2.0
import matplotlib.pyplot as plt %matplotlib inline import numpy as np import pandas as pd import statsmodels.api as sm from statsmodels.nonparametric.kde import KDEUnivariate from statsmodels.nonparametric import smoothers_lowess from pandas import Series, DataFrame from patsy import dmatrices from sklearn import datasets, svm from KaggleAux import predict as ka # see github.com/agconti/kaggleaux for more details """ Explanation: Kaggle Competition | Titanic Machine Learning from Disaster The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships. One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class. In this contest, we ask you to complete the analysis of what sorts of people were likely to survive. In particular, we ask you to apply the tools of machine learning to predict which passengers survived the tragedy. This Kaggle Getting Started Competition provides an ideal starting place for people who may not have a lot of experience in data science and machine learning." From the competition homepage. Goal for this Notebook: Show a simple example of an analysis of the Titanic disaster in Python using a full complement of PyData utilities. This is aimed for those looking to get into the field or those who are already in the field and looking to see an example of an analysis done with Python. This Notebook will show basic examples of: Data Handling Importing Data with Pandas Cleaning Data Exploring Data through Visualizations with Matplotlib Data Analysis Supervised Machine learning Techniques: Logit Regression Model Plotting results Support Vector Machine (SVM) using 3 kernels Basic Random Forest Plotting results Valuation of the Analysis K-folds cross validation to valuate results locally Output the results from the IPython Notebook to Kaggle Required Libraries: NumPy IPython Pandas SciKit-Learn SciPy StatsModels Patsy Matplotlib To run this notebook interactively, get it from my Github here. The competition's website is located on Kaggle.com. End of explanation """ df = pd.read_csv("data/train.csv") """ Explanation: Data Handling Let's read our data in using pandas: End of explanation """ df """ Explanation: Show an overview of our data: End of explanation """ df = df.drop(['Ticket','Cabin'], axis=1) # Remove NaN values df = df.dropna() """ Explanation: Let's take a look: Above is a summary of our data contained in a Pandas DataFrame. Think of a DataFrame as a Python's super charged version of the workflow in an Excel table. As you can see the summary holds quite a bit of information. First, it lets us know we have 891 observations, or passengers, to analyze here: Int64Index: 891 entries, 0 to 890 Next it shows us all of the columns in DataFrame. Each column tells us something about each of our observations, like their name, sex or age. These colunms are called a features of our dataset. You can think of the meaning of the words column and feature as interchangeable for this notebook. After each feature it lets us know how many values it contains. 
While most of our features have complete data on every observation, like the survived feature here: survived 891 non-null values some are missing information, like the age feature: age 714 non-null values These missing values are represented as NaNs. Take care of missing values: The features ticket and cabin have many missing values and so can’t add much value to our analysis. To handle this we will drop them from the dataframe to preserve the integrity of our dataset. To do that we'll use this line of code to drop the features entirely: df = df.drop(['ticket','cabin'], axis=1) While this line of code removes the NaN values from every remaining column / feature: df = df.dropna() Now we have a clean and tidy dataset that is ready for analysis. Because .dropna() removes an observation from our data even if it only has 1 NaN in one of the features, it would have removed most of our dataset if we had not dropped the ticket and cabin features first. End of explanation """ # specifies the parameters of our graphs fig = plt.figure(figsize=(18,6), dpi=1600) alpha=alpha_scatterplot = 0.2 alpha_bar_chart = 0.55 # lets us plot many diffrent shaped graphs together ax1 = plt.subplot2grid((2,3),(0,0)) # plots a bar graph of those who surived vs those who did not. df.Survived.value_counts().plot(kind='bar', alpha=alpha_bar_chart) # this nicely sets the margins in matplotlib to deal with a recent bug 1.3.1 ax1.set_xlim(-1, 2) # puts a title on our graph plt.title("Distribution of Survival, (1 = Survived)") plt.subplot2grid((2,3),(0,1)) plt.scatter(df.Survived, df.Age, alpha=alpha_scatterplot) # sets the y axis lable plt.ylabel("Age") # formats the grid line style of our graphs plt.grid(b=True, which='major', axis='y') plt.title("Survival by Age, (1 = Survived)") ax3 = plt.subplot2grid((2,3),(0,2)) df.Pclass.value_counts().plot(kind="barh", alpha=alpha_bar_chart) ax3.set_ylim(-1, len(df.Pclass.value_counts())) plt.title("Class Distribution") plt.subplot2grid((2,3),(1,0), colspan=2) # plots a kernel density estimate of the subset of the 1st class passangers's age df.Age[df.Pclass == 1].plot(kind='kde') df.Age[df.Pclass == 2].plot(kind='kde') df.Age[df.Pclass == 3].plot(kind='kde') # plots an axis lable plt.xlabel("Age") plt.title("Age Distribution within classes") # sets our legend for our graph. plt.legend(('1st Class', '2nd Class','3rd Class'),loc='best') ax5 = plt.subplot2grid((2,3),(1,2)) df.Embarked.value_counts().plot(kind='bar', alpha=alpha_bar_chart) ax5.set_xlim(-1, len(df.Embarked.value_counts())) # specifies the parameters of our graphs plt.title("Passengers per boarding location") """ Explanation: For a detailed look at how to use pandas for data analysis, the best resource is Wes Mckinney's book. Additional interactive tutorials that cover all of the basics can be found here (they're free). If you still need to be convinced about the power of pandas check out this wirlwhind look at all that pandas can do. Let's take a Look at our data graphically: End of explanation """ plt.figure(figsize=(6,4)) fig, ax = plt.subplots() df.Survived.value_counts().plot(kind='barh', color="blue", alpha=.65) ax.set_ylim(-1, len(df.Survived.value_counts())) plt.title("Survival Breakdown (1 = Survived, 0 = Died)") """ Explanation: Exploratory Visualization: The point of this competition is to predict if an individual will survive based on the features in the data like: Traveling Class (called pclass in the data) Sex Age Fare Price Let’s see if we can gain a better understanding of who survived and died. 
First let’s plot a bar graph of those who Survived Vs. Those who did not. End of explanation """ fig = plt.figure(figsize=(18,6)) #create a plot of two subsets, male and female, of the survived variable. #After we do that we call value_counts() so it can be easily plotted as a bar graph. #'barh' is just a horizontal bar graph df_male = df.Survived[df.Sex == 'male'].value_counts().sort_index() df_female = df.Survived[df.Sex == 'female'].value_counts().sort_index() ax1 = fig.add_subplot(121) df_male.plot(kind='barh',label='Male', alpha=0.55) df_female.plot(kind='barh', color='#FA2379',label='Female', alpha=0.55) plt.title("Who Survived? with respect to Gender, (raw value counts) "); plt.legend(loc='best') ax1.set_ylim(-1, 2) #adjust graph to display the proportions of survival by gender ax2 = fig.add_subplot(122) (df_male/float(df_male.sum())).plot(kind='barh',label='Male', alpha=0.55) (df_female/float(df_female.sum())).plot(kind='barh', color='#FA2379',label='Female', alpha=0.55) plt.title("Who Survived proportionally? with respect to Gender"); plt.legend(loc='best') ax2.set_ylim(-1, 2) """ Explanation: Now let’s tease more structure out of the data, Let’s break the previous graph down by gender End of explanation """ fig = plt.figure(figsize=(18,4), dpi=1600) alpha_level = 0.65 # building on the previous code, here we create an additional subset with in the gender subset # we created for the survived variable. I know, thats a lot of subsets. After we do that we call # value_counts() so it it can be easily plotted as a bar graph. this is repeated for each gender # class pair. ax1=fig.add_subplot(141) female_highclass = df.Survived[df.Sex == 'female'][df.Pclass != 3].value_counts() female_highclass.plot(kind='bar', label='female, highclass', color='#FA2479', alpha=alpha_level) ax1.set_xticklabels(["Survived", "Died"], rotation=0) ax1.set_xlim(-1, len(female_highclass)) plt.title("Who Survived? with respect to Gender and Class"); plt.legend(loc='best') ax2=fig.add_subplot(142, sharey=ax1) female_lowclass = df.Survived[df.Sex == 'female'][df.Pclass == 3].value_counts() female_lowclass.plot(kind='bar', label='female, low class', color='pink', alpha=alpha_level) ax2.set_xticklabels(["Died","Survived"], rotation=0) ax2.set_xlim(-1, len(female_lowclass)) plt.legend(loc='best') ax3=fig.add_subplot(143, sharey=ax1) male_lowclass = df.Survived[df.Sex == 'male'][df.Pclass == 3].value_counts() male_lowclass.plot(kind='bar', label='male, low class',color='lightblue', alpha=alpha_level) ax3.set_xticklabels(["Died","Survived"], rotation=0) ax3.set_xlim(-1, len(male_lowclass)) plt.legend(loc='best') ax4=fig.add_subplot(144, sharey=ax1) male_highclass = df.Survived[df.Sex == 'male'][df.Pclass != 3].value_counts() male_highclass.plot(kind='bar', label='male, highclass', alpha=alpha_level, color='steelblue') ax4.set_xticklabels(["Died","Survived"], rotation=0) ax4.set_xlim(-1, len(male_highclass)) plt.legend(loc='best') """ Explanation: Here it’s clear that although more men died and survived in raw value counts, females had a greater survival rate proportionally (~25%), than men (~20%) Great! But let’s go down even further: Can we capture more of the structure by using Pclass? Here we will bucket classes as lowest class or any of the high classes (classes 1 - 2). 3 is lowest class. Let’s break it down by Gender and what Class they were traveling in. 
End of explanation """ fig = plt.figure(figsize=(18,12), dpi=1600) a = 0.65 # Step 1 ax1 = fig.add_subplot(341) df.Survived.value_counts().plot(kind='bar', color="blue", alpha=a) ax1.set_xlim(-1, len(df.Survived.value_counts())) plt.title("Step. 1") # Step 2 ax2 = fig.add_subplot(345) df.Survived[df.Sex == 'male'].value_counts().plot(kind='bar',label='Male') df.Survived[df.Sex == 'female'].value_counts().plot(kind='bar', color='#FA2379',label='Female') ax2.set_xlim(-1, 2) plt.title("Step. 2 \nWho Survived? with respect to Gender."); plt.legend(loc='best') ax3 = fig.add_subplot(346) (df.Survived[df.Sex == 'male'].value_counts()/float(df.Sex[df.Sex == 'male'].size)).plot(kind='bar',label='Male') (df.Survived[df.Sex == 'female'].value_counts()/float(df.Sex[df.Sex == 'female'].size)).plot(kind='bar', color='#FA2379',label='Female') ax3.set_xlim(-1,2) plt.title("Who Survied proportionally?"); plt.legend(loc='best') # Step 3 ax4 = fig.add_subplot(349) female_highclass = df.Survived[df.Sex == 'female'][df.Pclass != 3].value_counts() female_highclass.plot(kind='bar', label='female highclass', color='#FA2479', alpha=a) ax4.set_xticklabels(["Survived", "Died"], rotation=0) ax4.set_xlim(-1, len(female_highclass)) plt.title("Who Survived? with respect to Gender and Class"); plt.legend(loc='best') ax5 = fig.add_subplot(3,4,10, sharey=ax1) female_lowclass = df.Survived[df.Sex == 'female'][df.Pclass == 3].value_counts() female_lowclass.plot(kind='bar', label='female, low class', color='pink', alpha=a) ax5.set_xticklabels(["Died","Survived"], rotation=0) ax5.set_xlim(-1, len(female_lowclass)) plt.legend(loc='best') ax6 = fig.add_subplot(3,4,11, sharey=ax1) male_lowclass = df.Survived[df.Sex == 'male'][df.Pclass == 3].value_counts() male_lowclass.plot(kind='bar', label='male, low class',color='lightblue', alpha=a) ax6.set_xticklabels(["Died","Survived"], rotation=0) ax6.set_xlim(-1, len(male_lowclass)) plt.legend(loc='best') ax7 = fig.add_subplot(3,4,12, sharey=ax1) male_highclass = df.Survived[df.Sex == 'male'][df.Pclass != 3].value_counts() male_highclass.plot(kind='bar', label='male highclass', alpha=a, color='steelblue') ax7.set_xticklabels(["Died","Survived"], rotation=0) ax7.set_xlim(-1, len(male_highclass)) plt.legend(loc='best') """ Explanation: Awesome! Now we have a lot more information on who survived and died in the tragedy. With this deeper understanding, we are better equipped to create better more insightful models. This is a typical process in interactive data analysis. First you start small and understand the most basic relationships and slowly increment the complexity of your analysis as you discover more and more about the data you’re working with. Below is the progression of process laid out together: End of explanation """ # model formula # here the ~ sign is an = sign, and the features of our dataset # are written as a formula to predict survived. The C() lets our # regression know that those variables are categorical. 
# Ref: http://patsy.readthedocs.org/en/latest/formulas.html formula = 'Survived ~ C(Pclass) + C(Sex) + Age + SibSp + C(Embarked)' # create a results dictionary to hold our regression results for easy analysis later results = {} # create a regression friendly dataframe using patsy's dmatrices function y,x = dmatrices(formula, data=df, return_type='dataframe') # instantiate our model model = sm.Logit(y,x) # fit our model to the training data res = model.fit() # save the result for outputing predictions later results['Logit'] = [res, formula] res.summary() # Plot Predictions Vs Actual plt.figure(figsize=(18,4)); plt.subplot(121, axisbg="#DBDBDB") # generate predictions from our fitted model ypred = res.predict(x) plt.plot(x.index, ypred, 'bo', x.index, y, 'mo', alpha=.25); plt.grid(color='white', linestyle='dashed') plt.title('Logit predictions, Blue: \nFitted/predicted values: Red'); # Residuals ax2 = plt.subplot(122, axisbg="#DBDBDB") plt.plot(res.resid_dev, 'r-') plt.grid(color='white', linestyle='dashed') ax2.set_xlim(-1, len(res.resid_dev)) plt.title('Logit Residuals'); """ Explanation: I've done my best to make the plotting code readable and intuitive, but if you’re looking for a more detailed look on how to start plotting in matplotlib, check out this beautiful notebook here. Now that we have a basic understanding of what we are trying to predict, let’s predict it. Supervised Machine Learning Logistic Regression: As explained by Wikipedia: In statistics, logistic regression or logit regression is a type of regression analysis used for predicting the outcome of a categorical dependent variable (a dependent variable that can take on a limited number of values, whose magnitudes are not meaningful but whose ordering of magnitudes may or may not be meaningful) based on one or more predictor variables. That is, it is used in estimating empirical values of the parameters in a qualitative response model. The probabilities describing the possible outcomes of a single trial are modeled, as a function of the explanatory (predictor) variables, using a logistic function. Frequently (and subsequently in this article) "logistic regression" is used to refer specifically to the problem in which the dependent variable is binary—that is, the number of available categories is two—and problems with more than two categories are referred to as multinomial logistic regression or, if the multiple categories are ordered, as ordered logistic regression. Logistic regression measures the relationship between a categorical dependent variable and one or more independent variables, which are usually (but not necessarily) continuous, by using probability scores as the predicted values of the dependent variable.[1] As such it treats the same set of problems as does probit regression using similar techniques. The skinny, as explained by yours truly: Our competition wants us to predict a binary outcome. That is, it wants to know whether some will die, (represented as a 0), or survive, (represented as 1). A good place to start is to calculate the probability that an individual observation, or person, is likely to be a 0 or 1. That way we would know the chance that someone survives, and could start making somewhat informed predictions. If we did, we'd get results like this:: (Y axis is the probability that someone survives, X axis is the passenger’s number from 1 to 891.) While that information is useful it doesn’t let us know whether someone ended up alive or dead. 
It just lets us know the chance that they will survive or die. We still need to translate these probabilities into the binary decision we’re looking for. But how? We could arbitrarily say that our survival cutoff is anyone with a probability of survival over 50%. In fact, this tactic would actually perform pretty well for our data and would allow you to make decently accurate predictions. Graphically it would look something like this: If you’re a betting man like me, you don’t like to leave everything to chance. What are the odds that setting that cutoff at 50% works? Maybe 20% or 80% would work better. Clearly we need a more exact way to make that cutoff. What can save the day? In steps the Logistic Regression. A logistic regression follows the all steps we took above but mathematically calculates the cutoff, or decision boundary (as stats nerds call it), for you. This way it can figure out the best cut off to choose, perhaps 50% or 51.84%, that most accurately represents the training data. The three cells below show the process of creating our Logitist regression model, training it on the data, and examining its performance. First, we define our formula for our Logit regression. In the next cell we create a regression friendly dataframe that sets up boolean values for the categorical variables in our formula and lets our regression model know the types of inputs we're giving it. The model is then instantiated and fitted before a summary of the model's performance is printed. In the last cell we graphically compare the predictions of our model to the actual values we are trying to predict, as well as the residual errors from our model to check for any structure we may have missed. End of explanation """ fig = plt.figure(figsize=(18,9), dpi=1600) a = .2 # Below are examples of more advanced plotting. # It it looks strange check out the tutorial above. fig.add_subplot(221, axisbg="#DBDBDB") kde_res = KDEUnivariate(res.predict()) kde_res.fit() plt.plot(kde_res.support,kde_res.density) plt.fill_between(kde_res.support,kde_res.density, alpha=a) plt.title("Distribution of our Predictions") fig.add_subplot(222, axisbg="#DBDBDB") plt.scatter(res.predict(),x['C(Sex)[T.male]'] , alpha=a) plt.grid(b=True, which='major', axis='x') plt.xlabel("Predicted chance of survival") plt.ylabel("Gender Bool") plt.title("The Change of Survival Probability by Gender (1 = Male)") fig.add_subplot(223, axisbg="#DBDBDB") plt.scatter(res.predict(),x['C(Pclass)[T.3]'] , alpha=a) plt.xlabel("Predicted chance of survival") plt.ylabel("Class Bool") plt.grid(b=True, which='major', axis='x') plt.title("The Change of Survival Probability by Lower Class (1 = 3rd Class)") fig.add_subplot(224, axisbg="#DBDBDB") plt.scatter(res.predict(),x.Age , alpha=a) plt.grid(True, linewidth=0.15) plt.title("The Change of Survival Probability by Age") plt.xlabel("Predicted chance of survival") plt.ylabel("Age") """ Explanation: So how well did this work? Lets look at the predictions we generated graphically: End of explanation """ test_data = pd.read_csv("data/test.csv") """ Explanation: Now lets use our model to predict the test set values and then save the results so they can be outputed to Kaggle Read the test data End of explanation """ test_data """ Explanation: Examine our dataframe End of explanation """ test_data['Survived'] = 1.23 """ Explanation: Add our independent variable to our test data. (It is usually left blank by Kaggle because it is the value you are trying to predict.) 
End of explanation """ results # Use your model to make prediction on our test set. compared_resuts = ka.predict(test_data, results, 'Logit') compared_resuts = Series(compared_resuts) # convert our model to a series for easy output # output and submit to kaggle compared_resuts.to_csv("data/output/logitregres.csv") """ Explanation: Our binned results data: End of explanation """ # Create an acceptable formula for our machine learning algorithms formula_ml = 'Survived ~ C(Pclass) + C(Sex) + Age + SibSp + Parch + C(Embarked)' """ Explanation: Results as scored by Kaggle: RMSE = 0.77033 That result is pretty good. ECT ECT ECT End of explanation """ # set plotting parameters plt.figure(figsize=(8,6)) # create a regression friendly data frame y, x = dmatrices(formula_ml, data=df, return_type='matrix') # select which features we would like to analyze # try chaning the selection here for diffrent output. # Choose : [2,3] - pretty sweet DBs [3,1] --standard DBs [7,3] -very cool DBs, # [3,6] -- very long complex dbs, could take over an hour to calculate! feature_1 = 2 feature_2 = 3 X = np.asarray(x) X = X[:,[feature_1, feature_2]] y = np.asarray(y) # needs to be 1 dimenstional so we flatten. it comes out of dmatirces with a shape. y = y.flatten() n_sample = len(X) np.random.seed(0) order = np.random.permutation(n_sample) X = X[order] y = y[order].astype(np.float) # do a cross validation nighty_precent_of_sample = int(.9 * n_sample) X_train = X[:nighty_precent_of_sample] y_train = y[:nighty_precent_of_sample] X_test = X[nighty_precent_of_sample:] y_test = y[nighty_precent_of_sample:] # create a list of the types of kerneks we will use for your analysis types_of_kernels = ['linear', 'rbf', 'poly'] # specify our color map for plotting the results color_map = plt.cm.RdBu_r # fit the model for fig_num, kernel in enumerate(types_of_kernels): clf = svm.SVC(kernel=kernel, gamma=3) clf.fit(X_train, y_train) plt.figure(fig_num) plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=color_map) # circle out the test data plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10) plt.axis('tight') x_min = X[:, 0].min() x_max = X[:, 0].max() y_min = X[:, 1].min() y_max = X[:, 1].max() XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # put the result into a color plot Z = Z.reshape(XX.shape) plt.pcolormesh(XX, YY, Z > 0, cmap=color_map) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.title(kernel) plt.show() """ Explanation: Support Vector Machine (SVM) "So uhhh, what if a straight line just doesn’t cut it." Wikipeda: In machine learning, support vector machines (SVMs, also support vector networks[1]) are supervised learning models with associated learning algorithms that analyze data and recognize patterns, used for classification and regression analysis. The basic SVM takes a set of input data and predicts, for each given input, which of two possible classes forms the output, making it a non-probabilistic binary linear classifier. Given a set of training examples, each marked as belonging to one of two categories, an SVM training algorithm builds a model that assigns new examples into one category or the other. An SVM model is a representation of the examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. 
New examples are then mapped into that same space and predicted to belong to a category based on which side of the gap they fall on. In addition to performing linear classification, SVMs can efficiently perform non-linear classification using what is called the kernel trick, implicitly mapping their inputs into high-dimensional feature spaces. From me The logit model we just implemented was great in that it showed exactly where to draw our decision boundary or our 'survival cut off'. But if you’re like me, you could have thought, "So uhhh, what if a straight line just doesn’t cut it". A linear line is okay, but can we do better? Perhaps a more complex decision boundary like a wave, circle, or maybe some sort of strange polygon would describe the variance observed in our sample better than a line. Imagine if we were predicating survival based on age. It could be a linear decision boundary, meaning each additional time you've gone around the sun you were 1 unit more or less likely to survive. But I think it could be easy to imagine some sort of curve, where a young healthy person would have the best chance of survival, and sadly the very old and very young a like: a poor chance. Now that’s a interesting question to answer. But our logit model can only evaluate a linear decision boundary. How do we get around this? With the usual answer to life the universe and everything; $MATH$. The answer: We could transform our logit equation from expressing a linear relationship like so: $survived = \beta_0 + \beta_1pclass + \beta_2sex + \beta_3age + \beta_4sibsp + \beta_5parch + \beta_6embarked$ Which we'll represent for convenience as: $y = x$ to a expressing a linear expression of a non-linear relationship: $\log(y) = \log(x)$ By doing this we're not breaking the rules. Logit models are only efficient at modeling linear relationships, so we're just giving it a linear relationship of a non-linear thing. An easy way to visualize this by looking at a graph an exponential relationship. Like the graph of $x^3$: Here its obvious that this is not linear. If used it as an equation for our logit model, $y = x^3$; we would get bad results. But if we transformed it by taking the log of our equation, $\log(y) = \log(x^3)$. We would get a graph like this: That looks pretty linear to me. This process of transforming models so that they can be better expressed in a different mathematical plane is exactly what the Support Vector Machine does for us. The math behind how it does that is not trivial, so if your interested; put on your reading glasses and head over here. Below is the process of implementing a SVM model and examining the results after the SVM transforms our equation into three different mathematical plains. The first is linear, and is similar to our logic model. Next is an exponential, polynomial, transformation and finally a blank transformation. End of explanation """ # Here you can output which ever result you would like by changing the Kernel and clf.predict lines # Change kernel here to poly, rbf or linear # adjusting the gamma level also changes the degree to which the model is fitted clf = svm.SVC(kernel='poly', gamma=3).fit(X_train, y_train) y,x = dmatrices(formula_ml, data=test_data, return_type='dataframe') # Change the interger values within x.ix[:,[6,3]].dropna() explore the relationships between other # features. the ints are column postions. ie. [6,3] 6th column and the third column are evaluated. 
res_svm = clf.predict(x.ix[:,[6,3]].dropna()) res_svm = DataFrame(res_svm,columns=['Survived']) res_svm.to_csv("data/output/svm_poly_63_g10.csv") # saves the results for you, change the name as you please. """ Explanation: Any value in the blue survived while anyone in the read did not. Checkout the graph for the linear transformation. It created its decision boundary right on 50%! That guess from earlier turned out to be pretty good. As you can see, the remaining decision boundaries are much more complex than our original linear decision boundary. These more complex boundaries may be able to capture more structure in the dataset, if that structure exists, and so might create a more powerful predictive model. Pick a decision boundary that you like, adjust the code below, and submit the results to Kaggle to see how well it worked! End of explanation """ # import the machine learning library that holds the randomforest import sklearn.ensemble as ske # Create the random forest model and fit the model to our training data y, x = dmatrices(formula_ml, data=df, return_type='dataframe') # RandomForestClassifier expects a 1 demensional NumPy array, so we convert y = np.asarray(y).ravel() #instantiate and fit our model results_rf = ske.RandomForestClassifier(n_estimators=100).fit(x, y) # Score the results score = results_rf.score(x, y) print "Mean accuracy of Random Forest Predictions on the data was: {0}".format(score) """ Explanation: Random Forest "Well, What if this line / decision boundary thing doesn’t work at all." Wikipedia, crystal clear as always: Random forests are an ensemble learning method for classification (and regression) that operate by constructing a multitude of decision trees at training time and outputting the class that is the mode of the classes output by individual trees. Once again, the skinny and why it matters to you: There are always skeptics, and you just might be one about all the fancy lines we've created so far. Well for you, here’s another option; the Random Forest. This technique is a form of non-parametric modeling that does away with all those equations we created above, and uses raw computing power and a clever statistical observation to tease the structure out of the data. An anecdote to explain how this the forest works starts with the lowly gumball jar. We've all guess how many gumballs are in that jar at one time or another, and odds are not a single one of us guessed exactly right. Interestingly though, while each of our individual guesses for probably were wrong, the average of all of the guesses, if there were enough, usually comes out to be pretty close to the actual number of gumballs in the jar. Crazy, I know. This idea is that clever statistical observation that lets random forests work. How do they work? A random forest algorithm randomly generates many extremely simple models to explain the variance observed in random subsections of our data. These models are like our gumball guesses. They are all awful individually. Really awful. But once they are averaged, they can be powerful predictive tools. The averaging step is the secret sauce. While the vast majority of those models were extremely poor; they were all as bad as each other on average. So when their predictions are averaged together, the bad ones average their effect on our model out to zero. The thing that remains, if anything, is one or a handful of those models have stumbled upon the true structure of the data. 
The cell below shows the process of instantiating and fitting a random forest, generating predictions from the resulting model, and then scoring the results. End of explanation """
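""" Explanation: The mean accuracy above is measured on the same rows the forest was fit to, so it is an optimistic estimate. The sketch below, following the k-folds idea from the outline at the top of this notebook, gives a fairer local estimate; it assumes x, y, and ske are still in scope from the random forest cell, and that the scikit-learn version in use still exposes cross_val_score under sklearn.cross_validation (newer releases moved it to sklearn.model_selection). End of explanation """
# k-fold cross validation of the same random forest, scored on held-out folds
from sklearn.cross_validation import cross_val_score

cv_scores = cross_val_score(ske.RandomForestClassifier(n_estimators=100), x, y, cv=5)
print("5-fold CV accuracy: {0:.3f} (+/- {1:.3f})".format(cv_scores.mean(), cv_scores.std()))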
GoogleCloudPlatform/vertex-ai-samples
community-content/tf_keras_image_classification_distributed_multi_worker_with_vertex_sdk/multi_worker_vertex_training_on_gpu_with_custom_container.ipynb
apache-2.0
PROJECT_ID = "YOUR PROJECT ID" BUCKET_NAME = "gs://YOUR BUCKET NAME" REGION = "YOUR REGION" SERVICE_ACCOUNT = "YOUR SERVICE ACCOUNT" content_name = "tf-keras-img-cls-dist-multi-worker-gpu-cust-cont" """ Explanation: TF-Keras Image Classification Distributed Multi-Worker Training on GPU using Vertex Training with Custom Container <table align="left"> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/community-content/tf_keras_image_classification_distributed_multi_worker_with_vertex_sdk/multi_worker_vertex_training_on_gpu_with_custom_container.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> Setup End of explanation """ hostname = "gcr.io" image_name = content_name tag = "latest" custom_container_image_uri = f"{hostname}/{PROJECT_ID}/{image_name}:{tag}" ! cd trainer && docker build -t $custom_container_image_uri -f gpu.Dockerfile . ! docker run --rm $custom_container_image_uri --epochs 2 --local-mode ! docker push $custom_container_image_uri ! gcloud container images list --repository $hostname/$PROJECT_ID """ Explanation: Vertex Training using Vertex SDK and Custom Container Build Custom Container End of explanation """ ! pip install -r requirements.txt from google.cloud import aiplatform aiplatform.init( project=PROJECT_ID, staging_bucket=BUCKET_NAME, location=REGION, ) """ Explanation: Initialize Vertex SDK End of explanation """ tensorboard = aiplatform.Tensorboard.create( display_name=content_name, ) """ Explanation: Create a Vertex Tensorboard Instance End of explanation """ display_name = content_name gcs_output_uri_prefix = f"{BUCKET_NAME}/{display_name}" replica_count = 4 machine_type = "n1-standard-4" accelerator_count = 1 accelerator_type = "NVIDIA_TESLA_K80" container_args = [ "--epochs", "50", "--batch-size", "32", ] custom_container_training_job = aiplatform.CustomContainerTrainingJob( display_name=display_name, container_uri=custom_container_image_uri, ) custom_container_training_job.run( args=container_args, base_output_dir=gcs_output_uri_prefix, replica_count=replica_count, machine_type=machine_type, accelerator_type=accelerator_type, accelerator_count=accelerator_count, tensorboard=tensorboard.resource_name, service_account=SERVICE_ACCOUNT, ) print(f"Custom Training Job Name: {custom_container_training_job.resource_name}") print(f"GCS Output URI Prefix: {gcs_output_uri_prefix}") """ Explanation: Option: Use a Previously Created Vertex Tensorboard Instance tensorboard_name = "Your Tensorboard Resource Name or Tensorboard ID" tensorboard = aiplatform.Tensorboard(tensorboard_name=tensorboard_name) Run a Vertex SDK CustomContainerTrainingJob End of explanation """ ! gsutil ls $gcs_output_uri_prefix """ Explanation: Training Output Artifact End of explanation """ ! gsutil rm -rf $gcs_output_uri_prefix """ Explanation: Clean Up Artifact End of explanation """
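""" Explanation: An optional extra cleanup sketch for the other resources this notebook created, on the assumption that you no longer need them and that your installed version of the Vertex SDK exposes delete() on these resource objects (delete() is not exercised elsewhere in this notebook). The gcloud line removes the container image that was pushed earlier; drop any line for a resource you want to keep. End of explanation """
# delete the custom training job and the Tensorboard instance created above
custom_container_training_job.delete()
tensorboard.delete()

# remove the pushed container image from the registry
! gcloud container images delete $custom_container_image_uri --force-delete-tags --quiet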
AstroHackWeek/AstroHackWeek2017
day1/notebooks/demo-SDSS.ipynb
mit
from os import path from astropy.table import Table import h5py import matplotlib.pyplot as plt plt.style.use('notebook.mplstyle') %matplotlib inline import numpy as np data_path = '../data/' """ Explanation: SDSS/BOSS spectroscopic data The SDSS dataset contains 100,000 spectra and catalog data for the same sources from the BOSS survey. Spectra: spPlate-merged.hdf5 All of the spectra have the same rest-frame wavelength grid (things are redshifted). They have 4603 pixels, and we provide the wavelength grid, flux values, and inverse-variance (uncertainties) for each source. Spectroscopic catalog info: specObj-merged.hdf5 We provide a row-matched table of spectroscopic catalog information derived from the spectra by the SDSS pipeline. This is provided as a table stored in an HDF5 file. Photometric catalog info: photoPosPlate-merged.hdf5 We also provide a row-matched table of photometric catalog information from the SDSS imaging and derived by the SDSS photometric pipeline. This is provided as a table stored in an HDF5 file. End of explanation """ with h5py.File(path.join(data_path, 'sdss', 'spPlate-merged.hdf5')) as f: print(list(f.keys())) print(f['flux'].shape) """ Explanation: Spectra The spectra are stored in 3 datasets in the spPlate-merged.hdf5 file: End of explanation """ with h5py.File(path.join(data_path, 'sdss', 'spPlate-merged.hdf5')) as f: wave = f['wave'][:] flux = f['flux'][71924] ivar = f['ivar'][71924] plt.figure(figsize=(12,4)) plt.plot(wave, flux, marker='None', linewidth=1, linestyle='-', drawstyle='steps-mid') plt.plot(wave, 1/np.sqrt(ivar), marker='None', linestyle='-', drawstyle='steps-mid', alpha=0.75, linewidth=1) plt.xlim(wave.min(), wave.max()) plt.xlabel(r'wavelength [${\rm \AA}$]') """ Explanation: The spectral flux for all objects is stored in the 'flux' dataset as a single 2D array. There are 100000 spectra, each with 4603 pixels. The inverse-variance (uncertainty) for the flux is stored in the 'ivar' dataset, also as a single 2D array, with the same shape as the flux array. The inverse-variance array will be 0 where the flux data are bad. The wavelength array is stored as a 1D array in the 'wave' dataset: the wavelength grid is the same for all spectra. Example: Let's read a random spectrum (at index 71924), and plot the wavelength, flux, and inverse-variance: End of explanation """ specObj = Table.read(path.join(data_path, 'sdss', 'specObj-merged.hdf5'), path='specObj') len(specObj) """ Explanation: Spectroscopic catalog data End of explanation """ print(specObj.colnames) """ Explanation: This table has many columns - some that might be useful as labels or for filtering: CLASS - the best guess of the type of object, can be "STAR", "QSO", or "GALAXY" Z and Z_ERR - the estimated redshift and redshift error VDISP - the stellar velocity dispersion (measured from line widths) from a galaxy spectrum SN_MEDIAN - the signal to noise. might be useful if you want to first test on only high SN sources ELODIE_TEFF, ELODIE_LOGG, ELODIE_FEH - for stars, the effective temperature, surface gravity, and metallicity measured from template spectra End of explanation """ spec_class = specObj['CLASS'].astype(str) spec_classes = np.unique(spec_class) for cls in spec_classes: print(cls, (spec_class == cls).sum()) """ Explanation: Example: What are the possible spectral classes and how many spectra do we have of each? 
End of explanation """ bins = np.linspace(0, 5, 24) for cls in ['GALAXY', 'QSO']: plt.hist(specObj['Z'][specObj['CLASS'] == cls], bins=bins, label=cls, alpha=0.4) plt.legend(fontsize=20) plt.xlabel('redshift, $z$') """ Explanation: Example: What are the redshift distributions of all objects classified as GALAXY or QSO? End of explanation """ photoPos = Table.read(path.join(data_path, 'sdss', 'photoPosPlate-merged.hdf5'), path='photoPosPlate') print(photoPos.colnames) """ Explanation: Photometric catalog data This table also has many columns - some that might be useful as labels or for filtering: PSFMAG / PSFMAGERR - the PSF magnitudes in each of the 5 SDSS filters, $ugriz$ EXTINCTION - the extinction in each of the 5 filters End of explanation """ photoPos['PSFMAG'].shape """ Explanation: This column has 5 elements per source, one magnitude for each of $ugriz$: End of explanation """ g_r = photoPos['PSFMAG'][:,1] - photoPos['PSFMAG'][:,2] r_i = photoPos['PSFMAG'][:,2] - photoPos['PSFMAG'][:,3] fig, axes = plt.subplots(1, len(spec_classes), figsize=(12.5,5), sharex=True, sharey=True) for i, cls in enumerate(spec_classes): axes[i].plot(g_r[spec_class == cls], r_i[spec_class == cls], marker='.', linestyle='none', alpha=0.1) axes[i].set_title(cls) axes[i].set_xlabel('$g-r$ [mag]') axes[0].set_xlim(-0.5, 2.5) axes[0].set_ylim(-1, 2) axes[0].set_ylabel('$r-i$ [mag]') fig.tight_layout() """ Explanation: Example: Let's plot the g-r, r-i colors of all of our sources for each of the spectroscopic classes: End of explanation """ color_cut = (g_r > 0.45) & (g_r < 0.55) & (r_i > 0.) & (r_i < 0.4) print("{0} objects pass this cut".format(color_cut.sum())) fig, ax = plt.subplots(1, 1, figsize=(5,5)) ax.plot(g_r[spec_class == 'STAR'], r_i[spec_class == 'STAR'], marker='.', linestyle='none', alpha=0.25, color='#aaaaaa') ax.plot(g_r[(spec_class == 'STAR') & color_cut], r_i[(spec_class == 'STAR') & color_cut], marker='.', linestyle='none', alpha=0.5) ax.set_xlim(-0.5, 2.5) ax.set_ylim(-1, 2) ax.set_xlabel('$g-r$ [mag]') ax.set_ylabel('$r-i$ [mag]') """ Explanation: Example: Select all spectra that meet some photometric cuts We'll define a color cut in $g-r$, $r-i$ colors and co-add all spectra in that box that are also stars: End of explanation """ with h5py.File(path.join(data_path, 'sdss', 'spPlate-merged.hdf5')) as f: wave = f['wave'][:] color_cut_flux = f['flux'][color_cut, :] color_cut_ivar = f['ivar'][color_cut, :] color_cut_coadd = np.sum(color_cut_flux, axis=0) plt.figure(figsize=(12,6)) plt.plot(wave, color_cut_coadd, marker='None', linewidth=1, linestyle='-', drawstyle='steps-mid') plt.xlim(wave.min(), wave.max()) plt.xlabel(r'wavelength [${\rm \AA}$]') """ Explanation: Now we'll load the spectra for those objects: End of explanation """
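As a hedged follow-on (not in the original notebook), the plain sum above can be replaced by an inverse-variance-weighted coadd so that noisy pixels contribute less; the sketch reuses only the wave, color_cut_flux, and color_cut_ivar arrays defined above.

# Inverse-variance weighted coadd; pixels where every spectrum has ivar == 0
# are left at zero instead of dividing by zero.
weight_sum = np.sum(color_cut_ivar, axis=0)
weighted_flux = np.sum(color_cut_flux * color_cut_ivar, axis=0)

ivar_weighted_coadd = np.zeros_like(weighted_flux)
good = weight_sum > 0
ivar_weighted_coadd[good] = weighted_flux[good] / weight_sum[good]

plt.figure(figsize=(12, 6))
plt.plot(wave, ivar_weighted_coadd, marker='None', linewidth=1,
         linestyle='-', drawstyle='steps-mid')
plt.xlim(wave.min(), wave.max())
plt.xlabel(r'wavelength [${\rm \AA}$]')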
msampathkumar/data_science_sessions
Session-1-Introduction_to_Data_Sciences/Data Science Presentation.ipynb
mit
%%sh # ls -l ~/Downloads/G20*csv # mv ~/Downloads/G20*csv G20.csv """ Explanation: INDIA, G-20 AND THE WORLD - Statistical Year Book India 2016 Navigation Path: Home > Statistical Year Book India 2016 > INDIA, G-20 AND THE WORLD The G20 (or G-20 or Group of Twenty) is an international forum for the governments and central bank governors from 20 major economies. It was founded in 1999 with the aim of studying, reviewing, and promoting high-level discussion of policy issues pertaining to the promotion of international financial stability.[3] It seeks to address issues that go beyond the responsibilities of any one organization.[3] The G20 heads of government or heads of state have periodically conferred at summits since their initial meeting in 2008, and the group also hosts separate meetings of finance ministers and central bank governors. The members include 19 individual countries and along with the European Union (EU). The EU is represented by the European Commission and by the European Central Bank. Collectively, the G20 economies account for around 85% of the gross world product (GWP), 80% of world trade (or, if excluding EU intra-trade, 75%), and two-thirds of the world population.[2] Data Source: * http://mospi.nic.in/statistical-year-book-india/2016/170 References: * Wikipedia G20 Data Gathering wget http://mospi.nic.in/statistical-year-book-india/2016/170 Country Area Population (Millions) GDP Billions (USD) Gross Domestic Product Per Capita Income at Current Price (USD) Gross domestic product based on Purchasing-Power-Parity (PPP) valuation of Country GDP in Billions ( Current International Dollar) wget https://docs.google.com/a/imaginea.com/spreadsheets/d/1jbwyZsHy_SsJ-ANWlNVgMKOl5PkoMMcqkMiMJRXDXms/edit?usp=sharing End of explanation """ data = pd.read_csv('G20.csv') cols = ['Area', 'Population_2010', 'Population_2011', 'Population_2012', 'Population_2013', 'Population_2014', 'Population_2015', 'GDP_2010', 'GDP_2011', 'GDP_2012', 'GDP_2013', 'GDP_2014', 'GDP_2015', 'GDP_PCI_2010', 'GDP_PCI_2011', 'GDP_PCI_2012', 'GDP_PCI_2013', 'GDP_PCI_2014', 'GDP_PCI_2015', 'GDP_PPP_2010', 'GDP_PPP_2011', 'GDP_PPP_2012', 'GDP_PPP_2013', 'GDP_PPP_2014', 'GDP_PPP_2015'] data[cols] = data[cols].applymap(lambda x: float(str(x).replace(',', ''))) all_countries = sorted(data.Country.unique()) country_labler = all_countries.index # country_labler('India') # data.Country = data.Country.map(country_labler) sorted(data.columns.tolist()) cols1 = ['GDP_2010', 'GDP_2011', 'GDP_2012', 'GDP_2013', 'GDP_2014', 'GDP_2015',] cols2 = [ 'GDP_PPP_2010', 'GDP_PPP_2011', 'GDP_PPP_2012', 'GDP_PPP_2013', 'GDP_PPP_2014', 'GDP_PPP_2015'] cols3 = [] data1 = data[['Area', 'Country', 'GDP_2010', 'GDP_2011', 'GDP_2012', 'GDP_2013', 'GDP_2014', 'GDP_2015',]].copy() data2 = data[['Area', 'Country', 'GDP_PPP_2010', 'GDP_PPP_2011', 'GDP_PPP_2012', 'GDP_PPP_2013', 'GDP_PPP_2014', 'GDP_PPP_2015',]].copy() data3 = data[['Area', 'Country', 'GDP_PCI_2010', 'GDP_PCI_2011', 'GDP_PCI_2012', 'GDP_PCI_2013', 'GDP_PCI_2014', 'GDP_PCI_2015',]].copy() data4 = data[['Area', 'Country', 'Population_2010', 'Population_2011', 'Population_2012', 'Population_2013', 'Population_2014', 'Population_2015']].copy() """ Explanation: Data Cleanup End of explanation """ import sklearn.cluster clf = sklearn.cluster.AgglomerativeClustering(5) pred = clf.fit_predict(data1['GDP_2010 GDP_2011 GDP_2012 GDP_2013 GDP_2014 GDP_2015'.split()]) pred new_data.metric.unique() new_data.head(20).copy(deep=True) # segregating year & param new_data['year'] = 
new_data.metric.map(lambda x: int(x.rsplit('_')[-1])) new_data['param'] = new_data.metric.map(lambda x: ''.join(x.rsplit('_')[:-1])) # drop metric column new_data.drop('metric', axis=1, inplace=True) # converting data into integers # Key values to check how the world print('Country', new_data.country.unique()) print('Country', new_data.param.unique()) temp = new_data[(new_data.country == 'USA') & (new_data.param == 'GDP')].copy(deep=True) temp X_Label = 'USA' Y_Label = 'GDP' plt.figure(figsize=(15, 5)) temp = new_data[(new_data.country == X_Label) & (new_data.param == Y_Label)].copy(deep=True) _x, _y = temp.year.values, temp.value.values plt.plot(_x, _y) plt.xticks(_x, map(str, _x)) X_Label = 'European Union' Y_Label = 'GDP' plt.figure(figsize=(15, 5)) temp = new_data[(new_data.country == X_Label) & (new_data.param == Y_Label)].copy(deep=True) _x, _y = temp.year.values, temp.value.values plt.plot(_x, _y) plt.xticks(_x, map(str, _x)) X_Label = 'USA' Y_Label = 'GDP' plt.figure(figsize=(15, 5)) temp = new_data[(new_data.country == X_Label) & (new_data.param == Y_Label)].copy(deep=True) _x, _y = temp.year.values, temp.value.values plt.plot(_x, _y) plt.xticks(_x, map(str, _x)) _y _y - _y.min() Y_Label = 'Population' plt.figure(figsize=(15, 8)) all_countries = new_data.country.unique()[:5] for X_Label in all_countries: temp = new_data[(new_data.country == X_Label) & (new_data.param == Y_Label)].copy(deep=True) _x, _y = temp.year.values, temp.value.values _y = _y - _y.min() plt.plot(_x, _y) plt.xticks(_x, map(str, _x)) plt.legend(all_countries) """ Explanation: Experiments End of explanation """ country_codes = {'Argentina': 'ARG', 'Australia': 'AUS', 'Brazil': 'BRA', 'Canada': 'CAN', 'China': 'CHN', 'European Union': 'USA', 'France': 'FRA', 'Germany': 'DEU', 'India': 'IND', 'Indonesia': 'IDN', 'Italy': 'ITA', 'Japan': 'JPN', 'Mexico': 'MEX', 'Republic of Korea': 'USA', 'Russia': 'RUS', 'Saudi Arabia': 'SAU', 'South Africa': 'ZAF', 'Turkey': 'TUR', 'USA': 'USA', 'United Kingdom': 'GBR'} chart_colors = ["rgb(0,0,0)", "rgb(255,255,255)", "rgb(255,0,0)", "rgb(0,255,0)", "rgb(0,0,255)", "rgb(255,255,0)", "rgb(0,255,255)", "rgb(255,0,255)", "rgb(192,192,192)", "rgb(128,128,128)", "rgb(128,0,0)", "rgb(128,128,0)", "rgb(0,128,0)", "rgb(128,0,128)", "rgb(0,128,128)", "rgb(0,0,128)",] chart_colors += chart_colors chart_colors = chart_colors[:len(country_codes)] data1['Country_Codes'] = data1['Country'].map(lambda x: country_codes[x]) import sklearn.cluster clf = sklearn.cluster.AgglomerativeClustering(5) pred = clf.fit_predict(data1['GDP_2010 GDP_2011 GDP_2012 GDP_2013 GDP_2014 GDP_2015'.split()]) pred data1['cluster'] = pred data1['text'] = 'Cluster ID' + data1.cluser data1.head() import plotly.plotly as py import pandas as pd # df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2014_world_gdp_with_codes.csv') data = [ dict( type = 'choropleth', locations = data1['Country_Codes'], z = data1['cluser'], text = data1['Country_Codes'], # colorscale = [[0,"rgb(5, 10, 172)"],[0.35,"rgb(40, 60, 190)"],[0.5,"rgb(70, 100, 245)"],\ # [0.6,"rgb(90, 120, 245)"],[0.7,"rgb(106, 137, 247)"],[1,"rgb(220, 220, 220)"]], # autocolorscale = True, # reversescale = True, # marker = dict( # line = dict ( # color = 'rgb(180,180,180)', # width = 0.5 # ) ), colorbar = dict( autotick = False, tickprefix = '$', title = 'GDP<br>Billions US$'), ) ] layout = dict( title = 'G-20"s GDP', geo = dict( showframe = False, showcoastlines = False, projection = dict( type = 'Mercator' ) ) ) fig = dict(data=data, 
layout=layout) # py.iplot( fig, validate=False, filename='d3-world-map' ) plot( fig, validate=False, filename='d3-world-map') fig = { 'data': [ { 'x': df2007.gdpPercap, 'y': df2007.lifeExp, 'text': df2007.country, 'mode': 'markers', 'name': '2007'}, { 'x': df1952.gdpPercap, 'y': df1952.lifeExp, 'text': df1952.country, 'mode': 'markers', 'name': '1952'} ], 'layout': { 'xaxis': {'title': 'GDP per Capita', 'type': 'log'}, 'yaxis': {'title': "Life Expectancy"} } } data = [] year = 'GDP_2015' data.append({ 'x': data1[year], 'y': data1['cluster'], 'mode': 'markers', 'text': data1['Country'], 'name': year, 'colors': chart_colors }) fig = dict(data=data, layout=layout) # py.iplot( fig, validate=False, filename='d3-world-map' ) plot( fig, validate=False, filename='d3-world-map') """ Explanation: Ideas Show top 5 countries Show only comparable countries End of explanation """ from sklearn import datasets # import some data to play with iris = datasets.load_iris() X = iris.data # [:, :2] # we only take the first two features. Y = iris.target X[:5] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.25, random_state=0) print(X_train.shape, y_train.shape, X_test.shape, y_test.shape) from sklearn.metrics import accuracy_score """ Explanation: IRIS Dataset End of explanation """ from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier() clf = clf.fit(X_train, y_train) accuracy_score(clf.predict(X_train), y_train) accuracy_score(clf.predict(X_test), y_test) accuracy_score(clf.predict(X), Y) """ Explanation: Random Forest End of explanation """ from sklearn import svm clf = svm.SVC(kernel='linear', C=2) clf = clf.fit(X_train, y_train) accuracy_score(clf.predict(X_train), y_train) accuracy_score(clf.predict(X_test), y_test) accuracy_score(clf.predict(X), Y) """ Explanation: SVM End of explanation """
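A short, hedged addition to the session notes: the accuracy scores computed on the training set and on the full dataset above are optimistic because the models have already seen those rows, so a k-fold cross-validation score gives a fairer comparison between the two classifiers. Only objects already imported or defined above (X, Y, RandomForestClassifier, svm) are reused.

from sklearn.model_selection import cross_val_score

for name, estimator in [('Random Forest', RandomForestClassifier()),
                        ('SVM (linear, C=2)', svm.SVC(kernel='linear', C=2))]:
    scores = cross_val_score(estimator, X, Y, cv=5)
    print('{}: mean CV accuracy = {:.3f} (std = {:.3f})'.format(
        name, scores.mean(), scores.std()))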
stallmanifold/cs229-machine-learning-stanford-fall-2016
src/homework2/q3/homework2_3.ipynb
apache-2.0
import numpy as np import pandas as pd import read_matrix as rm import nb_train import nb_test import svm_train import svm_test """ Explanation: Homework 2 Problem 3 End of explanation """ df_train = rm.read_data('spam_data/MATRIX.TRAIN') nb_model = nb_train.train(df_train) """ Explanation: Part 3.a The first machine learning algorithm for classifying spam emails is the Naive Bayes model. First we trained the model using the MATRIX.TRAIN data file. End of explanation """ df_test = rm.read_data('spam_data/MATRIX.TEST') nb_predictions = nb_test.test(nb_model, df_test) """ Explanation: Next we ran the model against the testing data. End of explanation """ y = df_test.iloc[:,0] nb_error = nb_test.compute_error(y, nb_predictions) print('NB Test error: {}'.format(nb_error)) """ Explanation: The following is the testing error. End of explanation """ words = nb_test.k_most_indicative_words(5, nb_model.to_dataframe().iloc[:,1:]) print('The {} most spam-worthy words are: {}'.format(len(words), words)) """ Explanation: Part 3.b. The five most indicative words of a spam message are the following. End of explanation """ training_set_files = { 50 : 'spam_data/MATRIX.TRAIN.50', 100 : 'spam_data/MATRIX.TRAIN.100', 200 : 'spam_data/MATRIX.TRAIN.200', 400 : 'spam_data/MATRIX.TRAIN.400', 800 : 'spam_data/MATRIX.TRAIN.800', 1400 : 'spam_data/MATRIX.TRAIN.1400' } """ Explanation: Part 3.c. To test the convergence properties of the Naive Bayes classifier on the email data set, it needs to be run on different training set sizes. Here we use six different sized training sets to see how the error rate progresses. End of explanation """ nb_models = {} for size, filename in training_set_files.items(): df_next = rm.read_data(filename) m = nb_train.train(df_next) nb_models[size] = m nb_errors = {} for size, model in nb_models.items(): guessed_y = nb_test.test(model, df_test) err = nb_test.compute_error(y, guessed_y) nb_errors[size] = err """ Explanation: Estimate the models and compute the errors. End of explanation """ print('Naive Bayes') for size, error in nb_errors.items(): print('size: {}; error: {}'.format(size, error)) """ Explanation: The resulting errors are End of explanation """ tau = 8 max_iters = 40 svm_model = svm_train.train(df_train, tau, max_iters) """ Explanation: As the training set size increases, the error rate for the Naive Bayes classifier decreases. It converges above a training set size of about 1000 emails. Part 3.d. The second model used to classify the emails is a support vector machine. As in part (a), we train the SVM model using the MATRIX.TRAIN data. End of explanation """ svm_predictions = svm_test.test(svm_model, df_test) print(svm_predictions.shape) """ Explanation: Next, we run the trained SVM model against the testing data. End of explanation """ ytest = 2 * df_test.iloc[:,0].as_matrix() - 1 svm_error = svm_test.compute_error(ytest, svm_predictions) print('SVM Test Error: {}'.format(svm_error)) """ Explanation: The testing error is: End of explanation """ svm_models = {} for size, filename in training_set_files.items(): df_next = rm.read_data(filename) m = svm_train.train(df_next, tau, max_iters) svm_models[size] = m """ Explanation: For the varying sized training sets, we estimate an SVM model. End of explanation """ svm_errors = {} for size, model in svm_models.items(): guessed_y = svm_test.test(model, df_test) err = svm_test.compute_error(ytest, guessed_y) svm_errors[size] = err """ Explanation: And we compute the errors for each model. 
End of explanation """ print('Support Vector Machine') for size, error in svm_errors.items(): print('size: {}; error: {}'.format(size, error)) """ Explanation: The resulting errors are End of explanation """
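An optional, hedged visualization of the tables above: plotting test error against training-set size for both classifiers makes the convergence comparison easier to see. It assumes matplotlib is available in this environment and reuses the nb_errors and svm_errors dictionaries computed above.

import matplotlib.pyplot as plt

sizes = sorted(nb_errors.keys())
plt.plot(sizes, [nb_errors[s] for s in sizes], marker='o', label='Naive Bayes')
plt.plot(sizes, [svm_errors[s] for s in sizes], marker='s', label='SVM')
plt.xscale('log')
plt.xlabel('training set size')
plt.ylabel('test error')
plt.legend()
plt.show()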
ramseylab/networkscompbio
class18_motifs_python3_template.ipynb
apache-2.0
import igraph import random import collections """ Explanation: Example implementation of the ESU subgraph enumeration algorithm in python Load the packages that we will need End of explanation """ def exclusive_neighborhood(graph, v, Vp): assert type(graph)==igraph.Graph assert type(v)==int assert type(Vp)==set Nv = set(graph.neighborhood(v)) NVpll = graph.neighborhood(list(Vp)) NVp = set([u for sublist in NVpll for u in sublist]) return Nv - NVp """ Explanation: Set the random number seed to 1337: Define an exclusive_neighborhood function which takes three arguments: - graph the whole graph object - v a single vertex, as an integer - Vp a set of vertices returns: a set of vertex IDs of the set difference N(v)-N(Vp) side effects: none End of explanation """ def extend_subgraph(graph, Vsubgraph, Vextension, v, k, k_subgraphs): assert type(graph)==igraph.Graph assert type(Vsubgraph)==set assert type(Vextension)==set assert type(v)==int assert type(k)==int assert type(k_subgraphs)==list if len(Vsubgraph) == k: k_subgraphs.append(Vsubgraph) assert 1==len(set(graph.subgraph(Vsubgraph).clusters(mode=igraph.WEAK).membership)) return while len(Vextension) > 0: w = random.choice(tuple(Vextension)) Vextension.remove(w) ## obtain the "exclusive neighborhood" Nexcl(w, vsubgraph) NexclwVsubgraph = exclusive_neighborhood(graph, w, Vsubgraph) VpExtension = Vextension | set([u for u in NexclwVsubgraph if u > v]) extend_subgraph(graph, Vsubgraph | set([w]), VpExtension, v, k, k_subgraphs) return """ Explanation: Define the extend_subgraph function, which takes six arguments: - graph the whole graph object - Vsubgraph which is a set of vertices (cardinality 1--k) - Vextension which is a set of vertices (cardinality 0--N) - v which is the start vertex from which we are to extend - k the integer number of vertices in the motif (only sane values are 3 or 4) - k_subgraphs a list of subgraph objects (modified) Returns: nothing (but see k_subgraphs which is really the return data) side effects: Vextension and k_subgraphs are modified End of explanation """ def enumerate_subgraphs(graph, k): assert type(graph)==igraph.Graph assert type(k)==int k_subgraphs = [] for vertex_obj in graph.vs: v = vertex_obj.index Vextension = set([u for u in G.neighbors(v) if u > v]) extend_subgraph(graph, set([v]), Vextension, v, k, k_subgraphs) return k_subgraphs """ Explanation: Define the enumerate_subgraphs function, which takes two arguments: - graph, the whole graph object - k, the integer number of vertices in the motif (only sane values are 3 or 4) returns: a list of set objects containing the vertices of each of the size k subgraphs side effects: none End of explanation """ N = 6 K = 2 G = """ Explanation: Make an undirected Barabasi-Albert graph G with 20 vertices and 3 edges per step (using igraph.Graph.Barabasi); as usual, print the graph summary End of explanation """ sgset = """ Explanation: Let's take a look at the structure of this graph that we made, using igraph.drawing.plot: Now let's run our ESU algorithm code with k=4, and get back the list of subgraphs: End of explanation """ sgset """ Explanation: Let's print the list of subgraphs: (What type is each list element?) End of explanation """ subgraph_isoclass_list = subgraph_isoclass_list """ Explanation: We don't know the isomorphism class of each of these subgraphs. Let's use igraph.Graph.isoclass for that: End of explanation """
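Since this is the template version of the notebook, the cells above are intentionally left blank. One possible completion, under the stated assumptions (seed 1337, a 20-vertex Barabasi-Albert graph with 3 edges per step, and k = 4), is sketched below; it reuses only modules and functions imported or defined above.

random.seed(1337)

# Undirected Barabasi-Albert graph with 20 vertices, 3 edges added per step
G = igraph.Graph.Barabasi(n=20, m=3)
print(G.summary())

# Enumerate all connected size-4 subgraphs with the ESU implementation above
sgset = enumerate_subgraphs(G, 4)

# Classify each subgraph by isomorphism class and count the classes
subgraph_isoclass_list = [G.subgraph(list(vs)).isoclass() for vs in sgset]
print(collections.Counter(subgraph_isoclass_list))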
keras-team/keras-io
examples/nlp/ipynb/text_classification_from_scratch.ipynb
apache-2.0
import tensorflow as tf import numpy as np """ Explanation: Text classification from scratch Authors: Mark Omernick, Francois Chollet<br> Date created: 2019/11/06<br> Last modified: 2020/05/17<br> Description: Text sentiment classification starting from raw text files. Introduction This example shows how to do text classification starting from raw text (as a set of text files on disk). We demonstrate the workflow on the IMDB sentiment classification dataset (unprocessed version). We use the TextVectorization layer for word splitting & indexing. Setup End of explanation """ !curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz !tar -xf aclImdb_v1.tar.gz """ Explanation: Load the data: IMDB movie review sentiment classification Let's download the data and inspect its structure. End of explanation """ !ls aclImdb !ls aclImdb/test !ls aclImdb/train """ Explanation: The aclImdb folder contains a train and test subfolder: End of explanation """ !cat aclImdb/train/pos/6248_7.txt """ Explanation: The aclImdb/train/pos and aclImdb/train/neg folders contain text files, each of which represents one review (either positive or negative): End of explanation """ !rm -r aclImdb/train/unsup """ Explanation: We are only interested in the pos and neg subfolders, so let's delete the rest: End of explanation """ batch_size = 32 raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory( "aclImdb/train", batch_size=batch_size, validation_split=0.2, subset="training", seed=1337, ) raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory( "aclImdb/train", batch_size=batch_size, validation_split=0.2, subset="validation", seed=1337, ) raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory( "aclImdb/test", batch_size=batch_size ) print(f"Number of batches in raw_train_ds: {raw_train_ds.cardinality()}") print(f"Number of batches in raw_val_ds: {raw_val_ds.cardinality()}") print(f"Number of batches in raw_test_ds: {raw_test_ds.cardinality()}") """ Explanation: You can use the utility tf.keras.preprocessing.text_dataset_from_directory to generate a labeled tf.data.Dataset object from a set of text files on disk filed into class-specific folders. Let's use it to generate the training, validation, and test datasets. The validation and training datasets are generated from two subsets of the train directory, with 20% of samples going to the validation dataset and 80% going to the training dataset. Having a validation dataset in addition to the test dataset is useful for tuning hyperparameters, such as the model architecture, for which the test dataset should not be used. Before putting the model out into the real world however, it should be retrained using all available training data (without creating a validation dataset), so its performance is maximized. When using the validation_split & subset arguments, make sure to either specify a random seed, or to pass shuffle=False, so that the validation & training splits you get have no overlap. End of explanation """ # It's important to take a look at your raw data to ensure your normalization # and tokenization will work as expected. We can do that by taking a few # examples from the training set and looking at them. # This is one of the places where eager execution shines: # we can just evaluate these tensors using .numpy() # instead of needing to evaluate them in a Session/Graph context. 
for text_batch, label_batch in raw_train_ds.take(1): for i in range(5): print(text_batch.numpy()[i]) print(label_batch.numpy()[i]) """ Explanation: Let's preview a few samples: End of explanation """ from tensorflow.keras.layers import TextVectorization import string import re # Having looked at our data above, we see that the raw text contains HTML break # tags of the form '<br />'. These tags will not be removed by the default # standardizer (which doesn't strip HTML). Because of this, we will need to # create a custom standardization function. def custom_standardization(input_data): lowercase = tf.strings.lower(input_data) stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ") return tf.strings.regex_replace( stripped_html, f"[{re.escape(string.punctuation)}]", "" ) # Model constants. max_features = 20000 embedding_dim = 128 sequence_length = 500 # Now that we have our custom standardization, we can instantiate our text # vectorization layer. We are using this layer to normalize, split, and map # strings to integers, so we set our 'output_mode' to 'int'. # Note that we're using the default split function, # and the custom standardization defined above. # We also set an explicit maximum sequence length, since the CNNs later in our # model won't support ragged sequences. vectorize_layer = TextVectorization( standardize=custom_standardization, max_tokens=max_features, output_mode="int", output_sequence_length=sequence_length, ) # Now that the vocab layer has been created, call `adapt` on a text-only # dataset to create the vocabulary. You don't have to batch, but for very large # datasets this means you're not keeping spare copies of the dataset in memory. # Let's make a text-only dataset (no labels): text_ds = raw_train_ds.map(lambda x, y: x) # Let's call `adapt`: vectorize_layer.adapt(text_ds) """ Explanation: Prepare the data In particular, we remove &lt;br /&gt; tags. End of explanation """ def vectorize_text(text, label): text = tf.expand_dims(text, -1) return vectorize_layer(text), label # Vectorize the data. train_ds = raw_train_ds.map(vectorize_text) val_ds = raw_val_ds.map(vectorize_text) test_ds = raw_test_ds.map(vectorize_text) # Do async prefetching / buffering of the data for best performance on GPU. train_ds = train_ds.cache().prefetch(buffer_size=10) val_ds = val_ds.cache().prefetch(buffer_size=10) test_ds = test_ds.cache().prefetch(buffer_size=10) """ Explanation: Two options to vectorize the data There are 2 ways we can use our text vectorization layer: Option 1: Make it part of the model, so as to obtain a model that processes raw strings, like this: python text_input = tf.keras.Input(shape=(1,), dtype=tf.string, name='text') x = vectorize_layer(text_input) x = layers.Embedding(max_features + 1, embedding_dim)(x) ... Option 2: Apply it to the text dataset to obtain a dataset of word indices, then feed it into a model that expects integer sequences as inputs. An important difference between the two is that option 2 enables you to do asynchronous CPU processing and buffering of your data when training on GPU. So if you're training the model on GPU, you probably want to go with this option to get the best performance. This is what we will do below. If we were to export our model to production, we'd ship a model that accepts raw strings as input, like in the code snippet for option 1 above. This can be done after training. We do this in the last section. End of explanation """ from tensorflow.keras import layers # A integer input for vocab indices. 
inputs = tf.keras.Input(shape=(None,), dtype="int64") # Next, we add a layer to map those vocab indices into a space of dimensionality # 'embedding_dim'. x = layers.Embedding(max_features, embedding_dim)(inputs) x = layers.Dropout(0.5)(x) # Conv1D + global max pooling x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x) x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x) x = layers.GlobalMaxPooling1D()(x) # We add a vanilla hidden layer: x = layers.Dense(128, activation="relu")(x) x = layers.Dropout(0.5)(x) # We project onto a single unit output layer, and squash it with a sigmoid: predictions = layers.Dense(1, activation="sigmoid", name="predictions")(x) model = tf.keras.Model(inputs, predictions) # Compile the model with binary crossentropy loss and an adam optimizer. model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) """ Explanation: Build a model We choose a simple 1D convnet starting with an Embedding layer. End of explanation """ epochs = 3 # Fit the model using the train and test datasets. model.fit(train_ds, validation_data=val_ds, epochs=epochs) """ Explanation: Train the model End of explanation """ model.evaluate(test_ds) """ Explanation: Evaluate the model on the test set End of explanation """ # A string input inputs = tf.keras.Input(shape=(1,), dtype="string") # Turn strings into vocab indices indices = vectorize_layer(inputs) # Turn vocab indices into predictions outputs = model(indices) # Our end to end model end_to_end_model = tf.keras.Model(inputs, outputs) end_to_end_model.compile( loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"] ) # Test it with `raw_test_ds`, which yields raw strings end_to_end_model.evaluate(raw_test_ds) """ Explanation: Make an end-to-end model If you want to obtain a model capable of processing raw strings, you can simply create a new model (using the weights we just trained): End of explanation """
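As a quick, hedged usage sketch (the example sentences are invented for illustration): because the end-to-end model accepts raw strings, new reviews can be scored directly with predict; the sigmoid output is the probability of the positive class.

examples = tf.constant([
    ["The movie was a complete waste of time."],
    ["An absolutely wonderful film with a moving story and great acting."],
])

probabilities = end_to_end_model.predict(examples)
for text, prob in zip(examples.numpy()[:, 0], probabilities):
    print(f"p(positive) = {prob[0]:.3f} for: {text.decode('utf-8')}")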
mtpain/metacorps
notebooks/Results Plots Walkthrough.ipynb
bsd-3-clause
metaphors_url = 'http://metacorps.io/static/viomet-snapshot-project-df.csv' project_df = get_project_data_frame(metaphors_url) print(project_df.columns) """ Explanation: Publically-available .csv for reproducibility I generate two files currently named viomet-snapshot-project-df.csv and viomet-2012-snapshot-project-df.csv, which are the September to Novermber dataframes for 2016 and 2012, respectively. These contain all rows that have been identified as metaphor. These were built using the following commands in Python: python from projects.common import get_project_data_frame df = get_project_data_frame('Viomet Sep-Nov 2016') df.to_csv('/Users/mt/Desktop/viomet-snapshot-project-df.csv', header=True, index=False, na_rep=None) I then uploaded it to the Metacorps server using scp. For completeness I will soon upload the full dataset in .csv form, which will include the potential instances that were either not metaphor or not about politics. This and the other .csv will be made available on a data publishing portal, and mirrored on the Metacorps server. End of explanation """ from viomet_9_10_17 import fit_all_networks import pandas as pd date_range = pd.date_range('2016-9-1', '2016-11-30', freq='D') # uncomment below to run model fits; takes tens of seconds at least fit_networks = fit_all_networks(project_df, date_range=date_range, iatv_corpus_name='Viomet Sep-Nov 2016') print(fit_networks) # set by_network=False to get the fit for all networks taken together fit_sum = fit_all_networks(project_df, by_network=False, date_range=date_range, iatv_corpus_name='Viomet Sep-Nov 2016') print(fit_sum) """ Explanation: Fitting excited state models to each network and all networks Given the project dataframe, the desired date range, and the corresponding IatvCorpus name (xxx need to add downloadable data to read from like project_df xxx), the excited state frequency change model can be calculated for every cable news source, and for the frequency of the sources taken as a whole. By inspecting fit_all_networks, we can dig deeper into how the model fitting works. We'll return to this. For now, notice that fit_networks is a dictionary with three keys, one for each of the cable networks we are studying: 'MSNBCW', 'CNNW', and 'FOXNEWSW'. The W stands for west, since the western version of these were the versions recorded in San Francisco. This information can be confirmed by examining the TVNA metadata blobs for each show. The resulting data, printed to the console below, is presented in tables at the beginning of the Results section. End of explanation """ from viomet_9_10_17 import by_network_frequency_figure partition_infos = {network: fit_networks[network][0] for network in ['MSNBCW', 'CNNW', 'FOXNEWSW']} by_network_frequency_figure( project_df, date_range=date_range, iatv_corpus_name='Viomet Sep-Nov 2016', partition_infos=partition_infos, save_path='Figures/model_fits.pdf' ) from IPython.display import IFrame IFrame("Figures/model_fits.pdf", width=600, height=450) """ Explanation: Visualize model fits overlaid on timeseries data Once we find all the parameters of the models (partition dates and ground/excited state means or "levels") we can plot the model and the data together to compare. 
End of explanation """ soa_dict = subject_object_analysis( project_df, plot=True, save_dir=SAVE_DIR, font_scale=1.5 ) # check that the figures were saved to disk os.listdir(SAVE_DIR) """ Explanation: Trump, Clinton as Subject and Object, and vice-versa End of explanation """ from viomet_9_10_17 import by_facet_word excited, ground = by_facet_word( project_df, partition_infos, facet_words=['attack', 'beat', 'hit'] ) from IPython.display import display print('Excited:') display(excited) print('\nGround:') display(ground) print('\nExcited - Ground:') display(excited - ground) """ Explanation: Violent phrase activating source domain In this calculation, we need the partition dates from all models that we calculated above, stored in partition_infos. We calculate the daily average of the number of times a given violent word was used to activate the source domain. The average daily usage increases disproportionately with attack as the violent word, at least on Fox News. On the other networks, there is a drop in usage of the next most common violent words used, hit, and beat. These appear as tables in the paper. We'll just print out the tables here in the notebook. End of explanation """ IFrame('https://books.google.com/ngrams/graph?content=attack%2Chit%2Cbeat&year_start=2000&year_end=2016&corpus=17&smoothing=3&share=&direct_url=t1%3B%2Cattack%3B%2Cc0%3B.t1%3B%2Chit%3B%2Cc0%3B.t1%3B%2Cbeat%3B%2Cc0', width=650, height=400) """ Explanation: September to November 2012 and $Q$ Two to-dos are coming together below. One is to generate more intuitive and powerful observables. These are outlined and calculated below. The other is to analyze the 2012 data. I'll do both at the same time below, saving plots for the end. Observables: We should avoid terse variables when possible for NHB. We want to calculate in one table: Excited Start Date Excited End Date Ground Frequency Excited Frequency Change in Frequency In another table: Sum total of ground Sum total of excited Excitability quotient = Sum of ground / sum of excited $Q_\alpha$, where $\alpha$ indicates the source domain cross-section of interest. Specifically, we will calculate excitability quotients for cross sections of the specific violent word in the metaphorical construction, so $\alpha \in {\text{attack}, \text{hit}, \text{beat}}$, the three most-common words used for metaphorical violence. We will also look at sums of cross-sections of who is the subject of metaphorical violence, the one who does the metaphorical violence, and the object of the metaphorical violence, or the victim of the metaphorical violence. As for individuals who could be the subject or object of metaphorical violence, we consider the two Republican and Democratic presidential candidates Mitt Romney and Barack Obama in 2012 and Donald Trump and Hillary Clinton in 2016. We will consider each of them as the subject or object, paired with all other objects/subjects except their rival, and then we'll also consider each candidate as the subject/object with their rival the object/subject. Then for 2016 we would have $\alpha \in {(\text{Trump}, \text{All}), (\text{Clinton}, \text{All}), (\text{Trump}, \text{Clinton}), (\text{Clinton}, \text{Trump}), (\text{All}, \text{Trump}), (\text{All}, \text{Clinton})}$. We will calculate total ground state usage and the excitability quotient for each subject/object pair, for each cable news station. 
End of explanation """ from project.common import get_project_data_frame metaphors_url = 'http://metacorps.io/static/data/viomet-2012-snapshot-project-df.csv' project_df = get_project_data_frame(metaphors_url) print(project_df.columns) from viomet_9_10_17 import fit_all_networks import pandas as pd IATV_CORPUS_NAME = 'Viomet Sep-Nov 2012' date_range = pd.date_range('2012-9-1', '2012-11-30', freq='D') # uncomment below to run model fits; takes tens of seconds at least fit_networks = fit_all_networks(project_df, date_range=date_range, iatv_corpus_name=IATV_CORPUS_NAME) from viomet_9_10_17 import by_network_frequency_figure partition_infos = {network: fit_networks[network][0] for network in ['MSNBCW', 'CNNW', 'FOXNEWSW']} by_network_frequency_figure( project_df, date_range=date_range, iatv_corpus_name=IATV_CORPUS_NAME, partition_infos=partition_infos, save_path='Figures/model_fits_2012.pdf' ) from IPython.display import IFrame IFrame("Figures/model_fits_2012.pdf", width=600, height=450) soa_dict = subject_object_analysis( project_df, subj_obj=[ ('Romney', 'Obama'), ('Obama', 'Romney'), ('Romney', None), ('Obama', None), (None, 'Romney'), (None, 'Obama') ], date_range=date_range, plot=True, save_dir=SAVE_DIR, font_scale=1.5 ) from viomet_9_10_17 import by_facet_word excited, ground = by_facet_word( project_df, partition_infos, facet_words=['attack', 'beat', 'hit'] ) from IPython.display import display print('Excited:') display(excited) print('\nGround:') display(ground) print('\nExcited - Ground:') display(excited - ground) """ Explanation: From Google Ngram Viewer, we get that the frequency of attack, hit, and beat are .0067, .0062, and .0034 for their American English corpus in 2008. We can use this to compare frequencies of metaphor with attack, hit, and beat. We could also use the total instances identified through search in our corpus. All this is well and good, now on to calculating these excitability quotients for 2012. End of explanation """
openstreams/wflow
notebooks/wflow-test-sbm.ipynb
gpl-3.0
%pylab inline # First import the model. Here we use the HBV version from wflow.wflow_sbm import * import IPython from IPython.display import display, clear_output #clear_output = IPython.core.display.clear_output """ Explanation: Use of the wflow OpenStreams framework API This ipython notebook demonstrates how to load an openstreams python model and execute it step-by-step and investigate the (intermediate) results. The first steps is to load the model and framework: End of explanation """ # define start and stop time of the run startTime = 1 stopTime = 200 currentTime = 1 # set runid, cl;onemap and casename. Also define the ini file runId = "memtest" #configfile="wflow_hbv_mem.ini" configfile="wflow_sbm.ini" wflow_cloneMap = 'wflow_subcatch.map' # the casename points to the complete model setup with both static and dynamic input caseName="../examples/wflow_rhine_sbm/" #make a usermodel object myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile) # initialise the framework dynModelFw = wf_DynamicFramework(myModel, stopTime,startTime) dynModelFw.createRunId(NoOverWrite=False,level=logging.ERROR) dynModelFw.setQuiet(1) # Run the initial part of the model (reads parameters and sets initial values) dynModelFw._runInitial() # Runs initial part dynModelFw._runResume() # gets the state variables from disk # Get list of variables supplied by the model #print dynModelFw.wf_supplyVariableNamesAndRoles() """ Explanation: Set model run-time parameters Set the: start and time time set the runid (this is where the results are stored, relative to the casename) set the name of the configfile (stire in the case directory set the clone mape (usually the wflow_subcatch.map) set the casename. This is where all the model the model resides End of explanation """ # sm = dynModelFw.wf_supplyMapAsNumpy("UStoreDepth") sm[sm == -999] = np.nan uz = dynModelFw.wf_supplyMapAsNumpy("FirstZoneCapacity") uz[uz == -999] = np.nan imshow(sm) title("UStoreDepth") colorbar() imshow(uz) title("FirstZoneCapacity") colorbar() dynModelFw.wf_setValueLdd("TopoLdd",5.0,6.46823,51.6821) npmap0 = dynModelFw.wf_supplyMapAsNumpy("TopoLdd") ltt = dynModelFw.wf_supplyMapAsList("SurfaceRunoff") """ Explanation: Investigate a couple of model variables End of explanation """ f, ax = plt.subplots(1,3,figsize=(16, 6)) plotar = [] for ts in range(20,80): # Get value at pit inflowQ = dynModelFw.wf_supplyScalar("SurfaceRunoff",5.68962,50.7307) plotar.append(inflowQ) # Add inflow to outflow #dynModelFw.wf_setValue("ForecQ_qmec", -1.0 * inflowQ ,6.46823,51.6821) Resoutflow = inflowQ dynModelFw.wf_setValues("PET",scalar(2.0)) #dynModelFw.wf_setValue("ForecQ_qmec",inflowQ * 1000 ,6.47592,51.7288) # update runoff ONLY NEEDED IF YOU FIDDLE WITH THE KIN_WAVE RESERVOIR myModel.updateRunOff() dynModelFw._runDynamic(ts,ts) # runs for all timesteps #dynModelFw.wf_setValue("SurfaceRunoff",0.0,6.46823,51.6821) #dynModelFw.wf_setValue("SurfaceRunoff",0.0,6.11535,51.8425) npmap0 = dynModelFw.wf_supplyMapAsNumpy("ForecQ_qmec") npmap1 = dynModelFw.wf_supplyMapAsNumpy("P") run = dynModelFw.wf_supplyMapAsNumpy("SurfaceRunoff") uz = dynModelFw.wf_supplyMapAsNumpy("FirstZoneCapacity") sm = dynModelFw.wf_supplyMapAsNumpy("UStoreDepth") sm[sm == -999] = np.nan #dynModelFw.wf_setValues("UpperZoneStorage",uz * 10.1) uz[uz == -999] = np.nan run[run == -999] = np.nan ax[0].imshow(uz) ax[1].imshow(sm) #ax[2].imshow(log(run)) ax[2].plot(plotar,'b') ax[2].set_title(str(ts)) clear_output() display(f) plt.close() dynModelFw._runSuspend() # saves the state variables 
dynModelFw._wf_shutdown() run = dynModelFw.wf_supplyMapAsNumpy("SurfaceRunoff") run[run == -999] = np.nan imshow(log(run)) plotar """ Explanation: Run for a number of timesteps End of explanation """
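A hedged post-processing sketch using only API calls that already appear above: sample the discharge at the gauge point each timestep and keep it as a pandas series for later comparison with observations. In practice this loop would replace or extend the timestep loop above and run before _runSuspend and _wf_shutdown; the timestep range and output filename are illustrative.

import pandas as pd

discharge = {}
for ts in range(startTime, 40):
    dynModelFw._runDynamic(ts, ts)
    # discharge at the same gauge location queried earlier in the notebook
    discharge[ts] = dynModelFw.wf_supplyScalar("SurfaceRunoff", 5.68962, 50.7307)

q_series = pd.Series(discharge, name="SurfaceRunoff")
q_series.to_csv("surface_runoff_gauge.csv")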
hubenjm/scrapediscogs
discogs_client_example.ipynb
gpl-3.0
import discogs_client token = open('./token.txt').readline().strip() print token d = discogs_client.Client('MusicDBCreater/0.1', user_token=token) """ Explanation: discogs_client brief overview: End of explanation """ results = d.search('Bob Dylan - Time Out of Mind', type = 'release') type(results) len(results) print type(results[0]), results[0] """ Explanation: Search for a particular release: End of explanation """ r = results[0] print r.title print r.year print r.artists[0] print r.genres print r.credits print r.tracklist[0].position print r.formats print results[2].tracklist[0].position print r.companies print r.labels print r.id """ Explanation: Go to https://github.com/discogs/discogs_client/blob/master/discogs_client/models.py to find out about the different API objects, including the Release class object End of explanation """
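A short, hedged sketch of how the attributes explored above could be flattened into rows for a music database; the column layout is an illustrative choice, and the .name attribute on the artist and .title attribute on the track are assumed from the client's models module linked above.

rows = []
for track in r.tracklist:
    rows.append({
        'release_id': r.id,
        'release_title': r.title,
        'artist': r.artists[0].name,
        'year': r.year,
        'position': track.position,
        'track_title': track.title,
    })

for row in rows:
    print row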