code (string, lengths 2.5k to 6.36M)
kind (string, 2 classes)
parsed_code (string, lengths 0 to 404k)
quality_prob (float64, 0 to 0.98)
learning_prob (float64, 0.03 to 1)
# Pandas Practice: # REPORT ON BANK ACCOUNTS FOR THE YEARS 2010 AND 2011 Introduction to Programming in Python 2021 by Fernando Carazo ``` %config IPCompleter.greedy=True import pandas as pd ``` ### Load data ``` data = pd.read_excel("Banco.xlsx") data = pd.DataFrame(data) data.head() ``` ### 0. Number of accounts per year. And per year and month? ``` print("Número de cuentas por año:\n\r") print(data.groupby(["Año"]).count()) # Meses diferentes # set(data["Mes"]) print("Numero de cuentas por año y mes") print(data.groupby(["Año", "Mes"]).count()) ``` ### 1. Total client amount (Importe) per year ``` print(data.groupby(["Año"])["Importe"].sum()) ``` ### 2. How much did it increase/decrease in 2011 vs 2010? And by account type? ``` diffImportePorAño = (data.groupby(["Año"])["Importe"].sum()).diff() print(diffImportePorAño) print("\n\r") diffImportePorAñoYTC = (data.groupby(["Año", "Tipo Cta"]).sum()).diff(periods=4) print(diffImportePorAñoYTC.columns) print(diffImportePorAñoYTC) ``` ### 3. How is the amount distributed across branches? And as percentages? Which branch has brought in the most? ``` # print(data.head(5)) sucursalGroup = data.groupby(["Sucursal"]) print("\n\n\nImporte por sucursal en bruto:") importeTotalPorSucursal = sucursalGroup["Importe"].sum() print(importeTotalPorSucursal.sort_values(ascending=False)) total = importeTotalPorSucursal.sum() print("\n\n\nImporte en porcentaje") print(importeTotalPorSucursal / total*100) print("\n\n\nLa sucursal que más ha ganado es:") print(importeTotalPorSucursal[importeTotalPorSucursal == importeTotalPorSucursal.max()]) ``` ### 4. And by bank account type? Which account type should be strengthened? ``` tipoGroup = data.groupby(["Tipo Cta"]) ImportePorTipo = tipoGroup["Importe"].sum() print("Importe por tipo de cuenta:\n\r") print(ImportePorTipo.sort_values(ascending=False)) print("\n\n\nImporte en porcentaje") print((ImportePorTipo / ImportePorTipo.sum() * 100).sort_values(ascending=False)) print("\n\n\nEl tipo de cuenta que hay que fortalecer es:\n\r") print(ImportePorTipo[ImportePorTipo == ImportePorTipo.min()]) ``` ### 5. How are the account types distributed by city? ``` groupCityType = data.groupby(["Sucursal", "Tipo Cta"]) print("Número de cuentas por ciudad y tipo de cuenta:\n\r") pd.DataFrame(groupCityType["Importe"].count()) ``` ### 6. Which account type is the strongest in each city? ``` mejorCuenta = pd.DataFrame(data.groupby(["Sucursal", "Tipo Cta"])["Importe"].sum()) mejorCuenta1 = mejorCuenta.max(level=0).reset_index() mejorTipo = pd.DataFrame(mejorCuenta.max(level=1).reset_index()["Tipo Cta"]) mejorCuenta1["Tipo Cta"] = mejorTipo["Tipo Cta"] mejorCuenta1 ``` ### 7. Which account should be strengthened in Barcelona? And in Madrid? ``` mejorarBarcelona = pd.DataFrame(data[data["Sucursal"] == "Barcelona"].groupby(["Sucursal", "Tipo Cta"])["Importe"].sum()).sort_values("Importe",ascending=False) mejorarBarcelona = mejorarBarcelona[mejorarBarcelona["Importe"] == mejorarBarcelona["Importe"].min()] mejorarBarcelona mejorarMadrid = pd.DataFrame(data[data["Sucursal"] == "Madrid"].groupby(["Sucursal", "Tipo Cta"])["Importe"].sum()).sort_values("Importe",ascending=False) mejorarMadrid = mejorarMadrid[mejorarMadrid["Importe"] == mejorarMadrid["Importe"].min()] mejorarMadrid ``` ### 8. How many new clients were gained in 2010? And in 2011? ``` nClientesAño = pd.DataFrame(data[data["Nuevo Cliente"] == "Sí"].groupby(["Año"])["Nuevo Cliente"].count()) nClientesAño ``` ### 9.
Analyse the new clients. If you were the bank's director, where would you like to improve? ``` nClientesAñoSucursal = pd.DataFrame(data[data["Nuevo Cliente"] == "Sí"].groupby(["Año", "Sucursal"])["Nuevo Cliente"].count()) nClientesAñoSucursal nClientesAñoSucursal2010 = pd.DataFrame(data.loc[(data["Año"] == 2010) & (data["Nuevo Cliente"] == "Sí")].groupby(["Sucursal"])["Importe"].count()) nClientesAñoSucursal2010 = nClientesAñoSucursal2010[nClientesAñoSucursal2010["Importe"] == nClientesAñoSucursal2010["Importe"].min()] nClientesAñoSucursal2010 ``` In this case we would need to improve in Barcelona, since it is the branch that gained the fewest new clients in 2010 ``` nClientesAñoSucursal2011 = pd.DataFrame(data.loc[(data["Año"] == 2011) & (data["Nuevo Cliente"] == "Sí")].groupby(["Sucursal"])["Importe"].count()) nClientesAñoSucursal2011 = nClientesAñoSucursal2011[nClientesAñoSucursal2011["Importe"] == nClientesAñoSucursal2011["Importe"].min()] nClientesAñoSucursal2011 ``` We would also need to improve Salamanca, which gained the fewest new clients in 2011
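As a side note, the per-branch maximum in question 6 can also be computed in a single pass with `idxmax`, which avoids aligning two separately reset tables. This is only a sketch and assumes the same `data` DataFrame and the `Sucursal`, `Tipo Cta` and `Importe` columns used above.
```
# Sketch: strongest account type per branch via idxmax (assumes the columns above)
importePorSucursalYTipo = data.groupby(["Sucursal", "Tipo Cta"])["Importe"].sum()

# idxmax over the branch level returns the (Sucursal, Tipo Cta) pair of each maximum
mejores = importePorSucursalYTipo.groupby(level=0).idxmax()
mejorCuentaPorSucursal = importePorSucursalYTipo.loc[list(mejores)].reset_index()
mejorCuentaPorSucursal
```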
github_jupyter
%config IPCompleter.greedy=True import pandas as pd data = pd.read_excel("Banco.xlsx") data = pd.DataFrame(data) data.head() print("Número de cuentas por año:\n\r") print(data.groupby(["Año"]).count()) # Meses diferentes # set(data["Mes"]) print("Numero de cuentas por año y mes") print(data.groupby(["Año", "Mes"]).count()) print(data.groupby(["Año"])["Importe"].sum()) diffImportePorAño = (data.groupby(["Año"])["Importe"].sum()).diff() print(diffImportePorAño) print("\n\r") diffImportePorAñoYTC = (data.groupby(["Año", "Tipo Cta"]).sum()).diff(periods=4) print(diffImportePorAñoYTC.columns) print(diffImportePorAñoYTC) # print(data.head(5)) sucursalGroup = data.groupby(["Sucursal"]) print("\n\n\nImporte por sucursal en bruto:") importeTotalPorSucursal = sucursalGroup["Importe"].sum() print(importeTotalPorSucursal.sort_values(ascending=False)) total = importeTotalPorSucursal.sum() print("\n\n\nImporte en porcentaje") print(importeTotalPorSucursal / total*100) print("\n\n\nLa sucursal que más ha ganado es:") print(importeTotalPorSucursal[importeTotalPorSucursal == importeTotalPorSucursal.max()]) tipoGroup = data.groupby(["Tipo Cta"]) ImportePorTipo = tipoGroup["Importe"].sum() print("Importe por tipo de cuenta:\n\r") print(ImportePorTipo.sort_values(ascending=False)) print("\n\n\nImporte en porcentaje") print((ImportePorTipo / ImportePorTipo.sum() * 100).sort_values(ascending=False)) print("\n\n\nEl tipo de cuenta que hay que fortalecer es:\n\r") print(ImportePorTipo[ImportePorTipo == ImportePorTipo.min()]) groupCityType = data.groupby(["Sucursal", "Tipo Cta"]) print("Número de cuentas por ciudad y tipo de cuenta:\n\r") pd.DataFrame(groupCityType["Importe"].count()) mejorCuenta = pd.DataFrame(data.groupby(["Sucursal", "Tipo Cta"])["Importe"].sum()) mejorCuenta1 = mejorCuenta.max(level=0).reset_index() mejorTipo = pd.DataFrame(mejorCuenta.max(level=1).reset_index()["Tipo Cta"]) mejorCuenta1["Tipo Cta"] = mejorTipo["Tipo Cta"] mejorCuenta1 mejorarBarcelona = pd.DataFrame(data[data["Sucursal"] == "Barcelona"].groupby(["Sucursal", "Tipo Cta"])["Importe"].sum()).sort_values("Importe",ascending=False) mejorarBarcelona = mejorarBarcelona[mejorarBarcelona["Importe"] == mejorarBarcelona["Importe"].min()] mejorarBarcelona mejorarMadrid = pd.DataFrame(data[data["Sucursal"] == "Madrid"].groupby(["Sucursal", "Tipo Cta"])["Importe"].sum()).sort_values("Importe",ascending=False) mejorarMadrid = mejorarMadrid[mejorarMadrid["Importe"] == mejorarMadrid["Importe"].min()] mejorarMadrid nClientesAño = pd.DataFrame(data[data["Nuevo Cliente"] == "Sí"].groupby(["Año"])["Nuevo Cliente"].count()) nClientesAño nClientesAñoSucursal = pd.DataFrame(data[data["Nuevo Cliente"] == "Sí"].groupby(["Año", "Sucursal"])["Nuevo Cliente"].count()) nClientesAñoSucursal nClientesAñoSucursal2010 = pd.DataFrame(data.loc[(data["Año"] == 2010) & (data["Nuevo Cliente"] == "Sí")].groupby(["Sucursal"])["Importe"].count()) nClientesAñoSucursal2010 = nClientesAñoSucursal2010[nClientesAñoSucursal2010["Importe"] == nClientesAñoSucursal2010["Importe"].min()] nClientesAñoSucursal2010 nClientesAñoSucursal2011 = pd.DataFrame(data.loc[(data["Año"] == 2011) & (data["Nuevo Cliente"] == "Sí")].groupby(["Sucursal"])["Importe"].count()) nClientesAñoSucursal2011 = nClientesAñoSucursal2011[nClientesAñoSucursal2011["Importe"] == nClientesAñoSucursal2011["Importe"].min()] nClientesAñoSucursal2011
0.127625
0.849909
<h1> 1. Exploring natality dataset </h1> This notebook illustrates: <ol> <li> Exploring a BigQuery dataset using Datalab </ol> ``` # change these to try this notebook out BUCKET = 'cloud-training-demos-ml' PROJECT = 'cloud-training-demos' REGION = 'us-central1' import os os.environ['BUCKET'] = BUCKET os.environ['PROJECT'] = PROJECT os.environ['REGION'] = REGION %%bash if ! gsutil ls | grep -q gs://${BUCKET}/; then gsutil mb -l ${REGION} gs://${BUCKET} fi ``` <h2> Explore data </h2> The data is natality data (record of births in the US). My goal is to predict the baby's weight given a number of factors about the pregnancy and the baby's mother. Later, we will want to split the data into training and eval datasets. The hash of the year-month will be used for that -- this way, twins born on the same day won't end up in different cuts of the data. ``` # Create SQL query using natality data after the year 2000 query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 """ # Call BigQuery and examine in dataframe import google.datalab.bigquery as bq df = bq.Query(query + " LIMIT 100").execute().result().to_dataframe() df.head() ``` Let's write a query to find the unique values for each of the columns and the count of those values. This is important to ensure that we have enough examples of each data value, and to verify our hunch that the parameter has predictive value. ``` # Create function that finds the number of records and the average weight for each value of the chosen column def get_distinct_values(column_name): sql = """ SELECT {0}, COUNT(1) AS num_babies, AVG(weight_pounds) AS avg_wt FROM publicdata.samples.natality WHERE year > 2000 GROUP BY {0} """.format(column_name) return bq.Query(sql).execute().result().to_dataframe() # Bar plot to see is_male with avg_wt linear and num_babies logarithmic df = get_distinct_values('is_male') df.plot(x='is_male', y='num_babies', kind='bar'); df.plot(x='is_male', y='avg_wt', kind='bar'); # Line plots to see mother_age with avg_wt linear and num_babies logarithmic df = get_distinct_values('mother_age') df = df.sort_values('mother_age') df.plot(x='mother_age', y='num_babies'); df.plot(x='mother_age', y='avg_wt'); # Bar plot to see plurality(singleton, twins, etc.) with avg_wt linear and num_babies logarithmic df = get_distinct_values('plurality') df = df.sort_values('plurality') df.plot(x='plurality', y='num_babies', logy=True, kind='bar'); df.plot(x='plurality', y='avg_wt', kind='bar'); # Bar plot to see gestation_weeks with avg_wt linear and num_babies logarithmic df = get_distinct_values('gestation_weeks') df = df.sort_values('gestation_weeks') df.plot(x='gestation_weeks', y='num_babies', logy=True, kind='bar'); df.plot(x='gestation_weeks', y='avg_wt', kind='bar'); ``` All these factors seem to play a part in the baby's weight. Male babies are heavier on average than female babies. Teenaged and older moms tend to have lower-weight babies. Twins, triplets, etc. are lower weight than single births. Preemies weigh in lower as do babies born to single moms. In addition, it is important to check whether you have enough data (number of babies) for each input value. Otherwise, the model prediction against input values that doesn't have enough data may not be reliable. 
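The notebook mentions that the `hashmonth` column will later drive the train/eval split. A minimal sketch of what such a split could look like on the dataframe queried above is shown below; the 80/20 modulus rule is an assumption for illustration, not necessarily the split used in the follow-up notebook.
```
# Sketch: hash-based train/eval split on the queried dataframe.
# The 80/20 rule (modulo 5) is an assumed choice for illustration.
train_df = df[df['hashmonth'] % 5 < 4]   # ~80% of year-months
eval_df = df[df['hashmonth'] % 5 == 4]   # ~20% of year-months
print(len(train_df), len(eval_df))
```
Because every record from the same year-month hashes to the same bucket, twins born in the same month always land in the same split.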
<p> In the next notebook, I will develop a machine learning model to combine all of these factors to come up with a prediction of a baby's weight. Copyright 2017-2018 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
github_jupyter
# change these to try this notebook out BUCKET = 'cloud-training-demos-ml' PROJECT = 'cloud-training-demos' REGION = 'us-central1' import os os.environ['BUCKET'] = BUCKET os.environ['PROJECT'] = PROJECT os.environ['REGION'] = REGION %%bash if ! gsutil ls | grep -q gs://${BUCKET}/; then gsutil mb -l ${REGION} gs://${BUCKET} fi # Create SQL query using natality data after the year 2000 query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 """ # Call BigQuery and examine in dataframe import google.datalab.bigquery as bq df = bq.Query(query + " LIMIT 100").execute().result().to_dataframe() df.head() # Create function that finds the number of records and the average weight for each value of the chosen column def get_distinct_values(column_name): sql = """ SELECT {0}, COUNT(1) AS num_babies, AVG(weight_pounds) AS avg_wt FROM publicdata.samples.natality WHERE year > 2000 GROUP BY {0} """.format(column_name) return bq.Query(sql).execute().result().to_dataframe() # Bar plot to see is_male with avg_wt linear and num_babies logarithmic df = get_distinct_values('is_male') df.plot(x='is_male', y='num_babies', kind='bar'); df.plot(x='is_male', y='avg_wt', kind='bar'); # Line plots to see mother_age with avg_wt linear and num_babies logarithmic df = get_distinct_values('mother_age') df = df.sort_values('mother_age') df.plot(x='mother_age', y='num_babies'); df.plot(x='mother_age', y='avg_wt'); # Bar plot to see plurality(singleton, twins, etc.) with avg_wt linear and num_babies logarithmic df = get_distinct_values('plurality') df = df.sort_values('plurality') df.plot(x='plurality', y='num_babies', logy=True, kind='bar'); df.plot(x='plurality', y='avg_wt', kind='bar'); # Bar plot to see gestation_weeks with avg_wt linear and num_babies logarithmic df = get_distinct_values('gestation_weeks') df = df.sort_values('gestation_weeks') df.plot(x='gestation_weeks', y='num_babies', logy=True, kind='bar'); df.plot(x='gestation_weeks', y='avg_wt', kind='bar');
0.55254
0.97367
This notebook will be purely focusing on the modeling part. I will start off by installing the GPU variant of TensorFlow 2.0 (Colab gives you Tesla T4 GPUs for free). **Note** that this notebook does not explore the dataset. If you are interested in that, check this [EDA notebook](https://github.com/sayakpaul/TF-2.0-Hacks/blob/master/Predicting%20publisher's%20name%20from%20an%20article%20with%20TF%202.0/Modeling_with_TensorFlow_2_0_and_Keras.ipynb). ``` !pip install tensorflow-gpu==2.0.0-alpha0 ``` I have the data serialized already. So, I will just load it and remove the index column, which is a result of my inattentiveness. ``` import pandas as pd import numpy as np train = pd.read_csv('data/train.csv') valid = pd.read_csv('data/valid.csv') test = pd.read_csv('data/test.csv') train.drop('Unnamed: 0', axis='columns', inplace=True) valid.drop('Unnamed: 0', axis='columns', inplace=True) test.drop('Unnamed: 0', axis='columns', inplace=True) train.columns train.source.value_counts() ``` I like to define, at the very beginning, the constants and their values that will be used throughout the modeling process. Here, I will define: - A mapping for encoding the sources to integers (computers understand numbers) - The total size of the vocabulary that will be formed from the titles - The maximum sequence length, which will be needed for the preprocessing steps ``` # Label encode CLASSES = {'blogspot': 0, 'github': 1, 'techcrunch': 2, 'nytimes': 3} # Maximum vocabulary size used for tokenization TOP_K = 20000 # Sentences will be truncated/padded to this length MAX_SEQUENCE_LENGTH = 50 ``` Next I will define a small helper function which takes a DataFrame and will - prepare a list of titles from the DataFrame - take the sources from the DataFrame, map them to integers and append them to a NumPy array ``` def return_data(df): return list(df['title']), np.array(df['source'].map(CLASSES)) # Apply it to the three splits train_text, train_labels = return_data(train) valid_text, valid_labels = return_data(valid) test_text, test_labels = return_data(test) train_text[0], train_labels[0] # TensorFlow imports import tensorflow as tf from tensorflow.keras.preprocessing import sequence, text from tensorflow.keras import models from tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D # Create a vocabulary from training corpus tokenizer = text.Tokenizer(num_words=TOP_K) tokenizer.fit_on_texts(train_text) # Save token dictionary to use during inference import pickle pickle.dump(tokenizer, open('tokenizer.pickled', 'wb')) ``` I am going to use the `GloVe` embeddings to represent the words in the titles as a dense representation. The embeddings are more than 650 MB, and the GCP team has them stored in a Google Storage Bucket. This is going to be incredibly helpful since it will allow me to use them directly at a very fast speed. ``` !gsutil cp gs://cloud-training-demos/courses/machine_learning/deepdive/09_sequence/text_classification/glove.6B.200d.txt glove.6B.200d.txt ``` I will now define a helper function which will map the words in the titles to their GloVe embeddings. In the literature this is often referred to as an _embedding matrix_.
``` def get_embedding_matrix(word_index, embedding_path, embedding_dim): embedding_matrix_all = {} with open(embedding_path) as f: for line in f: # Every line contains word followed by the vector value values = line.split() word = values[0] coefs = np.asarray(values[1:], dtype='float32') embedding_matrix_all[word] = coefs # Prepare embedding matrix with just the words in our word_index dictionary num_words = min(len(word_index) + 1, TOP_K) embedding_matrix = np.zeros((num_words, embedding_dim)) for word, i in word_index.items(): if i >= TOP_K: continue embedding_vector = embedding_matrix_all.get(word) if embedding_vector is not None: # Words not found in embedding index will be all-zeros. embedding_matrix[i] = embedding_vector return embedding_matrix ``` Let's now define the hyperparameters for the network and also define the constants. ``` filters=64 dropout_rate=0.2 embedding_dim=200 kernel_size=3 pool_size=3 word_index=tokenizer.word_index embedding_path = '/content/glove.6B.200d.txt' embedding_dim=200 ``` ### Model building Here I am using a CNN which would basically start by convolving on the embeddings fed to it. Locality is important in sequential data and CNNs would allow me to capture that effectively. The trick is to do all the fundamental CNN operations (convolution, pooling) in 1D. ``` # Create model instance model = models.Sequential() num_features = min(len(word_index) + 1, TOP_K) # Add embedding layer - GloVe embeddings model.add(Embedding(input_dim=num_features, output_dim=embedding_dim, input_length=MAX_SEQUENCE_LENGTH, weights=[get_embedding_matrix(word_index, embedding_path, embedding_dim)], trainable=True)) model.add(Dropout(rate=dropout_rate)) model.add(Conv1D(filters=filters, kernel_size=kernel_size, activation='relu', bias_initializer='he_normal', padding='same')) model.add(MaxPooling1D(pool_size=pool_size)) model.add(Conv1D(filters=filters * 2, kernel_size=kernel_size, activation='relu', bias_initializer='he_normal', padding='same')) model.add(GlobalAveragePooling1D()) model.add(Dropout(rate=dropout_rate)) model.add(Dense(len(CLASSES), activation='softmax')) # Compile model with learning parameters. optimizer = tf.keras.optimizers.Adam(lr=0.001) model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc']) tf.keras.utils.plot_model(model, to_file='cnn_txt_cls.png') ``` ### Data preprocessing: Tokenize + Pad ``` # Preprocess the train, validation and test sets # Tokenize and pad sentences tokenizer = pickle.load( open( "tokenizer.pickled", "rb" ) ) preproc_train = tokenizer.texts_to_sequences(train_text) preproc_train = sequence.pad_sequences(preproc_train, maxlen=MAX_SEQUENCE_LENGTH) preproc_valid = tokenizer.texts_to_sequences(valid_text) preproc_valid = sequence.pad_sequences(preproc_valid, maxlen=MAX_SEQUENCE_LENGTH) preproc_test = tokenizer.texts_to_sequences(test_text) preproc_test = sequence.pad_sequences(preproc_test, maxlen=MAX_SEQUENCE_LENGTH) ``` ### Model training ``` H = model.fit(preproc_train, train_labels, validation_data=(preproc_valid, valid_labels), batch_size=128, epochs=10, verbose=1) ``` The model overfits :( But still let's evaluate it. ``` model.evaluate(preproc_test, test_labels) ``` I am now interested in predicting on a few latest title samples which I gathered from Hacker News. 
``` # Helper function to test on single samples def test_on_single_sample(text): category = None text_tokenized = tokenizer.texts_to_sequences(text) text_tokenized = sequence.pad_sequences(text_tokenized,maxlen=50) prediction = int(model.predict_classes(text_tokenized)) for key, value in CLASSES.items(): if value==prediction: category=key return category # Prepare the samples github=['Invaders game in 512 bytes'] nytimes = ['Michael Bloomberg Promises $500M to Help End Coal'] techcrunch = ['Facebook plans June 18th cryptocurrency debut'] blogspot = ['Android Security: A walk-through of SELinux'] for sample in [github, nytimes, techcrunch, blogspot]: print(test_on_single_sample(sample)) ``` ### Further directions: Just like in the image domain, where we expect models that understand the domain to be robust against certain transformations like rotation and translation, in the sequence domain it's important that models be robust to changes in the length of the pattern. Keeping that in mind, here's a list of what I would try in the near future: - Try other sequence models - A bit of hyperparameter tuning - Learn the embeddings from scratch - Try different embeddings like the universal sentence encoder, nnlm-128 and so on ### References and resources: - [Guide on Text Classification](https://developers.google.com/machine-learning/guides/text-classification/) by Google - [Deep Learning for Time Series Forecasting](https://machinelearningmastery.com/deep-learning-for-time-series-forecasting/) by Jason Brownlee (Machine Learning Mastery) - [Deep learning with Python](https://www.manning.com/books/deep-learning-with-python) by François Chollet - [Sequence Models for Time Series and Natural Language Processing](https://www.coursera.org/learn/sequence-models-tensorflow-gcp/home/welcome), a course designed and developed by the Google Cloud team (offered via Coursera)
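Since the notebook notes that the model overfits, one low-effort mitigation to add to the list above is to stop training once the validation loss stops improving. The sketch below assumes the same `model` and preprocessed arrays defined earlier; the patience value is an arbitrary choice.
```
# Sketch: early stopping on validation loss (patience value is an assumption)
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=2, restore_best_weights=True
)

H = model.fit(
    preproc_train, train_labels,
    validation_data=(preproc_valid, valid_labels),
    batch_size=128, epochs=10, verbose=1,
    callbacks=[early_stop],
)
```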
github_jupyter
!pip install tensorflow-gpu==2.0.0-alpha0 import pandas as pd import numpy as np train = pd.read_csv('data/train.csv') valid = pd.read_csv('data/valid.csv') test = pd.read_csv('data/test.csv') train.drop('Unnamed: 0', axis='columns', inplace=True) valid.drop('Unnamed: 0', axis='columns', inplace=True) test.drop('Unnamed: 0', axis='columns', inplace=True) train.columns train.source.value_counts() # Label encode CLASSES = {'blogspot': 0, 'github': 1, 'techcrunch': 2, 'nytimes': 3} # Maximum vocabulary size used for tokenization TOP_K = 20000 # Sentences will be truncated/padded to this length MAX_SEQUENCE_LENGTH = 50 def return_data(df): return list(df['title']), np.array(df['source'].map(CLASSES)) # Apply it to the three splits train_text, train_labels = return_data(train) valid_text, valid_labels = return_data(valid) test_text, test_labels = return_data(test) train_text[0], train_labels[0] # TensorFlow imports import tensorflow as tf from tensorflow.keras.preprocessing import sequence, text from tensorflow.keras import models from tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D # Create a vocabulary from training corpus tokenizer = text.Tokenizer(num_words=TOP_K) tokenizer.fit_on_texts(train_text) # Save token dictionary to use during inference import pickle pickle.dump(tokenizer, open('tokenizer.pickled', 'wb')) !gsutil cp gs://cloud-training-demos/courses/machine_learning/deepdive/09_sequence/text_classification/glove.6B.200d.txt glove.6B.200d.txt def get_embedding_matrix(word_index, embedding_path, embedding_dim): embedding_matrix_all = {} with open(embedding_path) as f: for line in f: # Every line contains word followed by the vector value values = line.split() word = values[0] coefs = np.asarray(values[1:], dtype='float32') embedding_matrix_all[word] = coefs # Prepare embedding matrix with just the words in our word_index dictionary num_words = min(len(word_index) + 1, TOP_K) embedding_matrix = np.zeros((num_words, embedding_dim)) for word, i in word_index.items(): if i >= TOP_K: continue embedding_vector = embedding_matrix_all.get(word) if embedding_vector is not None: # Words not found in embedding index will be all-zeros. embedding_matrix[i] = embedding_vector return embedding_matrix filters=64 dropout_rate=0.2 embedding_dim=200 kernel_size=3 pool_size=3 word_index=tokenizer.word_index embedding_path = '/content/glove.6B.200d.txt' embedding_dim=200 # Create model instance model = models.Sequential() num_features = min(len(word_index) + 1, TOP_K) # Add embedding layer - GloVe embeddings model.add(Embedding(input_dim=num_features, output_dim=embedding_dim, input_length=MAX_SEQUENCE_LENGTH, weights=[get_embedding_matrix(word_index, embedding_path, embedding_dim)], trainable=True)) model.add(Dropout(rate=dropout_rate)) model.add(Conv1D(filters=filters, kernel_size=kernel_size, activation='relu', bias_initializer='he_normal', padding='same')) model.add(MaxPooling1D(pool_size=pool_size)) model.add(Conv1D(filters=filters * 2, kernel_size=kernel_size, activation='relu', bias_initializer='he_normal', padding='same')) model.add(GlobalAveragePooling1D()) model.add(Dropout(rate=dropout_rate)) model.add(Dense(len(CLASSES), activation='softmax')) # Compile model with learning parameters. 
optimizer = tf.keras.optimizers.Adam(lr=0.001) model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc']) tf.keras.utils.plot_model(model, to_file='cnn_txt_cls.png') # Preprocess the train, validation and test sets # Tokenize and pad sentences tokenizer = pickle.load( open( "tokenizer.pickled", "rb" ) ) preproc_train = tokenizer.texts_to_sequences(train_text) preproc_train = sequence.pad_sequences(preproc_train, maxlen=MAX_SEQUENCE_LENGTH) preproc_valid = tokenizer.texts_to_sequences(valid_text) preproc_valid = sequence.pad_sequences(preproc_valid, maxlen=MAX_SEQUENCE_LENGTH) preproc_test = tokenizer.texts_to_sequences(test_text) preproc_test = sequence.pad_sequences(preproc_test, maxlen=MAX_SEQUENCE_LENGTH) H = model.fit(preproc_train, train_labels, validation_data=(preproc_valid, valid_labels), batch_size=128, epochs=10, verbose=1) model.evaluate(preproc_test, test_labels) # Helper function to test on single samples def test_on_single_sample(text): category = None text_tokenized = tokenizer.texts_to_sequences(text) text_tokenized = sequence.pad_sequences(text_tokenized,maxlen=50) prediction = int(model.predict_classes(text_tokenized)) for key, value in CLASSES.items(): if value==prediction: category=key return category # Prepare the samples github=['Invaders game in 512 bytes'] nytimes = ['Michael Bloomberg Promises $500M to Help End Coal'] techcrunch = ['Facebook plans June 18th cryptocurrency debut'] blogspot = ['Android Security: A walk-through of SELinux'] for sample in [github, nytimes, techcrunch, blogspot]: print(test_on_single_sample(sample))
0.752922
0.930427
<img src="../fasp/runner/credits/images/nb1.jpg" style="float: right;"> ### TCGA and GTEx This variant of the GTEX TCGA workflow uses FASPRunner, which is simply called twice in succession with the relevant Search and WES clients. As the DRS ids returned by the searches are prefixed with CURIEs, DRSMetaResolver can be used as the DRS Client in both cases. ``` from fasp.search import DiscoverySearchClient, Gen3ManifestClient from fasp.loc import DRSMetaResolver, anvilDRSClient from fasp.runner import FASPRunner faspRunner = FASPRunner() runNote = 'GTEX and TCGA via FASPRunner' ``` The following sets up the clients to handle the TCGA data. Note that the DRS ids are prefixed with CURIEs (crdc for the Cancer Research Data Commons and anv for Anvil). This indicates which namespace the ids come from and allows the referenced file to be retrieved from the correct DRS server. Note that for the data in the Google Cloud we are using GCPLSsamtools, a fasp class which accesses Google Cloud's Life Science Pipeline API. The plan is to replace that with the DNA Stack WES server when that is updated. ``` # TCGA Query - CRDC crdcquery = """ SELECT 'case_'||associated_entities__case_gdc_id case_id, 'crdc:'||file_id drs_id FROM search_cloud.cshcodeathon.gdc_rel24_filedata_active where data_format = 'BAM' and project_disease_type = 'Breast Invasive Carcinoma' limit 3""" searchClient = DiscoverySearchClient('https://ga4gh-search-adapter-presto-public.prod.dnastack.com/') drsClient = DRSMetaResolver() from fasp.workflow import GCPLSsamtools settings = faspRunner.settings gcplocation = 'projects/{}/locations/{}'.format(settings['GCPProject'], settings['GCPPipelineRegion']) wesClient = GCPLSsamtools(gcplocation, settings['GCPOutputBucket']) faspRunner.configure(searchClient, drsClient, wesClient) runList = faspRunner.runQuery(crdcquery, runNote) ``` A Search and WES client are then set up to work with the Anvil data. The Search client here is a placeholder that searches a local file. That file contains file ids downloaded as a manifest from the Gen3 Anvil portal. The list of files in that manifest had already been filtered to the relevant samples. The anv: DRS prefix was added in an edited version of the file. #Todo check what access_ids DRSMetaresolver is using for each run ``` from fasp.workflow import sbcgcWESClient searchClient = Gen3ManifestClient('../fasp/data/gtex/gtex-cram-manifest.json') drsClient = anvilDRSClient('~/.keys/anvil_credentials.json', '', 's3') wesClient = sbcgcWESClient(settings['SevenBridgesProject']) faspRunner.configure(searchClient, drsClient, wesClient) runList2 = faspRunner.runQuery(3, runNote) faspRunner.getFASPicon() faspRunner.rollCredits() ```
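To make the CURIE idea concrete: a prefixed DRS id such as `crdc:...` carries its namespace before the colon, and a resolver can use that prefix to pick the correct DRS server. The snippet below is only an illustration of that lookup; the prefix-to-server mapping and the object id are made-up placeholders, not what DRSMetaResolver actually uses internally.
```
# Illustration only: how a CURIE prefix selects a namespace / DRS server
DRS_SERVER_BY_PREFIX = {                                     # placeholder mapping
    "crdc": "https://crdc-drs.example.org/ga4gh/drs/v1",     # placeholder URL
    "anv": "https://anvil-drs.example.org/ga4gh/drs/v1",     # placeholder URL
}

def split_curie(drs_id):
    prefix, _, object_id = drs_id.partition(":")
    return prefix, object_id

prefix, object_id = split_curie("crdc:some-object-id")       # hypothetical id
print(DRS_SERVER_BY_PREFIX[prefix], object_id)
```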
github_jupyter
from fasp.search import DiscoverySearchClient, Gen3ManifestClient from fasp.loc import DRSMetaResolver, anvilDRSClient from fasp.runner import FASPRunner faspRunner = FASPRunner() runNote = 'GTEX and TCGA via FASPRunner' # TCGA Query - CRDC crdcquery = """ SELECT 'case_'||associated_entities__case_gdc_id case_id, 'crdc:'||file_id drs_id FROM search_cloud.cshcodeathon.gdc_rel24_filedata_active where data_format = 'BAM' and project_disease_type = 'Breast Invasive Carcinoma' limit 3""" searchClient = DiscoverySearchClient('https://ga4gh-search-adapter-presto-public.prod.dnastack.com/') drsClient = DRSMetaResolver() from fasp.workflow import GCPLSsamtools settings = faspRunner.settings gcplocation = 'projects/{}/locations/{}'.format(settings['GCPProject'], settings['GCPPipelineRegion']) wesClient = GCPLSsamtools(gcplocation, settings['GCPOutputBucket']) faspRunner.configure(searchClient, drsClient, wesClient) runList = faspRunner.runQuery(crdcquery, runNote) from fasp.workflow import sbcgcWESClient searchClient = Gen3ManifestClient('../fasp/data/gtex/gtex-cram-manifest.json') drsClient = anvilDRSClient('~/.keys/anvil_credentials.json', '', 's3') wesClient = sbcgcWESClient(settings['SevenBridgesProject']) faspRunner.configure(searchClient, drsClient, wesClient) runList2 = faspRunner.runQuery(3, runNote) faspRunner.getFASPicon() faspRunner.rollCredits()
0.327561
0.706849
Periodic DMRG and Calculations ============== Here we demonstrate 2-site periodic DMRG for finding the groundstate of the spin-1/2 Heisenberg model, and performing a couple of calculations efficiently with the resulting periodic MPS. ``` from quimb import * from quimb.tensor import * H = MPO_ham_heis(300, cyclic=True) ``` ``quimb`` has the function ``heisenberg_energy`` which can calculate the analytic energy we are looking for: ``` E_exact = heisenberg_energy(300) E_exact ``` Let's create the core DMRG object that handles all the algorithm: ``` dmrg = DMRG2(H) ``` `DMRG2` internally forms the needed energy and norm overlaps, reusing views of the same data. We can graph, for example, the full energy expectation: ``` %matplotlib inline dmrg.TN_energy.draw(color=['_KET', '_HAM', '_BRA']) # might be slow as uses force repulsion ``` Or if we want to plot with fixed positions: ``` from cmath import exp, pi fix = { **{(f'I{i}', '_KET'): (100 * exp(2j*pi * i / 300).real, 100 * exp(2j*pi * i / 300).imag) for i in range(300)}, **{(f'I{i}', '_HAM'): (105 * exp(2j*pi * i / 300).real, 105 * exp(2j*pi * i / 300).imag) for i in range(300)}, **{(f'I{i}', '_BRA'): (110 * exp(2j*pi * i / 300).real, 110 * exp(2j*pi * i / 300).imag) for i in range(300)}, } dmrg.TN_energy.draw(color=['_KET', '_HAM', '_BRA'], fix=fix, iterations=0) ``` The default algorithm settings are reasonable enough to get started with: ``` dmrg.solve(max_sweeps=4, verbosity=1, cutoffs=1e-6) ``` We are getting pretty close to the known energy already (closer than OBC at this length can get). The relative error is: ``` (dmrg.energy - E_exact) / abs(E_exact) ``` Note that for PBC, the algorithm splits the chain into segments, and approximates the other segments with a SVD (the accuracies of the energies above are limited by this). Thus progress appears to pause at these points. The number of singular values kept for this environment approximation is recorded in ``dmrg.bond_sizes_ham`` and ``dmrg.bond_sizes_norm``: ``` dmrg.bond_sizes_norm dmrg.bond_sizes_ham ``` To progress further might require tweaking the advanced options, for example, setting tighter tolerances for some of the settings found in: ``` dmrg.opts ``` See ``quimb.tensor.tensor_dmrg.get_default_opts`` for detailed explanations of these quantities. One could also supply custom sequences for the maximum allowed bond dimensions (e.g. ``dmrg.solve(..., bond_dims=[70, 80, 90])``) or bond compression cutoffs (e.g. ``dmrg.solve(..., cutoffs=[1e-9, 3e-10, 1e-10])``). PBC DMRG error is, in particular, limited by the segment compression tolerances. The full state can be retrieved from ``dmrg.state``: ``` gs = dmrg.state gs.max_bond() ``` Z-Correlations ------------- We could then calculate the ground-state z-correlations for example. ``MatrixProductState.correlation`` internally uses ``quimb.tensor.expect_TN_1D`` which can perform transfer matrix compression in order to efficiently compute expectations. ``` sz = spin_operator('Z').real gs.correlation(sz, 0, 1) %debug ``` However, if one was computing this for many sites, it would make sense to manually reuse parts of each contraction. 
For example, if we are only interested in the first ``n`` sites, we can approximate the rest with an SVD: ``` # Set up an overlap p = dmrg.state p.add_tag('KET') q = p.H.retag({'KET': 'BRA'}) qp = q & p # Replace all but 20 sites with an SVD qp.replace_section_with_svd(20, 300, eps=1e-6, inplace=True, ltags='L', rtags='R') qp.draw(color=['BRA', 'KET', 'L', 'R']) ``` Now we can define a correlation function on this much smaller network: ``` def sz_corr(i, j): itag = f"I{i}" jtag = f"I{j}" qp_i = qp.insert_operator(sz, ('KET', itag), ('BRA', itag)) c_i = qp_i ^ all qp_j = qp.insert_operator(sz, ('KET', jtag), ('BRA', jtag)) c_j = qp_j ^ all qp_ij = qp_i.insert_operator(sz, ('KET', jtag), ('BRA', jtag)) c_ij = qp_ij ^ all return c_ij - c_i * c_j ``` We can then use this to compute these correlations efficiently: ``` js = range(1, 20) cs = [sz_corr(0, j) for j in js] import matplotlib.pyplot as plt plt.plot(js, cs) ``` Which looks as expected. Compressed Density Matrix ------------------------ For operators on more than a few qubits we can compute a compressed density matrix. E.g. for 50 + 50 = 100 qubits: ``` sysa = range(0, 50) sysb = range(50, 100) rho_ab = gs.partial_trace_compress(sysa, sysb, max_bond=2**6, method='isvd') rho_ab.ind_sizes() ``` Let's plot this: ``` # specify some coordinates to plot the remaining tensors fix = {('_UP', '_SYSA'): (-1, +1), ('_DOWN', '_SYSA'): (-1, -1), 'kA': (-1, 1.5), 'bA': (-1, -1.5), ('_UP', '_SYSB'): (+1, +1), ('_DOWN', '_SYSB'): (+1, -1), 'kB': (+1, 1.5), 'bB': (+1, -1.5)} rho_ab.draw(color=['_SYSA', '_ENVR', '_SYSB'], show_inds=False, fix=fix) ``` You can see that because the state has PBC, there is a split 'environment' tensor carrying correlations the 'long-way-round'. We can also check it's still normalized: ``` rho_ab.trace(['kA', 'kB'], ['bA', 'bB']) ``` We could also estimate the genuine entanglement between the two subsystems. First we convert the compressed representation into a dense matrix, whilst also partially transposing one side: ``` # form single tensor rho_ab_d = rho_ab ^ all # turn tensor into a normal array whilst also partially transposing rho_ab_pt_d = rho_ab_d.to_dense(['kA', 'bB'], ['bA', 'kB']) rho_ab_pt_d.shape ``` Finally compute $\log_2 \left\| \rho_{AB}^{T_B} \right\|_1$, the trace norm of the partially transposed density matrix: ``` E = log2(sum(abs(eigvalsh(rho_ab_pt_d)))) ``` Which gives the logarithmic negativity between the two regions as (approximately, because of the limited bond in the compression): ``` E ```
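For reference, the quantity computed in the last cell is the logarithmic negativity. Since $\rho_{AB}^{T_B}$ is Hermitian, its trace norm is the sum of the absolute values of its eigenvalues $\lambda_i$, which is exactly what `log2(sum(abs(eigvalsh(rho_ab_pt_d))))` evaluates:

$$
\mathcal{E}(\rho_{AB}) = \log_2 \left\lVert \rho_{AB}^{T_B} \right\rVert_1 = \log_2 \sum_i \left| \lambda_i \right| .
$$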
github_jupyter
from quimb import * from quimb.tensor import * H = MPO_ham_heis(300, cyclic=True) E_exact = heisenberg_energy(300) E_exact dmrg = DMRG2(H) %matplotlib inline dmrg.TN_energy.draw(color=['_KET', '_HAM', '_BRA']) # might be slow as uses force repulsion from cmath import exp, pi fix = { **{(f'I{i}', '_KET'): (100 * exp(2j*pi * i / 300).real, 100 * exp(2j*pi * i / 300).imag) for i in range(300)}, **{(f'I{i}', '_HAM'): (105 * exp(2j*pi * i / 300).real, 105 * exp(2j*pi * i / 300).imag) for i in range(300)}, **{(f'I{i}', '_BRA'): (110 * exp(2j*pi * i / 300).real, 110 * exp(2j*pi * i / 300).imag) for i in range(300)}, } dmrg.TN_energy.draw(color=['_KET', '_HAM', '_BRA'], fix=fix, iterations=0) dmrg.solve(max_sweeps=4, verbosity=1, cutoffs=1e-6) (dmrg.energy - E_exact) / abs(E_exact) dmrg.bond_sizes_norm dmrg.bond_sizes_ham dmrg.opts gs = dmrg.state gs.max_bond() sz = spin_operator('Z').real gs.correlation(sz, 0, 1) %debug # Set up an overlap p = dmrg.state p.add_tag('KET') q = p.H.retag({'KET': 'BRA'}) qp = q & p # Replace all but 20 sites with an SVD qp.replace_section_with_svd(20, 300, eps=1e-6, inplace=True, ltags='L', rtags='R') qp.draw(color=['BRA', 'KET', 'L', 'R']) def sz_corr(i, j): itag = f"I{i}" jtag = f"I{j}" qp_i = qp.insert_operator(sz, ('KET', itag), ('BRA', itag)) c_i = qp_i ^ all qp_j = qp.insert_operator(sz, ('KET', jtag), ('BRA', jtag)) c_j = qp_j ^ all qp_ij = qp_i.insert_operator(sz, ('KET', jtag), ('BRA', jtag)) c_ij = qp_ij ^ all return c_ij - c_i * c_j js = range(1, 20) cs = [sz_corr(0, j) for j in js] import matplotlib.pyplot as plt plt.plot(js, cs) sysa = range(0, 50) sysb = range(50, 100) rho_ab = gs.partial_trace_compress(sysa, sysb, max_bond=2**6, method='isvd') rho_ab.ind_sizes() # specify some coordinates to plot the remaining tensors fix = {('_UP', '_SYSA'): (-1, +1), ('_DOWN', '_SYSA'): (-1, -1), 'kA': (-1, 1.5), 'bA': (-1, -1.5), ('_UP', '_SYSB'): (+1, +1), ('_DOWN', '_SYSB'): (+1, -1), 'kB': (+1, 1.5), 'bB': (+1, -1.5)} rho_ab.draw(color=['_SYSA', '_ENVR', '_SYSB'], show_inds=False, fix=fix) rho_ab.trace(['kA', 'kB'], ['bA', 'bB']) # form single tensor rho_ab_d = rho_ab ^ all # turn tensor into a normal array whilst also partially transposing rho_ab_pt_d = rho_ab_d.to_dense(['kA', 'bB'], ['bA', 'kB']) rho_ab_pt_d.shape E = log2(sum(abs(eigvalsh(rho_ab_pt_d)))) E
0.408513
0.957893
# Custom pre-processors with the V2 protocol Most of the time, the requests that we send to our model need some kind of processing. For example, extra information may need to be fetched (e.g. from a feature store), or processed, in order to obtain the actual tensors required by the model. One example for this use case are NLP models, where natural language needs first to be tokenised according to a vocabulary, or embedded by a 2nd model. In this tutorial, we will focus on this latter scenario. In particular, we will explore how to deploy a _tokeniser_ pre-transformer that converts our natural language text to tokens. This tokeniser will then be part of an inference graph, so that its output gets routed to a [GPT-2 model deployed using Triton](https://docs.seldon.io/projects/seldon-core/en/latest/examples/triton_gpt2_example.html). > **NOTE**: The tokeniser logic and the Triton artifacts are taken from the [GPT-2 Model example](https://docs.seldon.io/projects/seldon-core/en/latest/examples/triton_gpt2_example.html). To learn more about these, feel free to check that tutorial. ![Inference graph with tokeniser and GPT-2 model](./gpt2-graph.svg) ## Creating a Tokeniser In order to create a custom pre-processing step, the first step will be to [write a **custom runtime**](https://mlserver.readthedocs.io/en/latest/runtimes/custom.html) using [MLServer](https://mlserver.readthedocs.io/en/latest/). MLServer is a production-grade inference server, whose main goal is to ease up the serving of models through a REST and gRPC interface compatible with the [V2 Inference Protocol](https://kserve.github.io/website/modelserving/inference_api/). As well as an inference server, MLServer also exposes a *framework* which can be leveraged to easily **write your custom inference runtimes**. These custom runtimes can be used to write any custom logic, including (you guessed it!) our tokeniser pre-processor. Therefore, we will start by extending the base `mlserver.MLModel` class, adding our custom logic. Note that this logic is taken (almost) verbatim from the [GPT-2 Model example](https://docs.seldon.io/projects/seldon-core/en/latest/examples/triton_gpt2_example.html). 
``` %%writefile tokeniser/runtime.py import os from mlserver import MLModel from mlserver.types import InferenceRequest, InferenceResponse from mlserver.codecs import NumpyCodec from mlserver.codecs.string import StringRequestCodec, StringCodec from transformers import GPT2Tokenizer TOKENIZER_TYPE_ENV_NAME = "SELDON_TOKENIZER_TYPE" TOKENIZER_TYPE_ENCODE = "ENCODER" class Tokeniser(MLModel): async def load(self) -> bool: self._tokeniser = GPT2Tokenizer.from_pretrained("gpt2") self._tokenizer_type = os.environ.get(TOKENIZER_TYPE_ENV_NAME, TOKENIZER_TYPE_ENCODE) self.ready = True return self.ready async def predict(self, inference_request: InferenceRequest) -> InferenceResponse: outputs = None if self._tokenizer_type == TOKENIZER_TYPE_ENCODE: sentences = StringRequestCodec.decode(inference_request) tokenised = self._tokeniser(sentences, return_tensors="np") outputs = [] for name, payload in tokenised.items(): inference_output = NumpyCodec.encode(name=name, payload=payload) # Transformer's TF GPT2 model expects `INT32` inputs by default, so # let's enforce them inference_output.datatype = "INT32" outputs.append(inference_output) else: logits = NumpyCodec.decode(inference_request.inputs[0]) # take the best next token probability of the last token of input ( greedy approach) next_token = logits.argmax(axis=2)[0] next_token_str = self._tokeniser.decode( next_token[-1:], skip_special_tokens=True, clean_up_tokenization_spaces=True ).strip() outputs = [StringCodec.encode("next_token", [next_token_str])] return InferenceResponse( model_name=self.name, model_version=self.version, outputs=outputs ) ``` Note that the pre-processing logic is implemented in the `predict()` method. At the moment, the MLServer framework doesn't expose the concept of pre- and post-processing. However, it's possible to implement this is a _"pseudo-model"_, thus relying on the service orchestrator of Seldon Core, who will be responsible of chaining the output of our tokeniser to the next model. ### Requirements and default model settings Besides writing the logic of our custom runtime, we will also need to provide the extra requirements that will be used by our environment. This can be done through a plain `requirements.txt` file. Alternatively, for a finer control, it'd also be possible to leverage [Conda's environment files](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#create-env-file-manually) to specify our environment. ``` %%writefile tokeniser/requirements.txt mlserver==1.0.1 transformers==4.12.3 ``` On top of this, we will also add a `model-settings.json` file with the default settings for our model. MLServer uses these files to provide extra configuration (e.g. number of parallel workers, adaptive batching configuration, etc.) for each model. In our case, we will use this file to tell MLServer that it should always use our custom runtime by default and name our models as `tokeniser` (unless other name is specified). ``` %%writefile tokeniser/model-settings.json { "implementation": "runtime.Tokeniser" } ``` ### Testing our tokeniser > **NOTE**: To test our custom runtime locally, we will need to install the same set of dependencies that will be bundled and deployed remotely. To achieve this, we can re-use the environment that was described on the previous section: ```bash pip install -r ./tokeniser/requirements.txt ``` Since we're leveraging MLServer to write our custom pre-processor, it should be **easy to test it locally**. 
For this, we will start MLServer using the [`mlserver start` subcommand](https://mlserver.readthedocs.io/en/latest/reference/cli.html#mlserver-start). Note that this command has to be carried out on a separate terminal: ```bash mlserver start ./tokeniser ``` We can then send a test request using `curl` as follows: ``` %%bash curl localhost:8080/v2/models/tokeniser/infer \ -H 'Content-Type: application/json' \ -d '{"inputs": [{"name": "sentences", "datatype": "BYTES", "shape": [1, 11], "data": "hello world"}]}' \ | python -m json.tool ``` As we can see above, the input `hello world` gets tokenised into `[31373, 995]`, thus confirming that our custom runtime is working as expected locally. ### Building the image Once we have our custom code tested and ready, we should be able to build our custom image by using the [`mlserver build` subcommand](https://mlserver.readthedocs.io/en/latest/reference/cli.html#mlserver-build). This image will be created under the `gpt2-tokeniser:0.1.0` tag. ``` %%bash mlserver build ./tokeniser --tag seldonio/gpt2-tokeniser:0.1.0 ``` ## Deploying our inference graph Now that we have our custom tokeniser built and ready, we are able to deploy it alongside our GPT-2 model. This can be achieved through a `SeldonDeployment` manifest which **links both models**. That is, our tokeniser, plus the actual GPT-2 model. As outlined above, this manifest will re-use the image and resources built in the [GPT-2 Model example](https://docs.seldon.io/projects/seldon-core/en/latest/examples/triton_gpt2_example.html), which is accessible from GCS. > **NOTE:** This manifest expects that the `gpt2-tokeniser:0.1.0` image built in the previous section **is accessible** from within the cluster where Seldon Core has been installed. If you are [using `kind`](https://docs.seldon.io/projects/seldon-core/en/latest/install/kind.html), you should be able to load the image into your local cluster with the following command: ```bash kind load docker-image gpt2-tokeniser:0.1.0 ``` ``` %%writefile seldondeployment.yaml apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment metadata: name: gpt2 spec: protocol: v2 predictors: - name: default graph: name: tokeniser-encoder children: - name: gpt2 implementation: TRITON_SERVER modelUri: gs://seldon-models/triton/onnx_gpt2 children: - name: tokeniser-decoder componentSpecs: - spec: containers: - name: tokeniser-encoder image: seldonio/gpt2-tokeniser:0.1.0 env: # Use always a writable HuggingFace cache location regardless of the user - name: TRANSFORMERS_CACHE value: /opt/mlserver/.cache - name: MLSERVER_MODEL_NAME value: "tokeniser-encoder" - name: tokeniser-decoder image: seldonio/gpt2-tokeniser:0.1.0 env: - name: SELDON_TOKENIZER_TYPE value: "DECODER" # Use always a writable HuggingFace cache location regardless of the user - name: TRANSFORMERS_CACHE value: /opt/mlserver/.cache - name: MLSERVER_MODEL_NAME value: "tokeniser-decoder" ``` The final step will be to apply this manifest into the cluster, where Seldon Core is running. For example, to deploy the manifest into the `models` namespace, we could run the following command: ``` !kubectl apply -f seldondeployment.yaml ``` ### Testing our deployed inference graph Finally, we can test that our deployed inference graph is working as expected by sending a request. 
If we assume that our cluster can be reached in `localhost:8003`, we can send a request using `cURL` as: ``` %%bash curl localhost:80/seldon/default/gpt2/v2/models/infer \ -H 'Content-Type: application/json' \ -d '{"inputs": [{"name": "sentences", "datatype": "BYTES", "shape": [1, 11], "data": ["Seldon Technologies is very"]}]}' ``` As we can see above, our plain-text request is now going successfully through the `tokeniser`, acting as a pre-processor, whose output then gets routed to the actual GPT-2 model.
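The same V2 inference request can also be sent from Python rather than `cURL`. The sketch below simply mirrors the JSON payload of the previous cell and assumes the same ingress address.
```
# Sketch: send the same V2 inference request with the requests library
import requests

payload = {
    "inputs": [
        {
            "name": "sentences",
            "datatype": "BYTES",
            "shape": [1, 11],
            "data": ["Seldon Technologies is very"],
        }
    ]
}

resp = requests.post(
    "http://localhost:80/seldon/default/gpt2/v2/models/infer",  # same endpoint as the cURL call
    json=payload,
)
print(resp.json())
```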
github_jupyter
%%writefile tokeniser/runtime.py import os from mlserver import MLModel from mlserver.types import InferenceRequest, InferenceResponse from mlserver.codecs import NumpyCodec from mlserver.codecs.string import StringRequestCodec, StringCodec from transformers import GPT2Tokenizer TOKENIZER_TYPE_ENV_NAME = "SELDON_TOKENIZER_TYPE" TOKENIZER_TYPE_ENCODE = "ENCODER" class Tokeniser(MLModel): async def load(self) -> bool: self._tokeniser = GPT2Tokenizer.from_pretrained("gpt2") self._tokenizer_type = os.environ.get(TOKENIZER_TYPE_ENV_NAME, TOKENIZER_TYPE_ENCODE) self.ready = True return self.ready async def predict(self, inference_request: InferenceRequest) -> InferenceResponse: outputs = None if self._tokenizer_type == TOKENIZER_TYPE_ENCODE: sentences = StringRequestCodec.decode(inference_request) tokenised = self._tokeniser(sentences, return_tensors="np") outputs = [] for name, payload in tokenised.items(): inference_output = NumpyCodec.encode(name=name, payload=payload) # Transformer's TF GPT2 model expects `INT32` inputs by default, so # let's enforce them inference_output.datatype = "INT32" outputs.append(inference_output) else: logits = NumpyCodec.decode(inference_request.inputs[0]) # take the best next token probability of the last token of input ( greedy approach) next_token = logits.argmax(axis=2)[0] next_token_str = self._tokeniser.decode( next_token[-1:], skip_special_tokens=True, clean_up_tokenization_spaces=True ).strip() outputs = [StringCodec.encode("next_token", [next_token_str])] return InferenceResponse( model_name=self.name, model_version=self.version, outputs=outputs ) %%writefile tokeniser/requirements.txt mlserver==1.0.1 transformers==4.12.3 %%writefile tokeniser/model-settings.json { "implementation": "runtime.Tokeniser" } pip install -r ./tokeniser/requirements.txt ``` Since we're leveraging MLServer to write our custom pre-processor, it should be **easy to test it locally**. For this, we will start MLServer using the [`mlserver start` subcommand](https://mlserver.readthedocs.io/en/latest/reference/cli.html#mlserver-start). Note that this command has to be carried out on a separate terminal: We can then send a test request using `curl` as follows: As we can see above, the input `hello world` gets tokenised into `[31373, 995]`, thus confirming that our custom runtime is working as expected locally. ### Building the image Once we have our custom code tested and ready, we should be able to build our custom image by using the [`mlserver build` subcommand](https://mlserver.readthedocs.io/en/latest/reference/cli.html#mlserver-build). This image will be created under the `gpt2-tokeniser:0.1.0` tag. ## Deploying our inference graph Now that we have our custom tokeniser built and ready, we are able to deploy it alongside our GPT-2 model. This can be achieved through a `SeldonDeployment` manifest which **links both models**. That is, our tokeniser, plus the actual GPT-2 model. As outlined above, this manifest will re-use the image and resources built in the [GPT-2 Model example](https://docs.seldon.io/projects/seldon-core/en/latest/examples/triton_gpt2_example.html), which is accessible from GCS. > **NOTE:** This manifest expects that the `gpt2-tokeniser:0.1.0` image built in the previous section **is accessible** from within the cluster where Seldon Core has been installed. 
If you are [using `kind`](https://docs.seldon.io/projects/seldon-core/en/latest/install/kind.html), you should be able to load the image into your local cluster with the following command: The final step will be to apply this manifest into the cluster, where Seldon Core is running. For example, to deploy the manifest into the `models` namespace, we could run the following command: ### Testing our deployed inference graph Finally, we can test that our deployed inference graph is working as expected by sending a request. If we assume that our cluster can be reached in `localhost:8003`, we can send a request using `cURL` as:
0.82559
0.933188
``` import numpy as np import matplotlib.pyplot as plt from theory.fitTau import xMean, xVariance, xStationaryMean, xStationaryVariance fN = "tau.eta0.X10" # model params X0 = 10 nAgents = 1000000 eta = 0 epsi1 = 3 epsi2 = 5 # observation params startTime = 1e-6 endTime = 3 obs = 100 data = np.loadtxt(f"data/{fN}.data",delimiter=",").astype(float) data[data==0] = 0.5 data[data==nAgents] = nAgents - 0.5 xSeries = data/nAgents T = np.logspace(np.log10(startTime), np.log10(endTime), num=obs) eMean = np.mean(xSeries,axis=0) tMean = xMean(T, X0/nAgents, eta, epsi1, epsi2) lMean = np.ones(T.shape)*xStationaryMean(eta, epsi1, epsi2) eVar = np.var(xSeries,axis=0) tVar = xVariance(T, X0/nAgents, eta, epsi1, epsi2) lVar = np.ones(T.shape)*xStationaryVariance(eta, epsi1, epsi2) plt.figure(figsize=(10,3)) plt.subplot(121) plt.loglog() plt.plot(T, eMean, "ro") plt.plot(T, tMean, "k-") plt.plot(T, lMean, "k--") plt.subplot(122) plt.loglog() plt.plot(T, eVar, "ro") plt.plot(T, tVar, "k-") plt.plot(T, lVar, "k--") plt.show() out = np.vstack((T, eMean, eVar, tMean, tVar, lMean, lVar)) np.savetxt(f"data/{fN}.csv", np.log10(out).T, fmt="%.4f", delimiter=",") fN = "tau.eta-100.X10" # model params X0 = 10 nAgents = 1000000 eta = -1 epsi1 = 3 epsi2 = 5 # observation params startTime = 1e-6 endTime = 3 obs = 100 data = np.loadtxt(f"data/{fN}.data",delimiter=",").astype(float) data[data==0] = 0.5 data[data==nAgents] = nAgents - 0.5 xSeries = data/nAgents T = np.logspace(np.log10(startTime), np.log10(endTime), num=obs) eMean = np.mean(xSeries,axis=0) tMean = xMean(T, X0/nAgents, eta, epsi1, epsi2) lMean = np.ones(T.shape)*xStationaryMean(eta, epsi1, epsi2) eVar = np.var(xSeries,axis=0) tVar = xVariance(T, X0/nAgents, eta, epsi1, epsi2) lVar = np.ones(T.shape)*xStationaryVariance(eta, epsi1, epsi2) plt.figure(figsize=(10,3)) plt.subplot(121) plt.loglog() plt.plot(T, eMean, "ro") plt.plot(T, tMean, "k-") plt.plot(T, lMean, "k--") plt.subplot(122) plt.loglog() plt.plot(T, eVar, "ro") plt.plot(T, tVar, "k-") plt.plot(T, lVar, "k--") plt.show() out = np.vstack((T, eMean, eVar, tMean, tVar, lMean, lVar)) np.savetxt(f"data/{fN}.csv", np.log10(out).T, fmt="%.4f", delimiter=",") fN = "tau.eta50.X10" # model params X0 = 10 nAgents = 1000000 eta = 0.5 epsi1 = 3 epsi2 = 5 # observation params startTime = 1e-6 endTime = 3 obs = 100 data = np.loadtxt(f"data/{fN}.data",delimiter=",").astype(float) data[data==0] = 0.5 data[data==nAgents] = nAgents - 0.5 xSeries = data/nAgents T = np.logspace(np.log10(startTime), np.log10(endTime), num=obs) eMean = np.mean(xSeries,axis=0) tMean = xMean(T, X0/nAgents, eta, epsi1, epsi2) lMean = np.ones(T.shape)*xStationaryMean(eta, epsi1, epsi2) eVar = np.var(xSeries,axis=0) tVar = xVariance(T, X0/nAgents, eta, epsi1, epsi2) lVar = np.ones(T.shape)*xStationaryVariance(eta, epsi1, epsi2) plt.figure(figsize=(10,3)) plt.subplot(121) plt.loglog() plt.plot(T, eMean, "ro") plt.plot(T, tMean, "k-") plt.plot(T, lMean, "k--") plt.subplot(122) plt.loglog() plt.plot(T, eVar, "ro") plt.plot(T, tVar, "k-") plt.plot(T, lVar, "k--") plt.show() out = np.vstack((T, eMean, eVar, tMean, tVar, lMean, lVar)) np.savetxt(f"data/{fN}.csv", np.log10(out).T, fmt="%.4f", delimiter=",") ```
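The three cells above repeat the same load/compare/export logic with different parameters. The sketch below is just a refactoring of the code already shown into one helper (plotting omitted), so each case only differs in the arguments passed in.
```
# Sketch: wrap the repeated analysis in one function (same logic as the cells above)
def run_case(fN, X0, eta, nAgents=1000000, epsi1=3, epsi2=5,
             startTime=1e-6, endTime=3, obs=100):
    data = np.loadtxt(f"data/{fN}.data", delimiter=",").astype(float)
    data[data == 0] = 0.5
    data[data == nAgents] = nAgents - 0.5
    xSeries = data / nAgents

    T = np.logspace(np.log10(startTime), np.log10(endTime), num=obs)
    eMean = np.mean(xSeries, axis=0)
    tMean = xMean(T, X0 / nAgents, eta, epsi1, epsi2)
    lMean = np.ones(T.shape) * xStationaryMean(eta, epsi1, epsi2)
    eVar = np.var(xSeries, axis=0)
    tVar = xVariance(T, X0 / nAgents, eta, epsi1, epsi2)
    lVar = np.ones(T.shape) * xStationaryVariance(eta, epsi1, epsi2)

    out = np.vstack((T, eMean, eVar, tMean, tVar, lMean, lVar))
    np.savetxt(f"data/{fN}.csv", np.log10(out).T, fmt="%.4f", delimiter=",")
    return T, eMean, eVar, tMean, tVar, lMean, lVar

# e.g. run_case("tau.eta0.X10", X0=10, eta=0)
```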
# Python3 Example In this example we will show ho to use pdnd-explorer. The libraries imported into the docker are those released by jupyter in this [docker file](https://github.com/jupyter/docker-stacks/tree/master/datascience-notebook). And follow the realeases from jupyter [docker stacks](https://github.com/jupyter/docker-stacks/). For a deepening you can read [this](https://jupyter-docker-stacks.readthedocs.io/en/latest/), we for the moment are using the datascience docker image with all libraries, and programming languages imported. #### Import some libs. Very important import these libraries in all notebooks using this project. ``` import pandas as pd import requests from io import StringIO import os pd.options.display.html.table_schema = True pd.set_option('display.max_rows', 5000) ``` #### Search for a dataset and load it ``` url = "https://api.daf.teamdigitale.it/dataset-manager/v1/dataset/daf%3A%2F%2Fopendata%2Flecce_o_quoziente_i_immigrazione_ed_emigrazione_1?format=json" payload = "" headers = {'authorization': 'Bearer YOU_MUST_BE_LOGGEDIN'} response = requests.request("GET", url, data=payload, headers=headers) emigrazione = pd.read_json(StringIO(response.text)) emigrazione ``` #### Clean the dataset ``` emigrazione.loc[emigrazione['Anno'] == 2.1, 'Anno'] = 2010 emigrazione.loc[emigrazione['Anno'] == 2.11, 'Anno'] = 2011 emigrazione ``` #### Open a new dataset with same dimensions for merging ``` url = "https://api.daf.teamdigitale.it/dataset-manager/v1/dataset/daf%3A%2F%2Fopendata%2Flecce_o_quoziente_i_immigrazione_ed_emigrazione_0?format=json" payload = "" headers = {'authorization': 'Bearer YOU_MUST_BE_LOGGEDIN'} response = requests.request("GET", url, data=payload, headers=headers) immigrazione = pd.read_json(StringIO(response.text)) immigrazione ``` Merge the dataset above into one ``` emigrazione_immigrazione = pd.merge(emigrazione, immigrazione[['Anno', 'Totale immigrati']], on='Anno', how='left') emigrazione_immigrazione emigrazione_immigrazione ``` #### Save the resulted dataset. Remember this is an experimental feature only EDITOR and ADMIN user for a specific PA can use this feature. By the way if you are doing interesting analyses make a PR on the github repo with you notebook file. ``` data = pd.DataFrame.from_dict(emigrazione_immigrazione) if 'processing_dttm' in data.columns: data.drop('processing_dttm', axis=1, inplace=True) data.columns = data.columns.str.replace(' ', '_') data.to_csv("daf_data_emigrazione_immigrazione_demo_101.csv", sep=';', encoding='utf-8', index=False) url = "http://localhost:8080/pdnd-openapi/dataset/save" file = open('./daf_data_emigrazione_immigrazione_demo_101.csv', 'rb').read() files = [ ("file", ("daf_data_emigrazione_immigrazione_demo_101", file, "text/csv")), ] metadata = { 'name' : 'emigrazione_immigrazione_demo_101', 'theme' : 'SOCI', 'subtheme' : 'demografia', 'org' : 'daf_data', 'user' : 'd_ale', 'description' : 'emigrazione_immigrazione_demo_101' } headers = {'authorization': 'Bearer YOU_MUST_BE_LOGGEDIN'} response = requests.request("POST", url, data=metadata, files=files, headers=headers) os.remove('./daf_data_emigrazione_immigrazione_demo_101.csv') print(response.text) ```
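Both dataset loads above repeat the same GET-plus-`pd.read_json` pattern, with the bearer token pasted inline as a placeholder. A small sketch of how that boilerplate could be wrapped once is shown below; the `fetch_daf_dataset` helper and the `DAF_TOKEN` environment variable are illustrative names (not part of the pdnd/DAF API), and the endpoint URLs are the ones already used above.

```
import os
from io import StringIO

import pandas as pd
import requests

def fetch_daf_dataset(url):
    """GET a dataset from the dataset-manager API and load the JSON payload into a DataFrame."""
    token = os.environ["DAF_TOKEN"]  # export DAF_TOKEN=... before starting the notebook
    headers = {"authorization": f"Bearer {token}"}
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    return pd.read_json(StringIO(response.text))

base = "https://api.daf.teamdigitale.it/dataset-manager/v1/dataset"
emigrazione = fetch_daf_dataset(f"{base}/daf%3A%2F%2Fopendata%2Flecce_o_quoziente_i_immigrazione_ed_emigrazione_1?format=json")
immigrazione = fetch_daf_dataset(f"{base}/daf%3A%2F%2Fopendata%2Flecce_o_quoziente_i_immigrazione_ed_emigrazione_0?format=json")
```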
<a href="https://colab.research.google.com/github/Tanu-N-Prabhu/Python/blob/master/Exploratory%20Data%20Analysis/Exploratory_data_Analysis_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Exploring the data using python. ## In this tutorial, we will use the exploratory data analysis approach to summarize and analyze the main characteristics of a cars data set. ![alt text](https://www.statistika.co/images/services/Exploratory%20Data%20Analysis%20-%20EDA%201000x468.jpg) ## Let us understand how to explore the data using python and later build a machine learning model on that data in the next tutorial. ## 1) Choosing a Data Set I chose a data-set titled “Cars” data from Kaggle the author of this data set is Lilit Janughazyan [1]. From my childhood, I was interested in and fascinated about cars. I still remember I used to maintain a book wherein I used to stick all the pictures of different cars along with its specifications. I was more up to date about the latest cars and their specifications. I was more like a specs sheet remembering almost all information about cars explaining people about different cars available in the market. And it was my dream when I was young that I wanted to predict the prices of cars given its specifications. With the help of this interest, I wanted to choose a data set based on Cars in this assignment. I wanted to fulfill my dream of creating a model that would be fed with the specifications of the cars such as Horsepower, Cylinders or Engine Size, and then the model should predict the price of the car based on these specifications. The data set can be found here: [Cars dataset](https://www.kaggle.com/ljanjughazyan/cars1) The main reason for me choosing the data set over others was that there were almost 110 data sets about cars under the most voted category in Kaggle (the most voted meaning the best and famous collection of data sets that are available on Kaggle) almost all these data sets had one or the other features missing. For example, the data set “Automobile Data set” [2] had most features of a car but did not have a price feature in it, which is the most important feature according to my interest. Hence I took a lot of time short-listing many data sets and then I concluded the “Cars” data set because this data set had almost every important feature of a car such as Horsepower, MSRP, Invoice, Cylinders, Engine Size and many more because of these good features this was the main reason I chose this data set over the other data sets available in Kaggle. This data set was straightaway stored in a CSV (Comma Separated Value) format on Kaggle. I did not have to perform any operations to get the data into a format. Since the data was already in a CSV format it needed very little work to import the data set all I had to do is just download, read the CSV data and store it in a pandas data frame, for this I had to import pandas library.  --- ## 2) Obtaining the data To get or load the dataset into the notebook, all I did was one trivial step. In Google Colab at the left-hand side of the notebook, you will find a “> “(greater than symbol). On clicking that you will find a tab with three options, out of which you have to select Files. Then you can easily upload your dataset with the help of the Upload option. No need to mount to the google drive or use any specific libraries just upload the data set and your job is done. This is how I got the dataset into my notebook. 
--- ## 3) Scrubbing and Formatting **Formatting the data into a data frame** Since the data set was already in a CSV format. All I had to do is just format the data into a pandas data frame. This was done by using a pandas data frame method called (read_csv) by importing pandas library. The read_csv data frame method was used by passing the filename as an argument. And then by executing this, it converted the CSV file into a neatly organized pandas data frame format. ``` # Importing the required libraries import pandas as pd import numpy as np import seaborn as sns #visualisation import matplotlib.pyplot as plt #visualisation %matplotlib inline sns.set(color_codes=True) # Loading the CSV file into a pandas dataframe. df = pd.read_csv("CARS.csv") df.head(5) ``` --- **Determining instances and the number of features.** This data set has 428 instances and 15 features also called as rows and columns. The instances here represent different car brands such as BMW, Mercedes, Audi, and 35 more, features represent Make, Model, Type, Origin, Drive Train, MSRP, Invoice, Engine Size, Cylinders, Horsepower, MPG-City, MPG-Highway, Weight, Wheelbase, and Length of the car. --- **Removing irrelevant features.** I will remove some features such as Drive Train, Model, Invoice, Type, and Origin from this dataset. Because these features do not contribute to the prediction of price. As of now, I will remove the Drive Train, the Drive Train will not support for predicting the price of the car because most of the cars in this data set were front-wheel drive (52.8%) and the rest were rear wheel and all wheel drive. Similarly, the model, type and origin are irrelevant and are not needed in this context, it’s the brand which is important not the model of the car, and when it comes to type of the car most of the cars were of type Sedan and I kept the weight and length features of the cars in which case I can easily determine whether if it’s an SUV, Sedan or a truck. I will also be removing the Invoice feature of the car because I have the MSRP as the price I don't need the invoice because having any one type of price of the car makes more sense and it prevents in leading in ambiguous results (because both MSRP and Invoice are very closely related and you cannot predict the MSRP given the invoice). Lastly, the origin of cars has nothing to do with the prediction rate so I had to remove it and most of the cars were originated from Europe. ``` # Removing irrelevant features df = df.drop(['Model','DriveTrain','Invoice', 'Origin', 'Type'], axis=1) df.head(5) ``` --- ## 4) Exploratory Data Analysis **Identifying the type of data using info()** To identify the data types, I use the info method. The info method prints a summary of the data in the data frame along with its data types. Here, there are 428 entries (0-427 rows). The data frame after removing irrelevant columns comprises 10 columns. Here the Make, MSRP are of an object type whereas Engine size and Cylinders are of float type and Horsepower, MPG_City, MPG_Highway, Weight, Wheelbase and Length are of integer type. Hence there are 2 object types, 2 float types and 6 integer types of data present in the data frame. ``` # To identify the type of data df.info() ``` --- **Finding the dimensions of the data frame** To get the number of rows and columns of the data frame, I used the shape method. The shape method gets the number of rows and the number of columns of the data frame. Here, there are 428 rows and 10 columns. Hence the shape method returns (428, 10). 
And to find the dimensions of the data frame I used ndim (dimension) method. This method prints the dimensions of the data frame. Here, the whole data frame is of 2 dimensional (rows and columns). ``` # Getting the number of instances and features df.shape # Getting the dimensions of the data frame df.ndim ``` --- **Finding the duplicate data.** This is a handy thing to perform on a data set because often there might be duplicate or redundant data in the data sets, to remove this I used the MSRP as a reference such that there cannot be more than two same MSRP prices of the car, it shows that few data are redundant because prices of the cars can never match very accurately. So before removing the duplicates, there were 428 rows and after removing there are 410 meaning that there were 18 duplicate data. ``` df = df.drop_duplicates(subset='MSRP', keep='first') df.count() ``` --- **Finding the missing or null values.** Many times there might be a lot of missing values in the dataset. There are several approaches to deal with this scenario either we can drop those values or fill those values with the mean of that column. Here, 2 entries were having N/A in the Cylinders feature. This can be found by using the is_null( ) method which returns the null or missing values in the data frame. So rather than deleting those two entries, I filled those values with the mean of the cylinders columns and their value came as 6.0 each. I was able to find this while I was peeking at the first and last few rows of the data set. I think rather than deleting this is a good approach because every entry of data is vital. ``` # To peek at first five rows df.head(5) # To peek at last five rows df.tail(5) ``` While using both head and tail method I found out that there were two values stored a NaN (Not a number) in the Cylinders features. So I printed them by using the slicing technique of mentioning their index. ``` # Finding the null values print(df.isnull().sum()) # Printing the null value rows df[240:242] # Filling the rows with the mean of the column val = df['Cylinders'].mean() df['Cylinders'][247] = round(val) val = df['Cylinders'].mean() df['Cylinders'][248]= round(val) ``` --- **Converting the object values to integer type.** While having a look at the data, the MSRP was stored as an object type. This is a serious problem because it is impossible to plot those values on a graph because it is a primary requirement that during plotting a graph all the values must be of type integer data. The author has stored, the MSRP in a different format ($36, 000) so I had to remove the formatting and then convert them to an integer. ``` # Removing the formatting df['MSRP'] = [x.replace('$', '') for x in df['MSRP']] df['MSRP'] = [x.replace(',', '') for x in df['MSRP']] df['MSRP']=pd.to_numeric(df['MSRP'],errors='coerce') ``` --- **Detecting Outliers** An outlier is a point or set of points different from other points. Sometimes they can be very high or very low. It’s often a good idea to detect and remove the outliers. Because outliers are one of the primary reasons for resulting in a less accurate model. Hence it’s a good idea to remove them. I will perform the IQR score technique to detect and remove the outliers. Often outliers can be seen with visualizations using a box plot. Shown below is the box plot of MSRP. In the plot, you can find some points are outside the box they are none other than outliers. 
I referred the above outlier technique from towards data science article which can be found in the references section [3]. ``` sns.boxplot(x=df['MSRP']) Q1 = df.quantile(0.25) Q3 = df.quantile(0.75) IQR = Q3 - Q1 print(IQR) df = df[~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)] ``` After using the technique now as seen below the MSRP box plot contains no outlier points this is a big improvement. Previously there were over 15 points of outliers now I have removed those outliers. ``` sns.boxplot(x=df['MSRP']) ``` --- **Performing a 5 number summary (min, lower quartile, median, upper quartile, max)** Next step is to perform a 5-number summary for the numeric data. As discussed earlier the numeric data, in this case, are MSRP, Engine Size, Horsepower, Cylinders, Horsepower, MPG_City, MPG_Highway, Weight, Wheelbase, and Length. The five-number summary includes minimum, lower quartile, median, upper quartile, and the maximum values all these values can be obtained by using the describe method. ``` df.describe() ``` --- **Plotting different features against one another.** **Histogram** Histogram refers to the frequency of occurrence of variables in an interval. Here, there are mainly 10 different car manufacturing companies, but it is often important to know who has the maximum number of cars. I'm just plotting histograms to find the total number of car manufacturers and this plot does not support my predictions or doesn't have relations with the price feature. Plotting a histogram is one of a trivial solution which lets us know the total number of different car manufacturers. From the histogram below it can be seen that Ford has almost several cars (20) followed by Chevrolet (19) and many more. ``` # Plotting a Histogram df.Make.value_counts().nlargest(40).plot(kind='bar', figsize=(10,5)) plt.title("Number of cars by make") plt.ylabel('Number of cars') plt.xlabel('Make'); ``` **Heat Maps** Heat Maps is a plot which is necessary when we need to find the dependent variables. One of the best ways to find the correlation between the features can be done using heat maps. As shown below the price feature (MSRP) has a strong correlation with Horsepower of 83% this is very important because the more the relationship between the variables the more accurate the model will be. This is how the correlation between the features can be found using heat maps. With the help of heat maps I can use these related features in building my model. ``` # Plotting a heat map plt.figure(figsize=(10,5)) c= df.corr() sns.heatmap(c,cmap="BrBG",annot=True) ``` **Scatterplot between two related varirables** I know the features especially MSRP and the Horsepower are more related. Since I have two related variables I used a scatter plot to show their relationship. Here the scatter plots are plotted between Horsepower and MSRP are as shown below. With the plot given below, we can easily draw a trend line during modeling. I can easily see a line of best fit in the plot. I have not included the scatter plot between MSRP and Engine Size or Cylinders the reason for this is that these data have comparatively less correlation with the MSRP than that of MSRP and Horsepower which is 83%. Because as seen above the correlation between MSRP and Engine Size is of 54% and that of MSRP and Cylinders is of 64% so there is no reason to plot these features. 
``` # Plotting a scatter plot fig, ax = plt.subplots(figsize=(5,5)) ax.scatter(df['Horsepower'], df['MSRP']) plt.title('Scatter plot between MSRP and Horsepower') ax.set_xlabel('Horsepower') ax.set_ylabel('MSRP') plt.show() ``` --- ## 5) Reporting Initial Findings I think there is a high relationship between the MSRP (Price) and the Horsepower feature of the car. I will explore more about that in assignment 4. Now I know my problem statement is “Predicting the price (MSRP) of the car given the specifications of the car”. The main idea is to predict the (MSRP) price of the car. Now I know that I have to predict a value so I should use Regression Algorithms because I have two related features (independent and dependent features). But there are many types of Regression Algorithms such as Linear Regression, Random Forest Regression, Lasso and Ridge Regression and many more. So I might use one of these algorithms and implement a machine learning model to predict the price in assignment 4. Hence this assignment which mainly deals with Exploratory Data Analysis where I prepared my data in such a way that it is now ready for building a model. --- ## References [1] Janjughazyan, L. (2017). Cars Data. [online] Kaggle.com. Available at: https://www.kaggle.com/ljanjughazyan/cars1 [Accessed 15 Aug. 2019]. [2] Srinivasan, R. (2017). Automobile Dataset. [online] Kaggle.com. Available at: https://www.kaggle.com/toramky/automobile-dataset [Accessed 16 Aug. 2019]. [3] Sharma, N. (2018). Ways to Detect and Remove the Outliers. [online] Medium. Available at: https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba [Accessed 15 Aug. 2019].
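One note on the missing-value step above: assigning through `df['Cylinders'][247]` uses chained indexing (which pandas flags with a SettingWithCopyWarning) and hard-codes the row labels. A minimal alternative sketch, assuming the same `df` with a numeric `Cylinders` column, fills every missing cylinder count in a single vectorised call:

```
# Fill all missing Cylinders values with the column mean, rounded to a whole number of cylinders
mean_cylinders = round(df['Cylinders'].mean())
df['Cylinders'] = df['Cylinders'].fillna(mean_cylinders)

# Sanity check: no missing cylinder counts should remain
assert df['Cylinders'].isnull().sum() == 0
```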
``` for row in range(6): for col in range(6): if row==0 and col==1 or row==0 and col==2 or row==3 and col==1 or row==3 and col==2 or row==1 and col==0 or row==2 and col==0 or row==1 and col==3 or row==2 and col==3 or row==3 and col==3 or row==4 and col==4: print("*", end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if (col==0)or(row==2 and col<=3)or(row==4 and col<=3)or(row==3 and col==4): print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if row==0 and col==1 or row==0 and col==2 or row==0 and col==3 or row==4 and col==1 or row==4 and col==2 or row==4 and col==3 or row==1 and col==0 or row==2 and col==0 or row==3 and col==0: print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if col==4 or row==2 and col>=1 or row==4 and col>=1 or row==3 and col==0: print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if row==0 and (col==1 or col==2 or col==3) or (row==2 and (col==1 or col==2 or col==3)) or (row==4 and (col==1 or col==2 or col==3))or(row==1 and col==0)or(row==2 and col==0)or(row==3 and col==0)or (row==1 and col==4)or(row==4 and col==4): print("*",end=" ") else: print(" ",end=" ") print() for row in range(7): for col in range(7): if row==1 and col==3 or row==2 and col==3 or row==3 and col==3 or row==4 and col==3 or row==0 and col==4 or row==0 and col==5 or row==2 and col==4 or row==2 and col==5 or row==1 and col==6 or row==5 and col==3 or row==6 and col==3 or (row==4 and col==1)or(row==4 and col==2)or(row==4 and col==4)or(row==4 and col==5): print("*",end=" ") else: print(" ",end=" ") print() for row in range(8): for col in range(6): if (row==0 and col==2) or (row==0 and col==3) or row==2 and col==2 or row==2 and col==3 or(row==1 and col==1)or (row==1 and col==4)or(row==5 and col==2)or(row==5 and col==3)or(row==7 and col==2)or (row==7 and col==3)or(row==6 and col==1)or(row==6 and col==4)or(row==2 and col==4)or(row==3 and col==4)or(row==4 and col==4)or(row==5 and col==4): print("*", end=" ") else: print(" ",end=" ") print() for row in range(7): for col in range(6): if col==0 or row==3 and (col==1 or col==2 or col==3 or col==4)or(row==4 and col==5)or(row==5 and col==5)or(row==6 and col==5)or(row==7 and col==5): print("*", end=" ") else: print(" ",end=" ") print() for row in range(6): for col in range(6): if col==0 and row>=2 or row==0 and col==0: print("*", end=" ") else: print(" ",end=" ") print() for row in range(7): for col in range(5): if (col==2 and row!=6 and row!=1) or(row==6 and col==1)or(row==5 and col==0)or(row==4 and col==0): print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(3): if (col==0 )or(row==3 and col==1)or(row==2 and col==2)or(row==1 and col==3)or(row==0 and col==4)or(row==4 and col==2)or(row==5 and col==3)or(row==6 and col==4): print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if (col==2 and row!=4)or(row==4 and (col==1 or col==3)): print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(8): if col==1 and row!=0 or col==4 and row!=0 or col==7 and row!=0 or row==0 and col==0 or row==0 and col==2 or row==0 and col==3or row==0 and col==5or row==0 and col==6: print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(8): if col==4 and row!=0 or col==7 and row!=0 or row==0 and col==3or row==0 and col==5 or row==0 and col==6 : print("*",end=" ") else: print(" ",end=" ") 
print() for row in range(4): for col in range(4): if row==0 and col==1 or (row==0 and col==2) or (row==1 and col==0) or (row==1 and col==3) or (row==2 and col==0) or (row==2 and col==3) or (row==3 and col==1) or (row==3 and col==2): print("*", end=" ") else: print(" ",end=" ") print() for row in range(7): for col in range(5): if (col==0 and row!=0)or(row==0 and col==1)or(row==0and col==2)or(row==0 and col==3)or(row==3and col<4)or(row==1 and col==4)or(row==2 and col==4): print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if col==3 and row!=0 or row==0 and col==1 or row==0 and col==2 or row==1 and col==0 or(row==2 and col==1)or(row==2 and col==2): print("*", end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if row==2 and col==2or(row==0 and col==0)or(row==0 and col==4)or(row==3 and col==1)or (row==3 and col==3)or(row==4 and col==2)or(row==1 and col==1)or(row==1 and col==3): print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if (col!=0 and row==0)or(row==2 and col!=0 and col!=4)or(row==4 and col!=4)or(row==1 and col==0)or(row==3 and col==4): print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(6): if (col==2 and row!=4)or(row==4 and (col==3 or col==4))or(row==2 and col==1)or(row==2 and col==3)or(row==4 and col==5): print("*",end=" ") else: print(" ",end=" ") print() for row in range(7): for col in range(7): if (col==0 and row!=6)or(col==4 and row!=6)or(row==6 and col!=0 and col!=4): print("*",end=" ") else: print(" ",end=" ") print() for row in range(7): for col in range(5): if (row==0 and col==0) or (row==0 and col==4)or (row==1 and col==1) or (row==1 and col==3)or(row==2 and col==2):#or(row==2 and col==4)or(row==3 and col==3): print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if (col==1 and row!=4) or (row!=4 and col==3)or(row==2 and col==2): print("*",end=" ") else: print(" ",end=" ") print() for row in range(5): for col in range(5): if row==2 and col==2or(row==0 and col==0)or(row==0 and col==4)or(row==3 and col==1)or (row==3 and col==3)or(row==4 and col==2)or(row==1 and col==1)or(row==1 and col==3)or(row==4 and col==4)or(row==2 and col==3)or(row==2 and col==1): print("*",end=" ") else: print(" ",end=" ") print() for row in range(8): for col in range(5): if col==3 or row==2and col==2 or row==1 and col==1 or row==0 and col==1 or (row==5 and col==1)or(row==6 and col==1)or(row==7 and col==2): print("*",end=" ") else: print(" ",end=" ") print() for row in range(7): for col in range(7): if (row==0)or(row==6)or(row==3 and col==3)or(row==2and col==4)or(row==1and col==5)or(row==4and col==2)or(row==5and col==1)or(row==3): print("*",end=" ") else: print(" ",end=" ") print() ```
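Every pattern above is produced by walking a row/column grid and deciding cell by cell whether to print `*` or a space, which makes the conditions long and hard to modify. As an illustrative alternative (a sketch, not a rewrite of any specific pattern above), each glyph can be stored as a list of strings and rendered by one small helper:

```
def print_pattern(rows):
    """Print a pattern given as a list of equal-length strings, one character per grid cell."""
    for row in rows:
        # join with a space to mimic the end=" " spacing used in the loops above
        print(" ".join(row))

# 'O' shape on a 4x4 grid, equivalent in spirit to the row/col conditionals used above
O_SHAPE = [
    " ** ",
    "*  *",
    "*  *",
    " ** ",
]

print_pattern(O_SHAPE)
```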
# Demo: Defining Control_M Workflows using Python # Step 1 - Setup ## Step 1A - Install the library ``` !pip --version !pip install --upgrade --no-deps --force-reinstall git+https://github.com/tadinve/naga.git from ctm_python_client.core.bmc_control_m import CmJobFlow from ctm_python_client.jobs.dummy import DummyJob ``` # Step 2 - Instantiate, Authenticate and Schedule ## Step 2A - Create the object ``` # Please change the URfrI, and ctm_user and enter ctm_password to match your environment from ctm_python_client.session.session import Session import getpass ctm_uri = "https://acb-rhctmv20.centralus.cloudapp.azure.com:8443/automation-api" ctm_user = "vtadinad" ctm_pwd = "P4ssw0rd" if "ctm_pwd" not in locals(): # has not been enterd once, will skip next time ctm_pwd = getpass.getpass("Enter your Control M Password ") session = Session(endpoint=ctm_uri, username=ctm_user, password=ctm_pwd) session.get_token() t1_flow = CmJobFlow( application="Naga0.3_Examples", sub_application="Demo-OR_JOB", session=session ) ``` ## Step 2B - Define the Schedule ``` t1_flow.set_run_as(username="ctmuser", host="acb-rhctmv20") # Define the schedule months = ["JAN", "OCT", "DEC"] monthDays = ["ALL"] weekDays = ["MON", "TUE", "WED", "THU", "FRI"] fromTime = "0300" toTime = "2100" t1_flow.set_schedule(months, monthDays, weekDays, fromTime, toTime) ``` # Step 3 - Create Folder ``` # Create Fodler f1 = t1_flow.create_folder(name="OR-JOB") ``` # Step 4 - Create Tasks ``` start = t1_flow.add_job(f1, DummyJob(f1, "Start-Flow")) job1 = DummyJob(f1, "Job1") job1.add_if_output("if-true", "*true*", "Job1-TO-Job2") job1.add_if_output("if-flase", "*false*", "Job1-TO-Job3") job1_id = t1_flow.add_job(f1, job1) job2 = DummyJob(f1, "Job2") job2_id = t1_flow.add_job(f1, job2) job3 = DummyJob(f1, "Job3") job3_id = t1_flow.add_job(f1, job3) job4 = DummyJob(f1, "Job4") job4.wait_for_jobs(job1.get_job_name(), job3.get_job_name(), condition="OR") job4_id = t1_flow.add_job(f1, job4) end = t1_flow.add_job(f1, DummyJob(f1, "End-Flow")) ``` # Step 5 - Chain Tasks ``` # start >> hello_world_id >> end t1_flow.chain_jobs(f1, [start, job1_id]) t1_flow.chain_jobs(f1, [job2_id, job3_id]) t1_flow.chain_jobs(f1, [job4_id, end]) ``` # Step 6 - Display Workflow ## Step 6A - Display DAG ``` # View the t1_flow Details nodes, edges = t1_flow.get_nodes_and_edges() nodes, edges # display using graphviz from ctm_python_client.utils.displayDAG import DisplayDAG # sudo apt-get install graphviz (on unix) # or # brew install graphviz (for mac) # DisplayDAG(t1_flow).display_graphviz() ``` ## Step 6B - Display JSON ``` t1_flow.display_json() ``` # Step 7 - Submit Workflow to Control-M ``` t1_flow.deploy() t1_flow.run() ```
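The flow above adds each `DummyJob` and chains the ids by hand. As a small sketch that reuses only calls already shown (`DummyJob`, `add_job`, `chain_jobs`), a linear chain of placeholder jobs can be built from a list of names; `add_linear_chain` is an illustrative helper name, and the sketch assumes `chain_jobs` accepts a longer id list just as it accepts the two-element lists above.

```
def add_linear_chain(flow, folder, names):
    """Create one DummyJob per name, add each to the folder and chain them in the given order."""
    job_ids = [flow.add_job(folder, DummyJob(folder, name)) for name in names]
    flow.chain_jobs(folder, job_ids)
    return job_ids

# Example: a placeholder Prep -> Step1 -> Step2 -> Wrap-Up pipeline inside folder f1
chain_ids = add_linear_chain(t1_flow, f1, ["Prep", "Step1", "Step2", "Wrap-Up"])
```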
# Programming and Database Fundamentals for Data Scientists - EAS503 ## Programming Basics in Python ### Some pythonisms ``` import this ``` ## What does Python give you from get go ### The Python Standard Library > https://docs.python.org/3/library/ - **Built-in Functions** https://docs.python.org/3/library/functions.html - **Keywords** ```python import keyword print(keyword.kwlist) ``` - **Pre-defined Data Types** https://docs.python.org/3/library/stdtypes.html Python supports several operations (see built-in functions) on the data types - **Base Modules** `os`, `sys`, `math`, and many more ... ``` # keywords import keyword print(keyword.kwlist) dir() In type(dir) ``` ### variables - Variables hold values. Use `dir()` to list current set of variables. - Each variable has a `type` that is assigned dynamically and can be changed. - Each variable holds a value - Each variable is essentially a pointer pointing to a memory location <img width="200" src='https://www.python-course.eu/images/python_variable_1.png'/> As the code is executed, the program also maintains the **data** associated with the program. For the converter program, the data consists of the `variable`, f. A `variable` is a name assigned to a data element that is stored in the memory. At the same time, we can also assign names to the functions that we create, e.g., `converter`. All of these assigned names are also called `identifiers`. There are some rules about the naming convention in Python. For instance, every identifier must begin with a letter or underscore character (`_`). This can be followed by any sequence of letters, digits, or characters. No spaces are allowed. Python reserves some identifiers for predefined functions or other utilities and may not be used. We call these **reserved words** or **keywords**. ``` x = 4 hex(id(x)) # memory location x = 4 print(hex(id(x))) x = x + 5 print(hex(id(x))) print(x) ``` ### numeric data types [`int`, `float`, `complex`] Support several numeric operations. Many built-in functions can be applied to them too. ### data types Variables are used to store different types of data, which are then manipulated within the program. Many data types are built-in, including: 1. Boolean 2. Numeric (Integer, Long, Float, Complex) 3. Sequences (Lists, Strings, Tuples, Bytes) 4. Sets 5. Mappings (Dictionary) These will be introduced in the coming sections. Standard operations allowed for any data type - type() - check for truth value (`bool()` or within a `if` statement) - logical operations - comparisons ``` x = 0.0 b_x = bool(x) print(b_x) x == 5 ``` ### base modules - A `module` is a single file containing several related function definitions and global variables. - A `package` is a collection of related modules packaged and distributed together. Structure of `python` packages. <img src='https://files.realpython.com/media/pkg4.a830d6e144bf.png'/> #### The `math` module ``` from math import floor,factorial from x import * from y import * dir() x.y.x.x_func1() x = 4 factorial(5) ``` Before we jump into the programming world, we first need to understand the process of writing a program. It all begins with a problem. For instance, **write a program to convert temperature from Celsius to Fahrenheit** * First understand the problem. * Next, create a _pseudocode_ in your mind (or on paper). * Then check what do you need from the language to ``` # This is a user-defined function. # Functions are useful as pieces of reusable code that can be used by you or any other user. 
def converter(): ''' This function converts celsius to fahrenheit ''' celsius = float(input("What is the Celsius temperature? ")) f = celsius*(9/5) + 32 print("Temperature in Fahrenheit is %f"%f) return f help(converter) ``` ### Getting help in Python We will be utilizing many _built-in_ functions in Python. If you need to get help for them, you can use the `help` function. ``` help(float) ``` ### Adding help to user defined functions ``` # This is a user-defined function. # Functions are useful as pieces of reusable code that can be used by you or any other user. def converter(): ''' This function reads number off the standard input and converts it into the corresponding Fahreheit temperature. Function has not input parameters or output value. For more help, see - https://www.almanac.com/content/temperature-conversion ''' celsius = float(input("What is the Celsius temperature? ")) f = celsius*(9/5) + 32 # display print("Temperature in Fahrenheit is %d"%f) return f f = converter() f = 467868768768768768 import sys sys.getsizeof(f) help(converter) ``` ### Analyzing Simple Programs in Python We have already written a temperature converter program. Let us see some cool things we can do in Python. #### Anatomy of a Python (or any) program Here is what happens when we run the temperature converter program. 1. The Python compiler loads the program and converts it into byte code (.pyc). 2. The byte code is loaded into the memory of the computer 3. The byte code is executed according to the logical flow of the program ``` import dis dis.dis(converter) ``` ### Expressions Expressions are atomic part of a program that manipulates some data. We can _literal_ expressions which consist of a value. When an expression is executed in a Python environment, it is also known as _evaluation_. ``` 4 # literal 'Programming' converter() Converter() ``` ### Outputs We typically use a built-in function, `print`, to display information on the screen. ``` print(3+4) print(3,4,3+4) print() print('The answer is ',3 + 4) print('The answer is\n',3 + 4) y = 'print \\n' print(y) ``` ### Assignments One key component of any code is assignment, where a variable is "given" a value. The standard form is: ```python <variable> = <expression> ``` One can think of this as creating a box in the memory and putting the value of the expression in it, and then assigning the variable name to that box for future reference. #### Simultaneous assignments Python, in all its Pythonism, allows for many forms of the assignment statement, which might not be available in other programming languages. ``` x = 5 y = 3 print(x) print(y) x,y,z,a,b,c = 5,3,4,3,2,1 print(x) print(y) sm,df = x-y,x+y print(sm) print(df) # swapping values x,y = 5,3 y,x = x,y print(x) print(y) ``` ### Variable Scope Another important programming aspect is the notion of a variable's `scope` -- or _the places where a variable can be seen or is accessible_. In a `Python` environment, there are multiple namespaces that exist simultaneously. A _namespace_ is a container that allows mapping a name to a variable. At any given point in the code, `Python` searches in these namespaces for an appropriate mapping from the name to the variable. But in what order should the search be done across all the existing namespaces? 
``` i = 1 def foo(): #i = 5 print(i, 'in foo()') def bar(): i = 7 print(i, 'in bar()') print(i, 'global') bar() foo() print(i,'global') ``` `Python` uses the following order: <img src='https://raw.githubusercontent.com/rasbt/python_reference/master/Images/scope_resolution_1.png'> ``` a_var = 'global variable' def a_func(): print(a_var, '[ a_var inside a_func() ]') a_func() print(a_var, '[ a_var outside a_func() ]') ``` In the example below, inside the function, the variable in the local scope is modified. However, outside the function, the global variable is used in the `print` statement. ``` a_var = 'global value' def a_func(): a_var = 'local value' print(a_var, '[ a_var inside a_func() ]') a_func() print(a_var, '[ a_var outside a_func() ]') ``` However, if one needs to modify the global variable within a function, the keyword `global` is used. ``` a_var = 'global value' def a_func(): global a_var a_var = 'local value' print(a_var, '[ a_var inside a_func() ]') print(a_var, '[ a_var outside a_func() ]') a_func() print(a_var, '[ a_var outside a_func() ]') ``` Of course, if the correct order is not maintained, one will get an error. ``` a_var = 1 def a_func(): a_var = a_var + 1 print(a_var, '[ a_var inside a_func() ]') print(a_var, '[ a_var outside a_func() ]') a_func() a_var = 1 def a_func(): global a_var a_var = a_var + 1 print(a_var, '[ a_var inside a_func() ]') print(a_var, '[ a_var outside a_func() ]') a_func() #a_var2 = 1 def a_func(): global a_var2 a_var2 = 0 a_var2 = a_var2 + 1 print(a_var2, '[ a_var2 inside a_func() ]') a_func() print(a_var2, '[ a_var2 outside a_func() ]') var_outermost = 8 def a_outer(var_outermost1): print("inside the outer function") print(var_outermost1) def a_inner(var_outermost2): var_inner = 4 print(var_inner) print(var_outermost2) return var_inner var_inner = a_inner(var_outermost1) print(var_inner) a_outer(var_outermost) ```
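The examples above exercise the local and global ends of the lookup order shown in the diagram; the enclosing (E) level in between behaves analogously, with `nonlocal` playing the role that `global` plays for module-level names. A short self-contained sketch:

```
def outer():
    counter = 0           # lives in outer's (enclosing) scope

    def bump():
        nonlocal counter  # rebind outer's variable instead of creating a new local one
        counter += 1
        print(counter, '[ counter inside bump() ]')

    bump()
    bump()
    print(counter, '[ counter inside outer(), after two bumps ]')

outer()   # prints 1, 2, then 2
```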
<a href="https://colab.research.google.com/github/apmoore1/target-extraction/blob/master/tutorials/Difference_between_MAMS_ATSA_original_and_MAMS_ATSA_cleaned.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` %%capture !pip install git+git://github.com/apmoore1/target-extraction.git@master#egg=target-extraction ``` # Difference between MAMS ATSA original and MAMS ATSA cleaned In this notebook we describe the subtle differences between the original MAMS ATSA from [Jiang et al. 2019](https://www.aclweb.org/anthology/D19-1654.pdf) and the cleaned version created from exploring the dataset within the Bella package. This is only for the Training split and the Validation and Test splits are have not changed from the original. Below we load both original and cleaned training sets: ``` from target_extraction.dataset_parsers import multi_aspect_multi_sentiment_atsa original_train = multi_aspect_multi_sentiment_atsa('train') original_train.name = 'MAMS Original (Train)' cleaned_train = multi_aspect_multi_sentiment_atsa('train', original=False) cleaned_train.name = 'MAMS Cleaned (Train)' ``` After loading the datasets we report the dataset statistics below: ``` from target_extraction.analysis.dataset_statistics import dataset_target_sentiment_statistics dataset_target_sentiment_statistics([original_train, cleaned_train], dataframe_format=True) ``` As can be seen the only difference being we have 6 fewer samples/targets. The reason for these differences is due to overlapping targets in the original dataset which can be seen below: ``` from target_extraction.tokenizers import spacy_tokenizer original_train.tokenize(spacy_tokenizer()) sequence_errors = original_train.sequence_labels(return_errors=True) for error in sequence_errors: _id = error['text_id'] text = error['text'] targets = error['targets'] spans = error['spans'] print(f'ID of error {_id}') print(f'targets {targets}') print(f'target spans {spans}') print(f'text {text}\n') ``` As can be seen above for each TargetText there are two targets that overlap each other with respect to the Span of the text the target came from. e.g. in example 1 the targets are `targets ['beer selection', 'beer s']` of which it does not make sense to have two spans that cover the same target and in this case `beer s` appears to be an annotation mistake. The cleaned version removes the following targets that are believed to be annotation mistakes: 1. ID `train$1191` removed `beer s` 2. ID `train$1203` removed `table s` 3. ID `train$2385` removed `coconut` 4. ID `train$2645` removed `pizza` 5. ID `train$3865` removed `clam s` 6. ID `train$3903` removed `beet s` ``` ```
# Cowell's formulation For cases where we only study the gravitational forces, solving the Kepler's equation is enough to propagate the orbit forward in time. However, when we want to take perturbations that deviate from Keplerian forces into account, we need a more complex method to solve our initial value problem: one of them is **Cowell's formulation**. In this formulation we write the two body differential equation separating the Keplerian and the perturbation accelerations: $$\ddot{\mathbb{r}} = -\frac{\mu}{|\mathbb{r}|^3} \mathbb{r} + \mathbb{a}_d$$ <div class="alert alert-info">For an in-depth exploration of this topic, still to be integrated in poliastro, check out <a href="https://github.com/Juanlu001/pfc-uc3m">this Master thesis</a></div> <div class="alert alert-info">An earlier version of this notebook allowed for more flexibility and interactivity, but was considerably more complex. Future versions of poliastro and plotly might bring back part of that functionality, depending on user feedback. You can still download the older version <a href="https://github.com/poliastro/poliastro/blob/0.8.x/docs/source/examples/Propagation%20using%20Cowell's%20formulation.ipynb">here</a>.</div> ## First example Let's setup a very simple example with constant acceleration to visualize the effects on the orbit. ``` import numpy as np from astropy import units as u from astropy import time from poliastro.bodies import Earth from poliastro.twobody import Orbit from poliastro.twobody.propagation import propagate from poliastro.examples import iss from poliastro.twobody.propagation import cowell from poliastro.plotting import OrbitPlotter3D from poliastro.util import norm import plotly.io as pio pio.renderers.default = "notebook_connected" ``` To provide an acceleration depending on an extra parameter, we can use **closures** like this one: ``` accel = 2e-5 def constant_accel_factory(accel): def constant_accel(t0, u, k): v = u[3:] norm_v = (v[0]**2 + v[1]**2 + v[2]**2)**.5 return accel * v / norm_v return constant_accel times = np.linspace(0, 10 * iss.period, 500) times positions = propagate( iss, time.TimeDelta(times), method=cowell, rtol=1e-11, ad=constant_accel_factory(accel), ) ``` And we plot the results: ``` frame = OrbitPlotter3D() frame.set_attractor(Earth) frame.plot_trajectory(positions, label="ISS") ``` ## Error checking ``` def state_to_vector(ss): r, v = ss.rv() x, y, z = r.to(u.km).value vx, vy, vz = v.to(u.km / u.s).value return np.array([x, y, z, vx, vy, vz]) k = Earth.k.to(u.km ** 3 / u.s ** 2).value rtol = 1e-13 full_periods = 2 u0 = state_to_vector(iss) tf = ((2 * full_periods + 1) * iss.period / 2) u0, tf iss_f_kep = iss.propagate(tf, rtol=1e-18) r, v = cowell(iss.attractor.k, iss.r, iss.v, [tf] * u.s, rtol=rtol) iss_f_num = Orbit.from_vectors(Earth, r[0], v[0], iss.epoch + tf) iss_f_num.r, iss_f_kep.r assert np.allclose(iss_f_num.r, iss_f_kep.r, rtol=rtol, atol=1e-08 * u.km) assert np.allclose(iss_f_num.v, iss_f_kep.v, rtol=rtol, atol=1e-08 * u.km / u.s) assert np.allclose(iss_f_num.a, iss_f_kep.a, rtol=rtol, atol=1e-08 * u.km) assert np.allclose(iss_f_num.ecc, iss_f_kep.ecc, rtol=rtol) assert np.allclose(iss_f_num.inc, iss_f_kep.inc, rtol=rtol, atol=1e-08 * u.rad) assert np.allclose(iss_f_num.raan, iss_f_kep.raan, rtol=rtol, atol=1e-08 * u.rad) assert np.allclose(iss_f_num.argp, iss_f_kep.argp, rtol=rtol, atol=1e-08 * u.rad) assert np.allclose(iss_f_num.nu, iss_f_kep.nu, rtol=rtol, atol=1e-08 * u.rad) ``` ## Numerical validation According to [Edelbaum, 1961], a coplanar, 
semimajor axis change with tangent thrust is defined by: $$\frac{\operatorname{d}\!a}{a_0} = 2 \frac{F}{m V_0}\operatorname{d}\!t, \qquad \frac{\Delta{V}}{V_0} = \frac{1}{2} \frac{\Delta{a}}{a_0}$$ So let's create a new circular orbit and perform the necessary checks, assuming constant mass and thrust (i.e. constant acceleration): ``` ss = Orbit.circular(Earth, 500 * u.km) tof = 20 * ss.period ad = constant_accel_factory(1e-7) r, v = cowell(ss.attractor.k, ss.r, ss.v, [tof] * u.s, ad=ad) ss_final = Orbit.from_vectors(Earth, r[0], v[0], ss.epoch + tof) da_a0 = (ss_final.a - ss.a) / ss.a da_a0 dv_v0 = abs(norm(ss_final.v) - norm(ss.v)) / norm(ss.v) 2 * dv_v0 np.allclose(da_a0, 2 * dv_v0, rtol=1e-2) ``` This means **we successfully validated the model against an extremely simple orbit transfer with approximate analytical solution**. Notice that the final eccentricity, as originally noticed by Edelbaum, is nonzero: ``` ss_final.ecc ``` ## References * [Edelbaum, 1961] "Propulsion requirements for controllable satellites"
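For readers without poliastro at hand, the same idea can be reproduced with a general-purpose ODE solver: Cowell's formulation is simply the numerical integration of the Keplerian acceleration plus the perturbing acceleration. The sketch below is an illustrative stand-in, not poliastro's internal implementation; the gravitational parameter, the rough 500 km circular initial state, and the use of `scipy.integrate.solve_ivp` are assumptions made for the example.

```
import numpy as np
from scipy.integrate import solve_ivp

MU_EARTH = 398600.4418  # km^3 / s^2

def constant_accel(t, rv, accel=1e-7):
    # Perturbing acceleration aligned with the velocity vector (km / s^2),
    # mirroring the closure used with poliastro above
    v = rv[3:]
    return accel * v / np.linalg.norm(v)

def cowell_rhs(t, rv):
    # r'' = -mu r / |r|^3 + a_d, written as a first-order system
    r, v = rv[:3], rv[3:]
    a_kepler = -MU_EARTH * r / np.linalg.norm(r) ** 3
    return np.concatenate([v, a_kepler + constant_accel(t, rv)])

# Roughly a 500 km circular equatorial orbit (values assumed for illustration)
rv0 = np.array([6878.0, 0.0, 0.0, 0.0, 7.61, 0.0])
period = 2 * np.pi * np.sqrt(6878.0 ** 3 / MU_EARTH)

sol = solve_ivp(cowell_rhs, (0.0, 20 * period), rv0, rtol=1e-11, atol=1e-12)
print(sol.y[:3, -1])  # final position in km
```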
# Convolutional Neural Network on pixel neighborhoods This notebook reads the pixel-neighborhood data written out by the Dataflow program of [1_explore.ipynb](./1_explore.ipynb) and trains a simple convnet model on Cloud ML Engine. ``` BUCKET = 'cloud-training-demos-ml' PROJECT = 'cloud-training-demos' REGION = 'us-central1' import os os.environ['BUCKET'] = BUCKET os.environ['PROJECT'] = PROJECT os.environ['REGION'] = REGION %%bash gcloud config set project $PROJECT gcloud config set compute/region $REGION %%bash if ! gsutil ls | grep -q gs://${BUCKET}/; then gsutil mb -l ${REGION} gs://${BUCKET} fi %%bash pip install --upgrade tensorflow import tensorflow as tf print(tf.__version__) ``` ## Train CNN model locally ``` %%bash OUTDIR=${PWD}/cnn_trained DATADIR=${PWD}/preproc/tfrecord rm -rf $OUTDIR gcloud ml-engine local train \ --module-name=trainer.train_cnn --package-path=${PWD}/ltgpred/trainer \ -- \ --train_steps=10 --num_eval_records=512 --train_batch_size=16 --num_cores=1 --nlayers=5 \ --job-dir=$OUTDIR --train_data_path=${DATADIR}/train* --eval_data_path=${DATADIR}/eval* ``` ## Training lighting prediction model on CMLE using GPU custom_model_m_gpu is a machine with 4 K-80 GPUs. ``` %writefile largemachine.yaml trainingInput: scaleTier: CUSTOM masterType: complex_model_m_p100 %%bash OUTDIR=gs://${BUCKET}/lightning/cnn_trained_gpu DATADIR=gs://$BUCKET/lightning/preproc/tfrecord JOBNAME=ltgpred_cnn_$(date -u +%y%m%d_%H%M%S) gsutil -m rm -rf $OUTDIR gcloud ml-engine jobs submit training $JOBNAME \ --module-name=trainer.train_cnn --package-path=${PWD}/ltgpred/trainer --job-dir=$OUTDIR \ --region=${REGION} --scale-tier=CUSTOM --config=largemachine.yaml \ --python-version=3.5 --runtime-version=1.10 \ -- \ --train_data_path=${DATADIR}/train-* --eval_data_path=${DATADIR}/eval-* \ --train_steps=5000 --train_batch_size=256 --num_cores=4 \ --num_eval_records=128000 --nlayers=5 --dprob=0.05 --ksize=3 --nfil=10 --learning_rate=0.01 ``` The training completed after 12 minutes with this result: <pre> loss: 0.3428 - acc: 0.8547 - mean_squared_error: 0.1059 - rmse: 0.2118 - val_loss: 0.3466 - val_acc: 0.8547 - val_mean_squared_error: 0.1068 - val_rmse: 0.2264 </pre> ## Training lightning prediction model on CMLE using TPUs Next, let's generate more (8x) data and then train on the TPU. ``` %%bash OUTDIR=gs://${BUCKET}/lightning/cnn_trained_tpu DATADIR=gs://$BUCKET/lightning/preproc/tfrecord JOBNAME=ltgpred_cnn_$(date -u +%y%m%d_%H%M%S) gsutil -m rm -rf $OUTDIR gcloud ml-engine jobs submit training $JOBNAME \ --module-name=trainer.train_cnn --package-path=${PWD}/ltgpred/trainer --job-dir=$OUTDIR \ --region=${REGION} --scale-tier=BASIC_TPU \ --python-version=3.5 --runtime-version=1.9 \ -- \ --train_data_path=${DATADIR}/train* --eval_data_path=${DATADIR}/eval* \ --train_steps=10000 --train_batch_size=1024 --num_cores=8 --use_tpu \ --num_eval_records=128000 --nlayers=5 --dprob=0.05 --ksize=3 --nfil=10 --learning_rate=0.01 ``` When I ran it, training finished with accuracy= Copyright 2018 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License.
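The model itself lives in `ltgpred/trainer/train_cnn.py`, which this notebook only invokes through `gcloud`. Purely as a hypothetical orientation (the layer stack, input shape, and loss below are assumptions, not the actual trainer code), a small `tf.keras` convnet driven by the same hyperparameters might look like this:

```
import tensorflow as tf

def make_cnn(nlayers=5, ksize=3, nfil=10, dprob=0.05, input_shape=(32, 32, 2)):
    # Hypothetical sketch only; the real architecture is defined in the ltgpred package.
    inputs = tf.keras.Input(shape=input_shape)
    x = inputs
    for _ in range(nlayers):
        x = tf.keras.layers.Conv2D(nfil, ksize, padding='same', activation='relu')(x)
        x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(dprob)(x)
    outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)  # lightning / no lightning
    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model

make_cnn().summary()
```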
# Algorithms, the datafication of society, and the historian's craft: inequality, racism, and violence

<small>Material for Virtual Meeting 6, 14/12/2021, corresponding to [week 10 of the course](https://ericbrasiln.github.io/intro-historia-digital/mod3/sem10.html#)</small>

The goal of this virtual meeting is to discuss how algorithms and the datafication of social life bear on researching and teaching history, now and in the future. To that end we will discuss the following texts:

1. [O'NEIL, CATHY. Componentes da bomba. O que é um modelo? In: **Algoritmos de Destruição em massa**. São Paulo: Editora Rua do Sabão, 2020. pp. 25-50](https://github.com/ericbrasiln/intro-historia-digital/blob/bc111c29d3ec6f35221358f5c4af6edcebef524d/cclhm0069/biblio/oneil.pdf)
2. [NOBLE, Safiya Umoja. Introdução. In: **Algoritmos da opressão**. São Paulo: Editora Rua do Sabão, 2021. pp. 7-28](https://github.com/ericbrasiln/intro-historia-digital/blob/bc111c29d3ec6f35221358f5c4af6edcebef524d/cclhm0069/biblio/noble.pdf)

## So what is this thing called an _algorithm_?

![math](https://raw.githubusercontent.com/ericbrasiln/ferramentas_digitais_UNILAB/master/docs/gifs/math.gif)

A set of logical steps for carrying out a given task. An algorithm (written by a human) tells the computer which steps to take and in what order. This list of procedures is executed step by step until the expected action is completed. The logical steps are chained together, for example:

`If` such-and-such happens, then do step 1, `otherwise` do step 2.

~~~
if
else
~~~

`While` such-and-such is happening, keep performing the action.

~~~
while == True
~~~

`Try` to run this step; if it does not work, handle the given `exception`.

~~~
try:
except:
~~~

![waze](https://raw.githubusercontent.com/ericbrasiln/ferramentas_digitais_UNILAB/master/docs/gifs/waze.gif)

~~~
if "the car goes over 65 km/h":
    show a speed alert
otherwise:
    show nothing
~~~

Or again:

~~~
if "the street is congested":
    "compute a new, shorter route along another street"
    "show the new route"
    "give the directions"
otherwise:
    "keep the same route"
~~~

```
# Asks for the user's name and replies accordingly
meu_nome = str(input('Qual é o seu nome? '))

if meu_nome == 'Eric':
    print('Que nome bonito!')
else:
    print('Seu nome é tão normal!')

# Asks for the user's age and explains the Brazilian voting rules for that age
idade = int(input('Qual a sua idade? '))

if idade < 16:
    print('Você é tão jovem. Ainda não pode votar!')
elif idade >= 16 and idade < 18:
    print('Nessa idade você tem a opção de votar. Já tirou o título de eleitor?')
elif idade >= 18 and idade < 65:
    print('Na sua idade o voto é obrigatório!')
else:
    print('Nessa idade o voto é facultativo!')
```

## Algorithms in everything?

<div class="center">
<blockquote class="twitter-tweet"><p lang="en" dir="ltr">Trying a horrible experiment...<br><br>Which will the Twitter algorithm pick: Mitch McConnell or Barack Obama? <a href="https://t.co/bR1GRyCkia">pic.twitter.com/bR1GRyCkia</a></p>&mdash; Tony “Abolish (Pol)ICE” Arcieri 🦀 (@bascule) <a href="https://twitter.com/bascule/status/1307440596668182528?ref_src=twsrc%5Etfw">September 19, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</div>

![tweet](https://github.com/ericbrasiln/intro-historia-digital/blob/945a2f6528af55f0f18c59e57afcfb93566e464a/cclhm0069/images/tweet_race.jpeg?raw=true)

## The datafication of social relations

What are the impacts for historical research?

## The oppression of algorithms: discussing the texts

### Cathy O'Neil and the components of the bomb

#### What is a model?

- What is a model?
- The baseball example
- The example of feeding a family (31-33)
- A model is a simplification (33-35)
- Success? (35)
- Racism as a model? (37)
- Recidivism models and the prison and court systems in the USA (40-44)
- Feedback loops (44)

#### Taxonomy of a WMD (in the Brazilian edition, ADM: Algoritmo de Destruição em Massa)

1. Is the model opaque or invisible?
2. Is it unfair? Does it destroy lives?
3. Does it scale?

OPACITY + SCALE + DAMAGE = WMD

### Noble and the oppression of algorithms

- The book's aims: academic and political
- The case of the "black girls" search on Google
- "Racism is the fundamental API of the internet" (14)

#### Google Search

- An advertising company;
- The implications of algorithmic decision-making (15);
- Are these errors?
- What impacts do the racist and sexist biases of these algorithms have on society?
``` %%javascript var kernel = IPython.notebook.kernel; var body = document.body, attribs = body.attributes; var command = "__filename__ = " + "'" + decodeURIComponent(attribs['data-notebook-name'].value) + "'"; kernel.execute(command); print(__filename__) import os, sys, numpy as np, tensorflow as tf from pathlib import Path import time try: print(__file__) __current_dir__ = str(Path(__file__).resolve().parents[0]) __filename__ = os.path.basename(__file__) except NameError: # jupyter notebook automatically sets the working # directory to where the notebook is. __current_dir__ = str(Path(os.getcwd())) module_parent_dir = str(Path(__current_dir__).resolve().parents[0]) sys.path.append(module_parent_dir) import convnet __package__ = 'convnet' from . import network from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) BATCH_SIZE = 250 SCRIPT_DIR = __current_dir__ FILENAME = __filename__ SUMMARIES_DIR = SCRIPT_DIR SAVE_PATH = SCRIPT_DIR + "/network.ckpt" ### configure devices for this eval script. USE_DEVICE = '/gpu:0' session_config = tf.ConfigProto(log_device_placement=True) session_config.gpu_options.allow_growth = True # this is required if want to use GPU as device. # see: https://github.com/tensorflow/tensorflow/issues/2292 session_config.allow_soft_placement = True if __name__ == "__main__": with tf.Graph().as_default() as g, tf.device(USE_DEVICE): # inference() input, logits = network.inference() labels, loss_op = network.loss(logits) train = network.training(loss_op, 1e-1) eval = network.evaluation(logits, labels) init = tf.initialize_all_variables() with tf.Session(config=session_config) as sess: # Merge all the summaries and write them out to /tmp/mnist_logs (by default) # to see the tensor graph, fire up the tensorboard with --logdir="./train" merged = tf.merge_all_summaries() train_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/train', sess.graph) test_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/test') saver = tf.train.Saver() sess.run(init) saver.restore(sess, SAVE_PATH) # now let's test! TEST_BATCH_SIZE = np.shape(mnist.test.labels)[0] output, loss_value, accuracy = sess.run([logits, loss_op, eval], feed_dict={ input: mnist.test.images, labels: mnist.test.labels }) print("- MNIST Test accuracy is ", accuracy / TEST_BATCH_SIZE) ```
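The cell above targets an early TensorFlow 1.x API, and several of the symbols it calls were later renamed. To the best of my recollection of those renames, the equivalent calls on later 1.x releases are sketched below; the surrounding session, graph, and evaluation logic from the cell above is assumed and unchanged.

```
# Later TensorFlow 1.x spellings of the deprecated calls used above
# (assumes sess, SUMMARIES_DIR, etc. from the previous cell):
init = tf.global_variables_initializer()   # was tf.initialize_all_variables()
merged = tf.summary.merge_all()            # was tf.merge_all_summaries()
train_writer = tf.summary.FileWriter(SUMMARIES_DIR + '/summaries/train', sess.graph)
test_writer = tf.summary.FileWriter(SUMMARIES_DIR + '/summaries/test')
# both writers were previously tf.train.SummaryWriter(...)
```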
NumPy Support ============= The magnitude of a Pint quantity can be of any numerical scalar type, and you are free to choose it according to your needs. For numerical applications requiring arrays, it is quite convenient to use [NumPy ndarray](http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html) (or [ndarray-like types supporting NEP-18](https://numpy.org/neps/nep-0018-array-function-protocol.html)), and therefore these are the array types supported by Pint. First, we import the relevant packages: ``` # Import NumPy import numpy as np # Import Pint import pint ureg = pint.UnitRegistry() Q_ = ureg.Quantity # Silence NEP 18 warning import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") Q_([]) ``` and then we create a quantity the standard way ``` legs1 = Q_(np.asarray([3., 4.]), 'meter') print(legs1) legs1 = [3., 4.] * ureg.meter print(legs1) ``` All usual Pint methods can be used with this quantity. For example: ``` print(legs1.to('kilometer')) print(legs1.dimensionality) try: legs1.to('joule') except pint.DimensionalityError as exc: print(exc) ``` NumPy functions are supported by Pint. For example if we define: ``` legs2 = [400., 300.] * ureg.centimeter print(legs2) ``` we can calculate the hypotenuse of the right triangles with legs1 and legs2. ``` hyps = np.hypot(legs1, legs2) print(hyps) ``` Notice that before the `np.hypot` was used, the numerical value of legs2 was internally converted to the units of legs1 as expected. Similarly, when you apply a function that expects angles in radians, a conversion is applied before the requested calculation: ``` angles = np.arccos(legs2/hyps) print(angles) ``` You can convert the result to degrees using usual unit conversion: ``` print(angles.to('degree')) ``` Applying a function that expects angles to a quantity with a different dimensionality results in an error: ``` try: np.arccos(legs2) except pint.DimensionalityError as exc: print(exc) ``` Function/Method Support ----------------------- The following [ufuncs](http://docs.scipy.org/doc/numpy/reference/ufuncs.html) can be applied to a Quantity object: - **Math operations**: `add`, `subtract`, `multiply`, `divide`, `logaddexp`, `logaddexp2`, `true_divide`, `floor_divide`, `negative`, `remainder`, `mod`, `fmod`, `absolute`, `rint`, `sign`, `conj`, `exp`, `exp2`, `log`, `log2`, `log10`, `expm1`, `log1p`, `sqrt`, `square`, `cbrt`, `reciprocal` - **Trigonometric functions**: `sin`, `cos`, `tan`, `arcsin`, `arccos`, `arctan`, `arctan2`, `hypot`, `sinh`, `cosh`, `tanh`, `arcsinh`, `arccosh`, `arctanh` - **Comparison functions**: `greater`, `greater_equal`, `less`, `less_equal`, `not_equal`, `equal` - **Floating functions**: `isreal`, `iscomplex`, `isfinite`, `isinf`, `isnan`, `signbit`, `sign`, `copysign`, `nextafter`, `modf`, `ldexp`, `frexp`, `fmod`, `floor`, `ceil`, `trunc` And the following NumPy functions: ``` from pint.numpy_func import HANDLED_FUNCTIONS print(sorted(list(HANDLED_FUNCTIONS))) ``` And the following [NumPy ndarray methods](http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#array-methods): - `argmax`, `argmin`, `argsort`, `astype`, `clip`, `compress`, `conj`, `conjugate`, `cumprod`, `cumsum`, `diagonal`, `dot`, `fill`, `flatten`, `flatten`, `item`, `max`, `mean`, `min`, `nonzero`, `prod`, `ptp`, `put`, `ravel`, `repeat`, `reshape`, `round`, `searchsorted`, `sort`, `squeeze`, `std`, `sum`, `take`, `trace`, `transpose`, `var` Pull requests are welcome for any NumPy function, ufunc, or method that is not currently supported. 
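To make the lists above concrete, here is a short cell exercising a few of the listed ufuncs, array functions, and ndarray methods on a Quantity, reusing the `ureg` and `np` imported earlier:

```
heights = np.array([1.70, 1.82, 1.65, 1.75]) * ureg.meter

# Ufuncs listed above check and propagate units (the centimetres are converted)
print(np.add(heights, 5 * ureg.centimeter))

# ndarray methods listed above work directly on the Quantity
print(heights.mean())
print(heights.max())

# NumPy functions dispatch through __array_function__
print(np.sum(heights))
```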
Array Type Support ------------------ ### Overview When not wrapping a scalar type, a Pint `Quantity` can be considered a ["duck array"](https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html), that is, an array-like type that implements (all or most of) NumPy's API for `ndarray`. Many other such duck arrays exist in the Python ecosystem, and Pint aims to work with as many of them as reasonably possible. To date, the following are specifically tested and known to work: - xarray: `DataArray`, `Dataset`, and `Variable` - Sparse: `COO` and the following have partial support, with full integration planned: - NumPy masked arrays (NOTE: Masked Array compatibility has changed with Pint 0.10 and versions of NumPy up to at least 1.18, see the example below) - Dask arrays - CuPy arrays ### Technical Commentary Starting with version 0.10, Pint aims to interoperate with other duck arrays in a well-defined and well-supported fashion. Part of this support lies in implementing [`__array_ufunc__` to support NumPy ufuncs](https://numpy.org/neps/nep-0013-ufunc-overrides.html) and [`__array_function__` to support NumPy functions](https://numpy.org/neps/nep-0018-array-function-protocol.html). However, the central component to this interoperability is respecting a [type casting hierarchy](https://numpy.org/neps/nep-0018-array-function-protocol.html) of duck arrays. When all types in the hierarchy properly defer to those above it (in wrapping, arithmetic, and NumPy operations), a well-defined nesting and operator precedence order exists. When they don't, the graph of relations becomes cyclic, and the expected result of mixed-type operations becomes ambiguous. For Pint, following this hierarchy means declaring a list of types that are above it in the hierarchy and to which it defers ("upcast types") and assuming all others are below it and wrappable by it ("downcast types"). To date, Pint's declared upcast types are: - `PintArray`, as defined by pint-pandas - `Series`, as defined by Pandas - `DataArray`, `Dataset`, and `Variable`, as defined by xarray (Note: if your application requires extension of this collection of types, it is available in Pint's API at `pint.compat.upcast_types`.) While Pint assumes it can wrap any other duck array (meaning, for now, those that implement `__array_function__`, `shape`, `ndim`, and `dtype`, at least until [NEP 30](https://numpy.org/neps/nep-0030-duck-array-protocol.html) is implemented), there are a few common types that Pint explicitly tests (or plans to test) for optimal interoperability. These are listed above in the overview section and included in the below chart. 
This type casting hierarchy of ndarray-like types can be shown by the below acyclic graph, where solid lines represent declared support, and dashed lines represent planned support: ``` from graphviz import Digraph g = Digraph(graph_attr={'size': '8,5'}, node_attr={'fontname': 'courier'}) g.edge('Dask array', 'NumPy ndarray') g.edge('Dask array', 'CuPy ndarray') g.edge('Dask array', 'Sparse COO') g.edge('Dask array', 'NumPy masked array', style='dashed') g.edge('CuPy ndarray', 'NumPy ndarray') g.edge('Sparse COO', 'NumPy ndarray') g.edge('NumPy masked array', 'NumPy ndarray') g.edge('Jax array', 'NumPy ndarray') g.edge('Pint Quantity', 'Dask array', style='dashed') g.edge('Pint Quantity', 'NumPy ndarray') g.edge('Pint Quantity', 'CuPy ndarray', style='dashed') g.edge('Pint Quantity', 'Sparse COO') g.edge('Pint Quantity', 'NumPy masked array', style='dashed') g.edge('xarray Dataset/DataArray/Variable', 'Dask array') g.edge('xarray Dataset/DataArray/Variable', 'CuPy ndarray', style='dashed') g.edge('xarray Dataset/DataArray/Variable', 'Sparse COO') g.edge('xarray Dataset/DataArray/Variable', 'NumPy ndarray') g.edge('xarray Dataset/DataArray/Variable', 'NumPy masked array', style='dashed') g.edge('xarray Dataset/DataArray/Variable', 'Pint Quantity') g.edge('xarray Dataset/DataArray/Variable', 'Jax array', style='dashed') g ``` ### Examples **xarray wrapping Pint Quantity** ``` import xarray as xr # Load tutorial data air = xr.tutorial.load_dataset('air_temperature')['air'][0] # Convert to Quantity air.data = Q_(air.data, air.attrs.pop('units', '')) print(air) print() print(air.max()) ``` **Pint Quantity wrapping Sparse COO** ``` from sparse import COO np.random.seed(80243963) x = np.random.random((100, 100, 100)) x[x < 0.9] = 0 # fill most of the array with zeros s = COO(x) q = s * ureg.m print(q) print() print(np.mean(q)) ``` **Pint Quantity wrapping NumPy Masked Array** ``` m = np.ma.masked_array([2, 3, 5, 7], mask=[False, True, False, True]) # Must create using Quantity class print(repr(ureg.Quantity(m, 'm'))) print() # DO NOT create using multiplication until # https://github.com/numpy/numpy/issues/15200 is resolved, as # unexpected behavior may result print(repr(m * ureg.m)) ``` **Pint Quantity wrapping Dask Array** ``` import dask.array as da d = da.arange(500, chunks=50) # Must create using Quantity class, otherwise Dask will wrap Pint Quantity q = ureg.Quantity(d, ureg.kelvin) print(repr(q)) print() # DO NOT create using multiplication on the right until # https://github.com/dask/dask/issues/4583 is resolved, as # unexpected behavior may result print(repr(d * ureg.kelvin)) print(repr(ureg.kelvin * d)) ``` **xarray wrapping Pint Quantity wrapping Dask array wrapping Sparse COO** ``` import dask.array as da x = da.random.random((100, 100, 100), chunks=(100, 1, 1)) x[x < 0.95] = 0 data = xr.DataArray( Q_(x.map_blocks(COO), 'm'), dims=('z', 'y', 'x'), coords={ 'z': np.arange(100), 'y': np.arange(100) - 50, 'x': np.arange(100) * 1.5 - 20 }, name='test' ) print(data) print() print(data.sel(x=125.5, y=-46).mean()) ``` ### Compatibility Packages To aid in integration between various array types and Pint (such as by providing convenience methods), the following compatibility packages are available: - [pint-pandas](https://github.com/hgrecco/pint-pandas) - [pint-xarray](https://github.com/xarray-contrib/pint-xarray/) (Note: if you have developed a compatibility package for Pint, please submit a pull request to add it to this list!) 
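As a taste of what these compatibility layers add, here is a minimal sketch using pint-pandas; it assumes the package is installed and that your version exposes the `pint[...]` extension dtype and the `.pint` accessor described in its documentation.

```
# Assumes the pint-pandas compatibility package is installed.
import pandas as pd
import pint_pandas  # registers the "pint[...]" extension dtype with pandas

df = pd.DataFrame({
    "distance": pd.Series([10.0, 25.0, 40.0], dtype="pint[km]"),
    "time": pd.Series([0.5, 1.0, 2.0], dtype="pint[hour]"),
})
df["speed"] = df["distance"] / df["time"]
print(df["speed"].pint.to("m/s"))
```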
## Additional Comments What follows is a short discussion about how NumPy support is implemented in Pint's `Quantity` Object. For the supported functions, Pint expects certain units and attempts to convert the input (or inputs). For example, the argument of the exponential function (`numpy.exp`) must be dimensionless. Units will be simplified (converting the magnitude appropriately) and `numpy.exp` will be applied to the resulting magnitude. If the input is not dimensionless, a `DimensionalityError` exception will be raised. In some functions that take 2 or more arguments (e.g. `arctan2`), the second argument is converted to the units of the first. Again, a `DimensionalityError` exception will be raised if this is not possible. ndarray or downcast type arguments are generally treated as if they were dimensionless quantities, whereas Pint defers to its declared upcast types by always returning `NotImplemented` when they are encountered (see above). To achive these function and ufunc overrides, Pint uses the ``__array_function__`` and ``__array_ufunc__`` protocols respectively, as recommened by NumPy. This means that functions and ufuncs that Pint does not explicitly handle will error, rather than return a value with units stripped (in contrast to Pint's behavior prior to v0.10). For more information on these protocols, see <https://docs.scipy.org/doc/numpy-1.17.0/user/basics.dispatch.html>. This behaviour introduces some performance penalties and increased memory usage. Quantities that must be converted to other units require additional memory and CPU cycles. Therefore, for numerically intensive code, you might want to convert the objects first and then use directly the magnitude, such as by using Pint's `wraps` utility (see [wrapping](wrapping.rst)). Attempting to access array interface protocol attributes (such as `__array_struct__` and `__array_interface__`) on Pint Quantities will raise an AttributeError, since a Quantity is meant to behave as a "duck array," and not a pure ndarray.
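A minimal example of the `wraps` approach mentioned above, reusing the `ureg` defined at the top of this page: units are validated and converted once at the function boundary, and the body works with plain magnitudes at full NumPy speed.

```
@ureg.wraps("m / s", ("m", "s"))
def mean_speed(distance, time):
    # Inside the function, distance and time are bare magnitudes already
    # converted to metres and seconds.
    return distance / time

print(mean_speed(3.0 * ureg.km, 2.0 * ureg.min))  # 25 m/s
```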
``` import pandas as pd import numpy as np import xgboost as xgb import re ``` ## 1. Load data and build a model ``` train = pd.read_csv("data/train.csv") test = pd.read_csv("data/test.csv") X_y_train = xgb.DMatrix(data=train[['Pclass', 'Age', 'Fare', 'SibSp', 'Parch']], label= train['Survived']) X_test = xgb.DMatrix(data=test[['Pclass', 'Age', 'Fare', 'SibSp', 'Parch']]) train[['Pclass', 'Age', 'Fare', 'SibSp', 'Parch', 'Survived']].head() params = { 'base_score': np.mean(train['Survived']), 'eta': 0.1, 'max_depth': 3, 'gamma' :3, 'objective' :'reg:linear', 'eval_metric' :'mae' } model = xgb.train(params=params, dtrain=X_y_train, num_boost_round=3) ``` ### visualization of model (need to install graphviz in order to use this feature) ``` xgb.to_graphviz(booster = model, num_trees=0) xgb.to_graphviz(booster = model, num_trees=1) xgb.to_graphviz(booster = model, num_trees=2) model.get_dump() ``` ### convert dump string to .py file ``` def string_parser(s): if len(re.findall(r":leaf=", s)) == 0: out = re.findall(r"[\w.-]+", s) tabs = re.findall(r"[\t]+", s) if (out[4] == out[8]): missing_value_handling = (" or np.isnan(x['" + out[1] + "']) ") else: missing_value_handling = "" if len(tabs) > 0: return (re.findall(r"[\t]+", s)[0].replace('\t', ' ') + ' if state == ' + out[0] + ':\n' + re.findall(r"[\t]+", s)[0].replace('\t', ' ') + ' state = (' + out[4] + ' if ' + "x['" + out[1] +"']<" + out[2] + missing_value_handling + ' else ' + out[6] + ')\n' ) else: return (' if state == ' + out[0] + ':\n' + ' state = (' + out[4] + ' if ' + "x['" + out[1] +"']<" + out[2] + missing_value_handling + ' else ' + out[6] + ')\n' ) else: out = re.findall(r"[\d.-]+", s) return (re.findall(r"[\t]+", s)[0].replace('\t', ' ') + ' if state == ' + out[0] + ':\n ' + re.findall(r"[\t]+", s)[0].replace('\t', ' ') + ' return ' + out[1] + '\n') def tree_parser(tree, i): if i == 0: return (' if num_booster == 0:\n state = 0\n' + "".join([string_parser(tree.split('\n')[i]) for i in range(len(tree.split('\n'))-1)])) else: return (' elif num_booster == '+str(i)+':\n state = 0\n' + "".join([string_parser(tree.split('\n')[i]) for i in range(len(tree.split('\n'))-1)])) def model_to_py(base_score, model, out_file): trees = model.get_dump() result = ["import numpy as np\n\n" +"def xgb_tree(x, num_booster):\n"] for i in range(len(trees)): result.append(tree_parser(trees[i], i)) with open(out_file, 'a') as the_file: the_file.write("".join(result) + "\ndef xgb_predict(x):\n predict = " + str(base_score) + "\n" + "# initialize prediction with base score\n" + " for i in range(" + str(len(trees)) + "):\n predict = predict + xgb_tree(x, i)" + "\n return predict") model_to_py(params['base_score'], model, 'xgb_model.py') ``` ### prediction using dump file ``` import xgb_model passenger_data_1 = {'Pclass':3, 'Age':np.nan, 'SibSp':0, 'Parch':0, 'Fare':7.8958} passenger_data_2 = {'Pclass':1, 'Age':46, 'SibSp':0, 'Parch':0, 'Fare':26} print(xgb_model.xgb_predict(passenger_data_1)) print(xgb_model.xgb_predict(passenger_data_2)) y_test= model.predict(X_test) test['pred'] = y_test test[['Pclass', 'Age', 'Fare', 'SibSp', 'Parch','pred']].iloc[10:].head(2) ```
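A quick sanity check on the generated module is to score a handful of test rows with both the hand-written `xgb_predict` and the booster itself and compare the outputs; the sketch below reuses only objects already defined in this notebook.

```
cols = ['Pclass', 'Age', 'Fare', 'SibSp', 'Parch']

# Score the first few test rows with the generated pure-Python model...
manual_preds = [xgb_model.xgb_predict(row.to_dict())
                for _, row in test[cols].head(5).iterrows()]
print(np.round(manual_preds, 6))

# ...and with the trained booster; the two should agree closely.
booster_preds = model.predict(xgb.DMatrix(test[cols].head(5)))
print(np.round(booster_preds, 6))
```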
# A recap on Scikit-learn's estimator interface Scikit-learn strives to have a uniform interface across all methods. Given a scikit-learn *estimator* object named `model`, the following methods are available (not all for each model): - Available in **all Estimators** + `model.fit()` : fit training data. For supervised learning applications, this accepts two arguments: the data `X` and the labels `y` (e.g. `model.fit(X, y)`). For unsupervised learning applications, `fit` takes only a single argument, the data `X` (e.g. `model.fit(X)`). - Available in **supervised estimators** + `model.predict()` : given a trained model, predict the label of a new set of data. This method accepts one argument, the new data `X_new` (e.g. `model.predict(X_new)`), and returns the learned label for each object in the array. + `model.predict_proba()` : For classification problems, some estimators also provide this method, which returns the probability that a new observation has each categorical label. In this case, the label with the highest probability is returned by `model.predict()`. + `model.decision_function()` : For classification problems, some estimators provide an uncertainty estimate that is not a probability. For binary classification, a decision_function >= 0 means the positive class will be predicted, while < 0 means the negative class. + `model.score()` : for classification or regression problems, most (all?) estimators implement a score method. Scores are between 0 and 1, with a larger score indicating a better fit. For classifiers, the `score` method computes the prediction accuracy. For regressors, `score` computes the coefficient of determination (R<sup>2</sup>) of the prediction. + `model.transform()` : For feature selection algorithms, this will reduce the dataset to the selected features. For some classification and regression models such as some linear models and random forests, this method reduces the dataset to the most informative features. These classification and regression models can therefore also be used as feature selection methods. - Available in **unsupervised estimators** + `model.transform()` : given an unsupervised model, transform new data into the new basis. This also accepts one argument `X_new`, and returns the new representation of the data based on the unsupervised model. + `model.fit_transform()` : some estimators implement this method, which more efficiently performs a fit and a transform on the same input data. + `model.predict()` : for clustering algorithms, the predict method will produce cluster labels for new data points. Not all clustering methods have this functionality. + `model.predict_proba()` : Gaussian mixture models (GMMs) provide the probability for each point to be generated by a given mixture component. + `model.score()` : Density models like KDE and GMMs provide the likelihood of the data under the model. Apart from ``fit``, the two most important functions are arguably ``predict`` to produce a target variable (a ``y``) ``transform``, which produces a new representation of the data (an ``X``). 
The following table shows for which class of models which function applies: <table> <tr style="border:None; font-size:20px; padding:10px;"><th>``model.predict``</th><th>``model.transform``</th></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>Classification</td><td>Preprocessing</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>Regression</td><td>Dimensionality Reduction</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>Clustering</td><td>Feature Extraction</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>&nbsp;</td><td>Feature Selection</td></tr> </table>
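To make the table concrete, here is a short end-to-end cell (the dataset choice is arbitrary) showing one estimator from each column: a classifier with `fit`, `predict`, `predict_proba`, and `score`, and a preprocessing transformer with `fit` and `transform`.

```
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# A supervised estimator: fit, predict, predict_proba, score
clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
print(clf.predict(X_test[:3]))
print(clf.predict_proba(X_test[:3]).round(3))
print(clf.score(X_test, y_test))

# A transformer: fit on the training split, transform both splits
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print(X_train_scaled.mean(axis=0).round(3))
```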
``` import pandas as pd # formula to add the success score to the table # MLB_Data_Final['Success_Score'] = round((MLB_Data_Final['W']/(MLB_Data_Final['W'] + MLB_Data_Final['L']))*100, 2) MLB_Data_Final = pd.read_csv('MLB_Data_Cleaned.csv') MLB_Data_Final.head() MLB_Data_Final.columns # dropping unnecessary columns df_mlb = MLB_Data_Final.drop(['Unnamed: 0', 'Unnamed: 0.1','G_Bat', 'PA', 'HR', 'R', 'RBI', 'SB', 'BB%', 'K%', 'ISO', 'BABIP_x', 'AVG', 'OBP', 'SLG', 'wOBA', 'xwOBA', 'wRC+', 'BsR', 'Off', 'Def', 'WAR_Bat','SV', 'G_Pitch', 'GS', 'IP', 'K/9', 'BB/9', 'HR/9', 'BABIP_y', 'LOB%', 'GB%', 'HR/FB', 'vFA (pi)', 'ERA', 'xERA', 'FIP', 'xFIP', 'WAR_Pitch'], 1) df_mlb.head() # change the Montreal Expos to the Washington Nationals df_mlb = df_mlb.replace({'MON' : 'WSN'}) df_mlb = df_mlb.set_index(['Season', 'Team']) df_mlb = df_mlb.sort_index(ascending=True, axis=0) # Add AL/NL data (LeagueID), DivisionID, Rank, Division Win, Wildcard Win, League Win, World Series Win df_team = pd.read_csv('Teams.csv') # drop unnecessary columns and rows # columns df_team2 = df_team.drop(['Ghome', 'teamID', 'park', 'attendance', 'BPF', 'PPF', 'teamIDBR', 'teamIDlahman45', 'teamIDretro'], 1) df_team3 = df_team2.drop(df_team2[df_team2.yearID < 1988].index) df_team3 = df_team3.reset_index() df_team4 = df_team3.drop('index', 1) #df = df.drop(df[df.score < 50].index) df_team4.head() # rename the columns of the team df dict_rename = {'yearID': 'Season', 'lgID': 'LeagueID', 'franchID': 'Team', 'divID': 'DivisionID', 'Rank':'Rank', 'DivWin':'Division_Win', 'WCWin': 'Wildcard_Win', 'LgWin': 'League_Win', 'WSWin': 'WorldSeries_Win', 'name': 'Name'} df_rank = df_team4.rename(dict_rename, axis = 1) df_rank.head() # change the California Angels to the Los Angeles Angels df_rank = df_rank.replace({'ANA' : 'LAA', 'FLA': 'MIA', 'TBD':'TBR'}) # set the index of the table df_rank = df_rank.set_index(['Season', 'Team']) df_rank = df_rank.sort_index(ascending=True, axis=0) df_rank.head() # concatenate both sheets together df_mlb_final = pd.concat([df_mlb, df_rank], axis=1) df_mlb_final.head() # change the name of the OD Salary column dict_rename = {'OD': 'OD_Salary'} df_mlb_final2 = df_mlb_final.rename(dict_rename, axis = 1) df_mlb_final2.head() df_mlb_final2.to_csv('final_mlb_data.csv') #s_bool = MLB_Data_Final['Team'] == 'CHC' #MLB2 = MLB_Data_Final.loc[s_bool, :] #MLB2.loc[:, ['Season', 'Team', 'Success_Score']] ```
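The success-score formula quoted in the first comment is never actually applied in this notebook. As a small follow-up sketch, it can be computed on the Lahman-derived ranking frame, which still carries the `W` and `L` columns; whether you attach it to `df_rank` or to the final merged frame is a design choice, and doing it before the concat avoids any ambiguity about duplicated win/loss columns.

```
# Success score from the comment at the top: win percentage scaled to 0-100.
# Applied to df_rank, which keeps the Lahman W (wins) and L (losses) columns.
df_rank['Success_Score'] = round((df_rank['W'] / (df_rank['W'] + df_rank['L'])) * 100, 2)
df_rank[['W', 'L', 'Success_Score']].head()
```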
## Feature Selection & Importance For Forecasting 1 to 30 Days Out

```
import sys, time, datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylab as pl
import seaborn as sns
from tqdm import tqdm
from time import sleep
from sklearn import metrics, linear_model
from xgboost import XGBRegressor, plot_importance, plot_tree
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split, TimeSeriesSplit
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.ensemble import RandomForestClassifier, AdaBoostRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn import preprocessing
from sklearn.pipeline import make_pipeline
from statsmodels.tsa.stattools import grangercausalitytests, adfuller
import ppscore as pps

import warnings
warnings.filterwarnings('ignore')

#..........................................................................
# Main Inputs
#..........................................................................
DEBUG = False
target_column = 'LUACTRUU_Index_OAS'

# Forecast timeperiod
max_forecast = 30
days_ahead = list(range(1, max_forecast + 1))

# Input file date ranges
date_start = datetime.date(2012, 8, 1)
date_end = datetime.date(2020, 7, 30)

# Default Models
default_CART = DecisionTreeRegressor(random_state=1)
default_XGB = XGBRegressor(n_estimators=1000, random_state=1)
scaler = MinMaxScaler(feature_range=(0, 1))

# Read File
file_buffer = "./Economic_data_clean_20200801.xlsx"

#..........................................................................
# Output Methods
#..........................................................................
def predictive_power(dh=None, y_value=None):
    for target, feats in data_dict.items():
        if dh is None:
            if y_value is None:
                y_value = "{0}_{1}D_Forecast".format(target_column, target)
            predictors_df = pps.predictors(feats, y=target_column)
            predictors_df = predictors_df[predictors_df['ppscore'] > 0.5]
            f, ax = plt.subplots(figsize=(16, 5))
            ax.set_title("Predictive Power for: {0}".format(y_value))
            sns.barplot(data=predictors_df, y="x", x="ppscore", palette="rocket")
        else:
            if target == dh:
                if y_value is None:
                    y_value = "{0}_{1}D_Forecast".format(target_column, dh)
                predictors_df = pps.predictors(feats, y=target_column)
                predictors_df = predictors_df[predictors_df['ppscore'] > 0.5]
                f, ax = plt.subplots(figsize=(16, 5))
                ax.set_title("Predictive Power for: {0}".format(y_value))
                sns.barplot(data=predictors_df, y="x", x="ppscore", palette="rocket")


def feature_importance_CART(dh=None):
    for target, feats in feature_CART.items():
        keys = feats.keys()
        values = feats.values()
        if dh is None:
            f, ax = plt.subplots(figsize=(16, 5))
            ax.set_title("Feature Importance for {0} Day Forecast: {1}".format(target, target_column))
            sns.barplot(y=list(keys), x=list(values), palette="rocket")
        else:
            if target == dh:
                f, ax = plt.subplots(figsize=(16, 5))
                ax.set_title("Feature Importance for {0} Day Forecast: {1}".format(target, target_column))
                sns.barplot(y=list(keys), x=list(values), palette="rocket")


def feature_importance_XGBOOST(dh=None):
    for target, feats in feature_XGBOOST.items():
        keys = feats.keys()
        values = feats.values()
        if dh is None:
            f, ax = plt.subplots(figsize=(16, 5))
            ax.set_title("Feature Importance for {0} Day Forecast: {1}".format(target, target_column))
            sns.barplot(y=list(keys), x=list(values), palette="rocket")
        else:
            if target == dh:
                f, ax = plt.subplots(figsize=(16, 5))
                ax.set_title("Feature Importance for {0} Day Forecast: {1}".format(target, target_column))
                sns.barplot(y=list(keys), x=list(values), palette="rocket")


def feature_imp_over_time_CART():
    df = pd.DataFrame(features_over_time_CART, columns=features_over_time_CART.keys())
    column_names = list(df.columns)
    df["day"] = days_ahead
    remove_list = []
    for feat in column_names:
        usefulness = df[feat].max()
        if usefulness < 0.2:
            if(DEBUG): print("feat: {0}, useful-max: {1}".format(feat, usefulness))
            df = df.drop([feat], axis=1)
            if(DEBUG): print("...removing {0}".format(feat))
            remove_list.append(feat)
    for x in remove_list:
        column_names.remove(x)

    sns.set_palette(sns.color_palette("rocket"))
    f, ax = plt.subplots(figsize=(14, 6))
    for feat in column_names:
        sns.lineplot(data=df, x='day', y=df[feat], dashes=False).set_title('{0} Feature Importance By Time'.format(target_column))
    sns.set_style("whitegrid")
    ax.grid(True)
    ax.set(xlabel='Days Out', ylabel='Predictive Importance')
    ax.set(xticks=days_ahead)
    ax.legend(column_names)


def feature_imp_over_time_XGB():
    df = pd.DataFrame(features_over_time_XGB, columns=features_over_time_XGB.keys())
    column_names = list(df.columns)
    df["day"] = days_ahead
    remove_list = []
    for feat in column_names:
        usefulness = df[feat].max()
        if usefulness < 0.2:
            if(DEBUG): print("feat: {0}, useful-max: {1}".format(feat, usefulness))
            df = df.drop([feat], axis=1)
            if(DEBUG): print("...removing {0}".format(feat))
            remove_list.append(feat)
    for x in remove_list:
        column_names.remove(x)

    sns.set_palette(sns.color_palette("rocket"))
    f, ax = plt.subplots(figsize=(14, 6))
    for feat in column_names:
        sns.lineplot(data=df, x='day', y=df[feat], dashes=False).set_title('{0} Feature Importance By Time'.format(target_column))
    sns.set_style("whitegrid")
    ax.grid(True)
    ax.set(xlabel='Days Out', ylabel='Predictive Importance')
    ax.set(xticks=days_ahead)
    ax.legend(column_names)


# Clean-up
data_dict = {}
model_dict = {}
feature_CART = {}
feature_XGBOOST = {}
features_over_time_CART = None
features_over_time_XGB = None

# Set time period for analysis
session_state = pd.read_excel(file_buffer)
session_state['Dates'] = pd.to_datetime(session_state['Dates']).dt.date
session_state = session_state[(session_state['Dates'] >= date_start) & (session_state['Dates'] <= date_end)]
csv_data = session_state.copy()
session_state = None
print("Ready!")

#..........................................................................
# Pre-Processing
#..........................................................................
if(DEBUG): print("Preprocessing data...\n")

# Cast earnings revision columns to float16
csv_data['EARN_DOWN'] = csv_data['EARN_DOWN'].astype(np.float16)
csv_data['EARN_UP'] = csv_data['EARN_UP'].astype(np.float16)

# CDX Index Technicals
# Relative momentum: (short MA - long MA) / long MA
csv_data['CDX_HY_momentum_10_30'] = \
    (csv_data['CDX_HY'].rolling(window=10).mean() - csv_data['CDX_HY'].rolling(window=30).mean()) \
    / csv_data['CDX_HY'].rolling(window=30).mean()
csv_data['CDX_HY_momentum_30D_MA'] = csv_data['CDX_HY'].rolling(window=20).mean()
csv_data['CDX_HY_30D_STD'] = csv_data['CDX_HY'].rolling(window=20).std()
csv_data['CDX_HY_upper_band'] = csv_data['CDX_HY_momentum_30D_MA'] + (csv_data['CDX_HY_30D_STD'] * 2)
csv_data['CDX_HY_lower_band'] = csv_data['CDX_HY_momentum_30D_MA'] - (csv_data['CDX_HY_30D_STD'] * 2)

csv_data['CDX_IG_momentum_10_30'] = \
    (csv_data['CDX_IG'].rolling(window=10).mean() - csv_data['CDX_IG'].rolling(window=30).mean()) \
    / csv_data['CDX_IG'].rolling(window=30).mean()
csv_data['CDX_IG_momentum_30D_MA'] = csv_data['CDX_IG'].rolling(window=20).mean()
csv_data['CDX_IG_30D_STD'] = csv_data['CDX_IG'].rolling(window=20).std()
csv_data['CDX_IG_upper_band'] = csv_data['CDX_IG_momentum_30D_MA'] + (csv_data['CDX_IG_30D_STD'] * 2)
csv_data['CDX_IG_lower_band'] = csv_data['CDX_IG_momentum_30D_MA'] - (csv_data['CDX_IG_30D_STD'] * 2)

# VIX Technicals
csv_data['VIX_INDEX_5_15'] = \
    (csv_data['VIX_INDEX'].rolling(window=5).mean() - csv_data['VIX_INDEX'].rolling(window=15).mean()) \
    / csv_data['VIX_INDEX'].rolling(window=15).mean()
csv_data['VIX_INDEX_10_30'] = \
    (csv_data['VIX_INDEX'].rolling(window=10).mean() - csv_data['VIX_INDEX'].rolling(window=30).mean()) \
    / csv_data['VIX_INDEX'].rolling(window=30).mean()
csv_data['VIX_INDEX_10_90'] = \
    (csv_data['VIX_INDEX'].rolling(window=10).mean() - csv_data['VIX_INDEX'].rolling(window=90).mean()) \
    / csv_data['VIX_INDEX'].rolling(window=90).mean()
csv_data['VIX_INDEX_30_90'] = \
    (csv_data['VIX_INDEX'].rolling(window=30).mean() - csv_data['VIX_INDEX'].rolling(window=90).mean()) \
    / csv_data['VIX_INDEX'].rolling(window=90).mean()
csv_data['VIX_30D_MA'] = csv_data['VIX_INDEX'].rolling(window=20).mean()
csv_data['VIX_30D_STD'] = csv_data['VIX_INDEX'].rolling(window=20).std()
csv_data['VIX_upper_band'] = csv_data['VIX_30D_MA'] + (csv_data['VIX_30D_STD'] * 2)
csv_data['VIX_lower_band'] = csv_data['VIX_30D_MA'] - (csv_data['VIX_30D_STD'] * 2)

# IG Index Technicals
csv_data['INDEX_IG_momentum_5_15'] = \
    (csv_data['LUACTRUU_Index_OAS'].rolling(window=5).mean() - csv_data['LUACTRUU_Index_OAS'].rolling(window=15).mean()) \
    / csv_data['LUACTRUU_Index_OAS'].rolling(window=15).mean()
csv_data['INDEX_IG_momentum_10_30'] = \
    (csv_data['LUACTRUU_Index_OAS'].rolling(window=10).mean() - csv_data['LUACTRUU_Index_OAS'].rolling(window=30).mean()) \
    / csv_data['LUACTRUU_Index_OAS'].rolling(window=30).mean()
csv_data['INDEX_IG_momentum_10_90'] = \
    (csv_data['LUACTRUU_Index_OAS'].rolling(window=10).mean() - csv_data['LUACTRUU_Index_OAS'].rolling(window=90).mean()) \
    / csv_data['LUACTRUU_Index_OAS'].rolling(window=90).mean()
csv_data['INDEX_IG_momentum_30_90'] = \
    (csv_data['LUACTRUU_Index_OAS'].rolling(window=30).mean() - csv_data['LUACTRUU_Index_OAS'].rolling(window=90).mean()) \
    / csv_data['LUACTRUU_Index_OAS'].rolling(window=90).mean()
csv_data['INDEX_IG_30D_MA'] = csv_data['LUACTRUU_Index_OAS'].rolling(window=20).mean()
csv_data['INDEX_IG_30D_STD'] = csv_data['LUACTRUU_Index_OAS'].rolling(window=20).std()
csv_data['INDEX_IG_upper_band'] = csv_data['INDEX_IG_30D_MA'] + (csv_data['INDEX_IG_30D_STD'] * 2)
csv_data['INDEX_IG_lower_band'] = csv_data['INDEX_IG_30D_MA'] - (csv_data['INDEX_IG_30D_STD'] * 2)

# HY Index Technicals
csv_data['INDEX_HY_momentum_5_15'] = \
    (csv_data['LF98TRUU_Index_OAS'].rolling(window=5).mean() - csv_data['LF98TRUU_Index_OAS'].rolling(window=15).mean()) \
    / csv_data['LF98TRUU_Index_OAS'].rolling(window=15).mean()
csv_data['INDEX_HY_momentum_10_30'] = \
    (csv_data['LF98TRUU_Index_OAS'].rolling(window=10).mean() - csv_data['LF98TRUU_Index_OAS'].rolling(window=30).mean()) \
    / csv_data['LF98TRUU_Index_OAS'].rolling(window=30).mean()
csv_data['INDEX_HY_momentum_10_90'] = \
    (csv_data['LF98TRUU_Index_OAS'].rolling(window=10).mean() - csv_data['LF98TRUU_Index_OAS'].rolling(window=90).mean()) \
    / csv_data['LF98TRUU_Index_OAS'].rolling(window=90).mean()
csv_data['INDEX_HY_momentum_30_90'] = \
    (csv_data['LF98TRUU_Index_OAS'].rolling(window=30).mean() - csv_data['LF98TRUU_Index_OAS'].rolling(window=90).mean()) \
    / csv_data['LF98TRUU_Index_OAS'].rolling(window=90).mean()
csv_data['INDEX_HY_30D_MA'] = csv_data['LF98TRUU_Index_OAS'].rolling(window=20).mean()
csv_data['INDEX_HY_30D_STD'] = csv_data['LF98TRUU_Index_OAS'].rolling(window=20).std()
csv_data['INDEX_HY_upper_band'] = csv_data['INDEX_HY_30D_MA'] + (csv_data['INDEX_HY_30D_STD'] * 2)
csv_data['INDEX_HY_lower_band'] = csv_data['INDEX_HY_30D_MA'] - (csv_data['INDEX_HY_30D_STD'] * 2)

print("Finished Adding New Features")

#..........................................................................
# Customize Dataset For Each Forecasting Period
#..........................................................................
# For each look-ahead period
for dh in tqdm(days_ahead):
    sleep(0.1)
    complete_data = csv_data.copy()

    # Add predictive column for days ahead (dh)
    forecast_name = '{0}_{1}D_Forecast'.format(target_column, dh)
    if(DEBUG): print("Adding {0} ".format(forecast_name))
    complete_data[forecast_name] = complete_data[target_column].shift(dh)

    # Hold original data set
    complete_data = complete_data.dropna()
    Y_target = complete_data[forecast_name]

    # Remove target data from features
    X = complete_data.copy()
    X = X.drop([forecast_name, 'Dates'], axis=1)

    # Record column names
    X_feature_cols = X.columns
    if features_over_time_CART is None:
        features_over_time_CART = {feat: [] for feat in X_feature_cols}
    if features_over_time_XGB is None:
        features_over_time_XGB = {feat: [] for feat in X_feature_cols}

    # Scale and add back to df with column names
    X_scaled = scaler.fit_transform(X)
    X_scaled = pd.DataFrame(X_scaled, columns=X_feature_cols)
    data_dict[dh] = complete_data.copy()

    #......................................................................
    # Build & Fit Models For Feature Selection
    #......................................................................
    # Fit the models
    model_CART = default_CART
    model_XGB = default_XGB

    if(DEBUG): print("Fitting CART: {0}...".format(forecast_name))
    model_CART.fit(X_scaled, Y_target)
    importances = model_CART.feature_importances_
    feats = {}
    for feature, importance in zip(X_feature_cols, importances):
        # Track every feature so the over-time series all have one entry per horizon
        features_over_time_CART[feature].append(importance)
        if importance > 0.05:
            feats[feature] = importance
    feats = dict(sorted(feats.items(), key=lambda x: x[1], reverse=True))
    feature_CART[dh] = feats

    if(DEBUG): print("Fitting XGBOOST: {0}...".format(forecast_name))
    model_XGB.fit(X_scaled, Y_target)
    importances = model_XGB.feature_importances_
    feats = {}
    for feature, importance in zip(X_feature_cols, importances):
        features_over_time_XGB[feature].append(importance)
        if importance > 0.05:
            feats[feature] = importance
    feats = dict(sorted(feats.items(), key=lambda x: x[1], reverse=True))
    feature_XGBOOST[dh] = feats
    model_dict[forecast_name] = model_XGB

print('Done!')

feature_importance_CART(30)
feature_importance_XGBOOST(30)
predictive_power(30)
feature_imp_over_time_CART()
feature_imp_over_time_XGB()
```
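The technical-indicator block above builds relative-momentum and Bollinger-style band features. A self-contained toy sketch of the same constructions follows, on a synthetic series with illustrative column names (not part of the original dataset), mainly to make the intended (short MA - long MA) / long MA form of the momentum features explicit:

```
import numpy as np
import pandas as pd

# Synthetic price-like series purely for illustration
rng = np.random.default_rng(0)
toy = pd.DataFrame({"px": 100 + np.cumsum(rng.normal(size=250))})

short_ma = toy["px"].rolling(window=10).mean()
long_ma = toy["px"].rolling(window=30).mean()

# Relative momentum: (short MA - long MA) / long MA.
# Without the outer parentheses the expression would reduce to short_ma - 1.
toy["momentum_10_30"] = (short_ma - long_ma) / long_ma

# Bollinger-style bands: 20-day mean +/- 2 rolling standard deviations
ma20 = toy["px"].rolling(window=20).mean()
sd20 = toy["px"].rolling(window=20).std()
toy["upper_band"] = ma20 + 2 * sd20
toy["lower_band"] = ma20 - 2 * sd20

print(toy.dropna().head())
```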
```
!wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb
!dpkg -i cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb
!apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub
!apt update -q
!apt install cuda gcc-6 g++-6 -y -q
!ln -s /usr/bin/gcc-6 /usr/local/cuda/bin/gcc
!ln -s /usr/bin/g++-6 /usr/local/cuda/bin/g++
!curl -sSL "https://julialang-s3.julialang.org/bin/linux/x64/1.3/julia-1.3.1-linux-x86_64.tar.gz" -o julia.tar.gz
!tar -xzf julia.tar.gz -C /usr --strip-components 1
!rm -rf julia.tar.gz*
!julia -e 'using Pkg; pkg"add IJulia; precompile"'
!nvidia-smi

!git clone https://github.com/KonstantinosChatziantoniou/JuliaCUDA_GridKNN.git
!cp /content/JuliaCUDA_GridKNN/Julia/Code/* ./

## RESTART KERNEL

using Pkg
Pkg.add("CUDAdrv")
Pkg.add("CUDAnative")
Pkg.add("CuArrays")
Pkg.add("StaticArrays")
Pkg.add("BenchmarkTools")

using CuArrays, CUDAnative, CUDAdrv
using Statistics, BenchmarkTools

include("preprocess.jl")

function RunKernel(len, blocks)
    numOfPoints = len
    numOfQueries = len
    dimensions = 3
    numOfGrids = blocks   # per dimension
    Points = rand(Float32, numOfPoints, dimensions)
    Queries = rand(Float32, numOfQueries, dimensions)
    BlockOfPoint = AssignPointsToBlock(Points, numOfGrids, dimensions)
    BlockOfQuery = AssignPointsToBlock(Queries, numOfGrids, dimensions)
    PointsPerBlock, IntegralPointsPerBlock = CountPointsPerBlock(Points, numOfGrids, dimensions)
    QueriesPerBlock, IntegralQueriesPerBlock = CountPointsPerBlock(Queries, numOfGrids, dimensions)
    OrderedPoints = ReorderPointsByBlock(Points, BlockOfPoint)
    OrderedQueries = ReorderPointsByBlock(Queries, BlockOfQuery)
    println("RUN: ", len)
    bnc = @benchmark begin
        gpu_idxs, gpu_dists = cuda_knn($OrderedPoints, $OrderedQueries, $PointsPerBlock, $QueriesPerBlock,
            $IntegralPointsPerBlock, $IntegralQueriesPerBlock, $numOfPoints, $numOfQueries, $numOfGrids, $dimensions)
        println(gpu_idxs[1:5])
        println(gpu_dists[1:5])
    end seconds=60 samples=4   ## Change here for benchmark limit
    return bnc
end

# Run once to initialize benchmark holder
#benchLengths = [1<<i for i = 18:24]
#benchBlocks = [1<<i for i = 2:8]
suite = BenchmarkGroup()
benchLengths = 21:24; println(benchLengths[:])
benchBlocks = 3:5; println(benchBlocks[:])
kernel_files = ["multi_kernel", "multi_kernel_check", "single_kernel", "single_kernel_check"]
for k in kernel_files
    suite[k] = BenchmarkGroup()   # one benchmark group per kernel implementation
end

# RUN to print saved benchmarks
suite

current_kernel = kernel_files[1]   ## <- Change the number for different implementation
include(string(current_kernel, ".jl"))
l = 24        ## <- Change 'l' for different problem size
for b = 4:6   ## <- Change 'b' for different block size range
    suite[current_kernel][l, b] = RunKernel(1<<l, 1<<b)
end

## Run to save benchmarks to file. Download it manually
BenchmarkTools.save("kernels.json", suite)
```
# Map Data Processing and Visualization

- Topics: reading, processing, reprojecting and plotting geographic data
- Tools: geopandas, descartes, mapclassify, cartopy

## How many neighbours does each Chinese provincial-level unit have?

```
import geopandas as gpd

s = gpd.read_file('../shp_file/china.shp', encoding='utf8')
s
s.OWNER.unique(), len(s.OWNER.unique())
```

First we define a function that shortens each province name to two or three characters.

```
def shorten_prov_names(prov_name: str) -> str:  # Type hint, valid for python version >= 3.5
    if ('自' in prov_name or '特' in prov_name) and prov_name != '内蒙古自治区':
        name = prov_name[:2]
    else:
        name = prov_name.replace('省', '').replace('市', '').replace('自治区', '')
    return name

s.OWNER = s.OWNER.apply(shorten_prov_names)
```

Next, we use the `dissolve` method to merge the multiple polygons, i.e. to combine the several entries belonging to each provincial-level unit into one.

```
provinces = s.dissolve(by='OWNER').reset_index()
provinces = provinces.drop(['AREA', 'BOUND_A_', 'BOUND_A_ID', 'NAME',
                            'SOC', 'FCNAME', 'FENAME', 'PERIMETER'], axis=1)
```

Finally, we use the geometric predicate `touches` of `GeoDataFrame.geometry` to decide whether two provincial-level units are adjacent, and insert two new columns into `provinces` that store each province's list of neighbours and their total number.

```
for index, row in provinces.iterrows():
    neighbors = provinces[provinces.geometry.touches(row['geometry'])].OWNER.tolist()
    #neighbors = neighbors.remove(row.OWNER)
    provinces.at[index, "neighbors"] = ", ".join(neighbors)
    provinces.at[index, "neighbors_num"] = len(neighbors)

provinces["neighbors_num"] = provinces["neighbors_num"].astype('int')
provinces.sort_values(by='neighbors_num', ascending=False)
```

You can compare this result with https://www.sohu.com/a/355921579_353978 to check whether it is correct.

## Labelling and colouring the map

```
import matplotlib.pyplot as plt
import cartopy.crs as ccrs

#crs = ccrs.LambertConformal(central_longitude=105.)  #AzimuthalEqualArea()
crs = ccrs.LambertAzimuthalEqualArea(central_longitude=105.)

fig, ax = plt.subplots(figsize=(10, 8), subplot_kw={'projection': crs})
provinces.plot(ax=ax, facecolor='grey', edgecolor='white',
               linestyle='--', alpha=0.8, transform=ccrs.PlateCarree())
ax.patch.set_visible(False)
ax.set_extent([75, 130, 15, 54])
provinces.geometry.representative_point().plot(ax=ax, facecolor='white', edgecolor='black',
                                               marker='*', markersize=100, linewidth=0.5,
                                               transform=ccrs.PlateCarree())

import matplotlib as mpl

# Use a font file that exists on your own machine
myfont = mpl.font_manager.FontProperties(fname='/System/Library/Fonts/STHeiti Light.ttc')

fig, ax = plt.subplots(figsize=(10, 8), subplot_kw={'projection': crs})
provinces.plot(ax=ax, facecolor='grey', edgecolor='white',
               linestyle='--', alpha=0.8, transform=ccrs.PlateCarree())
ax.patch.set_visible(False)
ax.set_extent([75, 130, 15, 54])
for index, row in provinces.iterrows():
    ax.text(row.geometry.representative_point().x,
            row.geometry.representative_point().y,
            row['OWNER'], ha="center", va="center", size=8,
            transform=ccrs.PlateCarree(), fontproperties=myfont)
```

Now let's fetch COVID-19 confirmed-case information and draw each province's cumulative confirmed count on the map.

Tencent provides real-time epidemic data; the actual URL and parameters of its data feed can be worked out from that web page using the developer tools of Chrome or Firefox.

```
import requests, json

url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
req = requests.get(url)
data = req.json()
data.keys()
data['data']
china_data = json.loads(data['data'])
china_data.keys()
china_data['areaTree']
china_data['areaTree'][0].keys()
for item in china_data['areaTree'][0].get('children'):
    print(item['name'], ': ', item['total']['confirm'])

prov_confirm = dict()
prov_confirm = {item['name']: int(item['total']['confirm'])
                for item in china_data['areaTree'][0].get('children')}
prov_confirm
```

`apply()` is similar to `map()` but can operate on several columns at once. Here we use `apply()` to put each province's confirmed count into `provinces` as a new column.

```
provinces['confirmed_cases'] = provinces['OWNER'].apply(lambda name: prov_confirm[name])
provinces
```

### Parameters of `GeoDataFrame.plot`
- `column`: the values mapped to the map's visual variable; either a column name of the `GeoDataFrame` or a sequence of values matching the geometries one-to-one; defaults to `None`
- `cmap`: the colour scheme to use
- `categorical`: `bool`; `True` treats the mapped column as categorical. This is meaningful for numeric columns and is set to `True` automatically when the column is of categorical type
- `legend`: `bool`; when `True`, a legend is added to the map
- `scheme`: `str`; the classification scheme used to bin the values of a choropleth map
- `k`: `int`; the number of colour classes
- `vmin`, `vmax`: `None` or `float`; lower and upper bounds of the value range used for colouring; by default the minimum/maximum of the data
- `legend_kwds`: dict of keyword arguments for customising the legend
- `classification_kwds`: dict of keyword arguments for customising the colour classification
- `missing_kwds`: dict of keyword arguments controlling how missing values are drawn

- A choropleth map bins a chosen attribute into classes and maps each class onto the colour of the corresponding geometry.
- Here we draw the number of confirmed cases per province as a choropleth map.
- To bin the case counts we need the `scheme` parameter mentioned above, which calls the external package `mapclassify`.
- `mapclassify` offers several classification methods, including natural breaks (`NaturalBreaks`), box plot (`BoxPlot`), equal intervals (`EqualInterval`), quantiles (`Quantiles`) and user defined (`UserDefined`).
- We use the user-defined scheme and split the case counts into bins such as 9, 99, 999, 9999 (see the short `mapclassify` sketch after the references below).

```
# Use a font file that exists on your own machine
font_name = '/System/Library/Fonts/STHeiti Light.ttc'
myfont = mpl.font_manager.FontProperties(fname=font_name)
plt.rcParams["font.family"] = "Heiti TC"  # the font file name and the font name differ

fig, ax = plt.subplots(figsize=(12, 12), subplot_kw={'projection': crs})
provinces.plot(ax=ax, column='confirmed_cases', cmap='Reds', legend=True,
               scheme='UserDefined',
               classification_kwds={'bins': [10, 100, 500, 1000, 10000]},
               legend_kwds={'loc': 'lower left', 'title': '累计确诊数量',
                            'shadow': True, 'fancybox': True},
               #'prop':{'font.family':'Heiti TC'}},
               transform=ccrs.PlateCarree())
ax.patch.set_visible(False)
ax.set_extent([75, 130, 15, 54])
for index, row in provinces.iterrows():
    ax.text(row.geometry.representative_point().x,
            row.geometry.representative_point().y,
            row['OWNER'], ha="center", va="center", size=8,
            transform=ccrs.PlateCarree(), fontproperties=myfont)
```

### A Nightingale rose chart of the epidemic

```
import numpy as np

sorted_pro = provinces.sort_values(by='confirmed_cases', ascending=False)
cases = np.log(sorted_pro.confirmed_cases.values)
N = len(sorted_pro)
theta = np.arange(0, 2*np.pi, 2*np.pi/N)
width = 2*np.pi / (N)
num = sorted_pro.confirmed_cases
colors = plt.cm.plasma(num/np.max(num[1:]))

fig, ax = plt.subplots(figsize=(12, 12), subplot_kw={'projection': 'polar'})
ax.set_theta_zero_location("N")
bars = ax.bar(theta, cases, width=width, bottom=0, color=colors, alpha=0.5)
ax.axis('off')

rotations = np.rad2deg(theta)
for x, bar, rotation, label, label_num in zip(theta, bars, rotations,
                                              sorted_pro.OWNER.values,
                                              sorted_pro.confirmed_cases.values):
    lab = ax.text(x, bar.get_height(), label, ha='center', va='bottom',
                  fontproperties=myfont, rotation=rotation, rotation_mode="anchor")
    lab = ax.text(x, bar.get_height()-0.2, str(label_num), ha='center', va='top',
                  rotation=rotation, rotation_mode="anchor")
```

### Exercises
- The returned data also contains city- and county-level figures under each provincial unit. Try plotting the distribution of confirmed cases for your own province (contact me if you need city/county level maps).
- https://view.inews.qq.com/g2/getOnsInfo?name=disease_foreign returns case data for other countries. Try plotting the worldwide case distribution.

## References
https://www.cnblogs.com/feffery/p/12361421.html (the articles on this blog are highly recommended)
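The `scheme` parameter above delegates the binning to `mapclassify`, so the classification can also be inspected directly before plotting. A minimal sketch, assuming the `provinces` GeoDataFrame with its `confirmed_cases` column built above:

```
import mapclassify

# User-defined bins, matching the ones passed via classification_kwds above
user_defined = mapclassify.UserDefined(provinces["confirmed_cases"],
                                       bins=[10, 100, 500, 1000, 10000])
print(user_defined)          # class intervals and counts per bin
print(user_defined.yb[:5])   # bin index assigned to the first few provinces

# An automatic scheme for comparison
quantiles = mapclassify.Quantiles(provinces["confirmed_cases"], k=5)
print(quantiles)
```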
# Assignment 3

## B. Clustering cancer types based on RNA sequence expression levels

The goal of this assignment is to cluster 5 cancer types based on RNA sequence expression levels. The data are first reduced to 2 dimensions with a graph-based spectral method and then clustered with spectral clustering.

### 1. Preprocessing

The features of the samples are stored in data.csv and their labels in labels.csv. The files are read and their contents are stored in pandas dataframes.

```
import urllib.request
import pandas as pd
import numpy as np
import tarfile
import os

np.random.seed(0)

folder = 'TCGA-PANCAN-HiSeq-801x20531'
filename = folder + '.tar.gz'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00401/' + filename
data_filename = os.path.join(folder, 'data.csv')
labels_filename = os.path.join(folder, 'labels.csv')

if not os.path.exists(data_filename) or not os.path.exists(labels_filename):
    print('Downloading file...')
    urllib.request.urlretrieve(url, filename)
    print('Done.')
    tar = tarfile.open(filename, "r:gz")
    tar.extractall()
    tar.close()

df_x = pd.read_csv(data_filename)
df_y = pd.read_csv(labels_filename)
```

The first 5 samples are shown below.

```
df_x.head()
df_y.head()
df_x.info()
df_y.info()
```

We drop the Unnamed column from the dataframe because it is just a running index and offers no useful information for the classification.

```
df_x.drop(df_x.columns[0], axis=1, inplace=True)
df_x.head()
df_y.drop(df_y.columns[0], axis=1, inplace=True)
df_y.head()
```

Below we see the histogram of the classes.

```
df_y = df_y['Class']
df_y.hist()
```

The dataset is split into a training set (60%) and a test set (40%) using StratifiedShuffleSplit, which guarantees that both sets have the same distribution with respect to the target variable.

```
from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt

sss = StratifiedShuffleSplit(n_splits=1, test_size=0.4, random_state=0)
for train_index, test_index in sss.split(df_x, df_y):
    df_train_x = df_x.loc[train_index]
    df_train_y = df_y.loc[train_index]
    df_test_x = df_x.loc[test_index]
    df_test_y = df_y.loc[test_index]

df_train_y.hist()
df_test_y.hist()
plt.show()
```

The training set and the test set are stored in numpy arrays.

```
from sklearn.preprocessing import LabelEncoder

x_train = df_train_x.to_numpy()
y_train = df_train_y.to_numpy()
x_test = df_test_x.to_numpy()
y_test = df_test_y.to_numpy()

le = LabelEncoder()
y_train = le.fit_transform(y_train)
y_test = le.transform(y_test)

print('x_train.shape =', x_train.shape)
print('y_train.shape =', y_train.shape)
print('x_test.shape =', x_test.shape)
print('y_test.shape =', y_test.shape)
```

The data are standardized by subtracting the mean and dividing by the standard deviation.

```
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
```

### 2. Embedding

Next, a spectral embedding with an rbf affinity transforms the data into a two-dimensional space. We choose gamma=1e-4. This value gives an interesting result for the subsequent clustering; for larger values the groups are completely separated with very small spread, so the problem could easily be solved even with plain k-means.

```
from sklearn.manifold import SpectralEmbedding

embedding = SpectralEmbedding(n_components=2, affinity='rbf', gamma=1e-4)
x_train_embedded = embedding.fit_transform(x_train)

plt.title('Spectral embedding')
plt.scatter(x_train_embedded[:, 0], x_train_embedded[:, 1], c=y_train,
            cmap='tab10', marker='+', alpha=0.8)
plt.colorbar()
plt.show()
```

### 3. Algorithms

#### 3.1 MySpectralClustering

Below is an implementation of spectral clustering. First the similarity graph is built from the k nearest neighbours, which gives the adjacency matrix $G$. This matrix is not symmetric, because if a sample A has B among its neighbours the converse does not necessarily hold. It is made symmetric with $S = \frac{1}{2} (G + G^T)$; this is the similarity matrix. Then an eigendecomposition is performed on the unnormalized Laplacian matrix $L = D - S$ or on the normalized Laplacian matrix $L_{sym} = D^{-1/2}LD^{-1/2}$. The eigenvectors form the columns of the new sample matrix, i.e. the samples transformed into a new space. Finally the clustering is carried out with k-means.

```
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster import KMeans
import scipy
from scipy.linalg import eigh

class MySpectralClustering(object):
    def __init__(self, n_clusters=8, n_components=None, n_neighbors=10,
                 normed=True, random_state=None, n_jobs=None):
        self.n_clusters = n_clusters
        self.n_components = n_components
        self.n_neighbors = n_neighbors
        self.normed = normed
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.lambdas = None

    def set_params(self, **params):
        if not params:
            return self
        self.n_clusters = params.get('n_clusters', self.n_clusters)
        self.n_components = params.get('n_clusters', self.n_components)
        self.n_neighbors = params.get('n_neighbors', self.n_neighbors)
        self.normed = params.get('normed', self.normed)
        self.random_state = params.get('random_state', self.random_state)
        self.n_jobs = params.get('n_jobs', self.n_jobs)
        return self

    def fit_predict(self, X):
        G = kneighbors_graph(X, n_neighbors=self.n_neighbors, n_jobs=self.n_jobs)
        G = G.toarray()
        S = 0.5*(G + G.T)
        L, d = scipy.sparse.csgraph.laplacian(S, normed=self.normed, return_diag=True)
        if self.n_components is None:
            self.n_components = self.n_clusters
        w, v = eigh(L)
        indices = np.argsort(w)
        w = w[indices]
        v = v[:, indices]
        v = v[:, :self.n_components]
        self.lambdas = w
        if self.normed:
            v /= np.sqrt(d).reshape(-1, 1)
        kmeans = KMeans(n_clusters=self.n_clusters, random_state=self.random_state)
        labels = kmeans.fit_predict(v)
        return labels
```

### 4. Clustering

Several variants of spectral clustering are applied, with n_clusters ranging from 2 to 9. For every value of n_clusters we plot the resulting groups and report Homogeneity, Completeness and V-measure. Homogeneity measures the extent to which each cluster contains only members of a single class. Completeness measures the extent to which all samples of a class end up in the same cluster. V-measure is the harmonic mean of Homogeneity and Completeness. The clustering results are then used to classify the test set with a Nearest Class Centroid classifier; each cluster is assigned its most frequent class label. After the classification we report Accuracy, Precision, Recall, F1 and the Confusion Matrix. We also plot the first eigenvalues and estimate the optimal value of n_clusters with the eigengap heuristic.

```
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import homogeneity_completeness_v_measure
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import NearestCentroid
from sklearn.utils import shuffle
from time import time

all_results = {}

def do_the_clustering(clustering_str, clustering, X, y, verbose=0):
    all_n_clusters = range(2, 10)
    experiments_num = len(all_n_clusters)
    homogeneity = np.zeros(experiments_num)
    completeness = np.zeros(experiments_num)
    v_measure = np.zeros(experiments_num)
    times = np.zeros(experiments_num)
    accuracy = np.zeros(experiments_num)
    recall = np.zeros(experiments_num)
    precision = np.zeros(experiments_num)
    f1 = np.zeros(experiments_num)
    most_frequent_labels = []
    cm = None

    plt.subplots(4, 2, figsize=(15, 25))
    plt.subplots_adjust(hspace=0.3)
    for i, n_clusters in enumerate(all_n_clusters):
        clustering.set_params(n_clusters=n_clusters)
        t1 = time()
        y_pred = clustering.fit_predict(X)
        t2 = time()
        times[i] = t2 - t1
        homogeneity[i], completeness[i], v_measure[i] = homogeneity_completeness_v_measure(y, y_pred)

        plt.subplot(4, 2, i+1)
        plt.title('n_clusters = {}'.format(n_clusters))
        plt.scatter(X[:, 0], X[:, 1], c=y_pred, cmap='tab10', marker='+', alpha=0.8)
        plt.colorbar()

        if verbose > 0:
            print('n_clusters = {}, time = {:.1f} sec'.format(n_clusters, times[i]))

        y_train_new = np.copy(y_pred)
        for label in range(n_clusters):
            indices = (y_pred == label)
            counts = np.bincount(y.astype(int)[indices])
            val = np.argmax(counts)
            y_train_new[indices] = val

        clf = NearestCentroid()
        clf.fit(x_train, y_train_new)
        y_pred_test = clf.predict(x_test)
        accuracy[i] = accuracy_score(y_test, y_pred_test)
        precision[i] = precision_score(y_test, y_pred_test, average='weighted', zero_division=0)
        recall[i] = recall_score(y_test, y_pred_test, average='weighted')
        f1[i] = f1_score(y_test, y_pred_test, average='weighted')
        if n_clusters == 5:
            cm = confusion_matrix(y_test, y_pred_test)
    plt.show()

    lambdas = clustering.lambdas[:20] if hasattr(clustering, 'lambdas') else None
    results = {
        'all_n_clusters': all_n_clusters,
        'homogeneity': homogeneity,
        'completeness': completeness,
        'v_measure': v_measure,
        'times': times,
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1': f1,
        'cm': cm,
        'most_frequent_labels': most_frequent_labels,
        'lambdas': lambdas
    }
    all_results[clustering_str] = results
    return results


def plot_clustering_results(results):
    all_n_clusters = results['all_n_clusters']
    homogeneity = results['homogeneity']
    completeness = results['completeness']
    v_measure = results['v_measure']
    times = results['times']

    plt.subplots(1, 2, figsize=(15, 5))
    plt.subplots_adjust(wspace=0.4)
    plt.subplot(1, 2, 1)
    plt.title('Clustering scores')
    plt.plot(all_n_clusters, homogeneity, label='Homogeneity')
    plt.plot(all_n_clusters, completeness, label='Completeness')
    plt.plot(all_n_clusters, v_measure, label='V-Measure')
    plt.ylabel('Score')
    plt.xlabel('n_clusters')
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.title('Clustering time')
    plt.plot(all_n_clusters, times)
    plt.ylabel('Time (sec)')
    plt.xlabel('n_clusters')
    plt.show()


import seaborn as sns

def plot_classification_results(results):
    all_n_clusters = results['all_n_clusters']
    accuracy = results['accuracy']
    precision = results['precision']
    recall = results['recall']
    f1 = results['f1']
    cm = results['cm']

    plt.subplots(1, 2, figsize=(15, 5))
    plt.subplots_adjust(wspace=0.4)
    plt.subplot(1, 2, 1)
    plt.title('Classification scores')
    plt.plot(all_n_clusters, accuracy, label='Accuracy')
    plt.plot(all_n_clusters, precision, label='Precision')
    plt.plot(all_n_clusters, recall, label='Recall')
    plt.plot(all_n_clusters, f1, label='F1')
    plt.ylabel('Score')
    plt.xlabel('n_clusters')
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.title('Confusion matrix for n_clusters = 5')
    sns.heatmap(cm, cmap="Oranges", annot=True)
    plt.show()


def plot_eigvals(results):
    lambdas = results['lambdas']
    plt.title('Eigenvalues')
    plt.scatter(range(1, len(lambdas)+1), lambdas, marker='+')
    plt.show()


from IPython.display import display, HTML
import pandas as pd

def display_scores(results):
    all_n_clusters = results['all_n_clusters']
    homogeneity = results['homogeneity']
    completeness = results['completeness']
    v_measure = results['v_measure']
    accuracy = results['accuracy']
    precision = results['precision']
    recall = results['recall']
    f1 = results['f1']
    times = results['times']
    df = pd.DataFrame(list(zip(all_n_clusters, homogeneity, completeness, v_measure,
                               accuracy, precision, recall, f1, times)),
                      columns=('n_clusters', 'Homogeneity', 'Completeness', 'V-Measure',
                               'Accuracy', 'Precision', 'Recall', 'F1', 'Clustering Time (sec)'))
    display(HTML(df.to_html(index=False)))


def display_final_scores(all_results, n_clusters):
    homogeneity, completeness, v_measure = [], [], []
    accuracy, precision, recall, f1 = [], [], [], []
    times = []
    clustering_strs = [
        'My Unnormalized Spectral Clustering',
        'My Normalized Spectral Clustering',
        'Normalized Spectral Clustering',
    ]
    index = n_clusters-2
    for clustering_str in clustering_strs:
        results = all_results[clustering_str]
        homogeneity.append(results['homogeneity'][index])
        completeness.append(results['completeness'][index])
        v_measure.append(results['v_measure'][index])
        accuracy.append(results['accuracy'][index])
        precision.append(results['precision'][index])
        recall.append(results['recall'][index])
        f1.append(results['f1'][index])
        times.append(results['times'][index])
    df = pd.DataFrame(list(zip(clustering_strs, homogeneity, completeness, v_measure,
                               accuracy, precision, recall, f1, times)),
                      columns=('Clustering', 'Homogeneity', 'Completeness', 'V-Measure',
                               'Accuracy', 'Precision', 'Recall', 'F1', 'Clustering Time (sec)'))
    display(HTML(df.to_html(index=False)))
```

#### 4.1 My Unnormalized Spectral Clustering

Spectral clustering using the unnormalized Laplacian matrix $L = D - S$.

```
clustering = MySpectralClustering(n_neighbors=20, normed=False, random_state=0, n_jobs=-1)
results = do_the_clustering('My Unnormalized Spectral Clustering', clustering,
                            x_train_embedded, y_train)
plot_clustering_results(results)
plot_classification_results(results)
plot_eigvals(results)
display_scores(results)
```

#### 4.2 My Normalized Spectral Clustering

Spectral clustering using the normalized Laplacian matrix $L_{sym} = D^{-1/2}LD^{-1/2}$.

```
clustering = MySpectralClustering(n_neighbors=20, normed=True, random_state=0, n_jobs=-1)
results = do_the_clustering('My Normalized Spectral Clustering', clustering,
                            x_train_embedded, y_train)
plot_clustering_results(results)
plot_classification_results(results)
plot_eigvals(results)
display_scores(results)
```

#### 4.3 Normalized Spectral Clustering

Spectral clustering with sklearn.cluster.SpectralClustering, which uses the normalized Laplacian matrix $L_{sym} = D^{-1/2}LD^{-1/2}$.

```
from sklearn.cluster import SpectralClustering

clustering = SpectralClustering(affinity='nearest_neighbors', n_neighbors=20,
                                random_state=0, n_jobs=-1)
results = do_the_clustering('Normalized Spectral Clustering', clustering,
                            x_train_embedded, y_train)
plot_clustering_results(results)
plot_classification_results(results)
display_scores(results)
```

### 5. Comparison of the results for n_clusters = 5

A summary table of the clustering and classification scores for n_clusters = 5 follows.

```
display_final_scores(all_results, n_clusters=5)
```

All the algorithms give similar results. The first gap appears between the 4th and the 5th eigenvalue, so the optimal n_clusters according to the eigenvalues is 4. V-measure is highest for n_clusters=4, but Homogeneity is low there; for n_clusters=5, on the other hand, all clustering scores take good values. The classification scores reach a fairly high value for n_clusters=5 and do not improve much for more clusters. For n_clusters=5 there is some confusion between class 0 (PRAD) and class 3 (KIRC).
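The eigengap estimate discussed above can also be computed programmatically instead of being read off the eigenvalue plot. A minimal sketch, assuming the `lambdas` array stored by `MySpectralClustering` after a fit (the helper name is an illustrative choice):

```
import numpy as np

def eigengap_heuristic(lambdas, max_clusters=10):
    """Suggest n_clusters from the largest gap between consecutive
    (ascending) Laplacian eigenvalues."""
    lam = np.sort(np.asarray(lambdas))[:max_clusters]
    gaps = np.diff(lam)              # gap between lambda_{k+1} and lambda_k
    return int(np.argmax(gaps)) + 1  # +1: gaps[k-1] corresponds to k clusters

# Example usage with a fitted MySpectralClustering instance:
# suggested_k = eigengap_heuristic(clustering.lambdas)
# print('Suggested n_clusters:', suggested_k)
```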
github_jupyter
import urllib.request import pandas as pd import numpy as np import tarfile import os np.random.seed(0) folder = 'TCGA-PANCAN-HiSeq-801x20531' filename = folder + '.tar.gz' url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00401/' + filename data_filename = os.path.join(folder, 'data.csv') labels_filename = os.path.join(folder, 'labels.csv') if not os.path.exists(data_filename) or not os.path.exists(labels_filename): print('Downloading file...') urllib.request.urlretrieve(url, filename) print('Done.') tar = tarfile.open(filename, "r:gz") tar.extractall() tar.close() df_x = pd.read_csv(data_filename) df_y = pd.read_csv(labels_filename) df_x.head() df_y.head() df_x.info() df_y.info() df_x.drop(df_x.columns[0], axis=1, inplace=True) df_x.head() df_y.drop(df_y.columns[0], axis=1, inplace=True) df_y.head() df_y = df_y['Class'] df_y.hist() from sklearn.model_selection import StratifiedShuffleSplit import matplotlib.pyplot as plt sss = StratifiedShuffleSplit(n_splits=1, test_size=0.4, random_state=0) for train_index, test_index in sss.split(df_x, df_y): df_train_x = df_x.loc[train_index] df_train_y = df_y.loc[train_index] df_test_x = df_x.loc[test_index] df_test_y = df_y.loc[test_index] df_train_y.hist() df_test_y.hist() plt.show() from sklearn.preprocessing import LabelEncoder x_train = df_train_x.to_numpy() y_train = df_train_y.to_numpy() x_test = df_test_x.to_numpy() y_test = df_test_y.to_numpy() le = LabelEncoder() y_train = le.fit_transform(y_train) y_test = le.transform(y_test) print('x_train.shape =', x_train.shape) print('y_train.shape =', y_train.shape) print('x_test.shape =', x_test.shape) print('y_test.shape =', y_test.shape) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) from sklearn.manifold import SpectralEmbedding embedding = SpectralEmbedding(n_components=2, affinity='rbf', gamma=1e-4) x_train_embedded = embedding.fit_transform(x_train) plt.title('Spectral embedding') plt.scatter(x_train_embedded[:, 0], x_train_embedded[:, 1], c=y_train, cmap='tab10', marker='+', alpha=0.8) plt.colorbar() plt.show() from sklearn.neighbors import kneighbors_graph from sklearn.cluster import KMeans import scipy from scipy.linalg import eigh class MySpectralClustering(object): def __init__(self, n_clusters=8, n_components=None, n_neighbors=10, normed=True, random_state=None, n_jobs=None): self.n_clusters = n_clusters self.n_components = n_components self.n_neighbors = n_neighbors self.normed = normed self.random_state = random_state self.n_jobs = n_jobs self.lambdas = None def set_params(self, **params): if not params: return self self.n_clusters = params.get('n_clusters', self.n_clusters) self.n_components = params.get('n_clusters', self.n_components) self.n_neighbors = params.get('n_neighbors', self.n_neighbors) self.normed = params.get('normed', self.normed) self.random_state = params.get('random_state', self.random_state) self.n_jobs = params.get('n_jobs', self.n_jobs) return self def fit_predict(self, X): G = kneighbors_graph(X, n_neighbors=self.n_neighbors, n_jobs=self.n_jobs) G = G.toarray() S = 0.5*(G + G.T) L, d = scipy.sparse.csgraph.laplacian(S, normed=self.normed, return_diag=True) if self.n_components is None: self.n_components = self.n_clusters w, v = eigh(L) indices = np.argsort(w) w = w[indices] v = v[:, indices] v = v[:, :self.n_components] self.lambdas = w if self.normed: v /= np.sqrt(d).reshape(-1, 1) kmeans = KMeans(n_clusters=self.n_clusters, 
random_state=self.random_state) labels = kmeans.fit_predict(v) return labels from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score from sklearn.metrics import homogeneity_completeness_v_measure from sklearn.metrics import confusion_matrix from sklearn.neighbors import NearestCentroid from sklearn.utils import shuffle from time import time all_results = {} def do_the_clustering(clustering_str, clustering, X, y, verbose=0): all_n_clusters = range(2, 10) experiments_num = len(all_n_clusters) homogeneity = np.zeros(experiments_num) completeness = np.zeros(experiments_num) v_measure = np.zeros(experiments_num) times = np.zeros(experiments_num) accuracy = np.zeros(experiments_num) recall = np.zeros(experiments_num) precision = np.zeros(experiments_num) f1 = np.zeros(experiments_num) most_frequent_labels = [] cm = None plt.subplots(4, 2, figsize=(15, 25)) plt.subplots_adjust(hspace=0.3) for i, n_clusters in enumerate(all_n_clusters): clustering.set_params(n_clusters=n_clusters) t1 = time() y_pred = clustering.fit_predict(X) t2 = time() times[i] = t2 - t1 homogeneity[i], completeness[i], v_measure[i] = homogeneity_completeness_v_measure(y, y_pred) plt.subplot(4, 2, i+1) plt.title('n_clusters = {}'.format(n_clusters)) plt.scatter(X[:, 0], X[:, 1], c=y_pred, cmap='tab10', marker='+', alpha=0.8) plt.colorbar() if verbose > 0: print('n_clusters = {}, time = {:.1f} sec'.format(n_clusters, times[i])) y_train_new = np.copy(y_pred) for label in range(n_clusters): indices = (y_pred == label) counts = np.bincount(y.astype(int)[indices]) val = np.argmax(counts) y_train_new[indices] = val clf = NearestCentroid() clf.fit(x_train, y_train_new) y_pred_test = clf.predict(x_test) accuracy[i] = accuracy_score(y_test, y_pred_test) precision[i] = precision_score(y_test, y_pred_test, average='weighted', zero_division=0) recall[i] = recall_score(y_test, y_pred_test, average='weighted') f1[i] = f1_score(y_test, y_pred_test, average='weighted') if n_clusters == 5: cm = confusion_matrix(y_test, y_pred_test) plt.show() lambdas = clustering.lambdas[:20] if hasattr(clustering, 'lambdas') else None results = { 'all_n_clusters': all_n_clusters, 'homogeneity': homogeneity, 'completeness': completeness, 'v_measure': v_measure, 'times': times, 'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'times': times, 'cm': cm, 'most_frequent_labels': most_frequent_labels, 'lambdas': lambdas } all_results[clustering_str] = results return results def plot_clustering_results(results): all_n_clusters = results['all_n_clusters'] homogeneity = results['homogeneity'] completeness = results['completeness'] v_measure = results['v_measure'] times = results['times'] plt.subplots(1, 2, figsize=(15, 5)) plt.subplots_adjust(wspace=0.4) plt.subplot(1, 2, 1) plt.title('Clustering scores') plt.plot(all_n_clusters, homogeneity, label='Homogeneity') plt.plot(all_n_clusters, completeness, label='Completeness') plt.plot(all_n_clusters, v_measure, label='V-Measure') plt.ylabel('Score') plt.xlabel('n_clusters') plt.legend() plt.subplot(1, 2, 2) plt.title('Clustering time') plt.plot(all_n_clusters, times) plt.ylabel('Time (sec)') plt.xlabel('n_clusters') plt.show() import seaborn as sns def plot_classification_results(results): all_n_clusters = results['all_n_clusters'] accuracy = results['accuracy'] precision = results['precision'] recall = results['recall'] f1 = results['f1'] cm = results['cm'] plt.subplots(1, 2, figsize=(15, 5)) plt.subplots_adjust(wspace=0.4) plt.subplot(1, 2, 1) plt.title('Classification 
scores') plt.plot(all_n_clusters, accuracy, label='Accuracy') plt.plot(all_n_clusters, precision, label='Precision') plt.plot(all_n_clusters, recall, label='Recall') plt.plot(all_n_clusters, f1, label='F1') plt.ylabel('Score') plt.xlabel('n_clusters') plt.legend() plt.subplot(1, 2, 2) plt.title('Confusion matrix for n_clusters = 5') sns.heatmap(cm, cmap="Oranges", annot=True) plt.show() def plot_eigvals(results): lambdas = results['lambdas'] plt.title('Eigenvalues') plt.scatter(range(1, len(lambdas)+1), lambdas, marker='+') plt.show() from IPython.display import display, HTML import pandas as pd def display_scores(results): all_n_clusters = results['all_n_clusters'] homogeneity = results['homogeneity'] completeness = results['completeness'] v_measure = results['v_measure'] accuracy = results['accuracy'] precision = results['precision'] recall = results['recall'] f1 = results['f1'] times = results['times'] df = pd.DataFrame(list(zip(all_n_clusters, homogeneity, completeness, v_measure, accuracy, precision, recall, f1, times)), columns=('n_clusters', 'Homogeneity', 'Completeness', 'V-Measure', 'Accuracy', 'Precision', 'Recall', 'F1', 'Clustering Time (sec)')) display(HTML(df.to_html(index=False))) def display_final_scores(all_results, n_clusters): homogeneity, completeness, v_measure = [], [], [] accuracy, precision, recall, f1 = [], [], [], [] times = [] clustering_strs = [ 'My Unnormalized Spectral Clustering', 'My Normalized Spectral Clustering', 'Normalized Spectral Clustering', ] index = n_clusters-2 for clustering_str in clustering_strs: results = all_results[clustering_str] homogeneity.append(results['homogeneity'][index]) completeness.append(results['completeness'][index]) v_measure.append(results['v_measure'][index]) accuracy.append(results['accuracy'][index]) precision.append(results['precision'][index]) recall.append(results['recall'][index]) f1.append(results['f1'][index]) times.append(results['times'][index]) df = pd.DataFrame(list(zip(clustering_strs, homogeneity, completeness, v_measure, accuracy, precision, recall, f1, times)), columns=('Clustering', 'Homogeneity', 'Completeness', 'V-Measure', 'Accuracy', 'Precision', 'Recall', 'F1', 'Clustering Time (sec)')) display(HTML(df.to_html(index=False))) clustering = MySpectralClustering(n_neighbors=20, normed=False, random_state=0, n_jobs=-1) results = do_the_clustering('My Unnormalized Spectral Clustering', clustering, x_train_embedded, y_train) plot_clustering_results(results) plot_classification_results(results) plot_eigvals(results) display_scores(results) clustering = MySpectralClustering(n_neighbors=20, normed=True, random_state=0, n_jobs=-1) results = do_the_clustering('My Normalized Spectral Clustering', clustering, x_train_embedded, y_train) plot_clustering_results(results) plot_classification_results(results) plot_eigvals(results) display_scores(results) from sklearn.cluster import SpectralClustering clustering = SpectralClustering(affinity='nearest_neighbors', n_neighbors=20, random_state=0, n_jobs=-1) results = do_the_clustering('Normalized Spectral Clustering', clustering, x_train_embedded, y_train) plot_clustering_results(results) plot_classification_results(results) display_scores(results) display_final_scores(all_results, n_clusters=5)
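The evaluation pattern used above (cluster, score the partition with homogeneity/completeness/V-measure, then map each cluster to its majority true class before computing classification metrics) can be isolated into a small standalone sketch. The synthetic `make_blobs` data below is only a stand-in for the embedded training data (`x_train_embedded`, `y_train`) used in the notebook; the steps mirror what `do_the_clustering()` does.

```
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import homogeneity_completeness_v_measure, accuracy_score

# Synthetic stand-in data (the notebook itself uses the embedded training set)
X, y = make_blobs(n_samples=300, centers=3, random_state=0)
y_pred = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)

# Clustering quality against the ground-truth labels
h, c, v = homogeneity_completeness_v_measure(y, y_pred)
print(f"homogeneity={h:.3f}, completeness={c:.3f}, v-measure={v:.3f}")

# Majority-vote relabeling: each cluster takes its most frequent true class,
# which is what the loop over `label in range(n_clusters)` above does
y_mapped = np.copy(y_pred)
for label in np.unique(y_pred):
    mask = y_pred == label
    y_mapped[mask] = np.bincount(y[mask]).argmax()
print("accuracy after majority-vote mapping:", accuracy_score(y, y_mapped))
```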
### Hands-on 3: * Download and preprocess Named Entity Recognition (NER) corpus (CONLL 2002) * Prepare CRF model for NER * Run CRF for training and evaluation ## Named Entity Recognition (NER) using CRF The task of Named Entity Recognition (NER) involves the recognition of :<br> * names of persons * locations * organizations * dates * ... #### Example For example, the following sentence is tagged with sub-sequences indicating PER (for persons), LOC (for location) and ORG (for organization): <br> Wolff, currently a journalist in Argentina, played with Del Bosque in the final years of the seventies in Real Madrid. <br> _______________ <b>[PER Wolff ] </b> , currently a journalist in <b> [LOC Argentina ] </b> , played with <b> [PER Del Bosque ] </b> in the final years of the seventies in <b> [ORG Real Madrid ] </b> . _______________ <br> ### NER - Sub Task Involved : NER involves 2 sub-tasks: <br> * identifying the boundaries of such expressions (the open and close brackets) and * labeling the expressions (with tags such as PER, LOC or ORG). As for the task of chunking, this sequence labeling task is mapped to a classification tag, using a BIO encoding of the data: <br> ### BIO Tagging: The BIO / IOB format (short for inside, outside, beginning) is a common tagging format for tagging tokens in a chunking task in computational linguistics (ex. named-entity recognition). * The B- prefix before a tag indicates that the tag is the beginning of a chunk * An I- prefix before a tag indicates that the tag is inside a chunk. * An O tag indicates that a token belongs to no entity / chunk. The following figure shows how a BIO tagged sentence looks like: ``` Wolff B-PER , O currently O a O journalist O in O Argentina B-LOC , O played O with O Del B-PER Bosque I-PER in O the O final O years O of O the O seventies O in O Real B-ORG Madrid I-ORG . O ``` ### DataSet Let’s use CoNLL 2002 data to build a NER system CoNLL2002 corpus is available in NLTK. ``` # download corpus import nltk nltk.download('conll2002') # get training/testing datasets from nltk.corpus import conll2002 pip install pyconll ### Data Preparation ## Training and testing train_sents = list(conll2002.iob_sents('esp.train')) ## spain test_sents = list(conll2002.iob_sents('esp.testb')) print(train_sents[0]) #each tuple contains token, syntactic tag, ner label ``` ### Features - word level features - word shape - word suffix - Current/previous word context - some information from nearby words is used. - word POS tag - label context This makes a simple baseline, but you certainly can add and remove some features to get (much?) better results - experiment with it. sklearn-crfsuite (and python-crfsuite) supports several feature formats; here we use feature dicts. 
``` # functions of sentence representations for sequence labelling def word2features(sent, i): word = sent[i][0] postag = sent[i][1] features = { 'bias': 1.0, 'word.lower()': word.lower(), 'word[-3:]': word[-3:], 'word[-2:]': word[-2:], 'word.isupper()': word.isupper(), 'word.istitle()': word.istitle(), 'word.isdigit()': word.isdigit(), 'postag': postag, 'postag[:2]': postag[:2], } if i > 0: word1 = sent[i-1][0] postag1 = sent[i-1][1] features.update({ '-1:word.lower()': word1.lower(), '-1:word.istitle()': word1.istitle(), '-1:word.isupper()': word1.isupper(), '-1:postag': postag1, '-1:postag[:2]': postag1[:2], }) else: # Indicate that it is the 'beginning of a document' features['BOS'] = True if i < len(sent)-1: word1 = sent[i+1][0] postag1 = sent[i+1][1] features.update({ '+1:word.lower()': word1.lower(), '+1:word.istitle()': word1.istitle(), '+1:word.isupper()': word1.isupper(), '+1:postag': postag1, '+1:postag[:2]': postag1[:2], }) else: # Features for words that are not at the end of a document features['EOS'] = True return features def sent2features(sent): return [word2features(sent, i) for i in range(len(sent))] def sent2labels(sent): return [label for token, postag, label in sent] def sent2tokens(sent): return [token for token, postag, label in sent] ``` This is what word2features extracts: ``` sample_sentence = " ".join([s for s,c,d in train_sents[2]]) sample_sentence train_sents[2][:10] word2features(train_sents[2], 1) sent2features(train_sents[2]) ``` ### Feature Extraction: Extract features from the training data and testing data ``` # sentence representations for sequence labelling X_train = [sent2features(s) for s in train_sents] y_train = [sent2labels(s) for s in train_sents] X_test = [sent2features(s) for s in test_sents] y_test = [sent2labels(s) for s in test_sents] train_sents[0], y_train[0] X_train[0], y_train[0] ``` ### Training Here we are using L-BFGS training algorithm (it is default) with Elastic Net (L1 + L2) regularization. ``` # train CRF model !pip install sklearn_crfsuite import sklearn_crfsuite crf = sklearn_crfsuite.CRF( algorithm='lbfgs', c1=0.1, c2=0.1, max_iterations=100, all_possible_transitions=True ) crf crf.fit(X_train, y_train) # training model parameters ``` ### Evaluation There is much more O entities in data set, but we’re more interested in other entities. To account for this we’ll use averaged F1 score computed for all labels except for O. sklearn-crfsuite.metrics package provides some useful metrics for sequence classification task, including this one. ``` # get label set labels = list(crf.classes_) labels.remove('O') print(labels) # evaluate CRF model from sklearn_crfsuite import metrics y_pred = crf.predict(X_test) metrics.flat_f1_score(y_test, y_pred, average='weighted', labels=labels) y_pred[0] ``` ### Inspect per-class results in more detail: ``` # group B and I results sorted_labels = sorted( labels, key=lambda name: (name[1:], name[0]) ) print(metrics.flat_classification_report( y_test, y_pred, labels=sorted_labels, digits=3 )) ```
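As a possible follow-up (not part of the original notebook), sklearn-crfsuite exposes the learned weights through the `transition_features_` and `state_features_` attributes after fitting, which is handy for sanity-checking the model, e.g. that `B-PER -> I-PER` transitions get high weights. The sketch below assumes the fitted `crf` object from the cells above.

```
from collections import Counter

def print_transitions(trans_features):
    for (label_from, label_to), weight in trans_features:
        print("%-7s -> %-7s %0.4f" % (label_from, label_to, weight))

# Strongest and weakest learned label-to-label transitions
print("Most likely transitions:")
print_transitions(Counter(crf.transition_features_).most_common(5))
print("\nLeast likely transitions:")
print_transitions(Counter(crf.transition_features_).most_common()[-5:])

# State features with the largest positive weights
print("\nTop state features:")
for (attr, label), weight in Counter(crf.state_features_).most_common(5):
    print("%0.4f %-7s %s" % (weight, label, attr))
```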
# ISBN Numbers ## Introduction The function `clean_isbn()` cleans a column containing ISBN strings, and standardizes them in a given format. The function `validate_isbn()` validates either a single ISBN strings, a column of ISBN strings or a DataFrame of ISBN strings, returning `True` if the value is valid, and `False` otherwise. Currently, an ISBN is composed by following components: * 3-digit (only in ISBN-13) Bookland Code * 1 to 5-digit Group Identifier (identifies country or language) * 1 to 7-digit Publisher Code * 1 to 8-digit Item Number (identifies the book) * a Check Digit Usually, the delimiters of ISBN is `-`. ISBN strings can be converted to the following formats via the `output_format` parameter: * `compact`: only number strings without any seperators, like "9789024538270" * `standard`: ISBN strings with proper delimiters in the proper places, like "978-90-245-3827-0" * `isbn13`: standard format of 13-digits ISBN string, like "978-90-245-3827-0" * `isbn10`: standard format of 10-digits ISBN string, like "90-245-3827-0" Invalid parsing is handled with the `errors` parameter: * `coerce` (default): invalid parsing will be set to NaN * `ignore`: invalid parsing will return the input * `raise`: invalid parsing will raise an exception The following sections demonstrate the functionality of `clean_isbn()` and `validate_isbn()`. ### An example dataset containing ISBN strings ``` import pandas as pd import numpy as np df = pd.DataFrame( { "isbn": [ "978-9024538270", "978-9024538271", "1-85798-218-5", "9780471117094", "1857982185", "hello", np.nan, "NULL" ], "address": [ "123 Pine Ave.", "main st", "1234 west main heights 57033", "apt 1 789 s maple rd manhattan", "robie house, 789 north main street", "1111 S Figueroa St, Los Angeles, CA 90015", "(staples center) 1111 S Figueroa St, Los Angeles", "hello", ] } ) df ``` ## 1. Default `clean_isbn` By default, `clean_isbn` will clean isbn strings and output them in the standard format with proper separators. ``` from dataprep.clean import clean_isbn clean_isbn(df, column = "isbn") df_clean = clean_isbn(df, column = "isbn", inplace=True) df_clean ``` ## 2. Output formats This section demonstrates the output parameter. ### `standard` (default) ``` clean_isbn(df, column = "isbn", inplace=True, output_format="standard") ``` ### `compact` ``` clean_isbn(df, column = "isbn", inplace=True, output_format="compact") ``` ### `isbn13` ``` clean_isbn(df, column = "isbn", inplace=True, output_format="isbn13") ``` ### `isbn10` ``` clean_isbn(df, column = "isbn", inplace=True, output_format="isbn10") ``` ## 3. `split` parameter The `split` parameter adds individual columns containing the cleaned 13-digits ISBN values to the given DataFrame. ``` clean_isbn(df, column="isbn", split=True) ``` ## 4. `inplace` parameter This deletes the given column from the returned DataFrame. A new column containing cleaned ISBN strings is added with a title in the format `"{original title}_clean"`. ``` clean_isbn(df, column="isbn", inplace=True) ``` ### `inplace` and `split` ``` clean_isbn(df, column="isbn", inplace=True, split=True) ``` ## 5. `errors` parameter ### `coerce` (default) ``` clean_isbn(df, "isbn", errors="coerce") ``` ### `ignore` ``` clean_isbn(df, "isbn", errors="ignore") ``` ## 4. `validate_isbn()` `validate_isbn()` returns `True` when the input is a valid phone number. Otherwise it returns `False`. The input of `validate_isbn()` can be a string, a Pandas DataSeries, a Dask DataSeries, a Pandas DataFrame and a dask DataFrame. 
When the input is a string, a Pandas DataSeries or a Dask DataSeries, user doesn't need to specify a column name to be validated. When the input is a Pandas DataFrame or a dask DataFrame, user can both specify or not specify a column name to be validated. If user specify the column name, `validate_isbn()` only returns the validation result for the specified column. If user doesn't specify the column name, `validate_isbn()` returns the validation result for the whole DataFrame. ``` from dataprep.clean import validate_isbn print(validate_isbn("978-9024538270")) print(validate_isbn("978-9024538271")) print(validate_isbn("1-85798-218-5")) print(validate_isbn("9780471117094")) print(validate_isbn("1857982185")) print(validate_isbn("hello")) print(validate_isbn(np.nan)) print(validate_isbn("NULL")) ``` ### DataFrame + Specify Column ``` validate_isbn(df, column="isbn") ``` ### Only DataFrame ``` validate_isbn(df) ```
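A small usage sketch: assuming `validate_isbn()` returns one boolean per value when given a Series (as the per-value behaviour described above suggests; treat this as an assumption rather than documented API), it can be used as a mask to keep only the rows with valid ISBNs.

```
# Assumption: validate_isbn(Series) returns an element-wise boolean Series
mask = validate_isbn(df["isbn"])
df_valid = df[mask]          # rows whose "isbn" value passes validation
df_valid
```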
# 2A.eco - Rappel de ce que vous savez déjà mais avez peut-être oublié [pandas](http://pandas.pydata.org/) et [numpy](http://www.numpy.org/) sont essentiels pour manipuler les données. C'est ce que rappelle ce notebook. Voir aussi [Essential Cheat Sheets for Machine Learning and Deep Learning Engineers](https://startupsventurecapital.com/essential-cheat-sheets-for-machine-learning-and-deep-learning-researchers-efb6a8ebd2e5). ``` from jyquickhelper import add_notebook_menu add_notebook_menu() ``` ## Les quelques règles de Python Python est un peu susceptible et protocolaire, il y a quelques règles à respecter : 1) L'indentation est primordiale : un code mal indenté ne fonctionne pas. L'indentation indique à l'interpréteur où se trouvent les séparations entre des blocs d'instructions. Un peu comme des points dans un texte. Si les lignes ne sont pas bien alignées, l'interpréteur ne sait plus à quel bloc associer la ligne. 2) On commence à compter à 0. Ca peut paraitre bizarre mais c'est comme ça. Le premier élément d'une liste est le 0-ème. 3) Les marques de ponctuation sont importantes : - Pour une liste : [] - Pour un dictionnaire : {} - Pour un tuple : () - Pour séparer des éléments : , - Pour commenter un bout de code : # - Pour aller à la ligne dans un bloc d'instructions : \ - Les majuscules et minuscules sont importantes - Par contre l'usage des ' ou des " est indifférente. Il faut juste avoir les mêmes début et fin. - Pour documenter une fonction ou une classe """ documentation """ ## les outputs de Python : l'opération, le print et le return Quand Python réalise des opérations, il faut lui préciser ce qu'il doit en faire : - est-ce qu'il doit juste faire l'opération, - afficher le résultat de l'opération, - créer un objet avec le résultat de l'opération ? Remarque : dans l'environnement Notebook, le dernier élement d'une cellule est automatiquement affiché (print), qu'on lui demande ou non de le faire. Ce n'est pas le cas dans un éditeur classique comme Spyder. ``` # on calcule : dans le cas d'une opération par exemple une somme 2+3 # Python calcule le résultat mais n'affiche rien dans la sortie # le print : on affiche print(2+3) # Python calcule et on lui demande juste de l'afficher # le résultat est en dessous du code # le print dans une fonction def addition_v1(a,b) : print(a+b) resultat_print = addition_v1(2,0) print(type(resultat_print)) # dans la sortie on a l'affichage du résultat, car la sortie de la fonction est un print # en plus on lui demande quel est le type du résultat. Un print ne renvoie aucun type, ce n'est ni un numérique, # ni une chaine de charactères, le résultat d'un print n'est pas un format utilisable ``` Le résultat de l'addition est affiché La fonction addition_v1 effectue un print Par contre, l'objet crée n'a pas de type, il n'est pas un chiffre, ce n'est qu'un affichage. Pour créer un objet avec le résultat de la fonction, il faut utiliser __return__ ``` # le return dans une fonction def addition_v2(a,b) : return a+b resultat_return = addition_v2(2,5) # print(type(resultat_return)) ## là on a bien un résultat qui est du type "entier" ``` Le résultat de addition_v2 n'est pas affiché comme dans addition_v1 Par contre, la fonction addition_v2 permet d'avoir un objet de type int, un entier donc. ## Type de base : variables, listes, dictionnaires ... 
Pyhon permet de manipuler différents types de base On distingue deux types de variables : les immuables qui ne peuvent être modifiés et les modifiables ### Les variables - types immuables Les variables immuables ne peuvent être modifiées - None : ce type est une convention de programmation pour dire que la valeur n'est pas calculée - bool : un booléen - int : un entier - float : un réel - str : une chaine de caractères - tuple : un vecteur ``` i = 3 # entier = type numérique (type int) r = 3.3 # réel = type numérique (type float) s = "exemple" # chaîne de caractères = type str n = None # None signifie que la variable existe mais qu'elle ne contient rien # elle est souvent utilisée pour signifier qu'il n'y a pas de résultat a = (1,2) # tuple print(i,r,s,n,a) ``` Si on essaie de changer le premier élément de la chaine de caractères __s__ on va avoir un peu de mal. Par exemple si on voulait mettre une majuscule à "exemple", on aurait envie d'écrire que le premier élément de la chaine s est "E" majuscule Mais Python ne va pas nous laisser faire, il nous dit que les objets "chaine de caractère" ne peuvent être modifiés ``` s[0] = "E" # déclenche une exception ``` Tout ce qu'on peut faire avec une variable immutable, c'est le réaffecter à une autre valeur : il ne peut pas être modifié Pour s'en convaincre, utilisons la fonction id() qui donne un identifiant à chaque objet. ``` print(s) id(s) s = "autre_mot" id(s) ``` On voit bien que s a changé d'identifiant : il peut avoir le même nom, ce n'est plus le même objet ### Les variables - types modifiable : listes et dictionnaires Heureusement, il existe des variables modifiables comme les listes et les dictionnaires. #### Les listes - elles s''écrivent entre [ ] Les listes sont des élements très utiles, notamment quand vous souhaitez faire des boucles Pour faire appel aux élements d'une liste, on donne leur position dans la liste : le 1er est le 0, le 2ème est le 1 ... ``` ma_liste = [1,2,3,4] print("La longueur de ma liste est de", len(ma_liste)) print("Le premier élément de ma liste est :", ma_liste[0]) print("Le dernier élément de ma liste est :", ma_liste[3]) print("Le dernier élément de ma liste est :", ma_liste[-1]) ``` #### Les dictionnaires - ils s'écrivent entre { } Un dictionnaire associe à une clé un autre élément, appelé une valeur : un chiffre, un nom, une liste, un autre dictionnaire etc. 
- __Format d'un dictionnaire : {Clé : valeur}__ #### Dictionnaire avec des valeurs int On peut par exemple associer à un nom, un nombre ``` mon_dictionnaire_notes = { 'Nicolas' : 18 , 'Pimprenelle' : 15} # un dictionnaire qui à chaque nom associe un nombre # à Nicolas, on associe 18 print(mon_dictionnaire_notes) ``` #### Dictionnaire avec des valeurs qui sont des listes Pour chaque clé d'un dictionnaire, il ne faut pas forcément garder la même forme de valeur Dans l'exemple, la valeur de la clé "Nicolas" est une liste, alors que celle de "Philou" est une liste de liste ``` mon_dictionnaire_loisirs = \ { 'Nicolas' : ['Rugby','Pastis','Belote'] , 'Pimprenelle' : ['Gin Rami','Tisane','Tara Jarmon','Barcelone','Mickey Mouse'], 'Philou' : [['Maths','Jeux'],['Guillaume','Jeanne','Thimothée','Adrien']]} ``` Pour accéder à un élément du dictionnaire, on fait appel à la clé et non plus à la position, comme c'était le cas dans les listes ``` print(mon_dictionnaire_loisirs['Nicolas']) # on affiche une liste print(mon_dictionnaire_loisirs['Philou']) # on affiche une liste de listes ``` Si on ne veut avoir que la première liste des loisirs de Philou, on demande le premier élément de la liste ``` print(mon_dictionnaire_loisirs['Philou'][0]) # on affiche alors juste la première liste ``` On peut aussi avoir des valeurs qui sont des int et des listes ``` mon_dictionnaire_patchwork_good = \ { 'Nicolas' : ['Rugby','Pastis','Belote'] , 'Pimprenelle' : 18 } ``` ----------------- ## A retenir - L'indentation du code est importante (4 espaces et pas une tabulation) - Une __liste__ est entre [] et on peut appeler les positions par leur place - Un __dictionnaire__, clé x valeur, s'écrit entre {} et on appelle un élément en fonction de la clé ------ ## Questions pratiques : - Quelle est la position de 7 dans la liste suivante ``` liste_nombres = [1,2,7,5,3] ``` - Combien de clés a ce dictionnaire ? ``` dictionnaire_evangile = {"Marc" : "Lion", "Matthieu" : ["Ange","Homme ailé"] , "Jean" : "Aigle" , "Luc" : "Taureau"} ``` - Que faut-il écrire pour obtenir "Ange" en résultat à partir du dictionnaire_evangile ? ## Objets : méthodes et attributs Mainentant qu'on a vu quels objets existaient en Python, nous allons voir comment nous en servir. ### Un petit détour pour bien comprendre : Un objet, c'est quoi ? Un objet a deux choses : des attributs et des méthodes - Les attributs décrivent sa structure interne : sa taille, sa forme (dont on ne va pas parler ici) - Les méthodes sont des "actions" qui s'appliqueront à l'objet ### Premiers exemples de méthode Avec les éléments définis dans la partie 1 (les listes, les dictionnaires) on peut faire appel à des méthodes qui sont directement liées à ces objets. Les méthodes, c'est un peu les actions de Python. ##### Une méthode pour les listes Pour ajouter un item dans une liste : on va utiliser la méthode _.append()_ ``` ma_liste = ["Nicolas","Michel","Bernard"] ma_liste.append("Philippe") print(ma_liste) ``` #### Une méthode pour les dictionnaires Pour connaitre l'ensemble des clés d'un dictionnaire, on appelle la méthode _.keys()_ ``` mon_dictionnaire = {"Marc" : "Lion", "Matthieu" : ["Ange","Homme ailé"] , "Jean" : "Aigle" , "Luc" : "Taureau"} print(mon_dictionnaire.keys()) ``` ### Connaitre les méthodes d'un objet Pour savoir quelles sont les méthodes d'un objet vous pouvez : - taper help(mon_objet) ou mon_objet? dans la console iPython - taper mon_objet. + touche tabulation dans la console iPython ou dans le notebook . 
iPython permet la complétion, c'est-à-dire que vous pouvez faire appaître la liste ## Les opérations et méthodes classiques des listes ### Créer une liste Pour créer un objet de la classe list, il suffit de le déclarer. Ici on affecte à __x__ une liste ``` x = [4, 5] # création d’une liste composée de deux entiers x = ["un", 1, "deux", 2] # création d’une liste composée de 2 chaînes de caractères # et de deux entiers, l’ordre d’écriture est important x = [3] # création d’une liste d’un élément, sans la virgule, x = [ ] # crée une liste vide x = list () # crée une liste vide ``` ### Un premier test sur les listes Si on veut tester la présence d'un élément dans une liste, on l'écrit de la manière suivante : ``` # Exemple x = "Marcel" l = ["Marcel","Edith","Maurice","Jean"] print(x in l) #vrai si x est un des éléments de l ``` ### Pour concaténer deux listes : On utilise le symbole + ``` t = ["Antoine","David"] print(l + t) #concaténation de l et t ``` ### Pour trouver certains éléments d'une liste Pour chercher des élements dans une liste, on utilise la position dans la liste. ``` l[1] # donne l'élément qui est en 2ème position de la liste l[1:3] # donne les éléments de la 2ème position de la liste à la 4ème exclue ``` ### Quelques fonctions des listes ``` longueur = len(l) # nombre d’éléments de l minimum = min(l) # plus petit élément de l, ici par ordre alphabétique maximum = max(l) # plus grand élément de l, ici par ordre alphabétique print(longueur,minimum,maximum) del l[0 : 2] # supprime les éléments entre la position 0 et 2 exclue print(l) ``` ## Les méthodes des listes On les trouve dans l'aide de la liste. On distingue les méthodes et les méthodes spéciales : visuellement, les méthodes spéciales sont celles qui précédées et suivis de deux caractères de soulignement, les autres sont des méthodes classiques. ``` help(l) ``` ------ # A retenir et questions A retenir : - Chaque objet Python a des attributs et des méthodes - Vous pouvez créer des classes avec des attributs et des méthodes - Les méthodes des listes et des dictionnaires sont les plus utilisées : - list.count() - list.sort() - list.append() - dict.keys() - dict.items() - dict.values() ------ Questions pratiques : - Définir la liste allant de 1 à 10, puis effectuez les actions suivantes : – triez et affichez la liste – ajoutez l’élément 11 à la liste et affichez la liste – renversez et affichez la liste – affichez l’indice de l’élément 7 – enlevez l’élément 9 et affichez la liste – affichez la sous-liste du 2e au 3e élément ; – affichez la sous-liste du début au 2e élément ; – affichez la sous-liste du 3e élément à la fin de la liste ; - Construire le dictionnaire des 6 premiers mois de l'année avec comme valeurs le nombre de jours respectif. - Renvoyer la liste des mois. - Renvoyer la liste des jours. - Ajouez la clé du mois de Juillet ? ## Passer des listes, dictionnaires à pandas Supposons que la variable 'data' est un liste qui contient nos données. Une observation correspond à un dictionnaire qui contient le nom, le type, l'ambiance et la note d'un restaurant. Il est aisé de transformer cette liste en dataframe grâce à la fonction 'DataFrame'. ``` import pandas data = [{"nom": "Little Pub", "type" : "Bar", "ambiance": 9, "note": 7}, {"nom": "Le Corse", "type" : "Sandwicherie", "ambiance": 2, "note": 8}, {"nom": "Café Caumartin", "type" : "Bar", "ambiance": 1}] df = pandas.DataFrame(data) print(data) df ```
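A possible solution sketch for the practice questions above, using only the list and dictionary operations already introduced (the month names and day counts are the usual calendar values).

```
# List from 1 to 10, then the requested operations
ma_liste = list(range(1, 11))
ma_liste.sort()
print(ma_liste)
ma_liste.append(11)
print(ma_liste)
ma_liste.reverse()
print(ma_liste)
print(ma_liste.index(7))   # index of the element 7
ma_liste.remove(9)
print(ma_liste)
print(ma_liste[1:3])       # from the 2nd to the 3rd element
print(ma_liste[:2])        # from the start to the 2nd element
print(ma_liste[2:])        # from the 3rd element to the end

# Dictionary of the first six months of the year with their number of days
mois = {"Janvier": 31, "Fevrier": 28, "Mars": 31, "Avril": 30, "Mai": 31, "Juin": 30}
print(list(mois.keys()))     # list of the months
print(list(mois.values()))   # list of the day counts
mois["Juillet"] = 31         # add the key for July
print(mois)
```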
<a href="https://colab.research.google.com/github/agemagician/CodeTrans/blob/main/prediction/single%20task/function%20documentation%20generation/go/base_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## Install the library and download the pretrained models ``` print("Installing dependencies...") %tensorflow_version 2.x !pip install -q t5==0.6.4 import functools import os import time import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds import t5 !wget "https://www.dropbox.com/sh/kjoqdpj7e16dny9/AADdvjWVFckCgNQN-AqMKhiDa?dl=1" -O vocabulary.zip !unzip vocabulary.zip !rm vocabulary.zip !wget "https://www.dropbox.com/sh/uhpf2fc6b9x217q/AABB4_apYAEVg_1XmxIZYV2za?dl=1" -O go.zip !unzip go.zip !rm go.zip ``` ## Set sentencepiece model ``` from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary vocab_model_path = 'code_spm_unigram_40M.model' vocab = SentencePieceVocabulary(vocab_model_path, extra_ids=100) print("Vocab has a size of %d\n" % vocab.vocab_size) ``` ## Set the preprocessors and the task registry for the t5 model ``` def go_codeSearchNet_dataset_fn(split, shuffle_files=False): del shuffle_files ds = tf.data.TextLineDataset(go_path[split]) ds = ds.map( functools.partial(tf.io.decode_csv, record_defaults=["", ""], field_delim="\t", use_quote_delim=False), num_parallel_calls=tf.data.experimental.AUTOTUNE ) ds = ds.map(lambda *ex: dict(zip(["code", "docstring"], ex))) return ds def go_preprocessor(ds): def normalize_text(text): return text def to_inputs_and_targets(ex): return { "inputs": tf.strings.join(["function documentation generation go: ", normalize_text(ex["code"])]), "targets": normalize_text(ex["docstring"]) } return ds.map(to_inputs_and_targets, num_parallel_calls=tf.data.experimental.AUTOTUNE) t5.data.TaskRegistry.remove('function_documentation_generation_go_code') t5.data.TaskRegistry.add( "function_documentation_generation_go_code", dataset_fn=go_codeSearchNet_dataset_fn, output_features={ "inputs": t5.data.utils.Feature(vocabulary=vocab), "targets": t5.data.utils.Feature(vocabulary=vocab), }, splits=["train", "validation"], text_preprocessor=[go_preprocessor], postprocess_fn=t5.data.postprocessors.lower_text, metric_fns=[t5.evaluation.metrics.bleu, t5.evaluation.metrics.accuracy, t5.evaluation.metrics.rouge], ) ``` ## Set t5 base model ``` MODEL_DIR = "base" model_parallelism = 1 train_batch_size = 256 tf.io.gfile.makedirs(MODEL_DIR) model = t5.models.MtfModel( model_dir=MODEL_DIR, tpu=None, tpu_topology=None, model_parallelism=model_parallelism, batch_size=train_batch_size, sequence_length={"inputs": 512, "targets": 512}, mesh_shape="model:1,batch:1", mesh_devices=["GPU:0"], learning_rate_schedule=0.003, save_checkpoints_steps=5000, keep_checkpoint_max=None, iterations_per_loop=100, ) ``` ## Code Documentation Summarization ### Give the code for summarization ``` code = "func (pr *Progress) needSnapshotAbort() bool {\n\treturn pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot\n}" #@param {type:"raw"} ``` ### Parsing and Tokenization ``` !pip install tree_sitter !git clone https://github.com/tree-sitter/tree-sitter-go from tree_sitter import Language, Parser Language.build_library( 'build/my-languages.so', ['tree-sitter-go'] ) GO_LANGUAGE = Language('build/my-languages.so', 'go') parser = Parser() parser.set_language(GO_LANGUAGE) def get_string_from_code(node, lines): 
line_start = node.start_point[0] line_end = node.end_point[0] char_start = node.start_point[1] char_end = node.end_point[1] if line_start != line_end: code_list.append(' '.join([lines[line_start][char_start:]] + lines[line_start+1:line_end] + [lines[line_end][:char_end]])) else: code_list.append(lines[line_start][char_start:char_end]) def my_traverse(node, code_list): lines = code.split('\n') if node.child_count == 0: get_string_from_code(node, lines) elif node.type == 'string': get_string_from_code(node, lines) else: for n in node.children: my_traverse(n, code_list) return ' '.join(code_list) tree = parser.parse(bytes(code, "utf8")) code_list=[] tokenized_code = my_traverse(tree.root_node, code_list) print("Output after tokenization: " + tokenized_code) ``` ### Record the code for summarization with the prefix to a txt file ``` codes = [tokenized_code] inputs_path = 'input.txt' with tf.io.gfile.GFile(inputs_path, "w") as f: for c in codes: f.write("function documentation generation go: %s\n" % c) predict_outputs_path = 'MtfModel-output.txt' ``` ### Running the model with the best checkpoint to summarize the given code ``` model.batch_size = 8 model.predict( input_file="input.txt", output_file=predict_outputs_path, checkpoint_steps=80000, beam_size=4, vocabulary=vocab, # Select the most probable output token at each step. temperature=0, ) ``` ### Code Summarization Result ``` prediction_file = "MtfModel-output.txt-80000" print("\nPredictions using checkpoint 80000:\n" ) with tf.io.gfile.GFile(prediction_file) as f: for c, d in zip(codes, f): if c: print("Code for prediction: " + c + '\n') print("Generated Documentation: " + d) ```
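A small usage note (not in the original notebook): `my_traverse()` and `get_string_from_code()` read the module-level `code` and `code_list` variables, so to tokenize a different Go snippet with the same tree-sitter parser, both have to be reset first. The toy `add` function below is purely an illustrative input.

```
# Reuse the tree-sitter parser built above on another (hypothetical) Go function.
# `code` must be reassigned because my_traverse()/get_string_from_code() read it globally.
code = "func add(a int, b int) int {\n\treturn a + b\n}"
tree = parser.parse(bytes(code, "utf8"))
code_list = []
print("Tokenized:", my_traverse(tree.root_node, code_list))
```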
# 1. Object types in Python ## Introduction In this section we discuss one of the main building blocks of python, the `type`. We will cover some of the basic types and how they can be used. Python code is composed of the manipulation of various objects in order to achieve a certain outcome. Every object in python has a type. For example, the object `'Hello world'` (introduced in the previous section) is text, also known as a string. Object types include: - strings (i.e. text) - integers (i.e. whole numbers, both positive and negative) - floating point values (i.e. decimal numbers) - boolean values (i.e. True or False) Objects can have things done to them, i.e. operations can be performed on them, such as addition, substraction and division. ## 1.1 Strings For example, we can add two strings together: ``` # The first string: 'Hello' # The second string: 'world' # Add them together 'Hello'+'world' # print the string (and add a space between words) print('Hello '+'world') ``` Apart from the space between words, what is the difference between the outputs from the last two cells? As discussed in the previous section, the `print` function will write out the contents passed to it without the included quotation. We should note that the fact that strings are written out on cell execution without a `print` function is unique to jupyter notebooks. If you were to write the above in a python script, only the contents passed to the `print` function would be shown in your terminal output. ### Strings and quotation marks: Question: Do we have to use single quotes to indicate a string? Will double quotes (`""`) work as well? Answer: Yes. Single quotes, double quotes, and even triple quotes (`'''something'''` or `"""something"""`) tell Python that you want a string. When to use one or the other depends on what you are trying to do; check the exercises below for more on this. A word of caution: don't get in the habit of using triple quotes for strings, as these should be reserved for docstrings. As previously mentioned, these will be covered briefly in section 8, just know that these are important in other aspects of Python, and it generaly isn't a good idea to mix them with single and double quotation marks. You can use double quotes within single quotes and the other way around: ``` print("Single 'quotes' can be used within double quotes.") print('Double "quotes" can be used within single quotes.') ``` ### Exercise 1.1.1: Try printing strings surrounded by either single quotes or double quotes. ``` # Exercise 1.1.1 ``` As explained above, you can use the two types of quotation marks to print quotation marks within a string For example, to print: Just say "No!" ``` print('Just say "No!"') ``` You can also achieve the same effect with single quotes by using a backslash `\` before the quotation mark you wish to be printed. This "escapes" the single quote mark from being interpreted as a quotation mark at the end of the string. 
For example: ``` print('Just say \'No!\'') ``` ### Exercise 1.1.2: Print the following string twice, first using single quotes at the start and end of the string, then double quotes: `I can't be bothered with this "exercise"` ``` # Exercise 1.1.2 ``` #### More types of quotation marks If you want to include more than one line in your string, you can: - use a 'new line character' in your string - this character looks like: `\n` - use triple quotes (`'''` or `"""`) at either end of your string ``` print('Here is an example of using a new line character\nto print a second line.') print('''Or you can just use triple quotes at either end of the string and start a new line as you type.''') ``` ## 1.2 Integers What about the other types mentioned, i.e. integers (whole numbers) and floating point values (decimals)? We can also perform operations on integers and floating point values, in all the ways you might expect. Adding and subtracting integers: ``` 2 + 4 5-3 7- 10 ``` **Note:** usually it doesn't matter if you use spaces between numbers and symbols, however you may want to do so in order to improve code readability. Multiplying (we use the `*` sign to multiply) ``` 2 * 5 ``` Dividing (use the `/` sign): ``` 4 / 2 ``` Note that when dividing integers, a floating point type value (i.e. a decimal) will be returned. It is important to note that in previous versions of python (v2.7 and lower) only integer types (i.e. whole numbers) would be returned, resulting in a rounding down of values. This very specific difference between versions is a frequent source of errors when porting python code. If integer division (i.e. a division that returns only whole numbers) is required, the `//` operation can be used instead. for example: ``` # This division will return a non-whole number result (float type) 7 / 4 # With integer division a whole number is returned (same as using '/' in python 2.7 and lower) 7 // 4 ``` Powers, eg. $4^2$ , (using 2 consecutive `*` symbols): ``` 4 ** 2 ``` Modular arithmetic (or integer remainders), is done using the `%` symbol: ``` 12 % 4 13 % 4 17 % 4 ``` So how big can a python integer be? In most programming languages, integers and floats are limited by the amount of memory allocated to them (you'll often hear the words "64 bit integer"). In Python 3, integers have an unlimited size, so that maximum value you can give them is only limited to the amount of memory your computer has. You will however find that processing times get much longer as you work with increasingly large numbers. ## 1.3 Floating point values As introduced in section 1.2, a floating point value is a decimal, and python can tell we are using floating points when we use a decimal point in the number. For example: ``` 8.23 5.0 ``` We don't necessarily need to put any numbers after the floating point, eg, 5. is the same as 5.0: ``` 5. ``` We can also perform operations on floating point values, like before. ### Exercise 1.3.1 Write code to find: a) $8.3 + 4$ b) $5.1$ x $2.5$ c) $10$ / $3$ (the answer should be a floating point value) d) $8.1^3$ ``` # Exercise 1.3.1 a) # Exercise 1.3.1 b) # Exercise 1.3.1 c) # Exercise 1.3.1 d) ``` ### Floating point precision Floating point arithmetic is not exact, because computers work in base 2, which means that numbers like `1/10` are not stored exactly. 
Usually this is not a problem - floating point values are correct to ~17 significant figures on modern computers - however, it can mean that small errors creep in: ``` 0.1 + 0.1 + 0.1 ``` When adding several small and large numbers together, you can find that your answer will eventually diverge from its exact solution. This is a common problem in computer science, and several approaches such as parallel and Kahan summation have been proposed to avoid this problem. A discussion of these methods goes beyond the scope of this tutorial, however for more information on this, we recommended looking at resources such as; "Accuracy and Stability of Numerical Algorithms" by Nicholas J. Higham. ### Exercise 1.3.2 Find $(1/10)^5$ How accurate is the answer? ``` # Exercise 1.3.2 ``` ## 1.4 Other types of objects Many different types of python objects exist. Here we cover some other common object types which can be obtained by grouping strings, integers and floating point values into new objects: - Lists; surrounded by square brackets, `[ ]`, succesive entries are separated by `,`. These are used a lot. - Tuples; surrounded by round brackets, `( )`, succesive entries are separated by `,`. Like a list except that, once specified, its elements can't be changed. - Dictionaries; surrounded by curly brackets, `{ }`, succesive entries are separated by `,`. These are lists of `key:value` pairs, where each entry is indexed by the contents of the `key`. A list of integers: ``` [1, 6, 3, 0] ``` A tuple of integers: ``` (6,1,3) ``` A dictionary, eg. of exam scores for each person in a class (as string-float pairs): ``` {'Andy':2.0, 'Vivek':6.1, 'Sian':9.5, 'Chen':3.6} ``` There are also many operations we can perform on these object types, eg. adding two lists. The types of operation you can perform will depend on the type of object (eg. try adding two dictionaries). We will come back to lists, tuples and dictionaries later on. ## 1.5 Checking what type of object we have We can check what type of object we are dealing with using the `type()` function. For example: ``` type('what type of object is this?') type(4.) type(3/4) type([8,1]) ``` ## Review In section 1.1 we covered: - Strings, surrounded by either `'single quotes'` or `"double quotes"`. - Printing strings. - Adding strings together. - Quotes within a string, either using `"double"` or `'single quotes'`, or using `\`. - Inserting a new line into your string, with a new line character, `\n`, or `'''triple quotes'''`. In section 1.2 we covered: - Addition `+`. - Substraction `-`. - Multiplication `*`. - Division and differences from older version of python (`/` and `//`). - Power operations `**`. - Modulo arithmetics `%`. - Python integer size limits. In section 1.3 we covered: - Defining floating points (by using a decimal point, `.`, in the number). - Performing addition, substraction, multiplication, and powers with floating points. - Issues with the precision of floating point values. In section 1.4 we covered: - Additional types of objects, including; lists, tuples, and dictionaries. In section 1.5 we covered: - How to check the type of an object using the `type()` function.
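To make the floating point discussion in section 1.3 concrete, the sketch below compares a naive sum of 0.1s with `math.fsum`, which performs an accurately rounded summation in the spirit of the compensated-summation methods mentioned above, and revisits Exercise 1.3.2.

```
import math

print(0.1 + 0.1 + 0.1)           # 0.30000000000000004, not exactly 0.3
print(sum([0.1] * 10))           # a naive sum accumulates a tiny error
print(math.fsum([0.1] * 10))     # 1.0 -- an accurately rounded sum

# Exercise 1.3.2: (1/10)**5 is close to, but not necessarily exactly, 1e-05
print((1 / 10) ** 5)
print((1 / 10) ** 5 == 1e-05)    # may be False because of rounding error
```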
``` import numpy as np import pandas as pd import sklearn.metrics as mtr from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from keras.models import Sequential from keras.callbacks import Callback, EarlyStopping from keras.models import Model from keras.layers import Input, Dense, Concatenate, Reshape, Dropout, merge, Add from keras.layers.embeddings import Embedding from sklearn.model_selection import KFold,GroupKFold import warnings import random as rn import math import datetime import tensorflow as tf from keras.models import load_model import os import tqdm warnings.filterwarnings("ignore") pd.options.display.max_columns = 200 from kaggle.competitions import nflrush env = nflrush.make_env() iter_test = env.iter_test() # evaluation metric def crps(y_true, y_pred): y_true = np.clip(np.cumsum(y_true, axis=1), 0, 1) y_pred = np.clip(np.cumsum(y_pred, axis=1), 0, 1) return ((y_true - y_pred) ** 2).sum(axis=1).sum(axis=0) / (199 * y_true.shape[0]) # author : nlgn # Link : https://www.kaggle.com/kingychiu/keras-nn-starter-crps-early-stopping class Metric(Callback): def __init__(self, model, callbacks, data): super().__init__() self.model = model self.callbacks = callbacks self.data = data def on_train_begin(self, logs=None): for callback in self.callbacks: callback.on_train_begin(logs) def on_train_end(self, logs=None): for callback in self.callbacks: callback.on_train_end(logs) def on_epoch_end(self, batch, logs=None): X_train, y_train = self.data[0][0], self.data[0][1] y_pred = self.model.predict(X_train) y_true = np.clip(np.cumsum(y_train, axis=1), 0, 1) y_pred = np.clip(np.cumsum(y_pred, axis=1), 0, 1) tr_s = ((y_true - y_pred) ** 2).sum(axis=1).sum(axis=0) / (199 * X_train[-1].shape[0]) tr_s = np.round(tr_s, 6) logs['tr_CRPS'] = tr_s X_valid, y_valid = self.data[1][0], self.data[1][1] y_pred = self.model.predict(X_valid) y_true = np.clip(np.cumsum(y_valid, axis=1), 0, 1) y_pred = np.clip(np.cumsum(y_pred, axis=1), 0, 1) val_s = ((y_true - y_pred) ** 2).sum(axis=1).sum(axis=0) / (199 * X_valid[-1].shape[0]) val_s = np.round(val_s, 6) logs['val_CRPS'] = val_s print('tr CRPS', tr_s, 'val CRPS', val_s) for callback in self.callbacks: callback.on_epoch_end(batch, logs) def create_features(df): def new_X(x_coordinate, play_direction): if play_direction == 'left': return 120.0 - x_coordinate else: return x_coordinate def new_line(rush_team, field_position, yardline): if rush_team == field_position: # offense starting at X = 0 plus the 10 yard endzone plus the line of scrimmage return 10.0 + yardline else: # half the field plus the yards between midfield and the line of scrimmage return 60.0 + (50 - yardline) def new_orientation(angle, play_direction): if play_direction == 'left': new_angle = 360.0 - angle if new_angle == 360.0: new_angle = 0.0 return new_angle else: return angle def euclidean_distance(x1,y1,x2,y2): x_diff = (x1-x2)**2 y_diff = (y1-y2)**2 return np.sqrt(x_diff + y_diff) def back_direction(orientation): if orientation > 180.0: return 1 else: return 0 def map_team_name(df): map_abbr = {'ARI': 'ARZ', 'BAL': 'BLT', 'CLE': 'CLV', 'HOU': 'HST'} for abb in df['PossessionTeam'].unique(): map_abbr[abb] = abb df['PossessionTeam'] = df['PossessionTeam'].map(map_abbr) df['HomeTeamAbbr'] = df['HomeTeamAbbr'].map(map_abbr) df['VisitorTeamAbbr'] = df['VisitorTeamAbbr'].map(map_abbr) df['FieldPosition'] = df['FieldPosition'].map(map_abbr) return df def clean_position(df): def get_position(pos): if pos == 'SAF': return 'DB' if pos == 'S': 
return 'DB' elif pos == 'OG': return 'G' elif pos == "OT": return 'T' else: return pos df['Position'] = df['Position'].apply(get_position) return df def update_yardline(df): new_yardline = df[df['NflId'] == df['NflIdRusher']] new_yardline['YardLine'] = new_yardline[['PossessionTeam','FieldPosition','YardLine']].apply(lambda x: new_line(x[0],x[1],x[2]), axis=1) new_yardline = new_yardline[['GameId','PlayId','YardLine']] return new_yardline def update_orientation(df, yardline): df['X'] = df[['X','PlayDirection']].apply(lambda x: new_X(x[0],x[1]), axis=1) df['Orientation'] = df[['Orientation','PlayDirection']].apply(lambda x: new_orientation(x[0],x[1]), axis=1) df['Dir'] = df[['Dir','PlayDirection']].apply(lambda x: new_orientation(x[0],x[1]), axis=1) df = df.drop('YardLine', axis=1) df = pd.merge(df, yardline, on=['GameId','PlayId'], how='inner') return df def back_features(df): carriers = df[df['NflId'] == df['NflIdRusher']][['GameId','PlayId','NflIdRusher','X','Y','Orientation','Dir','YardLine']] carriers['RusherDisYardLine'] = carriers['YardLine'] - carriers['X'] carriers['back_oriented_down_field'] = carriers['Orientation'].apply(lambda x: back_direction(x)) carriers['back_moving_down_field'] = carriers['Dir'].apply(lambda x: back_direction(x)) carriers = carriers.rename(columns={'X':'back_X', 'Y':'back_Y'}) carriers = carriers[['GameId','PlayId','NflIdRusher','back_X','back_Y', 'RusherDisYardLine','back_oriented_down_field','back_moving_down_field']] return carriers def features_relative_to_back(df, carriers): player_distance = df[['GameId','PlayId','NflId','X','Y']] player_distance = pd.merge(player_distance, carriers, on=['GameId','PlayId'], how='inner') player_distance = player_distance[player_distance['NflId'] != player_distance['NflIdRusher']] player_distance['dist_to_back'] = player_distance[['X','Y','back_X','back_Y']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) player_distance = player_distance.groupby(['GameId','PlayId','RusherDisYardLine','back_oriented_down_field','back_moving_down_field'])\ .agg({'dist_to_back':['min','max','mean','std']})\ .reset_index() player_distance.columns = ['GameId','PlayId','RusherDisYardLine','back_oriented_down_field','back_moving_down_field', 'min_dist','max_dist','mean_dist','std_dist'] return player_distance def create_features(df): def new_X(x_coordinate, play_direction): if play_direction == 'left': return 120.0 - x_coordinate else: return x_coordinate def new_line(rush_team, field_position, yardline): if rush_team == field_position: # offense starting at X = 0 plus the 10 yard endzone plus the line of scrimmage return 10.0 + yardline else: # half the field plus the yards between midfield and the line of scrimmage return 60.0 + (50 - yardline) def new_orientation(angle, play_direction): if play_direction == 'left': new_angle = 360.0 - angle if new_angle == 360.0: new_angle = 0.0 return new_angle else: return angle def euclidean_distance(x1,y1,x2,y2): x_diff = (x1-x2)**2 y_diff = (y1-y2)**2 return np.sqrt(x_diff + y_diff) def back_direction(orientation): if orientation > 180.0: return 1 else: return 0 def map_team_name(df): map_abbr = {'ARI': 'ARZ', 'BAL': 'BLT', 'CLE': 'CLV', 'HOU': 'HST'} for abb in df['PossessionTeam'].unique(): map_abbr[abb] = abb df['PossessionTeam'] = df['PossessionTeam'].map(map_abbr) for abb in df['HomeTeamAbbr'].unique(): map_abbr[abb] = abb df['HomeTeamAbbr'] = df['HomeTeamAbbr'].map(map_abbr) for abb in df['VisitorTeamAbbr'].unique(): map_abbr[abb] = abb df['VisitorTeamAbbr'] = 
df['VisitorTeamAbbr'].map(map_abbr) for abb in df['FieldPosition'].unique(): map_abbr[abb] = abb df['FieldPosition'] = df['FieldPosition'].map(map_abbr) return df def clean_position(df): def get_position(pos): if pos == 'SAF': return 'DB' if pos == 'S': return 'DB' elif pos == 'OG': return 'G' elif pos == "OT": return 'T' else: return pos df['Position'] = df['Position'].apply(get_position) return df def update_yardline(df): new_yardline = df[df['NflId'] == df['NflIdRusher']] new_yardline['YardLine'] = new_yardline[['PossessionTeam','FieldPosition','YardLine']].apply(lambda x: new_line(x[0],x[1],x[2]), axis=1) new_yardline = new_yardline[['GameId','PlayId','YardLine']] return new_yardline def update_orientation(df, yardline): df['X'] = df[['X','PlayDirection']].apply(lambda x: new_X(x[0],x[1]), axis=1) df['Orientation'] = df[['Orientation','PlayDirection']].apply(lambda x: new_orientation(x[0],x[1]), axis=1) df['Dir'] = df[['Dir','PlayDirection']].apply(lambda x: new_orientation(x[0],x[1]), axis=1) df = df.drop('YardLine', axis=1) df = pd.merge(df, yardline, on=['GameId','PlayId'], how='inner') return df def back_features(df): carriers = df[df['NflId'] == df['NflIdRusher']][['GameId','PlayId','NflIdRusher','X','Y','Orientation','Dir','YardLine']] carriers['RusherDisYardLine'] = carriers['YardLine'] - carriers['X'] carriers['back_oriented_down_field'] = carriers['Orientation'].apply(lambda x: back_direction(x)) carriers['back_moving_down_field'] = carriers['Dir'].apply(lambda x: back_direction(x)) carriers = carriers.rename(columns={'X':'back_X', 'Y':'back_Y'}) carriers = carriers[['GameId','PlayId','NflIdRusher','back_X','back_Y', 'RusherDisYardLine','back_oriented_down_field','back_moving_down_field']] return carriers def features_relative_to_back(df, carriers): player_distance = df[['GameId','PlayId','NflId','X','Y']] player_distance = pd.merge(player_distance, carriers, on=['GameId','PlayId'], how='inner') player_distance = player_distance[player_distance['NflId'] != player_distance['NflIdRusher']] player_distance['dist_to_back'] = player_distance[['X','Y','back_X','back_Y']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) player_distance = player_distance.groupby(['GameId','PlayId','RusherDisYardLine','back_oriented_down_field','back_moving_down_field'])\ .agg({'dist_to_back':['min','max','mean','std']})\ .reset_index() player_distance.columns = ['GameId','PlayId','RusherDisYardLine','back_oriented_down_field','back_moving_down_field', 'min_dist','max_dist','mean_dist','std_dist'] return player_distance def create_general_position(df): def get_general_position(pos): if pos == 'SS' or pos == 'FS' or pos == 'CB' or pos == 'DB': return 'DB' elif pos == 'DE' or pos == 'DT' or pos == 'DL': return 'DL' elif pos == 'ILB' or pos == 'OLB' or pos == 'MLB' or pos == 'LB': return 'LB' elif pos == 'WR': return 'WR' elif pos == 'TE': return 'TE' elif pos == 'T' or pos == 'G' or pos == 'C' or pos == 'NT' or pos == 'OL': return 'OL' elif pos == 'QB' or pos == 'RB' or pos == 'FB' or pos == 'HB' or pos == 'TB' or pos == 'WB': return 'OB' else: return 'Other' df['GeneralPosition'] = df['Position'].apply(get_general_position) return df def get_team_on_offense(df): df['TeamOnOffense'] = "home" df.loc[df.PossessionTeam != df.HomeTeamAbbr, 'TeamOnOffense'] = "away" df['IsOnOffense'] = df.Team == df.TeamOnOffense return df def map_offense_defense_team(df): df['OffenseTeam'] = df['VisitorTeamAbbr'] df.loc[df.TeamOnOffense == 'home', 'OffenseTeam'] = df['HomeTeamAbbr'] df['DefenseTeam'] = 
df['VisitorTeamAbbr'] df.loc[df.TeamOnOffense == 'away', 'DefenseTeam'] = df['HomeTeamAbbr'] df['IsOffenseAtHome'] = True df.loc[df.TeamOnOffense == 'away', 'IsOffenseAtHome'] = False return df def get_is_offense_winning(df): df['OffenseScore'] = df['HomeScoreBeforePlay'] df.loc[df.TeamOnOffense == 'away', 'OffenseScore'] = df['VisitorScoreBeforePlay'] df['DefenseScore'] = df['VisitorScoreBeforePlay'] df.loc[df.TeamOnOffense == 'away', 'DefenseScore'] = df['HomeScoreBeforePlay'] df['OffenseLessDefenseScore'] = df['OffenseScore'] - df['DefenseScore'] df['OffenseInOwnTerritory'] = False df.loc[df.FieldPosition == df.OffenseTeam, 'OffenseInOwnTerritory'] = True df.drop(['OffenseScore','DefenseScore'], axis=1, inplace=True) return df def get_general_pos_counts(df): df['NumberOfBacksOnPlay'] = 0 df['NumberOfOLinemenOnPlay'] = 0 df['NumberOfWRsOnPlay'] = 0 df['NumberOfTEsOnPlay'] = 0 df['NumberOfDBsOnPlay'] = 0 df['NumberOfDLinemenOnPlay'] = 0 df['NumberOfLBsOnPlay'] = 0 # Pivot to find counts of each general position gen_pos_counts = df[['PlayId','GeneralPosition']].pivot_table(index='PlayId', columns='GeneralPosition', aggfunc=len, fill_value=0) gen_pos_counts = gen_pos_counts.rename(columns = {'DB':'NumberOfDBsOnPlay', 'DL':'NumberOfDLinemenOnPlay', 'LB':'NumberOfLBsOnPlay', 'OB':'NumberOfBacksOnPlay', 'OL':'NumberOfOLinemenOnPlay', 'TE':'NumberOfTEsOnPlay', 'WR':'NumberOfWRsOnPlay'}) gen_pos_counts = gen_pos_counts.reset_index(drop=False) del gen_pos_counts.columns.name gen_pos_counts_cols = gen_pos_counts.columns.values.tolist() gen_pos_counts = gen_pos_counts.loc[gen_pos_counts.index.repeat(22)].reset_index(drop=True) df.update(gen_pos_counts) return df def utc2sec(x): return int(x.split("-")[2].split(":")[2].split(".")[0]) def gameclock2secs(x): clock = x.split(":") return (60 * int(clock[0])) + int(clock[1]) def str_to_float(txt): try: return float(txt) except: return -1 def get_time_features(df): df['TimeBetweenSnapHandoff'] = df['TimeHandoff'].apply(utc2sec) - df['TimeSnap'].apply(utc2sec) df['QuarterGameSecs'] = df['GameClock'].apply(gameclock2secs) df['TotalGameSecsPlayed'] = (900 - df['QuarterGameSecs']) + ((df['Quarter'] - 1) * 900) df['HalfGameSecsLeft'] = df['QuarterGameSecs'] df.loc[(df['Quarter'].isin([1,3])), 'HalfGameSecsLeft'] = (900 + df['QuarterGameSecs']) return(df) def get_player_age(df): def timesnap2day(x): days = x.split("-") return 365 * int(days[0]) + 30 * int(days[1]) + int(days[2][:2]) def birthday2day(x): days = x.split("/") return 30 * int(days[0]) + int(days[1]) + 365 * int(days[2]) df['PlayerAge'] = df['TimeSnap'].apply(timesnap2day) - df['PlayerBirthDate'].apply(birthday2day) df.drop('PlayerBirthDate', axis=1, inplace=True) return df def get_player_weights_bmi(df): def height2inch(x): height = x.split("-") return 12 * int(height[0]) + int(height[1]) df['PlayerHeight'] = df['PlayerHeight'].apply(height2inch) df = df.rename(columns={'PlayerWeight':'PlayerMass'}) df['PlayerBMI'] = df['PlayerMass'] / df['PlayerHeight'] return df def get_is_rusher(df): df['IsRusher'] = df.NflId == df.NflIdRusher return df def get_redzone(df): df['InOffenseRedzone'] = False df.loc[df.YardLine <= 30, 'InOffenseRedzone'] = True df['InDefenseRedzone'] = False df.loc[df.YardLine >= 90, 'InDefenseRedzone'] = True return df def get_qb_kneel(df): df['QBKneel'] = False df.loc[ ((df.Quarter == 2) | (df.Quarter == 4)) & (df.GameClock <= '02:00') & (df.OffenseLessDefenseScore > 0) & (df.NumberOfBacksOnPlay >= 3) & (df.NumberOfTEsOnPlay >= 2), 'QBKneel' ] = True return df def 
get_dis_yardline(df): """ For defender use only """ df['DisYardLine'] = 0 df.loc[df.IsOnOffense == True, 'DisYardLine'] = df['YardLine'] - df['X'] df.loc[df.IsOnOffense == False, 'DisYardLine'] = df['X'] - df['YardLine'] return df def get_no_defenders_yl(df): df['NoDefenderYL'] = 'NaN' df.loc[(df.IsOnOffense == False) & (df.DisYardLine < 0), 'NoDefenderYL'] = 'NoDefendersBelow0YL' df.loc[(df.IsOnOffense == False) & ((df.DisYardLine >= 0) & (df.DisYardLine < 3)), 'NoDefenderYL'] = 'NoDefenders0_2YL' df.loc[(df.IsOnOffense == False) & ((df.DisYardLine >= 3) & (df.DisYardLine < 6)), 'NoDefenderYL'] = 'NoDefenders3_5YL' df.loc[(df.IsOnOffense == False) & ((df.DisYardLine >= 6) & (df.DisYardLine < 9)), 'NoDefenderYL'] = 'NoDefenders6_8YL' df.loc[(df.IsOnOffense == False) & (df.DisYardLine >= 9), 'NoDefenderYL'] = 'NoDefendersAbove9YL' df['NoDefendersBelow0YL'] = 0 df['NoDefenders0_2YL'] = 0 df['NoDefenders3_5YL'] = 0 df['NoDefenders6_8YL'] = 0 df['NoDefendersAbove9YL'] = 0 # Pivot to find counts of each general position no_defenders = df[['PlayId','NoDefenderYL']].pivot_table(index='PlayId', columns='NoDefenderYL', aggfunc=len, fill_value=0) no_defenders = no_defenders.reset_index(drop=False).drop('NaN', axis=1) del no_defenders.columns.name no_defenders_cols = no_defenders.columns.values.tolist() no_defenders = no_defenders.loc[no_defenders.index.repeat(22)].reset_index(drop=True) df.update(no_defenders) return df def get_inside_runs(df): df['IsInside'] = 0 inside1 = df[ # Outside seams and running in (((df.RusherY > -2.00) & (df.RusherY <= 23.55)) & ((df.RusherDir > 270) | (df.RusherDir <= 90))) | (((df.RusherY > 29.75) & (df.RusherY <= 55.00)) & ((df.RusherDir > 90) & (df.RusherDir <= 270))) ]['PlayId'] inside2 = df[ # Inside the seams and running in (((df.RusherY > 23.55) & (df.RusherY <= 29.75)) & ((df.RusherDir > 40) & (df.RusherDir <= 140))) ]['PlayId'] inside = inside1.tolist() + inside2.tolist() df.loc[df.PlayId.isin(inside), 'IsInside'] = 1 return df def get_dis_from_yl(df): """ For both off and def """ df['DisFromYL'] = abs(df['YardLine'] - df['X']) return df def get_dis_rusher(df): rusher_xy = df.loc[df.IsRusher == True, ['GameId','PlayId','X','Y']].rename(columns={'X':'RusherX','Y':'RusherY'}) df = df.merge(rusher_xy, on=['GameId','PlayId']) df['DisRusher'] = df[['X','Y','RusherX','RusherY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df.drop(['RusherX','RusherY'], axis=1,inplace=True) return df def get_dis_features(df): """ Returns DisRusherNearestYardLine, RusherDisQB, RusherDisC and RusherDisMLB, DisC, DisQB """ def get_rusher_dis_mlb(df): lb_xy = df.loc[(df.Position == 'MLB') | (df.Position == 'ILB'), ['PlayId','X','Y']].rename(columns={'X':'MLBX', 'Y':'MLBY'}) rusher_lb_xy = lb_xy.merge(rusher_xy, on=['PlayId'], how='left') rusher_lb_xy['RusherDisMLB'] = rusher_lb_xy[ ['RusherX','RusherY','MLBX','MLBY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) rusher_lb_xy.drop(['RusherX','RusherY','MLBX','MLBY'],axis=1, inplace=True) rusher_lb_dis = rusher_lb_xy.groupby(['PlayId']).agg({'RusherDisMLB':['min'],}).reset_index() rusher_lb_dis.columns = ['PlayId','RusherDisMLB'] return rusher_lb_dis rusher_xy = df.loc[df.IsRusher == True, ['PlayId','X','Y']].rename(columns={'X':'RusherX','Y':'RusherY'}) qb_xy = df.loc[df.Position == 'QB', ['PlayId','X','Y']].rename(columns={'X':'QBX','Y':'QBY'}) c_xy = df.loc[df.Position == 'C', ['PlayId','X','Y']].rename(columns={'X':'CX','Y':'CY'}) try: rusher_lb_dis = get_rusher_dis_mlb(df) except: rusher_lb_dis = 
np.nan rusherxy_qbxy = rusher_xy.merge(qb_xy, on=['PlayId']) rusherxy_qbxy_cxy = rusherxy_qbxy.merge(c_xy, on=['PlayId']) try: dis_total_xy = rusherxy_qbxy_cxy.merge(rusher_lb_dis, on=['PlayId']) except: dis_total_xy = rusherxy_qbxy_cxy dis_total_xy['RusherDisMLB'] = np.nan dis_total_xy = dis_total_xy.loc[dis_total_xy.index.repeat(22)].reset_index(drop=True) dis_total_xy.drop(['PlayId'], axis=1, inplace=True) df['RusherX'] = 0 df['RusherY'] = 0 df['QBX'] = 0 df['QBY'] = 0 df['CX'] = 0 df['CY'] = 0 df['RusherDisMLB'] = 0 df.update(dis_total_xy) df['DisRusherNearestYardLine'] = df[['YardLine','RusherY','X','Y']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df['RusherDisQB'] = df[['RusherX','RusherY','QBX','QBY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df['RusherDisC'] = df[['RusherX','RusherY','CX','CY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df['DisC'] = df[['X','Y','CX','CY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df['DisQB'] = df[['X','Y','QBX','QBY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df.drop(['RusherX','RusherY','QBX','QBY','CX','CY'], axis=1,inplace=True) return df def get_team_aggs(df, col, for_offense=True): aggs = ['Avg','Min','Max','Std'] if for_offense == True: team_agg = df[df.IsOnOffense == True][['PlayId'] + [col]] team_agg = df[['PlayId'] + [col]] team_agg = team_agg.groupby(['PlayId']).agg({col:['mean','min','max','std']}).reset_index() avg_col = 'AvgOffense' + col min_col = 'MinOffense' + col max_col = 'MaxOffense' + col std_col = 'StdOffense' + col if for_offense == False: team_agg = df[df.IsOnOffense == False][['PlayId'] + [col]] team_agg = team_agg.groupby(['PlayId']).agg({col:['mean','min','max','std']}).reset_index() avg_col = 'AvgDefense' + col min_col = 'MinDefense' + col max_col = 'MaxDefense' + col std_col = 'StdDefense' + col team_agg.drop(['PlayId'], axis=1, inplace=True) team_agg_cols = [avg_col,min_col,max_col,std_col] team_agg.columns = team_agg_cols team_agg = team_agg.loc[team_agg.index.repeat(22)].reset_index(drop=True) for col in team_agg_cols: df[col] = 0 df.update(team_agg) return df def get_rusher_dis_mlb_inside(df): try: df['RusherDisMLBByIsInside'] = (1 / df['RusherDisMLB']) * df['IsInside'] df['RusherDisMLBByIsInside'] = df['RusherDisMLBByIsInside'].replace([np.inf, -np.inf], np.nan) return df except: df['RusherDisMLBByIsInside'] = np.nan return df def get_yards_by_down(df): df['YardsByDownSqrt'] = (df['Distance'] * df['Down']) **(1/2) return df def get_diff_rusher_dir_otation(df): df['DiffRusherDirOtation'] = df['RusherDir'] - df['RusherOrientation'] return df def get_mech_feats(df): df['Weight'] = df['PlayerMass'] * 9.806 # acceleration gravity df['ChangeTime'] = df['Dis'] / df['S'] df['Force'] = df['PlayerMass'] * df['A'] df['Momentum'] = df['PlayerMass'] * df['S'] df['KE'] = 0.5 * df['PlayerMass'] * (df['S']**2) df['Work'] = df['Force'] * df['Dis'] df['Power'] = df['Work'] / df['ChangeTime'] df['Impulse'] = df['Force'] * df['ChangeTime'] angle = 90 - df['Dir'] df['SX'] = np.abs(df['S'] * np.cos(angle)) df['SY'] = np.abs(df['S'] * np.sin(angle)) df['ForceX'] = np.abs(df['Force'] * np.cos(angle)) df['ForceY'] = np.abs(df['Force'] * np.sin(angle)) df['MomentumX'] = np.abs(df['Momentum'] * np.cos(angle)) df['MomentumY'] = np.abs(df['Momentum'] * np.sin(angle)) df['WorkX'] = np.abs(df['Work'] * np.cos(angle)) df['WorkY'] = np.abs(df['Work'] * np.sin(angle)) df['PowerX'] = np.abs(df['Power'] * np.cos(angle)) df['PowerY'] = 
np.abs(df['Power'] * np.sin(angle)) df['ImpulseX'] = np.abs(df['Impulse'] * np.cos(angle)) df['ImpulseY'] = np.abs(df['Impulse'] * np.sin(angle)) return df def get_gen_position_feats(df, position): pos_feat = df.loc[df.GeneralPosition == position, ['PlayId','A','S','Dir', 'Orientation','Dis', 'PlayerMass','PlayerHeight']] pos_feat = pos_feat.rename(columns={'A':position+'A','S':position+'S','Dir':position+'Dir', 'Orientation':position+'Orientation', 'Dis':position+'Dis','PlayerMass':position+'Weight', 'PlayerHeight':position+'Height'}) pos_feat = pos_feat.groupby(['PlayId']).agg( {position+'A':['mean','min','max'], position+'S':['mean','min','max'], position+'Dir':['mean','min','max'], position+'Orientation':['mean','min','max'], position+'Dis':['mean','min','max'], position+'Weight':['mean','min','max'], position+'Height':['mean','min','max']}).reset_index() pos_feat.columns = [''.join(col) for col in pos_feat.columns.values] pos_feat_columns = pos_feat.columns.tolist() pos_feat_columns.remove('PlayId') pos_feat.drop('PlayId',axis=1,inplace=True) pos_feat = pos_feat.loc[pos_feat.index.repeat(22)].reset_index(drop=True) for feat in pos_feat_columns: df[feat] = 0 df.update(pos_feat) return df def get_off_less_def_feats(df, feat): off_feat = df.loc[df.IsOnOffense == True, ['PlayId',feat]] off_feat = off_feat.groupby(['PlayId']).agg({feat:['sum']}).reset_index() off_feat.drop('PlayId', axis=1,inplace=True) off_feat.columns = ['Off'+feat] def_feat = df.loc[df.IsOnOffense == False, ['PlayId',feat]] def_feat = def_feat.groupby(['PlayId']).agg({feat:['sum']}).reset_index() def_feat.drop('PlayId', axis=1,inplace=True) def_feat.columns = ['Def'+feat] off_def_feat = pd.DataFrame(off_feat['Off'+feat] - def_feat['Def'+feat], columns=['OffLessDef'+feat]) df['OffLessDef'+feat] = 0 off_def_feat = off_def_feat.loc[off_def_feat.index.repeat(22)].reset_index(drop=True) df.update(off_def_feat) return df def get_rusher_feats(df): rusher_feats = df.loc[df.IsRusher == True,['X','Y','S','A','Dis', 'Orientation','Dir','DisFromYL', 'PlayerMass','PlayerHeight']] rusher_feats = rusher_feats.loc[rusher_feats.index.repeat(22)].reset_index(drop=True) rusher_feats = rusher_feats.rename(columns={'X':'RusherX','Y':'RusherY',}) df['RusherX'] = 0 df['RusherY'] = 0 df.update(rusher_feats) df = df.rename(columns={'S':'RusherS', 'A':'RusherA','Dis':'RusherDis', 'Orientation':'RusherOrientation', 'Dir':'RusherDir','DisFromYL':'RusherDisYL', 'PlayerMass':'RusherMass', 'PlayerHeight':'RusherHeight'}) df['RusherWeight'] = df['RusherMass'] * 9.806 # acceleration gravity df['ChangeTime'] = df['RusherDis'] / df['RusherS'] df['RusherForce'] = df['RusherMass'] * df['RusherA'] df['RusherMomentum'] = df['RusherMass'] * df['RusherS'] df['RusherKE'] = 0.5 * df['RusherMass'] * (df['RusherS']**2) df['RusherWork'] = df['RusherForce'] * df['RusherDis'] df['RusherPower'] = df['RusherWork'] / df['ChangeTime'] df['RusherImpulse'] = df['RusherForce'] * df['ChangeTime'] angle = 90 - df['RusherDir'] df['RusherSX'] = np.abs(df['RusherS'] * np.cos(angle)) df['RusherSY'] = np.abs(df['RusherS'] * np.sin(angle)) df['RusherForceX'] = np.abs(df['RusherForce'] * np.cos(angle)) df['RusherForceY'] = np.abs(df['RusherForce'] * np.sin(angle)) df['RusherMomentumX'] = np.abs(df['RusherMomentum'] * np.cos(angle)) df['RusherMomentumY'] = np.abs(df['RusherMomentum'] * np.sin(angle)) df['RusherWorkX'] = np.abs(df['RusherWork'] * np.cos(angle)) df['RusherWorkY'] = np.abs(df['RusherWork'] * np.sin(angle)) df.drop(['ChangeTime'],axis=1,inplace=True) df = 
df.replace([np.inf, -np.inf], np.nan) df = df.fillna(0) return df def get_gap_feats(df): df['X_gapmedian'] = 0 df['X_gapmax'] = 0 df['Y_gapmedian'] = 0 df['Y_gapmax'] = 0 plays = df.loc[df.IsOnOffense == False, ['PlayId','X','Y','RusherX']] gaps_df = pd.DataFrame(columns=['PlayId','X_gap','Y_gap']) for play in plays['PlayId'].unique(): RusherX_val = df.loc[df.PlayId == play, 'RusherX'].unique()[0] X_vals = plays.loc[plays.PlayId == play, 'X'] X_vals = X_vals.append(pd.Series([RusherX_val,120]), ignore_index=True).sort_values().reset_index(drop=True) X_vals = np.diff(X_vals) Y_vals = plays.loc[plays.PlayId == play, 'Y'] Y_vals = Y_vals.append(pd.Series([0,53.3]), ignore_index=True).sort_values().reset_index(drop=True) Y_vals = np.diff(Y_vals) gaps_play = pd.DataFrame() gaps_play['X_gap'] = X_vals gaps_play['Y_gap'] = Y_vals gaps_play['PlayId'] = play gaps_df = pd.concat([gaps_df, gaps_play], axis=0, ignore_index=True) gaps_agg_x = gaps_df.groupby('PlayId').agg({'X_gap':['median','max']}).reset_index() gaps_agg_x.columns = [''.join(col) for col in gaps_agg_x.columns.values] gaps_agg_x = gaps_agg_x.loc[gaps_agg_x.index.repeat(22)].reset_index(drop=True) gaps_agg_y = gaps_df.groupby('PlayId').agg({'Y_gap':['median','max']}).reset_index() gaps_agg_y.columns = [''.join(col) for col in gaps_agg_y.columns.values] gaps_agg_y = gaps_agg_y.loc[gaps_agg_y.index.repeat(22)].reset_index(drop=True) df.update(gaps_agg_x) df.update(gaps_agg_y) df['XY_gap_area'] = df['X_gapmax'] * df['Y_gapmax'] df.drop(['X','Y'], axis=1, inplace=True) return df def combine_features(df): df = map_team_name(df) df = get_team_on_offense(df) df = map_offense_defense_team(df) df = clean_position(df) df = get_is_rusher(df) df = create_general_position(df) df = get_player_age(df) df = get_player_weights_bmi(df) yardline = update_yardline(df) df = update_orientation(df, yardline) df = get_redzone(df) df = get_dis_yardline(df) # use for defender distance only df = get_dis_from_yl(df) # absolute distance for both off and def df = get_dis_rusher(df) df = get_dis_features(df) df = get_mech_feats(df) agg_cols = ['X','Y','A','Dir','DisFromYL','DisRusher','Force','Momentum','ForceX','Dis' ] for agg_col in agg_cols: df = get_team_aggs(df, col=agg_col, for_offense=True) df = get_team_aggs(df, col=agg_col, for_offense=False) del agg_cols df.drop(['DisQB','DisC','MinOffenseDisRusher'],axis=1,inplace=True) off_less_def_feats = ['X'] for feat in off_less_def_feats: df = get_off_less_def_feats(df, feat) df = get_rusher_feats(df) df = get_gap_feats(df) return df df = combine_features(df) df = df.fillna(-999) df = df.select_dtypes(exclude=['object']) df.drop(['RusherMass','PlayerAge','PlayerBMI','DisYardLine', 'DisRusher','NflIdRusher','IsOnOffense', 'NflId','JerseyNumber','IsRusher','DisRusherNearestYardLine', 'Weight','Force','Momentum','KE','Work','Power','Impulse', 'SX','SY','ForceX','ForceY','MomentumX','MomentumY','WorkX', 'WorkY','PowerX','PowerY','ImpulseX','ImpulseY'], axis=1, inplace=True) df = df.drop_duplicates().reset_index(drop=True) return df train = pd.read_csv('../input/nfl-big-data-bowl-2020/train.csv') outcomes = train[['GameId','PlayId','Yards']].drop_duplicates() train_basetable = create_features(train) X = train_basetable.copy() X = X.sample(frac=1).reset_index(drop=True) yards = X.Yards y = np.zeros((yards.shape[0], 199)) for idx, target in enumerate(list(yards)): y[idx][99 + target] = 1 print(train_basetable.shape) train_basetable.head() cat = ['InDefenseRedzone'] num = list(set(X.columns.values.tolist()) - set(cat)) 
num.remove('GameId') num.remove('PlayId') print(len(cat)) print(len(num)) features = ['GameId','PlayId', 'RusherX','RusherA', 'RusherDir', 'RusherDis', 'YardLine', 'RusherDisYL', 'StdDefenseX', 'StdDefenseY', 'AvgOffenseA', 'AvgDefenseA', 'StdOffenseDir', 'StdDefenseDir', 'MaxDefenseDisFromYL', 'AvgDefenseDisRusher', 'MinDefenseDisRusher', 'AvgOffenseForce', 'AvgDefenseForce', 'AvgOffenseMomentum', 'AvgDefenseMomentum', 'AvgDefenseForceX', 'OffLessDefX', 'RusherForce', 'RusherMomentum', 'InDefenseRedzone', 'AvgOffenseDis', 'AvgDefenseDis', 'AvgOffenseDisFromYL', 'AvgDefenseDisFromYL', 'RusherKE', 'RusherWork', 'Y_gapmax' ] X = X[features] scaler = StandardScaler() num = list(set(features) & set(num)) # update num to only show intersection with features selected X[num] = scaler.fit_transform(X[num]) def model_396_1(): inputs = [] embeddings = [] for i in cat: input_ = Input(shape=(1,)) embedding = Embedding(int(np.absolute(X[i]).max() + 1), 10, input_length=1)(input_) embedding = Reshape(target_shape=(10,))(embedding) inputs.append(input_) embeddings.append(embedding) input_numeric = Input(shape=(len(num),)) embedding_numeric = Dense(512, activation='relu')(input_numeric) inputs.append(input_numeric) embeddings.append(embedding_numeric) x = Concatenate()(embeddings) x = Dense(256, activation='relu')(x) x = Dense(128, activation='relu')(x) x = Dropout(0.5)(x) output = Dense(199, activation='softmax')(x) model = Model(inputs, output) return model n_splits = 5 kf = GroupKFold(n_splits=n_splits) score = [] for i_369, (tdx, vdx) in enumerate(kf.split(X, y, X['GameId'])): print(f'Fold : {i_369}') X_train, X_val, y_train, y_val = X.iloc[tdx], X.iloc[vdx], y[tdx], y[vdx] X_train = [np.absolute(X_train[i]) for i in cat] + [X_train[num]] X_val = [np.absolute(X_val[i]) for i in cat] + [X_val[num]] model = model_396_1() model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=[]) es = EarlyStopping(monitor='val_CRPS', mode='min', restore_best_weights=True, verbose=2, patience=5) es.set_model(model) metric = Metric(model, [es], [(X_train,y_train), (X_val,y_val)]) for i in range(1): model.fit(X_train, y_train, verbose=False) for i in range(1): model.fit(X_train, y_train, batch_size=64, verbose=False) for i in range(1): model.fit(X_train, y_train, batch_size=128, verbose=False) for i in range(1): model.fit(X_train, y_train, batch_size=256, verbose=False) model.fit(X_train, y_train, callbacks=[metric], epochs=100, batch_size=1024, verbose=False) score_ = crps(y_val, model.predict(X_val)) model.save(f'keras_369_{i_369}.h5') print(score_) score.append(score_) print(np.mean(score)) models = [] for i in range(n_splits): models.append(load_model(f'keras_369_{i}.h5')) for (test_df, sample_prediction_df) in tqdm.tqdm(iter_test): basetable = create_features(test_df) basetable = basetable[features] basetable[num] = scaler.transform(basetable[num]) test_ = [np.absolute(basetable[i]) for i in cat] + [basetable[num]] y_pred = np.mean([model.predict(test_) for model in models], axis=0) y_pred = np.clip(np.cumsum(y_pred, axis=1), 0, 1).tolist()[0] preds_df = pd.DataFrame(data=[y_pred], columns=sample_prediction_df.columns) env.predict(preds_df) env.write_submission_file() ```
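The target encoding and the `crps` metric above go together: each play's `Yards` value is one-hot encoded at index `99 + Yards` of a 199-long vector, the softmax output and the target are both turned into cumulative distributions with `cumsum`, and the squared differences are averaged over the 199 thresholds and over plays. A minimal standalone sketch for a single play (the prediction values below are made up for illustration):

```
import numpy as np

def crps_single_play(y_true_onehot, y_pred_probs):
    # Cumulative distributions, clipped to [0, 1] as in crps() above.
    y_true_cdf = np.clip(np.cumsum(y_true_onehot), 0, 1)
    y_pred_cdf = np.clip(np.cumsum(y_pred_probs), 0, 1)
    # Mean squared difference over the 199 yardage thresholds (-99 .. 99).
    return ((y_true_cdf - y_pred_cdf) ** 2).sum() / 199

# True gain of 3 yards -> a 1 at index 99 + 3.
y_true = np.zeros(199)
y_true[99 + 3] = 1.0

# A made-up prediction spreading probability over a 2-5 yard gain.
y_pred = np.zeros(199)
y_pred[99 + 2:99 + 6] = 0.25

print(crps_single_play(y_true, y_pred))  # lower is better; 0 is a perfect CDF match
```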
github_jupyter
import numpy as np import pandas as pd import sklearn.metrics as mtr from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from keras.models import Sequential from keras.callbacks import Callback, EarlyStopping from keras.models import Model from keras.layers import Input, Dense, Concatenate, Reshape, Dropout, merge, Add from keras.layers.embeddings import Embedding from sklearn.model_selection import KFold,GroupKFold import warnings import random as rn import math import datetime import tensorflow as tf from keras.models import load_model import os import tqdm warnings.filterwarnings("ignore") pd.options.display.max_columns = 200 from kaggle.competitions import nflrush env = nflrush.make_env() iter_test = env.iter_test() # evaluation metric def crps(y_true, y_pred): y_true = np.clip(np.cumsum(y_true, axis=1), 0, 1) y_pred = np.clip(np.cumsum(y_pred, axis=1), 0, 1) return ((y_true - y_pred) ** 2).sum(axis=1).sum(axis=0) / (199 * y_true.shape[0]) # author : nlgn # Link : https://www.kaggle.com/kingychiu/keras-nn-starter-crps-early-stopping class Metric(Callback): def __init__(self, model, callbacks, data): super().__init__() self.model = model self.callbacks = callbacks self.data = data def on_train_begin(self, logs=None): for callback in self.callbacks: callback.on_train_begin(logs) def on_train_end(self, logs=None): for callback in self.callbacks: callback.on_train_end(logs) def on_epoch_end(self, batch, logs=None): X_train, y_train = self.data[0][0], self.data[0][1] y_pred = self.model.predict(X_train) y_true = np.clip(np.cumsum(y_train, axis=1), 0, 1) y_pred = np.clip(np.cumsum(y_pred, axis=1), 0, 1) tr_s = ((y_true - y_pred) ** 2).sum(axis=1).sum(axis=0) / (199 * X_train[-1].shape[0]) tr_s = np.round(tr_s, 6) logs['tr_CRPS'] = tr_s X_valid, y_valid = self.data[1][0], self.data[1][1] y_pred = self.model.predict(X_valid) y_true = np.clip(np.cumsum(y_valid, axis=1), 0, 1) y_pred = np.clip(np.cumsum(y_pred, axis=1), 0, 1) val_s = ((y_true - y_pred) ** 2).sum(axis=1).sum(axis=0) / (199 * X_valid[-1].shape[0]) val_s = np.round(val_s, 6) logs['val_CRPS'] = val_s print('tr CRPS', tr_s, 'val CRPS', val_s) for callback in self.callbacks: callback.on_epoch_end(batch, logs) def create_features(df): def new_X(x_coordinate, play_direction): if play_direction == 'left': return 120.0 - x_coordinate else: return x_coordinate def new_line(rush_team, field_position, yardline): if rush_team == field_position: # offense starting at X = 0 plus the 10 yard endzone plus the line of scrimmage return 10.0 + yardline else: # half the field plus the yards between midfield and the line of scrimmage return 60.0 + (50 - yardline) def new_orientation(angle, play_direction): if play_direction == 'left': new_angle = 360.0 - angle if new_angle == 360.0: new_angle = 0.0 return new_angle else: return angle def euclidean_distance(x1,y1,x2,y2): x_diff = (x1-x2)**2 y_diff = (y1-y2)**2 return np.sqrt(x_diff + y_diff) def back_direction(orientation): if orientation > 180.0: return 1 else: return 0 def map_team_name(df): map_abbr = {'ARI': 'ARZ', 'BAL': 'BLT', 'CLE': 'CLV', 'HOU': 'HST'} for abb in df['PossessionTeam'].unique(): map_abbr[abb] = abb df['PossessionTeam'] = df['PossessionTeam'].map(map_abbr) df['HomeTeamAbbr'] = df['HomeTeamAbbr'].map(map_abbr) df['VisitorTeamAbbr'] = df['VisitorTeamAbbr'].map(map_abbr) df['FieldPosition'] = df['FieldPosition'].map(map_abbr) return df def clean_position(df): def get_position(pos): if pos == 'SAF': return 'DB' if pos == 'S': return 
'DB' elif pos == 'OG': return 'G' elif pos == "OT": return 'T' else: return pos df['Position'] = df['Position'].apply(get_position) return df def update_yardline(df): new_yardline = df[df['NflId'] == df['NflIdRusher']] new_yardline['YardLine'] = new_yardline[['PossessionTeam','FieldPosition','YardLine']].apply(lambda x: new_line(x[0],x[1],x[2]), axis=1) new_yardline = new_yardline[['GameId','PlayId','YardLine']] return new_yardline def update_orientation(df, yardline): df['X'] = df[['X','PlayDirection']].apply(lambda x: new_X(x[0],x[1]), axis=1) df['Orientation'] = df[['Orientation','PlayDirection']].apply(lambda x: new_orientation(x[0],x[1]), axis=1) df['Dir'] = df[['Dir','PlayDirection']].apply(lambda x: new_orientation(x[0],x[1]), axis=1) df = df.drop('YardLine', axis=1) df = pd.merge(df, yardline, on=['GameId','PlayId'], how='inner') return df def back_features(df): carriers = df[df['NflId'] == df['NflIdRusher']][['GameId','PlayId','NflIdRusher','X','Y','Orientation','Dir','YardLine']] carriers['RusherDisYardLine'] = carriers['YardLine'] - carriers['X'] carriers['back_oriented_down_field'] = carriers['Orientation'].apply(lambda x: back_direction(x)) carriers['back_moving_down_field'] = carriers['Dir'].apply(lambda x: back_direction(x)) carriers = carriers.rename(columns={'X':'back_X', 'Y':'back_Y'}) carriers = carriers[['GameId','PlayId','NflIdRusher','back_X','back_Y', 'RusherDisYardLine','back_oriented_down_field','back_moving_down_field']] return carriers def features_relative_to_back(df, carriers): player_distance = df[['GameId','PlayId','NflId','X','Y']] player_distance = pd.merge(player_distance, carriers, on=['GameId','PlayId'], how='inner') player_distance = player_distance[player_distance['NflId'] != player_distance['NflIdRusher']] player_distance['dist_to_back'] = player_distance[['X','Y','back_X','back_Y']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) player_distance = player_distance.groupby(['GameId','PlayId','RusherDisYardLine','back_oriented_down_field','back_moving_down_field'])\ .agg({'dist_to_back':['min','max','mean','std']})\ .reset_index() player_distance.columns = ['GameId','PlayId','RusherDisYardLine','back_oriented_down_field','back_moving_down_field', 'min_dist','max_dist','mean_dist','std_dist'] return player_distance def create_features(df): def new_X(x_coordinate, play_direction): if play_direction == 'left': return 120.0 - x_coordinate else: return x_coordinate def new_line(rush_team, field_position, yardline): if rush_team == field_position: # offense starting at X = 0 plus the 10 yard endzone plus the line of scrimmage return 10.0 + yardline else: # half the field plus the yards between midfield and the line of scrimmage return 60.0 + (50 - yardline) def new_orientation(angle, play_direction): if play_direction == 'left': new_angle = 360.0 - angle if new_angle == 360.0: new_angle = 0.0 return new_angle else: return angle def euclidean_distance(x1,y1,x2,y2): x_diff = (x1-x2)**2 y_diff = (y1-y2)**2 return np.sqrt(x_diff + y_diff) def back_direction(orientation): if orientation > 180.0: return 1 else: return 0 def map_team_name(df): map_abbr = {'ARI': 'ARZ', 'BAL': 'BLT', 'CLE': 'CLV', 'HOU': 'HST'} for abb in df['PossessionTeam'].unique(): map_abbr[abb] = abb df['PossessionTeam'] = df['PossessionTeam'].map(map_abbr) for abb in df['HomeTeamAbbr'].unique(): map_abbr[abb] = abb df['HomeTeamAbbr'] = df['HomeTeamAbbr'].map(map_abbr) for abb in df['VisitorTeamAbbr'].unique(): map_abbr[abb] = abb df['VisitorTeamAbbr'] = 
df['VisitorTeamAbbr'].map(map_abbr) for abb in df['FieldPosition'].unique(): map_abbr[abb] = abb df['FieldPosition'] = df['FieldPosition'].map(map_abbr) return df def clean_position(df): def get_position(pos): if pos == 'SAF': return 'DB' if pos == 'S': return 'DB' elif pos == 'OG': return 'G' elif pos == "OT": return 'T' else: return pos df['Position'] = df['Position'].apply(get_position) return df def update_yardline(df): new_yardline = df[df['NflId'] == df['NflIdRusher']] new_yardline['YardLine'] = new_yardline[['PossessionTeam','FieldPosition','YardLine']].apply(lambda x: new_line(x[0],x[1],x[2]), axis=1) new_yardline = new_yardline[['GameId','PlayId','YardLine']] return new_yardline def update_orientation(df, yardline): df['X'] = df[['X','PlayDirection']].apply(lambda x: new_X(x[0],x[1]), axis=1) df['Orientation'] = df[['Orientation','PlayDirection']].apply(lambda x: new_orientation(x[0],x[1]), axis=1) df['Dir'] = df[['Dir','PlayDirection']].apply(lambda x: new_orientation(x[0],x[1]), axis=1) df = df.drop('YardLine', axis=1) df = pd.merge(df, yardline, on=['GameId','PlayId'], how='inner') return df def back_features(df): carriers = df[df['NflId'] == df['NflIdRusher']][['GameId','PlayId','NflIdRusher','X','Y','Orientation','Dir','YardLine']] carriers['RusherDisYardLine'] = carriers['YardLine'] - carriers['X'] carriers['back_oriented_down_field'] = carriers['Orientation'].apply(lambda x: back_direction(x)) carriers['back_moving_down_field'] = carriers['Dir'].apply(lambda x: back_direction(x)) carriers = carriers.rename(columns={'X':'back_X', 'Y':'back_Y'}) carriers = carriers[['GameId','PlayId','NflIdRusher','back_X','back_Y', 'RusherDisYardLine','back_oriented_down_field','back_moving_down_field']] return carriers def features_relative_to_back(df, carriers): player_distance = df[['GameId','PlayId','NflId','X','Y']] player_distance = pd.merge(player_distance, carriers, on=['GameId','PlayId'], how='inner') player_distance = player_distance[player_distance['NflId'] != player_distance['NflIdRusher']] player_distance['dist_to_back'] = player_distance[['X','Y','back_X','back_Y']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) player_distance = player_distance.groupby(['GameId','PlayId','RusherDisYardLine','back_oriented_down_field','back_moving_down_field'])\ .agg({'dist_to_back':['min','max','mean','std']})\ .reset_index() player_distance.columns = ['GameId','PlayId','RusherDisYardLine','back_oriented_down_field','back_moving_down_field', 'min_dist','max_dist','mean_dist','std_dist'] return player_distance def create_general_position(df): def get_general_position(pos): if pos == 'SS' or pos == 'FS' or pos == 'CB' or pos == 'DB': return 'DB' elif pos == 'DE' or pos == 'DT' or pos == 'DL': return 'DL' elif pos == 'ILB' or pos == 'OLB' or pos == 'MLB' or pos == 'LB': return 'LB' elif pos == 'WR': return 'WR' elif pos == 'TE': return 'TE' elif pos == 'T' or pos == 'G' or pos == 'C' or pos == 'NT' or pos == 'OL': return 'OL' elif pos == 'QB' or pos == 'RB' or pos == 'FB' or pos == 'HB' or pos == 'TB' or pos == 'WB': return 'OB' else: return 'Other' df['GeneralPosition'] = df['Position'].apply(get_general_position) return df def get_team_on_offense(df): df['TeamOnOffense'] = "home" df.loc[df.PossessionTeam != df.HomeTeamAbbr, 'TeamOnOffense'] = "away" df['IsOnOffense'] = df.Team == df.TeamOnOffense return df def map_offense_defense_team(df): df['OffenseTeam'] = df['VisitorTeamAbbr'] df.loc[df.TeamOnOffense == 'home', 'OffenseTeam'] = df['HomeTeamAbbr'] df['DefenseTeam'] = 
df['VisitorTeamAbbr'] df.loc[df.TeamOnOffense == 'away', 'DefenseTeam'] = df['HomeTeamAbbr'] df['IsOffenseAtHome'] = True df.loc[df.TeamOnOffense == 'away', 'IsOffenseAtHome'] = False return df def get_is_offense_winning(df): df['OffenseScore'] = df['HomeScoreBeforePlay'] df.loc[df.TeamOnOffense == 'away', 'OffenseScore'] = df['VisitorScoreBeforePlay'] df['DefenseScore'] = df['VisitorScoreBeforePlay'] df.loc[df.TeamOnOffense == 'away', 'DefenseScore'] = df['HomeScoreBeforePlay'] df['OffenseLessDefenseScore'] = df['OffenseScore'] - df['DefenseScore'] df['OffenseInOwnTerritory'] = False df.loc[df.FieldPosition == df.OffenseTeam, 'OffenseInOwnTerritory'] = True df.drop(['OffenseScore','DefenseScore'], axis=1, inplace=True) return df def get_general_pos_counts(df): df['NumberOfBacksOnPlay'] = 0 df['NumberOfOLinemenOnPlay'] = 0 df['NumberOfWRsOnPlay'] = 0 df['NumberOfTEsOnPlay'] = 0 df['NumberOfDBsOnPlay'] = 0 df['NumberOfDLinemenOnPlay'] = 0 df['NumberOfLBsOnPlay'] = 0 # Pivot to find counts of each general position gen_pos_counts = df[['PlayId','GeneralPosition']].pivot_table(index='PlayId', columns='GeneralPosition', aggfunc=len, fill_value=0) gen_pos_counts = gen_pos_counts.rename(columns = {'DB':'NumberOfDBsOnPlay', 'DL':'NumberOfDLinemenOnPlay', 'LB':'NumberOfLBsOnPlay', 'OB':'NumberOfBacksOnPlay', 'OL':'NumberOfOLinemenOnPlay', 'TE':'NumberOfTEsOnPlay', 'WR':'NumberOfWRsOnPlay'}) gen_pos_counts = gen_pos_counts.reset_index(drop=False) del gen_pos_counts.columns.name gen_pos_counts_cols = gen_pos_counts.columns.values.tolist() gen_pos_counts = gen_pos_counts.loc[gen_pos_counts.index.repeat(22)].reset_index(drop=True) df.update(gen_pos_counts) return df def utc2sec(x): return int(x.split("-")[2].split(":")[2].split(".")[0]) def gameclock2secs(x): clock = x.split(":") return (60 * int(clock[0])) + int(clock[1]) def str_to_float(txt): try: return float(txt) except: return -1 def get_time_features(df): df['TimeBetweenSnapHandoff'] = df['TimeHandoff'].apply(utc2sec) - df['TimeSnap'].apply(utc2sec) df['QuarterGameSecs'] = df['GameClock'].apply(gameclock2secs) df['TotalGameSecsPlayed'] = (900 - df['QuarterGameSecs']) + ((df['Quarter'] - 1) * 900) df['HalfGameSecsLeft'] = df['QuarterGameSecs'] df.loc[(df['Quarter'].isin([1,3])), 'HalfGameSecsLeft'] = (900 + df['QuarterGameSecs']) return(df) def get_player_age(df): def timesnap2day(x): days = x.split("-") return 365 * int(days[0]) + 30 * int(days[1]) + int(days[2][:2]) def birthday2day(x): days = x.split("/") return 30 * int(days[0]) + int(days[1]) + 365 * int(days[2]) df['PlayerAge'] = df['TimeSnap'].apply(timesnap2day) - df['PlayerBirthDate'].apply(birthday2day) df.drop('PlayerBirthDate', axis=1, inplace=True) return df def get_player_weights_bmi(df): def height2inch(x): height = x.split("-") return 12 * int(height[0]) + int(height[1]) df['PlayerHeight'] = df['PlayerHeight'].apply(height2inch) df = df.rename(columns={'PlayerWeight':'PlayerMass'}) df['PlayerBMI'] = df['PlayerMass'] / df['PlayerHeight'] return df def get_is_rusher(df): df['IsRusher'] = df.NflId == df.NflIdRusher return df def get_redzone(df): df['InOffenseRedzone'] = False df.loc[df.YardLine <= 30, 'InOffenseRedzone'] = True df['InDefenseRedzone'] = False df.loc[df.YardLine >= 90, 'InDefenseRedzone'] = True return df def get_qb_kneel(df): df['QBKneel'] = False df.loc[ ((df.Quarter == 2) | (df.Quarter == 4)) & (df.GameClock <= '02:00') & (df.OffenseLessDefenseScore > 0) & (df.NumberOfBacksOnPlay >= 3) & (df.NumberOfTEsOnPlay >= 2), 'QBKneel' ] = True return df def 
get_dis_yardline(df): """ For defender use only """ df['DisYardLine'] = 0 df.loc[df.IsOnOffense == True, 'DisYardLine'] = df['YardLine'] - df['X'] df.loc[df.IsOnOffense == False, 'DisYardLine'] = df['X'] - df['YardLine'] return df def get_no_defenders_yl(df): df['NoDefenderYL'] = 'NaN' df.loc[(df.IsOnOffense == False) & (df.DisYardLine < 0), 'NoDefenderYL'] = 'NoDefendersBelow0YL' df.loc[(df.IsOnOffense == False) & ((df.DisYardLine >= 0) & (df.DisYardLine < 3)), 'NoDefenderYL'] = 'NoDefenders0_2YL' df.loc[(df.IsOnOffense == False) & ((df.DisYardLine >= 3) & (df.DisYardLine < 6)), 'NoDefenderYL'] = 'NoDefenders3_5YL' df.loc[(df.IsOnOffense == False) & ((df.DisYardLine >= 6) & (df.DisYardLine < 9)), 'NoDefenderYL'] = 'NoDefenders6_8YL' df.loc[(df.IsOnOffense == False) & (df.DisYardLine >= 9), 'NoDefenderYL'] = 'NoDefendersAbove9YL' df['NoDefendersBelow0YL'] = 0 df['NoDefenders0_2YL'] = 0 df['NoDefenders3_5YL'] = 0 df['NoDefenders6_8YL'] = 0 df['NoDefendersAbove9YL'] = 0 # Pivot to find counts of each general position no_defenders = df[['PlayId','NoDefenderYL']].pivot_table(index='PlayId', columns='NoDefenderYL', aggfunc=len, fill_value=0) no_defenders = no_defenders.reset_index(drop=False).drop('NaN', axis=1) del no_defenders.columns.name no_defenders_cols = no_defenders.columns.values.tolist() no_defenders = no_defenders.loc[no_defenders.index.repeat(22)].reset_index(drop=True) df.update(no_defenders) return df def get_inside_runs(df): df['IsInside'] = 0 inside1 = df[ # Outside seams and running in (((df.RusherY > -2.00) & (df.RusherY <= 23.55)) & ((df.RusherDir > 270) | (df.RusherDir <= 90))) | (((df.RusherY > 29.75) & (df.RusherY <= 55.00)) & ((df.RusherDir > 90) & (df.RusherDir <= 270))) ]['PlayId'] inside2 = df[ # Inside the seams and running in (((df.RusherY > 23.55) & (df.RusherY <= 29.75)) & ((df.RusherDir > 40) & (df.RusherDir <= 140))) ]['PlayId'] inside = inside1.tolist() + inside2.tolist() df.loc[df.PlayId.isin(inside), 'IsInside'] = 1 return df def get_dis_from_yl(df): """ For both off and def """ df['DisFromYL'] = abs(df['YardLine'] - df['X']) return df def get_dis_rusher(df): rusher_xy = df.loc[df.IsRusher == True, ['GameId','PlayId','X','Y']].rename(columns={'X':'RusherX','Y':'RusherY'}) df = df.merge(rusher_xy, on=['GameId','PlayId']) df['DisRusher'] = df[['X','Y','RusherX','RusherY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df.drop(['RusherX','RusherY'], axis=1,inplace=True) return df def get_dis_features(df): """ Returns DisRusherNearestYardLine, RusherDisQB, RusherDisC and RusherDisMLB, DisC, DisQB """ def get_rusher_dis_mlb(df): lb_xy = df.loc[(df.Position == 'MLB') | (df.Position == 'ILB'), ['PlayId','X','Y']].rename(columns={'X':'MLBX', 'Y':'MLBY'}) rusher_lb_xy = lb_xy.merge(rusher_xy, on=['PlayId'], how='left') rusher_lb_xy['RusherDisMLB'] = rusher_lb_xy[ ['RusherX','RusherY','MLBX','MLBY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) rusher_lb_xy.drop(['RusherX','RusherY','MLBX','MLBY'],axis=1, inplace=True) rusher_lb_dis = rusher_lb_xy.groupby(['PlayId']).agg({'RusherDisMLB':['min'],}).reset_index() rusher_lb_dis.columns = ['PlayId','RusherDisMLB'] return rusher_lb_dis rusher_xy = df.loc[df.IsRusher == True, ['PlayId','X','Y']].rename(columns={'X':'RusherX','Y':'RusherY'}) qb_xy = df.loc[df.Position == 'QB', ['PlayId','X','Y']].rename(columns={'X':'QBX','Y':'QBY'}) c_xy = df.loc[df.Position == 'C', ['PlayId','X','Y']].rename(columns={'X':'CX','Y':'CY'}) try: rusher_lb_dis = get_rusher_dis_mlb(df) except: rusher_lb_dis = 
np.nan rusherxy_qbxy = rusher_xy.merge(qb_xy, on=['PlayId']) rusherxy_qbxy_cxy = rusherxy_qbxy.merge(c_xy, on=['PlayId']) try: dis_total_xy = rusherxy_qbxy_cxy.merge(rusher_lb_dis, on=['PlayId']) except: dis_total_xy = rusherxy_qbxy_cxy dis_total_xy['RusherDisMLB'] = np.nan dis_total_xy = dis_total_xy.loc[dis_total_xy.index.repeat(22)].reset_index(drop=True) dis_total_xy.drop(['PlayId'], axis=1, inplace=True) df['RusherX'] = 0 df['RusherY'] = 0 df['QBX'] = 0 df['QBY'] = 0 df['CX'] = 0 df['CY'] = 0 df['RusherDisMLB'] = 0 df.update(dis_total_xy) df['DisRusherNearestYardLine'] = df[['YardLine','RusherY','X','Y']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df['RusherDisQB'] = df[['RusherX','RusherY','QBX','QBY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df['RusherDisC'] = df[['RusherX','RusherY','CX','CY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df['DisC'] = df[['X','Y','CX','CY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df['DisQB'] = df[['X','Y','QBX','QBY']].apply(lambda x: euclidean_distance(x[0],x[1],x[2],x[3]), axis=1) df.drop(['RusherX','RusherY','QBX','QBY','CX','CY'], axis=1,inplace=True) return df def get_team_aggs(df, col, for_offense=True): aggs = ['Avg','Min','Max','Std'] if for_offense == True: team_agg = df[df.IsOnOffense == True][['PlayId'] + [col]] team_agg = df[['PlayId'] + [col]] team_agg = team_agg.groupby(['PlayId']).agg({col:['mean','min','max','std']}).reset_index() avg_col = 'AvgOffense' + col min_col = 'MinOffense' + col max_col = 'MaxOffense' + col std_col = 'StdOffense' + col if for_offense == False: team_agg = df[df.IsOnOffense == False][['PlayId'] + [col]] team_agg = team_agg.groupby(['PlayId']).agg({col:['mean','min','max','std']}).reset_index() avg_col = 'AvgDefense' + col min_col = 'MinDefense' + col max_col = 'MaxDefense' + col std_col = 'StdDefense' + col team_agg.drop(['PlayId'], axis=1, inplace=True) team_agg_cols = [avg_col,min_col,max_col,std_col] team_agg.columns = team_agg_cols team_agg = team_agg.loc[team_agg.index.repeat(22)].reset_index(drop=True) for col in team_agg_cols: df[col] = 0 df.update(team_agg) return df def get_rusher_dis_mlb_inside(df): try: df['RusherDisMLBByIsInside'] = (1 / df['RusherDisMLB']) * df['IsInside'] df['RusherDisMLBByIsInside'] = df['RusherDisMLBByIsInside'].replace([np.inf, -np.inf], np.nan) return df except: df['RusherDisMLBByIsInside'] = np.nan return df def get_yards_by_down(df): df['YardsByDownSqrt'] = (df['Distance'] * df['Down']) **(1/2) return df def get_diff_rusher_dir_otation(df): df['DiffRusherDirOtation'] = df['RusherDir'] - df['RusherOrientation'] return df def get_mech_feats(df): df['Weight'] = df['PlayerMass'] * 9.806 # acceleration gravity df['ChangeTime'] = df['Dis'] / df['S'] df['Force'] = df['PlayerMass'] * df['A'] df['Momentum'] = df['PlayerMass'] * df['S'] df['KE'] = 0.5 * df['PlayerMass'] * (df['S']**2) df['Work'] = df['Force'] * df['Dis'] df['Power'] = df['Work'] / df['ChangeTime'] df['Impulse'] = df['Force'] * df['ChangeTime'] angle = 90 - df['Dir'] df['SX'] = np.abs(df['S'] * np.cos(angle)) df['SY'] = np.abs(df['S'] * np.sin(angle)) df['ForceX'] = np.abs(df['Force'] * np.cos(angle)) df['ForceY'] = np.abs(df['Force'] * np.sin(angle)) df['MomentumX'] = np.abs(df['Momentum'] * np.cos(angle)) df['MomentumY'] = np.abs(df['Momentum'] * np.sin(angle)) df['WorkX'] = np.abs(df['Work'] * np.cos(angle)) df['WorkY'] = np.abs(df['Work'] * np.sin(angle)) df['PowerX'] = np.abs(df['Power'] * np.cos(angle)) df['PowerY'] = 
np.abs(df['Power'] * np.sin(angle)) df['ImpulseX'] = np.abs(df['Impulse'] * np.cos(angle)) df['ImpulseY'] = np.abs(df['Impulse'] * np.sin(angle)) return df def get_gen_position_feats(df, position): pos_feat = df.loc[df.GeneralPosition == position, ['PlayId','A','S','Dir', 'Orientation','Dis', 'PlayerMass','PlayerHeight']] pos_feat = pos_feat.rename(columns={'A':position+'A','S':position+'S','Dir':position+'Dir', 'Orientation':position+'Orientation', 'Dis':position+'Dis','PlayerMass':position+'Weight', 'PlayerHeight':position+'Height'}) pos_feat = pos_feat.groupby(['PlayId']).agg( {position+'A':['mean','min','max'], position+'S':['mean','min','max'], position+'Dir':['mean','min','max'], position+'Orientation':['mean','min','max'], position+'Dis':['mean','min','max'], position+'Weight':['mean','min','max'], position+'Height':['mean','min','max']}).reset_index() pos_feat.columns = [''.join(col) for col in pos_feat.columns.values] pos_feat_columns = pos_feat.columns.tolist() pos_feat_columns.remove('PlayId') pos_feat.drop('PlayId',axis=1,inplace=True) pos_feat = pos_feat.loc[pos_feat.index.repeat(22)].reset_index(drop=True) for feat in pos_feat_columns: df[feat] = 0 df.update(pos_feat) return df def get_off_less_def_feats(df, feat): off_feat = df.loc[df.IsOnOffense == True, ['PlayId',feat]] off_feat = off_feat.groupby(['PlayId']).agg({feat:['sum']}).reset_index() off_feat.drop('PlayId', axis=1,inplace=True) off_feat.columns = ['Off'+feat] def_feat = df.loc[df.IsOnOffense == False, ['PlayId',feat]] def_feat = def_feat.groupby(['PlayId']).agg({feat:['sum']}).reset_index() def_feat.drop('PlayId', axis=1,inplace=True) def_feat.columns = ['Def'+feat] off_def_feat = pd.DataFrame(off_feat['Off'+feat] - def_feat['Def'+feat], columns=['OffLessDef'+feat]) df['OffLessDef'+feat] = 0 off_def_feat = off_def_feat.loc[off_def_feat.index.repeat(22)].reset_index(drop=True) df.update(off_def_feat) return df def get_rusher_feats(df): rusher_feats = df.loc[df.IsRusher == True,['X','Y','S','A','Dis', 'Orientation','Dir','DisFromYL', 'PlayerMass','PlayerHeight']] rusher_feats = rusher_feats.loc[rusher_feats.index.repeat(22)].reset_index(drop=True) rusher_feats = rusher_feats.rename(columns={'X':'RusherX','Y':'RusherY',}) df['RusherX'] = 0 df['RusherY'] = 0 df.update(rusher_feats) df = df.rename(columns={'S':'RusherS', 'A':'RusherA','Dis':'RusherDis', 'Orientation':'RusherOrientation', 'Dir':'RusherDir','DisFromYL':'RusherDisYL', 'PlayerMass':'RusherMass', 'PlayerHeight':'RusherHeight'}) df['RusherWeight'] = df['RusherMass'] * 9.806 # acceleration gravity df['ChangeTime'] = df['RusherDis'] / df['RusherS'] df['RusherForce'] = df['RusherMass'] * df['RusherA'] df['RusherMomentum'] = df['RusherMass'] * df['RusherS'] df['RusherKE'] = 0.5 * df['RusherMass'] * (df['RusherS']**2) df['RusherWork'] = df['RusherForce'] * df['RusherDis'] df['RusherPower'] = df['RusherWork'] / df['ChangeTime'] df['RusherImpulse'] = df['RusherForce'] * df['ChangeTime'] angle = 90 - df['RusherDir'] df['RusherSX'] = np.abs(df['RusherS'] * np.cos(angle)) df['RusherSY'] = np.abs(df['RusherS'] * np.sin(angle)) df['RusherForceX'] = np.abs(df['RusherForce'] * np.cos(angle)) df['RusherForceY'] = np.abs(df['RusherForce'] * np.sin(angle)) df['RusherMomentumX'] = np.abs(df['RusherMomentum'] * np.cos(angle)) df['RusherMomentumY'] = np.abs(df['RusherMomentum'] * np.sin(angle)) df['RusherWorkX'] = np.abs(df['RusherWork'] * np.cos(angle)) df['RusherWorkY'] = np.abs(df['RusherWork'] * np.sin(angle)) df.drop(['ChangeTime'],axis=1,inplace=True) df = 
df.replace([np.inf, -np.inf], np.nan) df = df.fillna(0) return df def get_gap_feats(df): df['X_gapmedian'] = 0 df['X_gapmax'] = 0 df['Y_gapmedian'] = 0 df['Y_gapmax'] = 0 plays = df.loc[df.IsOnOffense == False, ['PlayId','X','Y','RusherX']] gaps_df = pd.DataFrame(columns=['PlayId','X_gap','Y_gap']) for play in plays['PlayId'].unique(): RusherX_val = df.loc[df.PlayId == play, 'RusherX'].unique()[0] X_vals = plays.loc[plays.PlayId == play, 'X'] X_vals = X_vals.append(pd.Series([RusherX_val,120]), ignore_index=True).sort_values().reset_index(drop=True) X_vals = np.diff(X_vals) Y_vals = plays.loc[plays.PlayId == play, 'Y'] Y_vals = Y_vals.append(pd.Series([0,53.3]), ignore_index=True).sort_values().reset_index(drop=True) Y_vals = np.diff(Y_vals) gaps_play = pd.DataFrame() gaps_play['X_gap'] = X_vals gaps_play['Y_gap'] = Y_vals gaps_play['PlayId'] = play gaps_df = pd.concat([gaps_df, gaps_play], axis=0, ignore_index=True) gaps_agg_x = gaps_df.groupby('PlayId').agg({'X_gap':['median','max']}).reset_index() gaps_agg_x.columns = [''.join(col) for col in gaps_agg_x.columns.values] gaps_agg_x = gaps_agg_x.loc[gaps_agg_x.index.repeat(22)].reset_index(drop=True) gaps_agg_y = gaps_df.groupby('PlayId').agg({'Y_gap':['median','max']}).reset_index() gaps_agg_y.columns = [''.join(col) for col in gaps_agg_y.columns.values] gaps_agg_y = gaps_agg_y.loc[gaps_agg_y.index.repeat(22)].reset_index(drop=True) df.update(gaps_agg_x) df.update(gaps_agg_y) df['XY_gap_area'] = df['X_gapmax'] * df['Y_gapmax'] df.drop(['X','Y'], axis=1, inplace=True) return df def combine_features(df): df = map_team_name(df) df = get_team_on_offense(df) df = map_offense_defense_team(df) df = clean_position(df) df = get_is_rusher(df) df = create_general_position(df) df = get_player_age(df) df = get_player_weights_bmi(df) yardline = update_yardline(df) df = update_orientation(df, yardline) df = get_redzone(df) df = get_dis_yardline(df) # use for defender distance only df = get_dis_from_yl(df) # absolute distance for both off and def df = get_dis_rusher(df) df = get_dis_features(df) df = get_mech_feats(df) agg_cols = ['X','Y','A','Dir','DisFromYL','DisRusher','Force','Momentum','ForceX','Dis' ] for agg_col in agg_cols: df = get_team_aggs(df, col=agg_col, for_offense=True) df = get_team_aggs(df, col=agg_col, for_offense=False) del agg_cols df.drop(['DisQB','DisC','MinOffenseDisRusher'],axis=1,inplace=True) off_less_def_feats = ['X'] for feat in off_less_def_feats: df = get_off_less_def_feats(df, feat) df = get_rusher_feats(df) df = get_gap_feats(df) return df df = combine_features(df) df = df.fillna(-999) df = df.select_dtypes(exclude=['object']) df.drop(['RusherMass','PlayerAge','PlayerBMI','DisYardLine', 'DisRusher','NflIdRusher','IsOnOffense', 'NflId','JerseyNumber','IsRusher','DisRusherNearestYardLine', 'Weight','Force','Momentum','KE','Work','Power','Impulse', 'SX','SY','ForceX','ForceY','MomentumX','MomentumY','WorkX', 'WorkY','PowerX','PowerY','ImpulseX','ImpulseY'], axis=1, inplace=True) df = df.drop_duplicates().reset_index(drop=True) return df train = pd.read_csv('../input/nfl-big-data-bowl-2020/train.csv') outcomes = train[['GameId','PlayId','Yards']].drop_duplicates() train_basetable = create_features(train) X = train_basetable.copy() X = X.sample(frac=1).reset_index(drop=True) yards = X.Yards y = np.zeros((yards.shape[0], 199)) for idx, target in enumerate(list(yards)): y[idx][99 + target] = 1 print(train_basetable.shape) train_basetable.head() cat = ['InDefenseRedzone'] num = list(set(X.columns.values.tolist()) - set(cat)) 
num.remove('GameId') num.remove('PlayId') print(len(cat)) print(len(num)) features = ['GameId','PlayId', 'RusherX','RusherA', 'RusherDir', 'RusherDis', 'YardLine', 'RusherDisYL', 'StdDefenseX', 'StdDefenseY', 'AvgOffenseA', 'AvgDefenseA', 'StdOffenseDir', 'StdDefenseDir', 'MaxDefenseDisFromYL', 'AvgDefenseDisRusher', 'MinDefenseDisRusher', 'AvgOffenseForce', 'AvgDefenseForce', 'AvgOffenseMomentum', 'AvgDefenseMomentum', 'AvgDefenseForceX', 'OffLessDefX', 'RusherForce', 'RusherMomentum', 'InDefenseRedzone', 'AvgOffenseDis', 'AvgDefenseDis', 'AvgOffenseDisFromYL', 'AvgDefenseDisFromYL', 'RusherKE', 'RusherWork', 'Y_gapmax' ] X = X[features] scaler = StandardScaler() num = list(set(features) & set(num)) # update num to only show intersection with features selected X[num] = scaler.fit_transform(X[num]) def model_396_1(): inputs = [] embeddings = [] for i in cat: input_ = Input(shape=(1,)) embedding = Embedding(int(np.absolute(X[i]).max() + 1), 10, input_length=1)(input_) embedding = Reshape(target_shape=(10,))(embedding) inputs.append(input_) embeddings.append(embedding) input_numeric = Input(shape=(len(num),)) embedding_numeric = Dense(512, activation='relu')(input_numeric) inputs.append(input_numeric) embeddings.append(embedding_numeric) x = Concatenate()(embeddings) x = Dense(256, activation='relu')(x) x = Dense(128, activation='relu')(x) x = Dropout(0.5)(x) output = Dense(199, activation='softmax')(x) model = Model(inputs, output) return model n_splits = 5 kf = GroupKFold(n_splits=n_splits) score = [] for i_369, (tdx, vdx) in enumerate(kf.split(X, y, X['GameId'])): print(f'Fold : {i_369}') X_train, X_val, y_train, y_val = X.iloc[tdx], X.iloc[vdx], y[tdx], y[vdx] X_train = [np.absolute(X_train[i]) for i in cat] + [X_train[num]] X_val = [np.absolute(X_val[i]) for i in cat] + [X_val[num]] model = model_396_1() model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=[]) es = EarlyStopping(monitor='val_CRPS', mode='min', restore_best_weights=True, verbose=2, patience=5) es.set_model(model) metric = Metric(model, [es], [(X_train,y_train), (X_val,y_val)]) for i in range(1): model.fit(X_train, y_train, verbose=False) for i in range(1): model.fit(X_train, y_train, batch_size=64, verbose=False) for i in range(1): model.fit(X_train, y_train, batch_size=128, verbose=False) for i in range(1): model.fit(X_train, y_train, batch_size=256, verbose=False) model.fit(X_train, y_train, callbacks=[metric], epochs=100, batch_size=1024, verbose=False) score_ = crps(y_val, model.predict(X_val)) model.save(f'keras_369_{i_369}.h5') print(score_) score.append(score_) print(np.mean(score)) models = [] for i in range(n_splits): models.append(load_model(f'keras_369_{i}.h5')) for (test_df, sample_prediction_df) in tqdm.tqdm(iter_test): basetable = create_features(test_df) basetable = basetable[features] basetable[num] = scaler.transform(basetable[num]) test_ = [np.absolute(basetable[i]) for i in cat] + [basetable[num]] y_pred = np.mean([model.predict(test_) for model in models], axis=0) y_pred = np.clip(np.cumsum(y_pred, axis=1), 0, 1).tolist()[0] preds_df = pd.DataFrame(data=[y_pred], columns=sample_prediction_df.columns) env.predict(preds_df) env.write_submission_file()
# Image Preprocessing PNG ``` import tensorflow as tf import tensorflow_datasets as tfds import tensorlayer as tl from tensorflow_examples.models.pix2pix import pix2pix from IPython.display import clear_output import matplotlib.pyplot as plt import numpy as np import cv2 import skimage import skimage.morphology from skimage.measure import label import math import pandas as pd import re from pathlib import Path import imageio import scipy as sp import shutil import glob2 from tqdm import tqdm import os print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU'))) ``` ## Define Processing Fuctions ``` def cropBorders(img, l=0.01, r=0.01, u=0.04, d=0.04): nrows, ncols = img.shape # Get the start and end rows and columns l_crop = int(ncols * l) r_crop = int(ncols * (1 - r)) u_crop = int(nrows * u) d_crop = int(nrows * (1 - d)) cropped_img = img[u_crop:d_crop, l_crop:r_crop] return cropped_img def minMaxNormalise(img): norm_img = (img - img.min()) / (img.max() - img.min()) return norm_img def globalBinarise(img, thresh, maxval): binarised_img = np.zeros(img.shape, np.uint8) binarised_img[img >= thresh] = maxval return binarised_img def editMask(mask, ksize=(23, 23), operation="open"): kernel = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=ksize) if operation == "open": edited_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel) elif operation == "close": edited_mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) # Then dilate edited_mask = cv2.morphologyEx(edited_mask, cv2.MORPH_DILATE, kernel) return edited_mask def sortContoursByArea(contours, reverse=True): # Sort contours based on contour area. sorted_contours = sorted(contours, key=cv2.contourArea, reverse=reverse) # Construct the list of corresponding bounding boxes. bounding_boxes = [cv2.boundingRect(c) for c in sorted_contours] return sorted_contours, bounding_boxes def xLargestBlobs(mask, top_x=None, reverse=True): # Find all contours from binarised image. # Note: parts of the image that you want to get should be white. contours, hierarchy = cv2.findContours( image=mask, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE ) n_contours = len(contours) # Only get largest blob if there is at least 1 contour. if n_contours > 0: # Make sure that the number of contours to keep is at most equal # to the number of contours present in the mask. if n_contours < top_x or top_x == None: top_x = n_contours # Sort contours based on contour area. sorted_contours, bounding_boxes = sortContoursByArea( contours=contours, reverse=reverse ) # Get the top X largest contours. X_largest_contours = sorted_contours[0:top_x] # Create black canvas to draw contours on. to_draw_on = np.zeros(mask.shape, np.uint8) # Draw contours in X_largest_contours. X_largest_blobs = cv2.drawContours( image=to_draw_on, # Draw the contours on `to_draw_on`. contours=X_largest_contours, # List of contours to draw. contourIdx=-1, # Draw all contours in `contours`. color=1, # Draw the contours in white. thickness=-1, # Thickness of the contour lines. ) return n_contours, X_largest_blobs def applyMask(img, mask): masked_img = img.copy() masked_img[mask == 0] = 0 return masked_img def checkLRFlip(mask): # Get number of rows and columns in the image. nrows, ncols = mask.shape x_center = ncols // 2 y_center = nrows // 2 # Sum down each column. col_sum = mask.sum(axis=0) # Sum across each row. 
row_sum = mask.sum(axis=1) left_sum = sum(col_sum[0:x_center]) right_sum = sum(col_sum[x_center:-1]) if left_sum < right_sum: LR_flip = True else: LR_flip = False return LR_flip def makeLRFlip(img): flipped_img = np.fliplr(img) return flipped_img def clahe(img, clip=2.0, tile=(8, 8)): img = cv2.normalize( img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F, ) img_uint8 = img.astype("uint8") clahe_create = cv2.createCLAHE(clipLimit=clip, tileGridSize=tile) clahe_img = clahe_create.apply(img_uint8) return clahe_img def pad(img): nrows, ncols = img.shape # If padding is required... if nrows != ncols: # Take the longer side as the target shape. if ncols < nrows: target_shape = (nrows, nrows) elif nrows < ncols: target_shape = (ncols, ncols) # pad. padded_img = np.zeros(shape=target_shape) padded_img[:nrows, :ncols] = img # If padding is not required... elif nrows == ncols: # Return original image. padded_img = img return padded_img def display_images(display_list,titles,ncol=3): plt.figure(figsize=(15,15)) nrow = int(np.ceil(len(display_list)/ncol)) for i in range(len(display_list)): plt.subplot(nrow,ncol,i+1) plt.title(titles[i]) plt.imshow(display_list[i],cmap='gray') plt.show() def fullMammoPreprocess( img, l, r, d, u, thresh, maxval, ksize, operation, reverse, top_x, clip, tile, ): # Step 1: Initial crop. cropped_img = cropBorders(img=img, l=l, r=r, d=d, u=u) # Step 2: Min-max normalise. norm_img = minMaxNormalise(img=cropped_img) # Step 3: Remove artefacts. binarised_img = globalBinarise(img=norm_img, thresh=thresh, maxval=maxval) edited_mask = editMask( mask=binarised_img, ksize=(ksize, ksize), operation=operation ) _, xlargest_mask = xLargestBlobs(mask=edited_mask, top_x=top_x, reverse=reverse) masked_img = applyMask(img=norm_img, mask=xlargest_mask) # Step 4: Horizontal flip. lr_flip = checkLRFlip(mask=xlargest_mask) if lr_flip: flipped_img = makeLRFlip(img=masked_img) elif not lr_flip: flipped_img = masked_img # Step 5: CLAHE enhancement. clahe_img = clahe(img=flipped_img, clip=clip, tile=(tile, tile)) # Step 6: pad. padded_img = pad(img=clahe_img) padded_img = cv2.normalize( padded_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F, ) # Step 7: Min-max normalise. img_pre = minMaxNormalise(img=padded_img) return img_pre, lr_flip def maskPreprocess(mask, lr_flip): # Step 1: Initial crop. mask = cropBorders(img=mask) # Step 2: Horizontal flip. if lr_flip: mask = makeLRFlip(img=mask) # Step 3: Pad. mask_pre = pad(img=mask) return mask_pre def sumMasks(mask_list): summed_mask = np.zeros(mask_list[0].shape) for arr in mask_list: summed_mask = np.add(summed_mask, arr) # Binarise (there might be some overlap, resulting in pixels with # values of 510, 765, etc...) 
_, summed_mask_bw = cv2.threshold( src=summed_mask, thresh=1, maxval=255, type=cv2.THRESH_BINARY ) return summed_mask_bw ``` ## Process the Images ``` l = 0.01 r = 0.01 u = 0.04 d = 0.04 thresh = 0.1 maxval = 1.0 ksize = 23 operation = "open" reverse = True top_x = 1 clip = 2.0 tile = 8 ``` ### Test Case ``` df = pd.read_csv('/home/alangenb_mit_edu/manifests/calc_case_description_train_set.csv') df.head() df["samp_prefix"] = [re.sub(r'\/.*','',x) for x in df["image file path"].tolist()] df["input_img_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x) for x in df["image file path"].tolist()] df["input_roi_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x) for x in df["ROI mask file path"].tolist()] df["output_img_path"] = [output_topdir + re.sub(r'\.dcm$','.png',x) for x in df["image file path"].tolist()] df["output_roi_path"] = [output_topdir + re.sub(r'\.dcm$','.png',x) for x in df["ROI mask file path"].tolist()] ``` ### full mammogram ``` i = 10 img = imageio.imread(df["input_img_path"].tolist()[i]) cropped_img = cropBorders(img=img, l=l, r=r, d=d, u=u) norm_img = minMaxNormalise(img=cropped_img) binarised_img = globalBinarise(img=norm_img, thresh=thresh, maxval=maxval) edited_mask = editMask(mask=binarised_img, ksize=(ksize, ksize), operation=operation) _, xlargest_mask = xLargestBlobs(mask=edited_mask, top_x=top_x, reverse=reverse) masked_img = applyMask(img=norm_img, mask=xlargest_mask) lr_flip = checkLRFlip(mask=xlargest_mask) if lr_flip: flipped_img = makeLRFlip(img=masked_img) elif not lr_flip: flipped_img = masked_img clahe_img = clahe(img=flipped_img, clip=clip, tile=(tile, tile)) padded_img = pad(img=clahe_img) padded_img = cv2.normalize(padded_img,None,alpha=0,beta=255,norm_type=cv2.NORM_MINMAX,dtype=cv2.CV_32F) img_pre = minMaxNormalise(img=padded_img) img_pre, lr_flip = fullMammoPreprocess(img, l=l, r=r, u=u, d=d, thresh=thresh, maxval=maxval, ksize=ksize, operation=operation, reverse=reverse, top_x=top_x, clip=clip, tile=tile) display_images([img,cropped_img,norm_img,binarised_img,edited_mask,masked_img,flipped_img,clahe_img,padded_img,img_pre], ['Raw Image','Cropped Image','Normalized Image','Binarized Mask','Dilated Mask','Masked Image', 'Flipped Image','Contrast Adjusted Image','Padded Image','Final Image'], ncol=5) imageio.imwrite(df["output_img_path"].tolist()[i],(255*img_pre).astype(np.uint8)) #df["output_img_path"].tolist()[i] img = imageio.imread(df["output_img_path"].tolist()[i]) plt.imshow(img) plt.gray() plt.show() df["input_roi_path"].tolist()[i] ``` ### roi mask ``` mask_input_files = glob2.glob(input_topdir+df["samp_prefix"].tolist()[i]+"_*/**/000001.png") mask_output_files = [re.sub(input_topdir,output_topdir,x) for x in mask_input_files] mask = imageio.imread(mask_files[0]) cropped_mask = cropBorders(img=mask) if lr_flip: flipped_mask = makeLRFlip(img=cropped_mask) else: flipped_mask = cropped_mask mask_pre = pad(img=flipped_mask) mask = imageio.imread(mask_files[0]) mask_pre = maskPreprocess(mask,lr_flip) display_images([mask,cropped_mask,flipped_mask,mask_pre], ['Raw Mask','Cropped Mask','Flipped Mask','Final Mask'], ncol=4) ``` ## Process Calc-Training Images ``` l = 0.01 r = 0.01 u = 0.04 d = 0.04 thresh = 0.1 maxval = 1.0 ksize = 23 operation = "open" reverse = True top_x = 1 clip = 2.0 tile = 8 input_topdir = '/home/alangenb_mit_edu/tensorflow_datasets/downloads/manual/' output_topdir = '/home/alangenb_mit_edu/tensorflow_datasets/downloads/prepro-png/original-calc/train/' df = 
pd.read_csv('/home/alangenb_mit_edu/manifests/calc_case_description_train_set.csv') df["samp_prefix"] = [re.sub(r'\/.*','',x) for x in df["image file path"].tolist()] df["input_img_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x) for x in df["image file path"].tolist()] df["input_roi_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x) for x in df["ROI mask file path"].tolist()] df["output_img_dir"] = [output_topdir + df["samp_prefix"].tolist()[x] + "/image/" for x in np.arange(df.shape[0])] df["output_img_path"] = [df["output_img_dir"].tolist()[x] + re.sub(r'\.dcm$','.png',re.sub(r'.*\/','',df["image file path"].tolist()[x])) for x in np.arange(df.shape[0])] df["output_mask_dir"] = [output_topdir + df["samp_prefix"].tolist()[x] + "/mask/" for x in np.arange(df.shape[0])] #df["output_img_path"] = [output_topdir + re.sub(r'\.dcm$','.png',x) for x in df["image file path"].tolist()] #df["output_roi_path"] = [output_topdir + re.sub(r'\.dcm$','.png',x) for x in df["ROI mask file path"].tolist()] df = df.iloc[np.where(~df.duplicated(subset='samp_prefix'))[0],:] for x in np.arange(df.shape[0]): os.makedirs(df["output_img_dir"].tolist()[x]) os.makedirs(df["output_mask_dir"].tolist()[x]) print("All input images exist = "+str(all(np.array([Path(x).is_file() for x in df["input_img_path"].tolist()])))) print("All input ROI masks exist = "+str(all(np.array([Path(x).is_file() for x in df["input_roi_path"].tolist()])))) for i in tqdm(range(df.shape[0])): #Process images img = imageio.imread(df["input_img_path"].tolist()[i]) img_pre, lr_flip = fullMammoPreprocess(img, l=l, r=r, u=u, d=d, thresh=thresh, maxval=maxval, ksize=ksize, operation=operation, reverse=reverse, top_x=top_x, clip=clip, tile=tile) imageio.imwrite(df["output_img_path"].tolist()[i],(255*img_pre).astype(np.uint8)) #Process masks mask_input_files = glob2.glob(input_topdir+df["samp_prefix"].tolist()[i]+"_*/**/000001.png") mask_ids = [str(x+1) for x in np.arange(len(mask_input_files))] mask_output_files = [df["output_mask_dir"].tolist()[i] + "mask" + mask_ids[x] + "_000001.png" for x in np.arange(len(mask_ids))] for j in range(len(mask_input_files)): mask = imageio.imread(mask_input_files[j]) mask_pre = maskPreprocess(mask,lr_flip) imageio.imwrite(mask_output_files[j],(255*mask_pre).astype(np.uint8)) #image = imageio.imread(df["output_img_path"].tolist()[i]) #mask = imageio.imread(mask_output_files[j]) #image = imageio.imread(output_topdir+"Calc-Training_P_00008_RIGHT_CC/image/000000.png") #mask = imageio.imread(output_topdir+"Calc-Training_P_00008_RIGHT_CC/mask/mask5_000001.png") i = 20 path = input_topdir+df["samp_prefix"].tolist()[i]+"/" input_image = imageio.imread(glob2.glob(path+"**/000000.png")[0]) input_mask = imageio.imread(glob2.glob(input_topdir+df["samp_prefix"].tolist()[i]+"_*/**/000001.png")[0]) #input_image2 = np.where(input_mask==255,255,input_image) input_roi = imageio.imread(df["input_roi_path"].tolist()[i]) rows, cols = np.where(input_mask>0) xmin = min(rows); xmax = max(rows); ymin = min(cols); ymax = max(cols); input_roi2 = input_image[xmin:xmax,ymin:ymax] path = output_topdir+df["samp_prefix"].tolist()[i]+"/" output_image = imageio.imread(path+"image/000000.png") output_mask = imageio.imread(path+"mask/mask1_000001.png") #output_image2 = np.where(output_mask==1,255,output_image) rows, cols = np.where(output_mask>0) xmin = min(rows); xmax = max(rows); ymin = min(cols); ymax = max(cols); output_roi = output_image[xmin:xmax,ymin:ymax] 
display_images([input_image,input_mask,input_roi,input_roi2,output_image,output_mask,input_roi,output_roi], ['input_image','input_mask','input patch','input extracted patch', 'output_image','output_mask','input patch', 'output extracted patch'],ncol=4) np.max(output_image) print("All output images exist = "+str(all(np.array([Path(x).is_file() for x in df["output_img_path"].tolist()])))) ``` ## Process Calc Test Images ``` l = 0.01 r = 0.01 u = 0.04 d = 0.04 thresh = 0.1 maxval = 1.0 ksize = 23 operation = "open" reverse = True top_x = 1 clip = 2.0 tile = 8 input_topdir = '/home/alangenb_mit_edu/tensorflow_datasets/downloads/manual/' output_topdir = '/home/alangenb_mit_edu/tensorflow_datasets/downloads/prepro-png/original-calc/test/' df = pd.read_csv('/home/alangenb_mit_edu/manifests/calc_case_description_test_set.csv') df["samp_prefix"] = [re.sub(r'\/.*','',x) for x in df["image file path"].tolist()] df["input_img_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x) for x in df["image file path"].tolist()] df["input_roi_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x) for x in df["ROI mask file path"].tolist()] df["output_img_dir"] = [output_topdir + df["samp_prefix"].tolist()[x] + "/image/" for x in np.arange(df.shape[0])] df["output_img_path"] = [df["output_img_dir"].tolist()[x] + re.sub(r'\.dcm$','.png',re.sub(r'.*\/','',df["image file path"].tolist()[x])) for x in np.arange(df.shape[0])] df["output_mask_dir"] = [output_topdir + df["samp_prefix"].tolist()[x] + "/mask/" for x in np.arange(df.shape[0])] df = df.iloc[np.where(~df.duplicated(subset='samp_prefix'))[0],:] for x in np.arange(df.shape[0]): os.makedirs(df["output_img_dir"].tolist()[x]) os.makedirs(df["output_mask_dir"].tolist()[x]) print("All input images exist = "+str(all(np.array([Path(x).is_file() for x in df["input_img_path"].tolist()])))) print("All input ROI masks exist = "+str(all(np.array([Path(x).is_file() for x in df["input_roi_path"].tolist()])))) for i in tqdm(range(df.shape[0])): #Process images img = imageio.imread(df["input_img_path"].tolist()[i]) img_pre, lr_flip = fullMammoPreprocess(img, l=l, r=r, u=u, d=d, thresh=thresh, maxval=maxval, ksize=ksize, operation=operation, reverse=reverse, top_x=top_x, clip=clip, tile=tile) imageio.imwrite(df["output_img_path"].tolist()[i],(255*img_pre).astype(np.uint8)) #Process masks mask_input_files = glob2.glob(input_topdir+df["samp_prefix"].tolist()[i]+"_*/**/000001.png") mask_ids = [str(x+1) for x in np.arange(len(mask_input_files))] mask_output_files = [df["output_mask_dir"].tolist()[i] + "mask" + mask_ids[x] + "_000001.png" for x in np.arange(len(mask_ids))] for j in range(len(mask_input_files)): mask = imageio.imread(mask_input_files[j]) mask_pre = maskPreprocess(mask,lr_flip) imageio.imwrite(mask_output_files[j],(255*mask_pre).astype(np.uint8)) print("All output images exist = "+str(all(np.array([Path(x).is_file() for x in df["output_img_path"].tolist()])))) ``` ## Process Mass Training Images ``` input_topdir = '/home/alangenb_mit_edu/tensorflow_datasets/downloads/manual/' output_topdir = '/home/alangenb_mit_edu/tensorflow_datasets/downloads/prepro-png/original-mass/train/' df = pd.read_csv('/home/alangenb_mit_edu/manifests/mass_case_description_train_set.csv') df["samp_prefix"] = [re.sub(r'\/.*','',x) for x in df["image file path"].tolist()] df["input_img_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x) for x in df["image file path"].tolist()] df["input_patch_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x).rstrip() for x in df["ROI mask file path"].tolist()] 
df["input_mask_path"] = [input_topdir + re.sub(r'\.dcm.*','.png',x).rstrip() for x in df["cropped image file path"].tolist()] df["output_img_dir"] = [output_topdir + df["samp_prefix"].tolist()[x] + "/image/" for x in np.arange(df.shape[0])] df["output_img_path"] = [df["output_img_dir"].tolist()[x] + re.sub(r'\.dcm$','.png',re.sub(r'.*\/','',df["image file path"].tolist()[x])) for x in np.arange(df.shape[0])] df["output_mask_dir"] = [output_topdir + df["samp_prefix"].tolist()[x] + "/mask/" for x in np.arange(df.shape[0])] #df = df.iloc[np.where(~df.duplicated(subset='samp_prefix'))[0],:] last_idx = np.array([int(re.sub(r'\.dcm','',x).rstrip()[-1]) for x in df["ROI mask file path"].tolist()]) df = df.iloc[np.where(last_idx==1)[0],:] for x in np.arange(df.shape[0]): os.makedirs(df["output_img_dir"].tolist()[x],exist_ok=True) os.makedirs(df["output_mask_dir"].tolist()[x],exist_ok=True) print("All input images exist = "+str(all(np.array([Path(x).is_file() for x in df["input_img_path"].tolist()])))) print("All input ROI masks exist = "+str(all(np.array([Path(x).is_file() for x in df["input_patch_path"].tolist()])))) for i in tqdm(range(df.shape[0])): #Process images img = imageio.imread(df["input_img_path"].tolist()[i]) mask = imageio.imread(df["input_mask_path"].tolist()[i]) roi = imageio.imread(re.sub(r'0.png','1.png',df["input_mask_path"].tolist()[i])) if np.median(mask)>0: tmp = mask mask = roi roi = tmp img_pre, lr_flip = fullMammoPreprocess(img, l=l, r=r, u=u, d=d, thresh=thresh, maxval=maxval, ksize=ksize, operation=operation, reverse=reverse, top_x=top_x, clip=clip, tile=tile) if ~Path(df["output_img_path"].tolist()[i]).is_file(): imageio.imwrite(df["output_img_path"].tolist()[i],(255*img_pre).astype(np.uint8)) mask_pre = maskPreprocess(mask,lr_flip) mask_output_file = df["output_mask_dir"].tolist()[i]+"mask"+str(df["abnormality id"].tolist()[i])+"_000001.png" imageio.imwrite(mask_output_file,(255*mask_pre).astype(np.uint8)) print("All output images exist = "+str(all(np.array([Path(x).is_file() for x in df["output_img_path"].tolist()])))) ``` ## Process Mass Test Images ``` input_topdir = '/home/alangenb_mit_edu/tensorflow_datasets/downloads/manual/' output_topdir = '/home/alangenb_mit_edu/tensorflow_datasets/downloads/prepro-png/original-mass/test/' df = pd.read_csv('/home/alangenb_mit_edu/manifests/mass_case_description_test_set.csv') df["samp_prefix"] = [re.sub(r'\/.*','',x) for x in df["image file path"].tolist()] df["input_img_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x) for x in df["image file path"].tolist()] df["input_patch_path"] = [input_topdir + re.sub(r'\.dcm$','.png',x).rstrip() for x in df["ROI mask file path"].tolist()] df["input_mask_path"] = [input_topdir + re.sub(r'\.dcm.*','.png',x).rstrip() for x in df["cropped image file path"].tolist()] df["output_img_dir"] = [output_topdir + df["samp_prefix"].tolist()[x] + "/image/" for x in np.arange(df.shape[0])] df["output_img_path"] = [df["output_img_dir"].tolist()[x] + re.sub(r'\.dcm$','.png',re.sub(r'.*\/','',df["image file path"].tolist()[x])) for x in np.arange(df.shape[0])] df["output_mask_dir"] = [output_topdir + df["samp_prefix"].tolist()[x] + "/mask/" for x in np.arange(df.shape[0])] #df = df.iloc[np.where(~df.duplicated(subset='samp_prefix'))[0],:] last_idx = np.array([int(re.sub(r'\.dcm','',x).rstrip()[-1]) for x in df["ROI mask file path"].tolist()]) df = df.iloc[np.where(last_idx==1)[0],:] for x in np.arange(df.shape[0]): os.makedirs(df["output_img_dir"].tolist()[x],exist_ok=True) 
os.makedirs(df["output_mask_dir"].tolist()[x],exist_ok=True) print("All input images exist = "+str(all(np.array([Path(x).is_file() for x in df["input_img_path"].tolist()])))) print("All input ROI masks exist = "+str(all(np.array([Path(x).is_file() for x in df["input_patch_path"].tolist()])))) for i in tqdm(range(df.shape[0])): #Process images img = imageio.imread(df["input_img_path"].tolist()[i]) mask = imageio.imread(df["input_mask_path"].tolist()[i]) roi = imageio.imread(re.sub(r'0.png','1.png',df["input_mask_path"].tolist()[i])) if np.median(mask)>0: tmp = mask mask = roi roi = tmp img_pre, lr_flip = fullMammoPreprocess(img, l=l, r=r, u=u, d=d, thresh=thresh, maxval=maxval, ksize=ksize, operation=operation, reverse=reverse, top_x=top_x, clip=clip, tile=tile) if ~Path(df["output_img_path"].tolist()[i]).is_file(): imageio.imwrite(df["output_img_path"].tolist()[i],(255*img_pre).astype(np.uint8)) mask_pre = maskPreprocess(mask,lr_flip) mask_output_file = df["output_mask_dir"].tolist()[i]+"mask"+str(df["abnormality id"].tolist()[i])+"_000001.png" imageio.imwrite(mask_output_file,(255*mask_pre).astype(np.uint8)) i = np.random.choice(df.shape[0],size=1)[0] input_image = imageio.imread(df["input_img_path"].tolist()[i]) input_mask = mask = imageio.imread(df["input_mask_path"].tolist()[i]) input_roi = imageio.imread(re.sub(r'0.png','1.png',df["input_mask_path"].tolist()[i])) if np.median(input_mask)>0: tmp = input_mask input_mask = input_roi input_roi = tmp input_image2 = np.where(input_mask==255,255,input_image) output_image = imageio.imread(df["output_img_path"].tolist()[i]) output_mask = imageio.imread(df["output_mask_dir"].tolist()[i]+"mask"+str(df["abnormality id"].tolist()[i])+"_000001.png") output_image2 = np.where(output_mask==1,255,output_image) display_images([input_image,input_mask,input_image2,output_image,output_mask,output_image2], ['input_image','input_mask','masked_image','output_image','output_mask','masked_image'],ncol=3) print("All output images exist = "+str(all(np.array([Path(x).is_file() for x in df["output_img_path"].tolist()])))) ``` ## Copy over any remaining files ``` all_input_files = glob2.glob(input_topdir+"**/*.png") all_output_files = [re.sub(input_topdir,output_topdir,x) for x in all_input_files] not_replaced = np.where(~np.array([Path(x).is_file() for x in all_output_files]))[0] for j in tqdm(range(len(not_replaced))): idx = not_replaced[j] shutil.copy2(all_input_files[idx],all_output_files[idx]) print("All images exist = "+str(all(np.array([Path(x).is_file() for x in all_output_files])))) ``` all_input_files[0:10] ``` all_output_files[1] j = 10 print(all_input_files[j]) img = imageio.imread(all_input_files[j]) plt.imshow(img) plt.gray() plt.show() img = imageio.imread(all_output_files[j]) plt.imshow(img) plt.gray() plt.show() ```
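As an optional spot check (my addition, not part of the original pipeline), the per-abnormality masks written to each case's `mask/` directory can be merged into a single binary mask and compared against the padded image. This is what the unused `sumMasks` helper above appears intended for; the sketch below does the same thing with plain NumPy and assumes `output_topdir` and the directory layout created in the cells above.

```
import glob2
import imageio
import numpy as np

# Hypothetical check: pick one processed case, merge all of its lesion masks,
# and report how much of the padded image they cover.
case_dirs = sorted(glob2.glob(output_topdir + "*/mask/"))
if case_dirs:
    mask_paths = sorted(glob2.glob(case_dirs[0] + "mask*_000001.png"))
    masks = np.stack([imageio.imread(p) > 0 for p in mask_paths], axis=0)
    combined_mask = np.any(masks, axis=0)   # True wherever any lesion mask is set
    print(f"{len(mask_paths)} mask(s) cover {100 * combined_mask.mean():.2f}% of the padded image")
```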
# Table of Contents <p><div class="lev1 toc-item"><a href="#Load-CML-example-data" data-toc-modified-id="Load-CML-example-data-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Load CML example data</a></div><div class="lev1 toc-item"><a href="#Do-a-simple-standard-processing-to-get-rain-rates-for-each-CML" data-toc-modified-id="Do-a-simple-standard-processing-to-get-rain-rates-for-each-CML-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Do a simple standard processing to get rain rates for each CML</a></div><div class="lev1 toc-item"><a href="#Do-IDW-interpolation-of-CML-rain-rates" data-toc-modified-id="Do-IDW-interpolation-of-CML-rain-rates-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Do IDW interpolation of CML rain rates</a></div><div class="lev2 toc-item"><a href="#Initialize-interpolator" data-toc-modified-id="Initialize-interpolator-31"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Initialize interpolator</a></div><div class="lev2 toc-item"><a href="#Perform-interpolation-for-all-time-steps" data-toc-modified-id="Perform-interpolation-for-all-time-steps-32"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Perform interpolation for all time steps</a></div> ``` %matplotlib inline import pycomlink as pycml import matplotlib.pyplot as plt from tqdm import tqdm ``` # Load CML example data Coordinates mimic the real network topology but are fake ``` cml_list = pycml.io.examples.get_75_cmls() fig, ax = plt.subplots() for cml in cml_list: cml.plot_line(ax=ax, color='k') ``` # Do a simple standard processing to get rain rates for each CML ``` for cml in tqdm(cml_list): window_length = 60 threshold = 1.0 cml.process.wet_dry.std_dev(window_length=window_length, threshold=threshold) cml.process.baseline.linear() cml.process.baseline.calc_A() cml.process.A_R.calc_R() ``` # Do IDW interpolation of CML rain rates The `ComlinkGridInterpolator` takes a `PointsToGridInterpolator` object as argument, which is used for the interpolation of each time step. You can pass config arguments to the initialization of the `PointsToGridInterpolator`. Currently only the IDW interpolator `IdWKdtreeInterpolator` which subclasses `PointsToGridInterpolator` is available. A Kriging version is already implemented but does not work reliably. ## Initialize interpolator `resolution` is used to generate a grid using a bounding box aroudn all CMLs if no x- and y-grid are supplied. Currently CML rain rates are averaged to hourly data before interpolating. ``` cml_interp = pycml.spatial.interpolator.ComlinkGridInterpolator( cml_list=cml_list, resolution=0.01, interpolator=pycml.spatial.interpolator.IdwKdtreeInterpolator()) ``` ## Perform interpolation for all time steps ``` ds = cml_interp.loop_over_time() ds fig, ax = plt.subplots(3, 3, sharex=True, sharey=True, figsize=(12,12)) for i, axi in enumerate(ax.flat): for cml in cml_list: cml.plot_line(ax=axi, color='k') pc = axi.pcolormesh(ds.lon, ds.lat, ds.R.isel(time=20+i), cmap=plt.get_cmap('BuPu', 8), vmin=0, vmax=20) axi.set_title(cml_interp.df_cmls.index[20+i]) fig.subplots_adjust(right=0.9) cbar_ax = fig.add_axes([0.95, 0.15, 0.02, 0.7]) fig.colorbar(pc, cax=cbar_ax, label='Hourly rainfall sum in mm'); ``` # Calculate CML coverage mask Coverage for 0.05 degree coverage around CMLs. Note: Calculating coverage using lon-lat and degrees does result in distortions. In the future this will be done using a area preserving reprojection of the lon-lat coordinates before calculating coverage. 
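As a rough, back-of-the-envelope illustration of that distortion (my addition; the 47.5 degree latitude is only an assumed example value), the ground distance spanned by the 0.05 degree radius used below differs between the latitude and longitude directions:

```
import numpy as np

deg = 0.05
lat = 47.5                                   # assumed example latitude
km_per_deg_lat = 111.32                      # roughly constant along a meridian
km_per_deg_lon = 111.32 * np.cos(np.radians(lat))
print(f"{deg} deg latitude  ~ {deg * km_per_deg_lat:.1f} km")
print(f"{deg} deg longitude ~ {deg * km_per_deg_lon:.1f} km at {lat} deg N")
```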
``` cml_coverage_mask = pycml.spatial.coverage.calc_coverage_mask( cml_list=cml_list, xgrid=ds.lon.values, ygrid=ds.lat.values, max_dist_from_cml=0.05) fig, ax = plt.subplots() for cml in cml_list: cml.plot_line(ax=ax, color='k') ax.pcolormesh(ds.lon, ds.lat, cml_coverage_mask, cmap='gray'); ``` Coverage for 0.1 degree coverage around CMLs. ``` cml_coverage_mask = pycml.spatial.coverage.calc_coverage_mask( cml_list=cml_list, xgrid=ds.lon.values, ygrid=ds.lat.values, max_dist_from_cml=0.1) fig, ax = plt.subplots() for cml in cml_list: cml.plot_line(ax=ax, color='k') ax.pcolormesh(ds.lon, ds.lat, cml_coverage_mask, cmap='gray'); ``` # Plot CML rainfall sum and apply coverage map ``` fig, ax = plt.subplots() for cml in cml_list: cml.plot_line(ax=ax, color='k') pc = ax.pcolormesh( ds.lon, ds.lat, ds.R.sum(dim='time').where(cml_coverage_mask), cmap=plt.get_cmap('BuPu', 32)) plt.colorbar(pc, label='rainfall sum in mm'); ```
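For readers unfamiliar with inverse distance weighting, the sketch below shows the basic idea behind the IDW step used earlier. It is a deliberately minimal stand-in with made-up coordinates, not pycomlink's `IdwKdtreeInterpolator`, which uses a KD-tree neighbour search and additional options.

```
import numpy as np

def idw_sketch(points, values, grid_points, p=2, eps=1e-12):
    """Plain inverse-distance weighting: a conceptual sketch only."""
    # Pairwise distances between every grid point and every observation point.
    d = np.linalg.norm(grid_points[:, None, :] - points[None, :, :], axis=-1)
    w = 1.0 / (d ** p + eps)                 # nearer observations weigh more
    return (w * values[None, :]).sum(axis=1) / w.sum(axis=1)

# Tiny usage example with made-up CML midpoints (lon, lat) and rain rates.
pts = np.array([[11.00, 48.00], [11.20, 48.10], [11.10, 47.90]])
rr = np.array([0.5, 2.0, 0.0])
grid = np.array([[11.05, 48.00], [11.15, 48.05]])
print(idw_sketch(pts, rr, grid))
```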
# Stock Price Prediction In this notebook, we demonstrate a reference use case where we use historical stock price data to predict the future price. The dataset we use is the daily stock price of S&P500 stocks during 2013-2018 ([data source](https://www.kaggle.com/camnugent/sandp500/)). We demostrate how to do univariate forecasting using the past 80% of the total days' MMM price to predict the future 20% days' daily price. Reference: https://github.com/jwkanggist/tf-keras-stock-pred ## Get Data We will use the close prices of MMM stock for our experiment. We will 1. download raw dataset and load into dataframe. 2. Extract the close prices of MMM stock from the dataframe into a numpy array ``` import numpy as np import pandas as pd import os # S&P 500 FILE_NAME = 'all_stocks_5yr.csv' SOURCE_URL = 'https://github.com/CNuge/kaggle-code/raw/master/stock_data/' filepath = './data/'+ FILE_NAME filepath = os.path.join('data', FILE_NAME) print(filepath) # download data !if ! [ -d "data" ]; then mkdir data; cd data; wget https://github.com/CNuge/kaggle-code/raw/master/stock_data/individual_stocks_5yr.zip; wget https://raw.githubusercontent.com/CNuge/kaggle-code/master/stock_data/merge.sh; chmod +x merge.sh; unzip individual_stocks_5yr.zip; ./merge.sh; fi # read data data = pd.read_csv(filepath) print(data[:10]) target_rows = data[data['Name']=='MMM'] print(target_rows[:10]) # extract close value close_val = target_rows[['close']].values print(close_val[:10]) # Visualize data import matplotlib.pyplot as plt plt.plot(close_val, color='blue', label='MMM daily price Raw') plt.xlabel("Time Period") plt.ylabel("Stock Price") plt.legend() plt.show() ``` ## Data Pre-processing Now we need to do data cleaning and preprocessing on the raw data. Note that this part could vary for different dataset. For the stock price data we're using, the processing contains 2 parts: 1. Data normalization such that the normalized stock prices fall in the range of 0 to 1 2. Extract time series of given window size We generate a built-in TSDataset to complete the whole processing. ``` from zoo.chronos.data import TSDataset from sklearn.preprocessing import MinMaxScaler df = target_rows[['date', 'close']] tsdata_train, _, tsdata_test = TSDataset.from_pandas(df, dt_col="date", target_col="close", with_split=True, test_ratio=0.2) minmax_scaler = MinMaxScaler() for tsdata in [tsdata_train, tsdata_test]: tsdata.scale(minmax_scaler, fit=(tsdata is tsdata_train))\ .roll(lookback=50, horizon=1) X_train, y_train = tsdata_train.to_numpy() X_test, y_test = tsdata_test.to_numpy() X_train.shape, y_train.shape, X_test.shape, y_test.shape ``` ## Time series forecasting We use LSTMForecaster for forecasting. ``` from zoo.chronos.forecaster.lstm_forecaster import LSTMForecaster ``` First we initiate a LSTMForecaster. * `feature_dim` should match the dimension of the input data, so we just use the last dimension of train input data shape * `target_dim` equals the dimension of the output data, here we set `target_dim=1` for univariate forecasting. ``` # Hyperparameters feature_dim = X_train.shape[-1] target_dim = 1 hidden_dim = 10 learning_rate = 0.01 batch_size = 16 epochs = 50 # build model forecaster = LSTMForecaster(past_seq_len=X_train.shape[1], input_feature_num=feature_dim, output_feature_num=target_dim, hidden_dim=32, lr=learning_rate, ) ``` Then we use fit to train the model. Wait sometime for it to finish. ``` %%time forecaster.fit(data=(X_train, y_train), batch_size=batch_size, epochs=epochs) ``` After training is finished. 
You can use the forecaster to do prediction and evaluation. ``` # make prediction y_pred = forecaster.predict(X_test) ``` Since we have used standard scaler to scale the input data (including the target values), we need to inverse the scaling on the predicted values too. ``` y_pred_unscale = tsdata_test.unscale_numpy(y_pred) y_test_unscale = tsdata_test.unscale_numpy(y_test) ``` Calculate the mean square error. ``` # evaluate with mean_squared_error from zoo.orca.automl.metrics import Evaluator print("mean_squared error is", Evaluator.evaluate("mse", y_test_unscale, y_pred_unscale, multioutput='uniform_average')) ``` Visualize the prediction. ``` # Plot predictions plt.plot(y_test_unscale[:, :, 0], color='blue', label="MMM daily price Raw") plt.plot(y_pred_unscale[:, :, 0], color='red', label="MMM daily price Predicted") plt.xlabel("Time Period") plt.ylabel("Normalized Stock Price") plt.legend() plt.show() ```
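As an extra sanity check (my addition, assuming the `X_test`, `minmax_scaler` and `y_test_unscale` variables from the cells above), the LSTM's error can be compared against a naive persistence baseline that predicts the next close to be the last close in the lookback window:

```
import numpy as np

# Persistence baseline: "tomorrow's close equals today's close".
last_close_scaled = X_test[:, -1, :]                         # shape (n_samples, 1)
last_close = minmax_scaler.inverse_transform(last_close_scaled)[:, 0]
persistence_mse = np.mean((y_test_unscale[:, 0, 0] - last_close) ** 2)
print("persistence baseline mse:", persistence_mse)
```

If the LSTM's mean squared error is not clearly below this baseline, the extra model capacity is not paying off.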
``` import numpy as np import pandas as pd from pandas import DataFrame from sklearn import svm, linear_model, neural_network, naive_bayes, neighbors, tree, ensemble, linear_model from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score, f1_score from sklearn import preprocessing from tqdm import tqdm_notebook as tqdm import time import os import glob import matplotlib.pyplot as plt import seaborn as sns from itertools import product kval = [1, 3, 5, 9, 17, 33, 65, 129, 257, 513] w = ['uniform', 'distance'] conf=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] ridge=[0.00000001, 0.000001, 0.0001, 0.01, 1, 10, 100, 1000, 10000, 100000] kval_knn = [1, 3, 5, 9, 17, 33, 65, 129, 257, 313] file = ['electricity-normalized','pc4','MagicTelescope','irish','pc1','tic-tac-toe','ionosphere','diabetes'] p='{0:.8f}'.format(ridge[0]) files = os.path.join(os.getcwd(),'Result/C45', "*.csv") print(files) data = glob.glob(files) len(data) data.sort() #data for j in range(len(data)): #print(data[j]) df = pd.read_csv(data[j]) df1 = df.as_matrix() #print(df) p1=str(0.1) p2=str(1) s='/home/pranav/Project/Result/C45/'+file[2]+'CONF'+p1+'KVAL'+p2+'.csv' print(s) dfq = pd.read_csv(s) #dfq len(file) conf[0] #C45 n_datasets = len(file) p1_c45 = 9 p2_c45 = 10 shape2 = (n_datasets, p1_c45, p2_c45) accuracies_c45 = np.zeros(shape2) f1_scores_c45 = np.zeros(shape2) build_time_c45 = np.zeros(shape2) for k in range(len(file)): for i in range(p1_c45): for j in range(p2_c45): #print(s) p1=str(conf[i]) p2=str(kval[j]) s='/home/pranav/Project/Result/C45/'+file[k]+'CONF'+p1+'KVAL'+p2+'.csv' df = pd.read_csv(s) df1 = df.as_matrix() check1=0 check2=0 for q in range(len(df1)): df2 = str(df1[q,0]) if 'Correctly' in df2: check1=check1+1 #print(check1) if check1 == 2: #print(df2[57:64]) x=(float(df2[57:64])/100) #x=float(x) #print(x) if 'Weighted' in df2: check2=check2+1 #print(check2) if check2 == 2: y=float(df2[55:60]) #print(y) if 'Time taken to build model' in df2: z=float(df2[27:31]) #print(z) accuracies_c45[k,i,j] = x f1_scores_c45[k,i,j] = y build_time_c45[k,i,j] = z for d in range(len(file)): sf1 = pd.DataFrame(accuracies_c45[d], columns=kval, index=conf) sf2 = pd.DataFrame(f1_scores_c45[d], columns=kval, index=conf) sf3 = pd.DataFrame(build_time_c45[d], columns=kval, index=conf) y=str(d) path1 = '/home/pranav/Project/results_weka/c45/d_' + y + '_' +file[d] + '_acc_c45' sf1.to_csv(path_or_buf=path1) path2 = '/home/pranav/Project/results_weka/c45/d_' + y + '_' +file[d] + '_fm_c45' sf2.to_csv(path_or_buf=path2) path3 = '/home/pranav/Project/results_weka/c45/d_' + y + '_' +file[d] + '_bt_c45' sf3.to_csv(path_or_buf=path3) for d in range(len(file)): ax = sns.heatmap(accuracies_c45[d], xticklabels=kval, yticklabels=conf[::-1]) plt.xlabel('min no of leaf') plt.ylabel('confidence factor') plt.title(file[d]) plt.show() #KNN n_datasets = len(file) p1_knn = 2 p2_knn = 10 shape2 = (n_datasets, p1_knn, p2_knn) accuracies_knn = np.zeros(shape2) f1_scores_knn = np.zeros(shape2) build_time_knn = np.zeros(shape2) for k in range(len(file)): for i in range(p1_knn): for j in range(p2_knn): #print(s) #p1=str(conf[i]) p2=str(kval[j]) if i == 0: s='/home/pranav/Project/Result/KNN/'+file[k]+'FOR_'+'KVAL'+p2+'.csv' if i == 1: s='/home/pranav/Project/Result/KNN/'+file[k]+'INVERSE_FOR_'+'KVAL'+p2+'.csv' df = pd.read_csv(s) df1 = df.as_matrix() check1=0 check2=0 for q in range(len(df1)): df2 = str(df1[q,0]) if 'Correctly' in df2: check1=check1+1 #print(check1) if check1 == 2: #print(df2[57:64]) 
x=(float(df2[57:64])/100) #x=float(x) #print(x) if 'Weighted' in df2: check2=check2+1 #print(check2) if check2 == 2: y=float(df2[55:60]) #print(y) if 'Time taken to build model' in df2: z=float(df2[27:31]) #print(z) accuracies_knn[k,i,j] = x f1_scores_knn[k,i,j] = y build_time_knn[k,i,j] = z for d in range(len(file)): sf1 = pd.DataFrame(accuracies_knn[d], columns=kval, index=w) sf2 = pd.DataFrame(f1_scores_knn[d], columns=kval, index=w) sf3 = pd.DataFrame(build_time_knn[d], columns=kval, index=w) x=str(d) path1 = '/home/pranav/Project/results_weka/knn/d_' + x + '_' +file[d] + '_acc_knn' sf1.to_csv(path_or_buf=path1) path2 = '/home/pranav/Project/results_weka/knn/d_' + x + '_' +file[d] + '_fm_knn' sf2.to_csv(path_or_buf=path2) path3 = '/home/pranav/Project/results_weka/knn/d_' + x + '_' +file[d] + '_bt_knn' sf3.to_csv(path_or_buf=path3) for d in range(len(file)): ax = sns.heatmap(accuracies_knn[d], xticklabels=kval, yticklabels=w) #plt.xticks(np.arange(accuracies[0].shape[1]), kval) plt.xlabel('neighbours') plt.ylabel('weight') plt.title(file[d]) plt.show() #RandomForest n_datasets = len(file) p1_rf = 10 p2_rf = 10 shape2 = (n_datasets, p1_rf, p2_rf) accuracies_rf = np.zeros(shape2) f1_scores_rf = np.zeros(shape2) build_time_rf = np.zeros(shape2) for k in range(len(file)): for i in range(p1_rf): for j in range(p2_rf): print(s) p1=str(kval[i]) p2=str(kval[j]) s='/home/pranav/Project/Result/RandomForest/'+file[k]+'NTREE'+p1+'KVAL'+p2+'.csv' df = pd.read_csv(s) df1 = df.as_matrix() check1=0 check2=0 for q in range(len(df1)): df2 = str(df1[q,0]) if 'Correctly' in df2: check1=check1+1 #print(check1) if check1 == 2: #print(df2[57:64]) x=(float(df2[57:64])/100) #x=float(x) #print(x) if 'Weighted' in df2: check2=check2+1 #print(check2) if check2 == 2: y=float(df2[55:60]) #print(y) if 'Time taken to build model' in df2: z=float(df2[27:31]) #print(z) accuracies_rf[k,i,j] = x f1_scores_rf[k,i,j] = y build_time_rf[k,i,j] = z for d in range(len(file)): sf1 = pd.DataFrame(accuracies_rf[d], columns=kval, index=kval) sf2 = pd.DataFrame(f1_scores_rf[d], columns=kval, index=kval) sf3 = pd.DataFrame(build_time_rf[d], columns=kval, index=kval) z=str(d) path1 = '/home/pranav/Project/results_weka/randomforest/d_' + z + '_' +file[d] + '_acc_rf' sf1.to_csv(path_or_buf=path1) path2 = '/home/pranav/Project/results_weka/randomforest/d_' + z + '_' +file[d] + '_fm_rf' sf2.to_csv(path_or_buf=path2) path3 = '/home/pranav/Project/results_weka/randomforest/d_' + z + '_' +file[d] + '_bt_rf' sf3.to_csv(path_or_buf=path3) for d in range(len(file)): ax = sns.heatmap(accuracies_rf[d][::-1], yticklabels=kval[::-1], xticklabels=kval) plt.ylabel('no of trees') plt.xlabel('max depth') plt.title(file[d]) plt.show() #Logistic n_datasets = len(file) p1_lg = 10 p2_lg = 10 shape2 = (n_datasets, p1_lg, p2_lg) accuracies_lg = np.zeros(shape2) f1_scores_lg = np.zeros(shape2) build_time_lg = np.zeros(shape2) for k in range(len(file)): for i in range(p1_lg): for j in range(p2_lg): print(s) p1=str(kval[i]) p2=str(ridge[j]) if j == 0: p2 ='{0:.8f}'.format(ridge[j]) if j == 1: p2 ='{0:.6f}'.format(ridge[j]) s='/home/pranav/Project/Result/Logistic/'+file[k]+'ITER'+p1+'R'+p2+'.csv' df = pd.read_csv(s) df1 = df.as_matrix() check1=0 check2=0 for q in range(len(df1)): df2 = str(df1[q,0]) if 'Correctly' in df2: check1=check1+1 #print(check1) if check1 == 2: #print(df2[57:64]) x=(float(df2[57:64])/100) #x=float(x) #print(x) if 'Weighted' in df2: check2=check2+1 #print(check2) if check2 == 2: y=float(df2[55:60]) #print(y) if 'Time taken to build model' 
in df2: z=float(df2[27:31]) #print(z) accuracies_lg[k,i,j] = x f1_scores_lg[k,i,j] = y build_time_lg[k,i,j] = z for d in range(len(file)): sf1 = pd.DataFrame(accuracies_lg[d], columns=kval, index=ridge) sf2 = pd.DataFrame(f1_scores_lg[d], columns=kval, index=ridge) sf3 = pd.DataFrame(build_time_lg[d], columns=kval, index=ridge) w=str(d) path1 = '/home/pranav/Project/results_weka/logistic/d_' + w + '_' +file[d] + '_acc_lg' sf1.to_csv(path_or_buf=path1) path2 = '/home/pranav/Project/results_weka/logistic/d_' + w + '_' +file[d] + '_fm_lg' sf2.to_csv(path_or_buf=path2) path3 = '/home/pranav/Project/results_weka/logistic/d_' + w + '_' +file[d] + '_bt_lg' sf3.to_csv(path_or_buf=path3) for d in range(len(file)): ax = sns.heatmap(accuracies_lg[d][::-1], yticklabels=kval[::-1], xticklabels=ridge) plt.ylabel('max iter') plt.xlabel('regularization') plt.title(file[d]) #plt.savefig() plt.show() df1 = df.as_matrix() #print(df1) df1.shape check1=0 check2=0 for i in range(len(df1)): df2 = str(df1[i,0]) if 'Correctly' in df2: check1=check1+1 print(check1) if check1 == 2: #print(df2[57:64]) x=(float(df2[57:64])/100) #x=float(x) print(x) if 'Weighted' in df2: check2=check2+1 print(check2) if check2 == 2: y=float(df2[55:60]) print(y) if 'Time taken to build model' in df2: z=float(df2[27:31]) print(z) s =['asfsgsd' 'fghsfg'] s1 = ''.join(s) s1 'asf' in s1 ```
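The extraction above relies on fixed character offsets (`df2[57:64]`, `df2[55:60]`, `df2[27:31]`), which silently breaks if Weka's summary formatting shifts by even one column. A regex-based parse is more robust; the sketch below is only an illustration of that idea and assumes the usual Weka summary lines ('Correctly Classified Instances', 'Weighted Avg.', 'Time taken to build model'), with the F-Measure as the fifth numeric column of the 'Weighted Avg.' row.

```
import re

def parse_weka_summary(lines):
    """Extract (accuracy, weighted F1, build time in seconds) from Weka's text output."""
    acc = f1 = build_time = None
    correctly_seen = weighted_seen = 0
    for line in lines:
        if 'Correctly Classified Instances' in line:
            correctly_seen += 1
            if correctly_seen == 2:                    # second block = cross-validation summary
                pct = re.findall(r'(\d+(?:\.\d+)?)\s*%', line)
                if pct:
                    acc = float(pct[0]) / 100.0
        elif 'Weighted Avg.' in line:
            weighted_seen += 1
            if weighted_seen == 2:
                nums = re.findall(r'\d+\.\d+', line)
                if len(nums) >= 5:                     # assumed: F-Measure is the 5th numeric column
                    f1 = float(nums[4])
        elif 'Time taken to build model' in line:
            m = re.search(r'(\d+(?:\.\d+)?)\s*seconds', line)
            if m:
                build_time = float(m.group(1))
    return acc, f1, build_time

# usage on one of the result files, e.g.:
# with open(s) as f:
#     acc, f1, build_time = parse_weka_summary(f)
```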
## Maximal Information Coefficient (MIC)

### Part I: package installation

After some Google searching, I found the package `minepy`. However, installing `minepy` is a bit tricky in a Windows environment: the usual `pip install minepy` command gives an error message.

![title](img/error.png)

My solution is to install from a wheel file. The file can be downloaded here: https://www.lfd.uci.edu/~gohlke/pythonlibs/.

![title](img/wheel.png)

It is very important to download a file that matches your Python version, otherwise the installation will fail. For example, my Python version is 3.6, so I downloaded the whl file with 36 in the file name.

![title](img/install.png)

If you download a file that does not match your Python version, you will get an error message. For example, I have Python 3.7 on another machine and I tried a few files.

![title](img/install_error.png)

### Part II: How to get MIC

Python examples can be found here: https://minepy.readthedocs.io/en/latest/python.html

```
import numpy as np
import matplotlib.pyplot as plt
from minepy import MINE

rs = np.random.RandomState(seed=0)

def mysubplot(x, y, numRows, numCols, plotNum,
              xlim=(-4, 4), ylim=(-4, 4)):
    r = np.around(np.corrcoef(x, y)[0, 1], 1)
    mine = MINE(alpha=0.6, c=15, est="mic_approx")
    mine.compute_score(x, y)
    mic = np.around(mine.mic(), 1)
    ax = plt.subplot(numRows, numCols, plotNum, xlim=xlim, ylim=ylim)
    ax.set_title('Pearson r=%.1f\nMIC=%.1f' % (r, mic), fontsize=10)
    ax.set_frame_on(False)
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    ax.plot(x, y, ',')
    ax.set_xticks([])
    ax.set_yticks([])
    return ax

def rotation(xy, t):
    return np.dot(xy, [[np.cos(t), -np.sin(t)], [np.sin(t), np.cos(t)]])

def mvnormal(n=1000):
    cors = [1.0, 0.8, 0.4, 0.0, -0.4, -0.8, -1.0]
    for i, cor in enumerate(cors):
        cov = [[1, cor], [cor, 1]]
        xy = rs.multivariate_normal([0, 0], cov, n)
        mysubplot(xy[:, 0], xy[:, 1], 3, 7, i+1)

def rotnormal(n=1000):
    ts = [0, np.pi/12, np.pi/6, np.pi/4, np.pi/2-np.pi/6,
          np.pi/2-np.pi/12, np.pi/2]
    cov = [[1, 1], [1, 1]]
    xy = rs.multivariate_normal([0, 0], cov, n)
    for i, t in enumerate(ts):
        xy_r = rotation(xy, t)
        mysubplot(xy_r[:, 0], xy_r[:, 1], 3, 7, i+8)

def others(n=1000):
    x = rs.uniform(-1, 1, n)
    y = 4*(x**2-0.5)**2 + rs.uniform(-1, 1, n)/3
    mysubplot(x, y, 3, 7, 15, (-1, 1), (-1/3, 1+1/3))

    y = rs.uniform(-1, 1, n)
    xy = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1)), axis=1)
    xy = rotation(xy, -np.pi/8)
    lim = np.sqrt(2+np.sqrt(2)) / np.sqrt(2)
    mysubplot(xy[:, 0], xy[:, 1], 3, 7, 16, (-lim, lim), (-lim, lim))

    xy = rotation(xy, -np.pi/8)
    lim = np.sqrt(2)
    mysubplot(xy[:, 0], xy[:, 1], 3, 7, 17, (-lim, lim), (-lim, lim))

    y = 2*x**2 + rs.uniform(-1, 1, n)
    mysubplot(x, y, 3, 7, 18, (-1, 1), (-1, 3))

    y = (x**2 + rs.uniform(0, 0.5, n)) * \
        np.array([-1, 1])[rs.randint(0, 1, size=n)]
    mysubplot(x, y, 3, 7, 19, (-1.5, 1.5), (-1.5, 1.5))

    y = np.cos(x * np.pi) + rs.uniform(0, 1/8, n)
    x = np.sin(x * np.pi) + rs.uniform(0, 1/8, n)
    mysubplot(x, y, 3, 7, 20, (-1.5, 1.5), (-1.5, 1.5))

    xy1 = np.random.multivariate_normal([3, 3], [[1, 0], [0, 1]], int(n/4))
    xy2 = np.random.multivariate_normal([-3, 3], [[1, 0], [0, 1]], int(n/4))
    xy3 = np.random.multivariate_normal([-3, -3], [[1, 0], [0, 1]], int(n/4))
    xy4 = np.random.multivariate_normal([3, -3], [[1, 0], [0, 1]], int(n/4))
    xy = np.concatenate((xy1, xy2, xy3, xy4), axis=0)
    mysubplot(xy[:, 0], xy[:, 1], 3, 7, 21, (-7, 7), (-7, 7))

plt.figure(facecolor='white')
mvnormal(n=800)
rotnormal(n=200)
others(n=800)
plt.tight_layout()
plt.show()
```
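For reference, the core `minepy` usage reduces to three calls (the same ones used inside `mysubplot` above); the noisy linear toy data in this minimal example is made up purely for illustration:

```
import numpy as np
from minepy import MINE

# toy data: a noisy linear relationship
rng = np.random.RandomState(1)
x = rng.uniform(-1, 1, 500)
y = 2 * x + rng.normal(scale=0.1, size=500)

mine = MINE(alpha=0.6, c=15, est="mic_approx")   # same settings as in mysubplot
mine.compute_score(x, y)
print("MIC:", mine.mic())                        # close to 1 for a strong relationship
```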
### Part III: Literature references

I found this paper (and its supplementary material) super useful:

- https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3325791/
- https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3325791/bin/NIHMS358982-supplement-Supplemental_Figures_and_Tables.pdf
``` import numpy as np import pandas as pd import re import matplotlib.pyplot as plt from nltk.corpus import stopwords import nltk from bs4 import BeautifulSoup from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences import urllib.request np.random.seed(seed=0) data = pd.read_csv("Reviews.csv", nrows = 100000) print('전체 리뷰 개수 :',(len(data))) data = data[['Text','Summary']] data.sample(10) print('Text 열에서 중복을 배제한 유일한 샘플의 수 :', data['Text'].nunique()) print('Summary 열에서 중복을 배제한 유일한 샘플의 수 :', data['Summary'].nunique()) data.drop_duplicates(subset=['Text'], inplace=True) print("전체 샘플수 :", len(data)) # Null 값을 가진 샘플 제거 data.dropna(axis=0, inplace=True) print('전체 샘플수 :',(len(data))) contractions = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have", "you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will 
have", "you're": "you are", "you've": "you have"} nltk.download('stopwords') stop_words = set(stopwords.words('english')) print('불용어 개수 :', len(stop_words)) print(stop_words) # 전처리 함수 def preprocess_sentence(sentence, remove_stopwords = True): sentence = sentence.lower() # 텍스트 소문자화 sentence = BeautifulSoup(sentence, "lxml").text # <br />, <a href = ...> 등의 html 태그 제거 sentence = re.sub(r'\([^)]*\)', '', sentence) # 괄호로 닫힌 문자열 제거 Ex) my husband (and myself) for => my husband for sentence = re.sub('"','', sentence) # 쌍따옴표 " 제거 sentence = ' '.join([contractions[t] if t in contractions else t for t in sentence.split(" ")]) # 약어 정규화 sentence = re.sub(r"'s\b","",sentence) # 소유격 제거. Ex) roland's -> roland sentence = re.sub("[^a-zA-Z]", " ", sentence) # 영어 외 문자(숫자, 특수문자 등) 공백으로 변환 sentence = re.sub('[m]{2,}', 'mm', sentence) # m이 3개 이상이면 2개로 변경. Ex) ummmmmmm yeah -> umm yeah # 불용어 제거 (Text) if remove_stopwords: tokens = ' '.join(word for word in sentence.split() if not word in stop_words if len(word) > 1) # 불용어 미제거 (Summary) else: tokens = ' '.join(word for word in sentence.split() if len(word) > 1) return tokens temp_text = 'Everything I bought was great, infact I ordered twice and the third ordered was<br />for my mother and father.' temp_summary = 'Great way to start (or finish) the day!!!' print(preprocess_sentence(temp_text)) print(preprocess_sentence(temp_summary, 0)) # Text 열 전처리 clean_text = [] for s in data['Text']: clean_text.append(preprocess_sentence(s)) clean_text[:5] # Summary 열 전처리 clean_summary = [] for s in data['Summary']: clean_summary.append(preprocess_sentence(s, 0)) clean_summary[:5] data['Text'] = clean_text data['Summary'] = clean_summary # 길이가 공백인 샘플은 NULL 값으로 변환 data.replace('', np.nan, inplace=True) print(data.isnull().sum()) data.dropna(axis = 0, inplace = True) print('전체 샘플수 :',(len(data))) # 길이 분포 출력 text_len = [len(s.split()) for s in data['Text']] summary_len = [len(s.split()) for s in data['Summary']] print('텍스트의 최소 길이 : {}'.format(np.min(text_len))) print('텍스트의 최대 길이 : {}'.format(np.max(text_len))) print('텍스트의 평균 길이 : {}'.format(np.mean(text_len))) print('요약의 최소 길이 : {}'.format(np.min(summary_len))) print('요약의 최대 길이 : {}'.format(np.max(summary_len))) print('요약의 평균 길이 : {}'.format(np.mean(summary_len))) plt.subplot(1,2,1) plt.boxplot(summary_len) plt.title('Summary') plt.subplot(1,2,2) plt.boxplot(text_len) plt.title('Text') plt.tight_layout() plt.show() plt.title('Summary') plt.hist(summary_len, bins=40) plt.xlabel('length of samples') plt.ylabel('number of samples') plt.show() plt.title('Text') plt.hist(text_len, bins=40) plt.xlabel('length of samples') plt.ylabel('number of samples') plt.show() text_max_len = 50 summary_max_len = 8 def below_threshold_len(max_len, nested_list): cnt = 0 for s in nested_list: if(len(s.split()) <= max_len): cnt = cnt + 1 print('전체 샘플 중 길이가 %s 이하인 샘플의 비율: %s'%(max_len, (cnt / len(nested_list)))) below_threshold_len(text_max_len, data['Text']) below_threshold_len(summary_max_len, data['Summary']) data = data[data['Text'].apply(lambda x: len(x.split()) <= text_max_len)] data = data[data['Summary'].apply(lambda x: len(x.split()) <= summary_max_len)] print('전체 샘플수 :',(len(data))) # 요약 데이터에는 시작 토큰과 종료 토큰을 추가한다. 
data['decoder_input'] = data['Summary'].apply(lambda x : 'sostoken '+ x) data['decoder_target'] = data['Summary'].apply(lambda x : x + ' eostoken') data.head() encoder_input = np.array(data['Text']) decoder_input = np.array(data['decoder_input']) decoder_target = np.array(data['decoder_target']) #분리 indices = np.arange(encoder_input.shape[0]) np.random.shuffle(indices) print(indices) encoder_input = encoder_input[indices] decoder_input = decoder_input[indices] decoder_target = decoder_target[indices] n_of_val = int(len(encoder_input)*0.2) print('테스트 데이터의 수 :',n_of_val) encoder_input_train = encoder_input[:-n_of_val] decoder_input_train = decoder_input[:-n_of_val] decoder_target_train = decoder_target[:-n_of_val] encoder_input_test = encoder_input[-n_of_val:] decoder_input_test = decoder_input[-n_of_val:] decoder_target_test = decoder_target[-n_of_val:] print('훈련 데이터의 개수 :', len(encoder_input_train)) print('훈련 레이블의 개수 :',len(decoder_input_train)) print('테스트 데이터의 개수 :',len(encoder_input_test)) print('테스트 레이블의 개수 :',len(decoder_input_test)) src_tokenizer = Tokenizer() src_tokenizer.fit_on_texts(encoder_input_train) threshold = 7 total_cnt = len(src_tokenizer.word_index) # 단어의 수 rare_cnt = 0 # 등장 빈도수가 threshold보다 작은 단어의 개수를 카운트 total_freq = 0 # 훈련 데이터의 전체 단어 빈도수 총 합 rare_freq = 0 # 등장 빈도수가 threshold보다 작은 단어의 등장 빈도수의 총 합 # 단어와 빈도수의 쌍(pair)을 key와 value로 받는다. for key, value in src_tokenizer.word_counts.items(): total_freq = total_freq + value # 단어의 등장 빈도수가 threshold보다 작으면 if(value < threshold): rare_cnt = rare_cnt + 1 rare_freq = rare_freq + value print('단어 집합(vocabulary)의 크기 :',total_cnt) print('등장 빈도가 %s번 이하인 희귀 단어의 수: %s'%(threshold - 1, rare_cnt)) print('단어 집합에서 희귀 단어를 제외시킬 경우의 단어 집합의 크기 %s'%(total_cnt - rare_cnt)) print("단어 집합에서 희귀 단어의 비율:", (rare_cnt / total_cnt)*100) print("전체 등장 빈도에서 희귀 단어 등장 빈도 비율:", (rare_freq / total_freq)*100) src_vocab = 8000 src_tokenizer = Tokenizer(num_words = src_vocab) src_tokenizer.fit_on_texts(encoder_input_train) # 텍스트 시퀀스를 정수 시퀀스로 변환 encoder_input_train = src_tokenizer.texts_to_sequences(encoder_input_train) encoder_input_test = src_tokenizer.texts_to_sequences(encoder_input_test) tar_tokenizer = Tokenizer() tar_tokenizer.fit_on_texts(decoder_input_train) threshold = 6 total_cnt = len(tar_tokenizer.word_index) # 단어의 수 rare_cnt = 0 # 등장 빈도수가 threshold보다 작은 단어의 개수를 카운트 total_freq = 0 # 훈련 데이터의 전체 단어 빈도수 총 합 rare_freq = 0 # 등장 빈도수가 threshold보다 작은 단어의 등장 빈도수의 총 합 # 단어와 빈도수의 쌍(pair)을 key와 value로 받는다. 
for key, value in tar_tokenizer.word_counts.items(): total_freq = total_freq + value # 단어의 등장 빈도수가 threshold보다 작으면 if(value < threshold): rare_cnt = rare_cnt + 1 rare_freq = rare_freq + value print('단어 집합(vocabulary)의 크기 :',total_cnt) print('등장 빈도가 %s번 이하인 희귀 단어의 수: %s'%(threshold - 1, rare_cnt)) print('단어 집합에서 희귀 단어를 제외시킬 경우의 단어 집합의 크기 %s'%(total_cnt - rare_cnt)) print("단어 집합에서 희귀 단어의 비율:", (rare_cnt / total_cnt)*100) print("전체 등장 빈도에서 희귀 단어 등장 빈도 비율:", (rare_freq / total_freq)*100) tar_vocab = 2000 tar_tokenizer = Tokenizer(num_words = tar_vocab) tar_tokenizer.fit_on_texts(decoder_input_train) tar_tokenizer.fit_on_texts(decoder_target_train) # 텍스트 시퀀스를 정수 시퀀스로 변환 decoder_input_train = tar_tokenizer.texts_to_sequences(decoder_input_train) decoder_target_train = tar_tokenizer.texts_to_sequences(decoder_target_train) decoder_input_test = tar_tokenizer.texts_to_sequences(decoder_input_test) decoder_target_test = tar_tokenizer.texts_to_sequences(decoder_target_test) #빈 샘플 제거 drop_train = [index for index, sentence in enumerate(decoder_input_train) if len(sentence) == 1] drop_test = [index for index, sentence in enumerate(decoder_input_test) if len(sentence) == 1] print('삭제할 훈련 데이터의 개수 :',len(drop_train)) print('삭제할 테스트 데이터의 개수 :',len(drop_test)) encoder_input_train = np.delete(encoder_input_train, drop_train, axis=0) decoder_input_train = np.delete(decoder_input_train, drop_train, axis=0) decoder_target_train = np.delete(decoder_target_train, drop_train, axis=0) encoder_input_test = np.delete(encoder_input_test, drop_test, axis=0) decoder_input_test = np.delete(decoder_input_test, drop_test, axis=0) decoder_target_test = np.delete(decoder_target_test, drop_test, axis=0) print('훈련 데이터의 개수 :', len(encoder_input_train)) print('훈련 레이블의 개수 :',len(decoder_input_train)) print('테스트 데이터의 개수 :',len(encoder_input_test)) print('테스트 레이블의 개수 :',len(decoder_input_test)) #패딩 encoder_input_train = pad_sequences(encoder_input_train, maxlen = text_max_len, padding='post') encoder_input_test = pad_sequences(encoder_input_test, maxlen = text_max_len, padding='post') decoder_input_train = pad_sequences(decoder_input_train, maxlen = summary_max_len, padding='post') decoder_target_train = pad_sequences(decoder_target_train, maxlen = summary_max_len, padding='post') decoder_input_test = pad_sequences(decoder_input_test, maxlen = summary_max_len, padding='post') decoder_target_test = pad_sequences(decoder_target_test, maxlen = summary_max_len, padding='post') from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Concatenate from tensorflow.keras.models import Model from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint embedding_dim = 128 hidden_size = 256 # 인코더 encoder_inputs = Input(shape=(text_max_len,)) # 인코더의 임베딩 층 enc_emb = Embedding(src_vocab, embedding_dim)(encoder_inputs) # 인코더의 LSTM 1 encoder_lstm1 = LSTM(hidden_size, return_sequences=True, return_state=True ,dropout = 0.4, recurrent_dropout = 0.4) encoder_output1, state_h1, state_c1 = encoder_lstm1(enc_emb) # 인코더의 LSTM 2 encoder_lstm2 = LSTM(hidden_size, return_sequences=True, return_state=True, dropout=0.4, recurrent_dropout=0.4) encoder_output2, state_h2, state_c2 = encoder_lstm2(encoder_output1) # 인코더의 LSTM 3 encoder_lstm3 = LSTM(hidden_size, return_state=True, return_sequences=True, dropout=0.4, recurrent_dropout=0.4) encoder_outputs, state_h, state_c= encoder_lstm3(encoder_output2) # 디코더 decoder_inputs = Input(shape=(None,)) # 디코더의 임베딩 층 dec_emb_layer = Embedding(tar_vocab, embedding_dim) dec_emb = 
dec_emb_layer(decoder_inputs) # 디코더의 LSTM decoder_lstm = LSTM(hidden_size, return_sequences = True, return_state = True, dropout = 0.4, recurrent_dropout=0.2) decoder_outputs, _, _ = decoder_lstm(dec_emb, initial_state = [state_h, state_c]) # 디코더의 출력층 decoder_softmax_layer = Dense(tar_vocab, activation = 'softmax') decoder_softmax_outputs = decoder_softmax_layer(decoder_outputs) # 모델 정의 model = Model([encoder_inputs, decoder_inputs], decoder_softmax_outputs) model.summary() urllib.request.urlretrieve("https://raw.githubusercontent.com/thushv89/attention_keras/master/src/layers/attention.py", filename="attention.py") from attention import AttentionLayer # 어텐션 층(어텐션 함수) attn_layer = AttentionLayer(name='attention_layer') attn_out, attn_states = attn_layer([encoder_outputs, decoder_outputs]) # 어텐션의 결과와 디코더의 hidden state들을 연결 decoder_concat_input = Concatenate(axis = -1, name='concat_layer')([decoder_outputs, attn_out]) # 디코더의 출력층 decoder_softmax_layer = Dense(tar_vocab, activation='softmax') decoder_softmax_outputs = decoder_softmax_layer(decoder_concat_input) # 모델 정의 model = Model([encoder_inputs, decoder_inputs], decoder_softmax_outputs) model.summary() model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy') es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience = 2) history = model.fit(x = [encoder_input_train, decoder_input_train], y = decoder_target_train, \ validation_data = ([encoder_input_test, decoder_input_test], decoder_target_test), batch_size = 256, callbacks=[es], epochs = 50) plt.plot(history.history['loss'], label='train') plt.plot(history.history['val_loss'], label='test') plt.legend() plt.show() src_index_to_word = src_tokenizer.index_word # 원문 단어 집합에서 정수 -> 단어를 얻음 tar_word_to_index = tar_tokenizer.word_index # 요약 단어 집합에서 단어 -> 정수를 얻음 tar_index_to_word = tar_tokenizer.index_word # 요약 단어 집합에서 정수 -> 단어를 얻음 # 인코더 설계 encoder_model = Model(inputs=encoder_inputs, outputs=[encoder_outputs, state_h, state_c]) # 이전 시점의 상태들을 저장하는 텐서 decoder_state_input_h = Input(shape=(hidden_size,)) decoder_state_input_c = Input(shape=(hidden_size,)) dec_emb2 = dec_emb_layer(decoder_inputs) # 문장의 다음 단어를 예측하기 위해서 초기 상태(initial_state)를 이전 시점의 상태로 사용. 이는 뒤의 함수 decode_sequence()에 구현 # 훈련 과정에서와 달리 LSTM의 리턴하는 은닉 상태와 셀 상태인 state_h와 state_c를 버리지 않음. decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2, initial_state=[decoder_state_input_h, decoder_state_input_c]) # 어텐션 함수 decoder_hidden_state_input = Input(shape=(text_max_len, hidden_size)) attn_out_inf, attn_states_inf = attn_layer([decoder_hidden_state_input, decoder_outputs2]) decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_outputs2, attn_out_inf]) # 디코더의 출력층 decoder_outputs2 = decoder_softmax_layer(decoder_inf_concat) # 최종 디코더 모델 decoder_model = Model( [decoder_inputs] + [decoder_hidden_state_input,decoder_state_input_h, decoder_state_input_c], [decoder_outputs2] + [state_h2, state_c2]) def decode_sequence(input_seq): # 입력으로부터 인코더의 상태를 얻음 e_out, e_h, e_c = encoder_model.predict(input_seq) # <SOS>에 해당하는 토큰 생성 target_seq = np.zeros((1,1)) target_seq[0, 0] = tar_word_to_index['sostoken'] stop_condition = False decoded_sentence = '' while not stop_condition: # stop_condition이 True가 될 때까지 루프 반복 output_tokens, h, c = decoder_model.predict([target_seq] + [e_out, e_h, e_c]) sampled_token_index = np.argmax(output_tokens[0, -1, :]) sampled_token = tar_index_to_word[sampled_token_index] if(sampled_token!='eostoken'): decoded_sentence += ' '+sampled_token # <eos>에 도달하거나 최대 길이를 넘으면 중단. 
if (sampled_token == 'eostoken' or len(decoded_sentence.split()) >= (summary_max_len-1)): stop_condition = True # 길이가 1인 타겟 시퀀스를 업데이트 target_seq = np.zeros((1,1)) target_seq[0, 0] = sampled_token_index # 상태를 업데이트 합니다. e_h, e_c = h, c return decoded_sentence # 원문의 정수 시퀀스를 텍스트 시퀀스로 변환 def seq2text(input_seq): temp='' for i in input_seq: if(i!=0): temp = temp + src_index_to_word[i]+' ' return temp # 요약문의 정수 시퀀스를 텍스트 시퀀스로 변환 def seq2summary(input_seq): temp='' for i in input_seq: if((i!=0 and i!=tar_word_to_index['sostoken']) and i!=tar_word_to_index['eostoken']): temp = temp + tar_index_to_word[i] + ' ' return temp for i in range(500, 1000): print("원문 : ",seq2text(encoder_input_test[i])) print("실제 요약문 :",seq2summary(decoder_input_test[i])) print("예측 요약문 :",decode_sequence(encoder_input_test[i].reshape(1, text_max_len))) print("\n") ```
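The loop above prints predictions for visual inspection only. As a rough quantitative check, a simplified ROUGE-1-style unigram F1 can be computed between the reference and predicted summaries. The sketch below reuses the `seq2summary` and `decode_sequence` helpers defined above and is only meant as a quick sanity metric, not a replacement for a proper ROUGE implementation.

```
import numpy as np

def unigram_f1(reference, prediction):
    """Simplified ROUGE-1 style score: F1 of the unigram overlap between two strings."""
    ref_tokens = set(reference.split())
    pred_tokens = set(prediction.split())
    if not ref_tokens or not pred_tokens:
        return 0.0
    overlap = len(ref_tokens & pred_tokens)
    if overlap == 0:
        return 0.0
    precision = overlap / len(pred_tokens)
    recall = overlap / len(ref_tokens)
    return 2 * precision * recall / (precision + recall)

# average over a small slice of the test set (decode_sequence is slow, so keep it small)
scores = []
for i in range(500, 550):
    ref = seq2summary(decoder_input_test[i])
    pred = decode_sequence(encoder_input_test[i].reshape(1, text_max_len))
    scores.append(unigram_f1(ref, pred))
print("mean unigram F1:", np.mean(scores))
```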
``` import numpy as np import os, sys, shutil import scipy.ndimage as snd import h5py import SimpleITK as sitk from shutil import copy import nibabel as nib import skimage.morphology as morph from skimage.feature import canny from tqdm import tqdm import pandas as pd import matplotlib.pyplot as plt %matplotlib inline def imshow(*args,**kwargs): """ Handy function to show multiple plots in on row, possibly with different cmaps and titles Usage: imshow(img1, title="myPlot") imshow(img1,img2, title=['title1','title2']) imshow(img1,img2, cmap='hot') imshow(img1,img2,cmap=['gray','Blues']) """ cmap = kwargs.get('cmap', 'gray') title= kwargs.get('title','') if len(args)==0: raise ValueError("No images given to imshow") elif len(args)==1: plt.title(title) plt.imshow(args[0], interpolation='none') else: n=len(args) if type(cmap)==str: cmap = [cmap]*n if type(title)==str: title= [title]*n plt.figure(figsize=(n*5,10)) for i in range(n): plt.subplot(1,n,i+1) plt.title(title[i]) plt.imshow(args[i], cmap[i]) plt.show() selem = morph.disk(2) def getWeightMap(label): label = np.argmax(label, axis=3)[0] edge = np.float32(morph.binary_dilation(canny(np.float32(label)), selem)) weight_map = np.zeros(label.shape) weight_map[np.where(label>0)] = 7 weight_map = weight_map + 1 weight_map[np.where(edge==1.0)] = 25 # weight_map[np.where(label == 2.0)] = 15 return np.uint8(weight_map[None,:,:]) def downSampleImage(image): return np.float64(snd.interpolation.zoom(image, 0.5)) def loadDicomVolume(file_path, itk_image): reader = sitk.ImageSeriesReader() reader.SetOutputPixelType(sitk.sitkFloat32) dicom_names = reader.GetGDCMSeriesFileNames(file_path) reader.SetFileNames(dicom_names) itk_image = reader.Execute() image_vol = sitk.GetArrayFromImage(self.itk_image) image_vol = np.transpose(image_vol,(1,2,0)) return np.float32(image_vol) def oneHot(targets,n_class = 6): axis = targets.ndim ret_val = (np.arange(n_class) == np.expand_dims(targets,axis = axis)).astype(int) return ret_val def histogram_equalization(arr): nbr_bins = 256 imhist, bins = np.histogram(arr.flatten(), nbr_bins, normed = True) cdf = imhist.cumsum() cdf = 255 * cdf / cdf[-1] original_shape = arr.shape arr = np.interp(arr.flatten(), bins[:-1], cdf) out_arr = arr.reshape(original_shape) return out_arr def normalize(x): x = np.float32(x) min_val = np.min(x) max_val = np.max(x) ret_val = (x - min_val) / (max_val - min_val) return ret_val def downSample1(slc): return snd.interpolation.zoom(slc,0.5) def makeLablel(lbl, num_class = 3): if num_class == 2: lbl[lbl==2] = 1 lbl = oneHot(lbl,num_class) return np.uint8(lbl[None,:,:]) def get_z_minmaxforbrain(lbl): lbl[lbl==2] = 1 maxes = np.max(lbl,axis =(1,2)) nonzero_maxes = np.nonzero(maxes)[0] mn, mx = nonzero_maxes[0] - 10, nonzero_maxes[-1] + 10 if mn < 0: mn = 0 if mx >= lbl.shape[0]: mx = lbl.shape[0]-1 return mn, mx root = './raw_data/pac2018_data/' dest = './processed_data/hdf5_file/' if not os.path.exists(dest): os.makedirs(dest) else: shutil.rmtree(dest) os.makedirs(dest) vols = [] for f in next(os.walk(root))[2]: vols.append(os.path.join(root, f)) vols.sort() print len(vols), vols[10].split("/") # label paths labels = pd.read_csv('./raw_data/PAC2018_Covariates_Upload.csv')['Label'].as_matrix()[: len(vols)] age = pd.read_csv('./raw_data/PAC2018_Covariates_Upload.csv')['Age'].as_matrix()[: len(vols)] gender = pd.read_csv('./raw_data/PAC2018_Covariates_Upload.csv')['Gender'].as_matrix()[: len(vols)] tiv = pd.read_csv('./raw_data/PAC2018_Covariates_Upload.csv')['TIV'].as_matrix()[: len(vols)] labels.shape 
for vol_path, label, a, g, t in tqdm(zip(vols,labels, age, gender, tiv)): # print("working on : " + vol_path) vol_img = sitk.ReadImage(vol_path) vol = sitk.GetArrayFromImage(vol_img) vol = histogram_equalization(vol) vol = normalize(vol) vol = np.swapaxes(vol, 1, 2) # imshow(vol[:,:,72]) dest_path = os.path.join(dest, vol_path.split("/")[3][:-4] +'.hdf5') hp = h5py.File(dest_path,'w') hp.create_dataset('volume', data=vol) hp.create_dataset('label', data=[label]) hp.create_dataset('gender', data=[g]) hp.create_dataset('tiv', data=[t]) hp.create_dataset('age', data=[a]) hp.close() df = pd.read_csv('./raw_data/PAC_info_sheet.csv') print (np.unique(df['Scanner'].as_matrix()), np.unique(df['Gender'].as_matrix())) scanner_1_male_df = df[(df['Scanner']==1)*df['Gender']==1.] scanner_2_male_df = df[(df['Scanner']==2)*df['Gender']==1.] scanner_3_male_df = df[(df['Scanner']==3)*df['Gender']==1.] scanner_1_female_df = df[(df['Scanner']==1)*df['Gender']==2.] scanner_2_female_df = df[(df['Scanner']==2)*df['Gender']==2.] scanner_3_female_df = df[(df['Scanner']==3)*df['Gender']==2.] print len(scanner_1_male_df), len(scanner_2_male_df), len(scanner_3_male_df) print len(scanner_1_female_df), len(scanner_2_female_df), len(scanner_3_female_df) src_path = './processed_data/hdf5_file' dest_path = './processed_data/' def generate_train_validate_test_set(df, name): """ Split the data into 70:15:15 for train-validate-test set arg: path: input data path generates CSV file with slice id and corrosponding bool value for train, test and validation """ SPLIT_TRAIN = 0.7 SPLIT_VALID = 0.15 train_files = df['PAC_ID'].as_matrix() labels = df['Label'].as_matrix() total_samples = len(train_files) print "total number of samples: {}".format(total_samples) index = np.random.randint(0, total_samples, size = total_samples) train_files = train_files[index] labels = labels[index] train_vols = train_files[0:int(SPLIT_TRAIN*total_samples)] valid_vols = train_files[int(SPLIT_TRAIN*total_samples): int((SPLIT_TRAIN+SPLIT_VALID)*total_samples)] test_vols = train_files[int((SPLIT_TRAIN+SPLIT_VALID)*total_samples):] train_labels = labels[0:int(SPLIT_TRAIN*total_samples)] valid_labels = labels[int(SPLIT_TRAIN*total_samples): int((SPLIT_TRAIN+SPLIT_VALID)*total_samples)] test_labels = labels[int((SPLIT_TRAIN+SPLIT_VALID)*total_samples):] vols_path, train, valid, test, labels = [], [], [], [], [] # to save ids and corrosponding bool values for vol, label in zip(train_vols, train_labels): folder_path = os.path.join(src_path, vol+'.hdf5') vols_path.append('.' + folder_path) train.append(True) valid.append(False) test.append(False) labels.append(label) print "Training Set Done!!" for vol, label in zip(valid_vols, valid_labels): folder_path = os.path.join(src_path, vol+'.hdf5') vols_path.append('.' + folder_path) train.append(False) valid.append(True) test.append(False) labels.append(label) print "Validation Set Done!!" for vol, label in zip(test_vols, test_labels): folder_path = os.path.join(src_path, vol+'hdf5') vols_path.append('.' + folder_path) train.append(False) valid.append(False) test.append(True) labels.append(label) print "Test Set Done!!" data = pd.DataFrame() data['Volume_Path'] = vols_path data['Labels'] = labels data['Training'] = train data['Testing'] = test data['Validation'] = valid data.to_csv(os.path.join(dest_path, name + 'train_test_split.csv'), index=False) print "Data Splitting Done..." 
generate_train_validate_test_set(scanner_1_female_df, 'scanner_1_female_') def generate_train_validate_test_set(src_path, labels, dest_path): """ Split the data into 70:15:15 for train-validate-test set arg: path: input data path generates CSV file with slice id and corrosponding bool value for train, test and validation """ SPLIT_TRAIN = 0.7 SPLIT_VALID = 0.15 train_files = np.array(next(os.walk(src_path))[2]) total_samples = len(train_files) print "total number of samples: {}".format(total_samples) index = np.random.randint(0, total_samples, size = total_samples) train_files = train_files[index] labels = labels[index] train_vols = train_files[0:int(SPLIT_TRAIN*total_samples)] valid_vols = train_files[int(SPLIT_TRAIN*total_samples): int((SPLIT_TRAIN+SPLIT_VALID)*total_samples)] test_vols = train_files[int((SPLIT_TRAIN+SPLIT_VALID)*total_samples):] train_labels = labels[0:int(SPLIT_TRAIN*total_samples)] valid_labels = labels[int(SPLIT_TRAIN*total_samples): int((SPLIT_TRAIN+SPLIT_VALID)*total_samples)] test_labels = labels[int((SPLIT_TRAIN+SPLIT_VALID)*total_samples):] vols_path, train, valid, test, labels = [], [], [], [], [] # to save ids and corrosponding bool values for vol, label in zip(train_vols, train_labels): folder_path = os.path.join(src_path, vol) vols_path.append('.' + folder_path) train.append(True) valid.append(False) test.append(False) labels.append(label) print "Training Set Done!!" for vol, label in zip(valid_vols, valid_labels): folder_path = os.path.join(src_path, vol) vols_path.append('.' + folder_path) train.append(False) valid.append(True) test.append(False) labels.append(label) print "Validation Set Done!!" for vol, label in zip(test_vols, test_labels): folder_path = os.path.join(src_path, vol) vols_path.append('.' + folder_path) train.append(False) valid.append(False) test.append(True) labels.append(label) print "Test Set Done!!" data = pd.DataFrame() data['Volume Path'] = vols_path data['Labels'] = labels data['Training'] = train data['Testing'] = test data['Validation'] = valid data.to_csv(os.path.join(dest_path, 'train_test_split.csv'), index=False) print "Data Splitting Done..." generate_train_validate_test_set('./processed_data/hdf5_file', labels, './processed_data/') data = pd.read_csv('./processed_data/train_test_split.csv') data ```
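A caveat on the splitting code above: `np.random.randint(0, total_samples, size=total_samples)` draws indices with replacement, so the resulting 70:15:15 "split" can repeat some volumes and silently drop others, and the test-set branch of the first function joins `vol+'hdf5'` without the dot. Below is a minimal sketch of a disjoint split built on a permutation instead; the `split_indices` helper is hypothetical and not part of the original notebook.

```
import numpy as np

def split_indices(n, train_frac=0.7, valid_frac=0.15, seed=0):
    """Return disjoint train/validation/test index arrays (hypothetical helper)."""
    rng = np.random.RandomState(seed)
    order = rng.permutation(n)            # every index exactly once, unlike randint()
    n_train = int(train_frac * n)
    n_valid = int(valid_frac * n)
    return (order[:n_train],
            order[n_train:n_train + n_valid],
            order[n_train + n_valid:])

# usage sketch: feed the permuted indices into the same CSV-building loops
# train_idx, valid_idx, test_idx = split_indices(len(df))
```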
github_jupyter
0.107549
0.484502
# Visualizing Linear Regression ``` import tensorflow as tf import numpy as np import matplotlib.pyplot as plt %matplotlib inline np.random.seed(1) def f(x, a, b): n = train_X.size vals = np.zeros((1, n)) for i in range(0, n): ax = np.multiply(a, x.item(i)) val = np.add(ax, b) vals[0, i] = val return vals Wref = 0.7 bref = -1. n = 20 noise_var = 0.001 train_X = np.random.random((1, n)) ref_Y = f(train_X, Wref, bref) train_Y = ref_Y + np.sqrt(noise_var)*np.random.randn(1, n) n_samples = train_X.size # Plot plt.figure(1) plt.plot(train_X[0, :], ref_Y[0, :], 'ro', label='Original data') plt.plot(train_X[0, :], train_Y[0, :], 'bo', label='Training data') plt.axis('equal') plt.legend(loc='lower right') # Parameters training_epochs = 1000 display_step = 100 # Set TensorFlow Graph x = tf.placeholder(tf.float32, name="INPUT_x") y = tf.placeholder(tf.float32, name="OUTPUT_y") W = tf.Variable(np.random.randn(), name="WEIGHT_W") b = tf.Variable(np.random.randn(), name="BIAS_b") # Construct a Model activation = tf.add(tf.mul(x, W), b) # Define Error Measure and Optimizer learning_rate = 0.01 cost = tf.reduce_mean(tf.pow(activation-y, 2)) optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent # Initializer init = tf.initialize_all_variables() # Run! sess = tf.Session() # Initialize sess.run(init) # Summary summary_writer = tf.train.SummaryWriter('/tmp/tf_logs/linear_regression', graph=sess.graph) for epoch in range(training_epochs): for (_x, _y) in zip(train_X[0, :], train_Y[0, :]): # print "x: ", x, " y: ", y sess.run(optimizer, feed_dict={x:_x, y:_y}) # Check cost if epoch % display_step == 0: costval = sess.run(cost, feed_dict={x: train_X, y:train_Y}) print("[%d/%d] cost :%.3f" % (epoch, training_epochs, costval)), Wtemp = sess.run(W) btemp = sess.run(b) print("Wtemp is %.3f and Wref is %.3f" % (Wtemp, Wref)), print("btemp is %.3f and bref is %.3f" % (btemp, bref)) # Final W and b Wopt = sess.run(W) bopt = sess.run(b) fopt = f(train_X, Wopt, bopt) # Plot Results plt.figure(2) plt.plot(train_X[0, :], ref_Y[0, :], 'ro', label='Original data') plt.plot(train_X[0, :], train_Y[0, :], 'bo', label='Training data') plt.plot(train_X[0, :], fopt[0, :], 'k-', label='Fitted Line') plt.axis('equal') plt.legend(loc='lower right') ``` ### Run the command line ##### tensorboard --logdir=/tmp/tf_logs/linear_regression ### Open http://localhost:6006/ into your web browser <img src="images/tsboard/linear_regression.png">
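The cells above target the pre-1.0 TensorFlow API (`tf.mul`, `tf.initialize_all_variables`, `tf.train.SummaryWriter`), all of which were later removed. For orientation only, here is a minimal sketch of the same y = Wx + b fit under TensorFlow 2.x; it assumes a TF 2 installation, and the variable names and noise level are illustrative rather than taken from the notebook.

```
import numpy as np
import tensorflow as tf  # assumes TensorFlow 2.x

np.random.seed(1)
x = tf.constant(np.random.random(20), dtype=tf.float32)
y = 0.7 * x - 1.0 + 0.03 * tf.random.normal(x.shape)   # noisy reference line, as in the notebook

W = tf.Variable(tf.random.normal([]))   # scalar weight
b = tf.Variable(tf.random.normal([]))   # scalar bias
opt = tf.keras.optimizers.SGD(learning_rate=0.01)

for epoch in range(1000):
    with tf.GradientTape() as tape:
        cost = tf.reduce_mean(tf.square(W * x + b - y))   # mean squared error
    opt.apply_gradients(zip(tape.gradient(cost, [W, b]), [W, b]))
```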
github_jupyter
0.740644
0.858719
# Convolutional: Instant Recognition with Caffe ### 1. Setup * First, set up Python, `numpy`, and `matplotlib`. ``` import time # set up Python environment: numpy for numerical routines, and matplotlib for plotting import numpy as np import matplotlib.pyplot as plt # display plots in this notebook %matplotlib inline # set display defaults plt.rcParams['figure.figsize'] = (10, 10) # large images plt.rcParams['image.interpolation'] = 'nearest' # don't interpolate: show square pixels plt.rcParams['image.cmap'] = 'gray' # use grayscale output rather than a (potentially misleading) color heatmap ``` * Load `caffe`. ``` # The caffe module needs to be on the Python path; # we'll add it here explicitly. import sys caffe_root = '../' # this file should be run from {caffe_root}/examples (otherwise change this line) sys.path.insert(0, caffe_root + 'python') import caffe # If you get "No module named _caffe", either you have not built pycaffe or you have the wrong path. ``` * If needed, download the reference model ("CaffeNet", a variant of AlexNet). ### 2. Load net and set up input preprocessing * Set Caffe to CPU mode and load the net from disk. ``` caffe.set_mode_cpu() model_def = caffe_root + 'models/DCGAN-finetune/deploy_DCGAN_train_val.prototxt' model_weights = caffe_root + 'models/DCGAN-finetune/model-Licmophora/_iter_1500.caffemodel' net = caffe.Net(model_def, # defines the structure of the model model_weights, # contains the trained weights caffe.TEST) # use test mode (e.g., don't perform dropout) ``` * Set up input preprocessing. (We'll use Caffe's `caffe.io.Transformer` to do this, but this step is independent of other parts of Caffe, so any custom preprocessing code may be used). Our default CaffeNet is configured to take images in BGR format. Values are expected to start in the range [0, 255] and then have the mean ImageNet pixel value subtracted from them. In addition, the channel dimension is expected as the first (_outermost_) dimension. As matplotlib will load images with values in the range [0, 1] in RGB format with the channel as the _innermost_ dimension, we are arranging for the needed transformations here. ``` # create transformer for the input called 'data' transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension #transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255] ``` ### 3. CPU classification * Now we're ready to perform classification. Even though we'll only classify one image, we'll set a batch size of 50 to demonstrate batching. ``` # set the size of the input (we can skip this if we're happy # with the default; we can also change it later, e.g., for different batch sizes) net.blobs['data'].reshape(100, # batch size 1, # 1-channel images 64, 64) # image size is 227x227 net.forward() plt.imshow(net.blobs['data'].data[:8, 0].transpose(2, 0, 1).reshape(64, 8*64), cmap='gray'); plt.axis('off') print 'train labels:', net.blobs['label'].data[:8] ``` * Load an image (that comes with Caffe) and perform the preprocessing we've set up. ``` image = caffe.io.load_image(caffe_root + 'data/MVCO/IFCB1_2012_258_172757_00311.png') transformed_image = transformer.preprocess('data', image) plt.imshow(image) ``` * Adorable! Let's classify it! ``` # copy the image data into the memory allocated for the net net.blobs['data'].data[...] 
= transformed_image.reshape(3,1,64,64) # reshape the imagedata and fit int the network input ### perform classification output = net.forward() #output_prob = output['loss'] # the output probability vector for the first image in the batch #print 'predicted class is:', output_prob.argmax() ``` * The net gives us a vector of probabilities; the most probable class was the 281st one. But is that correct? Let's check the ImageNet labels... ### 5. Examining intermediate output * A net is not just a black box; let's take a look at some of the parameters and intermediate activations. First we'll see how to read out the structure of the net in terms of activation and parameter shapes. * For each layer, let's look at the activation shapes, which typically have the form `(batch_size, channel_dim, height, width)`. The activations are exposed as an `OrderedDict`, `net.blobs`. * Since we're dealing with four-dimensional data here, we'll define a helper function for visualizing sets of rectangular heatmaps. ``` def vis_square(data, label, i): """Take an array of shape (n, height, width) or (n, height, width, 3) and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)""" # normalize data for display data = (data - data.min()) / (data.max() - data.min()) # force the number of filters to be square n = int(np.ceil(np.sqrt(data.shape[0]))) padding = (((0, n ** 2 - data.shape[0]), (0, 1), (0, 1)) # add some space between filters + ((0, 0),) * (data.ndim - 3)) # don't pad the last dimension (if there is one) data = np.pad(data, padding, mode='constant', constant_values=1) # pad with ones (white) # tile the filters into an image data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1))) data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:]) plt.imshow(data); plt.axis('off') name = str(i)+label + '.png' plt.imsave(name,data) ``` * First we'll look at the first layer filters, `conv1` ``` # the parameters are a list of [weights, biases] filters = net.params['conv1'][0].data #print filters.shape vis_square(filters) ``` * The first layer output, `conv1` (rectified responses of the filters above, first 36 only) ``` filters = net.params['gen_conv5'][0].data[1] print filters.shape vis_square(filters) output = net.forward() feat = net.blobs['gen_conv2'].data[1] vis_square(feat) numbers = 100 for i in range(0,numbers): output = net.forward() feat = net.blobs['gen_conv5'].data[i] vis_square(feat, '_Licmophora62_1k_generate', i) ``` * The fifth layer after pooling, `pool5` ``` output = net.forward() feat = net.blobs['gen_conv5'].data[:8,0] #vis_square(feat) plt.imshow(feat.transpose(2, 0, 1).reshape(64,8*64), cmap='gray'); plt.axis('off') ``` * The first fully connected layer, `fc6` (rectified) We show the output values and the histogram of the positive values ``` #print net.params['ip1encode'][0].data feat = net.blobs['rand_vec'].data[0] plt.subplot(2, 1, 1) plt.plot(feat.flat) plt.subplot(2, 1, 2) _ = plt.hist(feat.flat[feat.flat > 0], bins=100) feat = net.blobs['fc6decode'].data[0] plt.subplot(2, 1, 1) plt.plot(feat.flat) plt.subplot(2, 1, 2) _ = plt.hist(feat.flat[feat.flat > 0], bins=100) ``` * The final probability output, `prob` ``` feat = net.blobs['prob'].data[0] plt.figure(figsize=(15, 3)) plt.plot(feat.flat) ``` Note the cluster of strong predictions; the labels are sorted semantically. The top peaks correspond to the top predicted labels, as shown above. ### 6. 
Extract the feature To extract deepfeature ``` #load the net and set caffe model import sys caffe_root = '../' # this file should be run from {caffe_root}/examples (otherwise change this line) sys.path.insert(0, caffe_root + 'python') caffe.set_mode_cpu() model_def = caffe_root + 'models/CAE_mvco/CAE_vgg_train_val.prototxt' model_weights = caffe_root + 'models/CAE_mvco/_iter_120000.caffemodel' net = caffe.Net(model_def, # defines the structure of the model model_weights, # contains the trained weights caffe.TEST) # use test mode (e.g., don't perform dropout) # perform classification f = open('feature.txt', "a+") iteration = 2 for a in range(0,iteration): net.forward() f.write(net.blobs['fc6decode'].data[0]) ```
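A caveat on the last cell: `f.write` expects a string, so passing the `fc6decode` NumPy array to it directly will normally raise a `TypeError`. Below is a minimal sketch of the same feature dump using `np.savetxt`; it assumes the `net` object and blob name configured above.

```
import numpy as np

# Hypothetical rewrite of the feature-dump loop; 'net' and the 'fc6decode'
# blob are assumed to exist as set up earlier in the notebook.
iterations = 2
with open('feature.txt', 'a') as f:
    for _ in range(iterations):
        net.forward()                                    # run one batch through the net
        feat = net.blobs['fc6decode'].data[0]            # first sample's feature vector
        np.savetxt(f, feat.reshape(1, -1), fmt='%.6f')   # one text row per sample
```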
github_jupyter
0.451568
0.954478
<h1>Federated Word Vectors</h1> This example demonstrates how word vector model PyTorch could be trained using federated learning with PySyft. We distribute the text data to two workers Bob and Alice to whom the model is sent and trained. Upon training the model the trained model is sent back to the owner of the model and used to make predictions or the embedding layer which consist of learnt word vectors could be used. Federated learning applied to word vectors could be a great way to analyze textual data without knowing the specifics of the text and risk invading privacy. In a real-time application , say understanding internal e-mail culture of a organization. In this example we learn a word embedding by trying to predict the next word given context of N words. Hrishikesh Kamath - GitHub: @<a href="http://github.com/kamathhrishi">kamathhrishi</a> ``` #Import modules required for PyTorch Neural Networks import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import Dataset # Shakespeare Sonnet 2 as text to be learned dataset = """When forty winters shall besiege thy brow, And dig deep trenches in thy beauty's field, Thy youth's proud livery so gazed on now, Will be a totter'd weed of small worth held: Then being asked, where all thy beauty lies, Where all the treasure of thy lusty days; To say, within thine own deep sunken eyes, Were an all-eating shame, and thriftless praise. How much more praise deserv'd thy beauty's use, If thou couldst answer 'This fair child of mine Shall sum my count, and make my old excuse,' Proving his beauty by succession thine! This were to be new made when thou art old, And see thy blood warm when thou feel'st it cold.""".split() class Arguments(): def __init__(self): self.batch_size = 1 self.test_batch_size = 1000 self.epochs = 10 self.lr = 0.01 self.momentum = 0.5 #<-We currenly do not support momentum self.no_cuda = False self.seed = 1 self.log_interval = 10 self.save_model = False self.context_size=3 self.embedding_dim=10 args=Arguments() #Define seed to maintain consistency torch.manual_seed(args.seed) #Import PySyft library required for federated learning import syft as sy #Define Syft workers Bob and Alice for federated learning hook = sy.TorchHook(torch) # <-- NEW: hook PyTorch ie add extra functionalities to support Federated Learning bob = sy.VirtualWorker(hook, id="bob") # <-- NEW: define remote worker bob alice = sy.VirtualWorker(hook, id="alice") # <-- NEW: and alice #vocabulary of from the corpus vocab = set(dataset) word_to_ix = {word: i for i, word in enumerate(vocab)} ix_to_word={word_to_ix[word]:word for word in word_to_ix} ``` <h2>Torch Dataset</h2> Convert text dataset into a torch dataset instance which we will need to create a federated dataset. 
``` class TextDataset(Dataset): def __init__(self,text,transform=None): """arguments: text (List of Strings): Text corpus transform: List of transforms to be performed on the input data """ self.text = text self.data=[] self.targets=[] self.transform = transform #Create Trigrams self.create_context() def __len__(self): return len(self.data) def create_context(self): '''Function used to seperate target and context words and convert them to torch tensors''' context=[] for i in range(len(self.text)-args.context_size): vec=[] for j in range(0,args.context_size): vec.append(self.text[i+j]) context.append([vec,self.text[i+args.context_size]]) for words,target in context: tensor=torch.tensor([word_to_ix[w] for w in words],dtype=torch.long) self.data.append(tensor) self.targets.append(torch.tensor([word_to_ix[target]], dtype=torch.long)) def __getitem__(self, idx): sample=self.data[idx] target=self.targets[idx] if self.transform: sample = self.transform(sample) return sample,target ``` Use federated data loader to distribute dataset to workers. ``` federated_train_loader = sy.FederatedDataLoader( # <-- this is now a FederatedDataLoader TextDataset(dataset) .federate((bob, alice)),batch_size=args.batch_size) ``` <h2>Neural Network Model</h2> Define Neural Network in PyTorch. The network is trained to predict the next word based on given context. Based on the trained model the required embedding is learnt. ``` class NGramLanguageModeler(nn.Module): def __init__(self, vocab_size, embedding_dim, context_size): super(NGramLanguageModeler, self).__init__() self.embeddings = nn.Embedding(vocab_size,embedding_dim) self.linear1 = nn.Linear(context_size * embedding_dim, 128) self.linear2 = nn.Linear(128, vocab_size) def forward(self, inputs): embeds = self.embeddings(inputs).view((1, -1)) out = F.relu(self.linear1(embeds)) out = self.linear2(out) log_probs = F.log_softmax(out,dim=1) return log_probs loss_function = nn.NLLLoss() model = NGramLanguageModeler(len(vocab),args.embedding_dim,args.context_size) optimizer = optim.SGD(model.parameters(), lr=args.lr) ``` <h2>Train Model</h2> ``` def train(): model.train() iteration=0 for context, target in federated_train_loader: model.send(context.location) # Step 1. Prepare the inputs to be passed to the model (i.e, turn the words # into integer indices and wrap them in tensors) context_idxs = context # Step 2. Recall that torch *accumulates* gradients. Before passing in a # new instance, you need to zero out the gradients from the old # instance model.zero_grad() # Step 3. Run the forward pass, getting log probabilities over next # words log_probs = model(context_idxs) # Step 4. Compute your loss function. (Again, Torch wants the target # word wrapped in a tensor) loss = loss_function(log_probs,target[0]) # Step 5. Do the backward pass and update the gradient loss.backward() optimizer.step() model.get() # Get the Python number from a 1-element Tensor by calling tensor.item() # The loss decreased every iteration over the training data! 
iteration+=1 if(iteration%100==0): print(loss.get().item()) for epoch in range(0,args.epochs): train() print("EPOCH: ",epoch+1) if (args.save_model): torch.save(model.state_dict(), "word_vector.pt") ``` <h2>Visualize Results</h2> ``` def SimilarPairs(model,vocab,inverse_vocab): #Function to compute the most similar pairs matrix=[] for ref_index in range(0,len(vocab)): Max=-10.0 Index=0 ref=model.embeddings(torch.LongTensor([ref_index])) for i in range(0,len(vocab)): cos = nn.CosineSimilarity(dim=1, eps=1e-6) output = cos(ref,model.embeddings(torch.LongTensor([i]))) if(output.item()>Max and i!=ref_index): Max=output.item() Index=i matrix.append([ix_to_word[ref_index],ix_to_word[Index],Max]) return(matrix) similar_Pairs=SimilarPairs(model,word_to_ix,ix_to_word) ``` The word vectors learnt don't exactly capture meanings of actual words since it was trained on a smaller corpora. ``` #Similar pairs of first 20 words similar_Pairs[1:20] ``` ## Well Done! And voilà! We now are training a real world Learning model using Federated Learning! ## Shortcomings of this Example Of course, there are dozen of improvements we could think of. We would like the computation to operate in parallel on the workers, to update the central model every `n` batches only, to reduce the number of messages we use to communicate between workers, etc. On the security side it still has some major shortcomings. Most notably, when we call `model.get()` and receive the updated model from Bob or Alice, we can actually learn a lot about Bob and Alice's training data by looking at their gradients. We could **average the gradient across multiple individuals before uploading it to the central server**, like we did in Part 4. The above embeddings are not useful for practical purposes as they are trained on a very small corpus. Increasing corpus size could lead to more useful embeddings. # Congratulations!!! - Time to Join the Community! Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways! ### Star PySyft on GitHub The easiest way to help our community is just by starring the repositories! This helps raise awareness of the cool tools we're building. - [Star PySyft](https://github.com/OpenMined/PySyft) ### Pick our tutorials on GitHub! We made really nice tutorials to get a better understanding of what Federated and Privacy-Preserving Learning should look like and how we are building the bricks for this to happen. - [Checkout the PySyft tutorials](https://github.com/OpenMined/PySyft/tree/master/examples/tutorials) ### Join our Slack! The best way to keep up to date on the latest advancements is to join our community! - [Join slack.openmined.org](http://slack.openmined.org) ### Join a Code Project! The best way to contribute to our community is to become a code contributor! If you want to start "one off" mini-projects, you can go to PySyft GitHub Issues page and search for issues marked `Good First Issue`. - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) ### Donate If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups! 
- [Donate through OpenMined's Open Collective Page](https://opencollective.com/openmined)
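The shortcomings section above suggests averaging updates from several individuals before they reach the central model. As a plain-PyTorch illustration of that idea only (a sketch, not PySyft's own API; `average_state_dicts` is a hypothetical helper):

```
import torch

def average_state_dicts(state_dicts):
    """FedAvg-style mean of corresponding parameter tensors from several workers."""
    avg = {}
    for name in state_dicts[0]:
        avg[name] = torch.stack([sd[name].float() for sd in state_dicts]).mean(dim=0)
    return avg

# usage sketch: combine Bob's and Alice's locally trained copies of the model
# model.load_state_dict(average_state_dicts([bob_model.state_dict(), alice_model.state_dict()]))
```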
github_jupyter
0.725551
0.962532
# Use AutoAI and batch deployment to predict credit risk with Watson Machine Learning REST API This notebook contains steps and code to demonstrate support of AutoAI experiments in Watson Machine Learning Service. It introduces commands for getting data, training experiments, persisting pipelines, publishing models, deploying models and scoring. Some familiarity with cURL is helpful. This notebook uses cURL examples. ## Learning goals The learning goals of this notebook are: - Working with Watson Machine Learning experiments to train AutoAI models. - Downloading computed models to local storage. - Batch deployment and scoring of trained model. ## Contents This notebook contains the following parts: 1. [Setup](#setup) 2. [Experiment definition](#experiment_definition) 3. [Experiment Run](#run) 4. [Historical runs](#runs) 5. [Deploy and Score](#deploy_and_score) 6. [Cleaning](#cleaning) 7. [Summary and next steps](#summary) <a id="setup"></a> ## 1. Set up the environment Before you use the sample code in this notebook, you must perform the following setup tasks: - Create a <a href="https://console.ng.bluemix.net/catalog/services/ibm-watson-machine-learning/" target="_blank" rel="noopener no referrer">Watson Machine Learning (WML) Service</a> instance (a free plan is offered and information about how to create the instance can be found <a href="https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/ml-service-instance.html?context=analytics" target="_blank" rel="noopener no referrer">here</a>). - Create a <a href="https://console.bluemix.net/catalog/infrastructure/cloud-object-storage" target="_blank" rel="noopener no referrer">Cloud Object Storage (COS)</a> instance (a lite plan is offered and information about how to order storage can be found <a href="https://console.bluemix.net/docs/services/cloud-object-storage/basics/order-storage.html#order-storage" target="_blank" rel="noopener no referrer">here</a>). <br/>**Note: When using Watson Studio, you already have a COS instance associated with the project you are running the notebook in.** You can find your COS credentials in COS instance dashboard under the **Service credentials** tab. Go to the **Endpoint** tab in the COS instance's dashboard to get the endpoint information. Authenticate the Watson Machine Learning service on IBM Cloud. Your Cloud API key can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam#/users). From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. Give your key a name and click **Create**, then copy the created key and paste it below. **NOTE:** You can also get service specific apikey by going to the [**Service IDs** section of the Cloud Console](https://cloud.ibm.com/iam/serviceids). From that page, click **Create**, then copy the created key and paste it below. **Action:** Fill below information to get started with this notebook. ``` %env API_KEY=... %env WML_ENDPOINT_URL=... %env WML_INSTANCE_CRN="fill out only if you want to create a new space" %env WML_INSTANCE_NAME=... %env COS_CRN="fill out only if you want to create a new space" %env COS_ENDPOINT=... %env COS_BUCKET=... %env COS_ACCESS_KEY_ID=... %env COS_SECRET_ACCESS_KEY=... %env COS_API_KEY=... 
%env SPACE_ID="fill out only if you have space already created" %env DATAPLATFORM_URL=https://api.dataplatform.cloud.ibm.com %env AUTH_ENDPOINT=https://iam.cloud.ibm.com/oidc/token ``` <a id="wml_token"></a> ### Getting WML authorization token for further cURL calls <a href="https://cloud.ibm.com/docs/cloud-object-storage?topic=cloud-object-storage-curl#curl-token" target="_blank" rel="noopener no referrer">Example of cURL call to get WML token</a> ``` %%bash --out token curl -sk -X POST \ --header "Content-Type: application/x-www-form-urlencoded" \ --header "Accept: application/json" \ --data-urlencode "grant_type=urn:ibm:params:oauth:grant-type:apikey" \ --data-urlencode "apikey=$API_KEY" \ "$AUTH_ENDPOINT" \ | cut -d '"' -f 4 %env TOKEN=$token ``` <a id="space_creation"></a> ### Space creation **Tip:** If you do not have `space` already created, please convert below three cells to `code` and run them. First of all, you need to create a `space` that will be used in all of your further cURL calls. If you do not have `space` already created, below is the cURL call to create one. <a href="https://cpd-spaces-api.eu-gb.cf.appdomain.cloud/#/Spaces/spaces_create" target="_blank" rel="noopener no referrer">Space creation</a> Space creation is asynchronous. This means that you need to check space creation status after creation call. Make sure that your newly created space is `active`. <a href="https://cpd-spaces-api.eu-gb.cf.appdomain.cloud/#/Spaces/spaces_get" target="_blank" rel="noopener no referrer">Get space information</a> <a id="experiment_definition"></a> ## 2. Experiment / optimizer configuration Provide input information for AutoAI experiment / optimizer: - `name` - experiment name - `learning_type` - type of the problem - `label` - target column name - `scorer_for_ranking` - optimization metric - `holdout_param` - percentage of training data to use as a holdout [0 - 1] - `daub_include_only_estimators` - list of estimators to use You can modify `parameters` section of the following cURL call to change AutoAI experiment settings. 
<a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Pipelines/pipelines_create" target="_blank" rel="noopener no referrer">Define AutoAI experiment.</a> ``` %%bash --out pipeline_payload PIPELINE_PAYLOAD='{"space_id": "'"$SPACE_ID"'", "name": "Credit Risk Prediction - AutoAI", "description": "", "document": {"doc_type": "pipeline", "version": "2.0", "pipelines": [{"id": "autoai", "runtime_ref": "hybrid", "nodes": [{"id": "automl", "type": "execution_node", "parameters": {"stage_flag": true, "output_logs": true, "input_file_separator": ",", "optimization": {"learning_type": "binary", "label": "Risk", "max_num_daub_ensembles": 1, "daub_include_only_estimators": ["ExtraTreesClassifierEstimator", "GradientBoostingClassifierEstimator", "LGBMClassifierEstimator", "LogisticRegressionEstimator", "RandomForestClassifierEstimator", "XGBClassifierEstimator", "DecisionTreeClassifierEstimator"], "scorer_for_ranking": "roc_auc", "compute_pipeline_notebooks_flag": true, "run_cognito_flag": true, "holdout_param": 0.1}}, "runtime_ref": "autoai", "op": "kube"}]}], "runtimes": [{"id": "autoai", "name": "auto_ai.kb", "app_data": {"wml_data": {"hardware_spec": { "name": "L"}}}, "version": "3.0.2"}],"primary_pipeline": "autoai"}}' echo $PIPELINE_PAYLOAD | python -m json.tool %env PIPELINE_PAYLOAD=$pipeline_payload %%bash --out pipeline_id curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$PIPELINE_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/pipelines?version=2020-08-01" \ | grep '"id": ' | awk -F '"' '{ print $4 }' | sed -n 5p %env PIPELINE_ID=$pipeline_id ``` <a id="experiment_details"></a> ### Get experiment details To retrieve AutoAI experiment / optimizer configuration you can follow below cURL GET call. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Pipelines/pipelines_get" target="_blank" rel="noopener no referrer">Get experiment / optimizer information</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/pipelines/$PIPELINE_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` <a id="training_connection"></a> ### Training data connection Define connection information to COS bucket and training data CSV file. This example uses the German Credit Risk dataset. The dataset can be downloaded from [here](https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/data/credit_risk/credit_risk_training_light.csv). You can also download it to local filesystem by running the cell below. **Action**: Upload training data to COS bucket and enter location information in the next cURL examples. ``` %%bash wget https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/data/credit_risk/credit_risk_training_light.csv \ -O credit_risk_training_light.csv ``` <a id="cos_token"></a> ### Get COS token Retrieve COS token for further authentication calls. 
<a href="https://cloud.ibm.com/docs/cloud-object-storage?topic=cloud-object-storage-curl#curl-token" target="_blank" rel="noopener no referrer">Retrieve COS authentication token</a> ``` %%bash --out cos_token curl -s -X "POST" "$AUTH_ENDPOINT" \ -H 'Accept: application/json' \ -H 'Content-Type: application/x-www-form-urlencoded' \ --data-urlencode "apikey=$COS_API_KEY" \ --data-urlencode "response_type=cloud_iam" \ --data-urlencode "grant_type=urn:ibm:params:oauth:grant-type:apikey" \ | cut -d '"' -f 4 %env COS_TOKEN=$cos_token ``` <a id="cos_upload"></a> ### Upload file to COS Upload your local dataset into your COS bucket <a href="https://cloud.ibm.com/docs/cloud-object-storage?topic=cloud-object-storage-curl#curl-put-object" target="_blank" rel="noopener no referrer">Upload file to COS</a> ``` %%bash curl -sk -X PUT \ --header "Authorization: Bearer $COS_TOKEN" \ --header "Content-Type: application/octet-stream" \ --data-binary "@credit_risk_training_light.csv" \ "$COS_ENDPOINT/$COS_BUCKET/credit_risk_training_light.csv" ``` There should be an empty response when upload finished succesfully. ### Create connection to COS Created connection will be used in training as a reference to given COS location. ``` %%bash --out connection_payload CONNECTION_PAYLOAD='{"name": "REST COS connection", "datasource_type": "193a97c1-4475-4a19-b90c-295c4fdc6517", "properties": {"bucket": "'"$COS_BUCKET"'", "access_key": "'"$COS_ACCESS_KEY_ID"'", "secret_key": "'"$COS_SECRET_ACCESS_KEY"'", "iam_url": "'"$AUTH_ENDPOINT"'", "url": "'"$COS_ENDPOINT"'"}, "origin_country": "US"}' echo $CONNECTION_PAYLOAD | python -m json.tool %env CONNECTION_PAYLOAD=$connection_payload %%bash --out connection_id CONNECTION_ID=$(curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$CONNECTION_PAYLOAD" \ "$DATAPLATFORM_URL/v2/connections?space_id=$SPACE_ID") CONNECTION_ID=${CONNECTION_ID#*asset_id\":\"} CONNECTION_ID=${CONNECTION_ID%%\"*} echo $CONNECTION_ID %env CONNECTION_ID=$connection_id ``` ### Connections to external databases. If you want to create experiment using data saved in the external database instead of COS, you should change the following properties in the `CONNECTION_PAYLOAD`: - `datasource_type` - `properties` Where `properties` key should contain credentials required for the connection creation which should follow the bellow pattern: ``` "properties": { "database": "***", "password": "***", "port": "***", "host": "***", "ssl": "***", "username": "***" } ``` The `datasource_type` list can be obtained by using bellow request. #### Exemplary payload for **DB2** database: ``` { "name": "REST DB2 connection", "datasource_type": "8c1a4480-1c29-4b33-9086-9cb799d7b157", "properties": { "database": "BLUDB", "username": "***" "password": "***", "port": 50000, "host": "dashdb-txn-sbox-yp-dal09-11.services.dal.bluemix.net", "ssl": "true", }, "origin_country": "US" } ``` <a id="run"></a> ## 3. Experiment run This section provides samples about how to trigger AutoAI experiment via cURL calls. 
<a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Trainings/trainings_create" target="_blank" rel="noopener no referrer">Schedule a training job for AutoAI experiment</a> ``` %%bash --out training_payload TRAINING_PAYLOAD='{"space_id": "'"$SPACE_ID"'", "training_data_references": [{"type": "connection_asset", "id": "credit_risk_training_light.csv", "connection": {"id": "'"$CONNECTION_ID"'"}, "location": {"bucket": "'"$COS_BUCKET"'", "file_name": "credit_risk_training_light.csv"}}], "results_reference": {"type": "connection_asset", "id": "autoai_results", "connection": {"id": "'"$CONNECTION_ID"'"}, "location": {"bucket": "'"$COS_BUCKET"'", "file_name": "."}}, "tags": [{"value": "autoai"}], "pipeline": {"id": "'"$PIPELINE_ID"'"}}' echo $TRAINING_PAYLOAD | python -m json.tool %env TRAINING_PAYLOAD=$training_payload %%bash --out training_id curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$TRAINING_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/trainings?version=2020-08-01" \ | awk -F'"id":' '{print $2}' | cut -c2-37 %env TRAINING_ID=$training_id ``` <a id="training_details"></a> ### Get training details Training is an asynchronous endpoint. In case you want to monitor training status and details, you need to use a GET method and specify which training you want to monitor by usage of training ID. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Trainings/trainings_get" target="_blank" rel="noopener no referrer">Get information about training job</a> ### Get training status ``` %%bash STATUS=$(curl -sk -X GET\ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/trainings/$TRAINING_ID?space_id=$SPACE_ID&version=2020-08-01") STATUS=${STATUS#*state\":\"} STATUS=${STATUS%%\"*} echo $STATUS ``` Please make sure that training is completed before you go to the next sections. Monitor `state` of your training by running above cell couple of times. <a id="runs"></a> ## 4. Historical runs In this section you will see cURL examples describing how to get historical training runs information. Output should be similar to the output from training creation but you should see more trainings entries. Listing trainings: <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Trainings/trainings_list" target="_blank" rel="noopener no referrer">Get list of historical training jobs information</a> ``` %%bash HISTORICAL_TRAINING_LIMIT_TO_GET=2 curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/trainings?space_id=$SPACE_ID&version=2020-08-01&limit=$HISTORICAL_TRAINING_LIMIT_TO_GET" \ | python -m json.tool ``` <a id="training_cancel"></a> ### Cancel training run **Tip:** If you want to cancel your training, please convert below cell to `code`, specify training ID and run. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Trainings/trainings_delete" target="_blank" rel="noopener no referrer">Canceling training</a> --- <a id="deploy_and_score"></a> ## 5. Deploy and Score In this section you will learn how to deploy and score pipeline model as webservice using WML instance. Before deployment creation, you need store your model in WML repository. Please see below cURL call example how to do it. 
Remember that you need to specify where your chosen model is stored in COS. <a id="model_store"></a> ### Store AutoAI model Store information about your model to WML repository. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_create" target="_blank" rel="noopener no referrer">Model storing</a> ``` %%bash --out model_payload MODEL_PAYLOAD='{"space_id": "'"$SPACE_ID"'","name": "autoai_credit_risk_model","description": "This is description","type": "wml-hybrid_0.1", "software_spec": {"name": "hybrid_0.1"}, "content_location": { "type": "s3", "contents": [ { "content_format": "native", "file_name": "pipeline_model.json", "location": "'"$TRAINING_ID/assets/${TRAINING_ID}_P1_global_output/resources/wml_model/pipeline_model.json"'"}, { "content_format": "pipeline-node", "file_name": "P1_automl.zip", "location": "'"$TRAINING_ID/assets/${TRAINING_ID}_P1_global_output/resources/wml_model/P1_automl.zip"'", "pipeline_node_id": "automl"}],"connection": {"endpoint_url": "'"$COS_ENDPOINT"'", "access_key_id": "'"$COS_ACCESS_KEY_ID"'", "secret_access_key": "'"$COS_SECRET_ACCESS_KEY"'"}, "location": {"bucket": "'"$COS_BUCKET"'"}}}' echo $MODEL_PAYLOAD | python -m json.tool %env MODEL_PAYLOAD=$model_payload %%bash --out model_id curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$MODEL_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/models?version=2020-08-01" \ | grep '"id": ' | awk -F '"' '{ print $4 }' | sed -n 2p %env MODEL_ID=$model_id ``` <a id="model_content_download"></a> ### Download model content If you want to download your saved model, please make the following call. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_filtered_download" target="_blank" rel="noopener no referrer">Download model content</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --output "model.tar.gz" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/download?space_id=$SPACE_ID&version=2020-08-01" !ls -l model.tar.gz ``` ## <a id="deployment_creation"></a> ### Deployment creation An AutoAI Batch deployment creation is presented below. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployments/deployments_create" target="_blank" rel="noopener no referrer">Create deployment</a> ``` %%bash --out deployment_payload DFEPLOYMENT_PAYLOAD='{"space_id": "'"$SPACE_ID"'","name": "AutoAI deployment","description": "This is description","batch": {}, "hybrid_pipeline_hardware_specs": [{"node_runtime_id": "auto_ai.kb", "hardware_spec": {"name": "M"}}],"asset": {"id": "'"$MODEL_ID"'"}}' echo $DFEPLOYMENT_PAYLOAD | python -m json.tool %env DFEPLOYMENT_PAYLOAD=$deployment_payload %%bash --out deployment_id curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$DFEPLOYMENT_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/deployments?version=2020-08-01" | grep '"id": ' | awk -F '"' '{ print $4 }' | sed -n 2p %env DEPLOYMENT_ID=$deployment_id ``` <a id="deployment_details"></a> ### Get deployment details As deployment API is asynchronous, please make sure your deployment is in `ready` state before going to the next points. 
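A small Python polling loop for that check is sketched below; it assumes the GET response shown in the next cell reports the state under `entity.status.state`:

```
import os
import time
import requests

url = f"{os.environ['WML_ENDPOINT_URL']}/ml/v4/deployments/{os.environ['DEPLOYMENT_ID']}"
params = {"space_id": os.environ["SPACE_ID"], "version": "2020-08-01"}
headers = {"Authorization": f"Bearer {os.environ['TOKEN']}"}

# Poll the deployment until it reaches a terminal state.
while True:
    details = requests.get(url, params=params, headers=headers).json()
    state = details.get("entity", {}).get("status", {}).get("state")  # assumed response path
    print("deployment state:", state)
    if state in ("ready", "failed"):
        break
    time.sleep(10)
```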
<a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployments/deployments_get" target="_blank" rel="noopener no referrer">Get deployment details</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ "$WML_ENDPOINT_URL/ml/v4/deployments/$DEPLOYMENT_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` <a id="batch_score"></a> ### Score your Batch deployment Scoring for Batch deployment is done by creating `jobs`. User can specify job payload as a json or as data connection to eg. COS. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployment%20Jobs/deployment_jobs_create" target="_blank" rel="noopener no referrer">Create deployment job</a> ``` %%bash --out job_payload JOB_PAYLOAD='{"name": "AutoAI job", "space_id": "'"$SPACE_ID"'","deployment": {"id": "'"$DEPLOYMENT_ID"'"}, "hybrid_pipeline_hardware_specs": [{"node_runtime_id": "auto_ai.kb", "hardware_spec": {"name": "M"}}], "scoring": {"input_data": [{"fields": ["CheckingStatus", "LoanDuration", "CreditHistory", "LoanPurpose", "LoanAmount", "ExistingSavings", "EmploymentDuration", "InstallmentPercent", "Sex", "OthersOnLoan", "CurrentResidenceDuration", "OwnsProperty", "Age", "InstallmentPlans", "Housing", "ExistingCreditsCount", "Job", "Dependents", "Telephone", "ForeignWorker"], "values": [["less_0", 6, "all_credits_paid_back", "car_used", 250, "less_100", "1_to_4", 2, "male", "none", 2, "savings_insurance", 28, "stores", "rent", 1, "skilled", 1, "none", "yes"]]}]}}' echo $JOB_PAYLOAD | python -m json.tool %env JOB_PAYLOAD=$job_payload %%bash --out job_id curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$JOB_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/deployment_jobs?version=2020-08-01" \ | grep '"id": ' | awk -F '"' '{ print $4 }' | sed -n 2p %env JOB_ID=$job_id ``` <a id="job_list"></a> ### Listing all Batch jobs <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployment%20Jobs/deployment_jobs_list" target="_blank" rel="noopener no referrer">List jobs</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/deployment_jobs?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` <a id="job_get"></a> ### Get particular job details <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployment%20Jobs/deployment_jobs_get" target="_blank" rel="noopener no referrer">Get job details</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/deployment_jobs/$JOB_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` <a id="job_cancel"></a> ### Cancel job **Tip:** You can cancel running job by calling delete method. Just convert below cell to `code` and run it. 
<a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployment%20Jobs/deployment_jobs_delete" target="_blank" rel="noopener no referrer">Cancel job</a> <a id="deployments_list"></a> ### Listing all deployments <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployments/deployments_list" target="_blank" rel="noopener no referrer">List deployments details</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ "$WML_ENDPOINT_URL/ml/v4/deployments?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` <a id="cleaning"></a> ## 6. Cleaning section Below section is useful when you want to clean all of your previous work within this notebook. Just convert below cells into the `code` and run them. <a id="training_delete"></a> ### Delete training run **Tip:** You can completely delete a training run with its metadata. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Trainings/trainings_delete" target="_blank" rel="noopener no referrer">Deleting training</a> <a id="job_delete"></a> ### Delete job **Tip:** If you want remove job completely (with metadata), just specify `hard_delete` to True. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployment%20Jobs/deployment_jobs_delete" target="_blank" rel="noopener no referrer">Delete job</a> <a id="deployment_delete"></a> ### Deleting deployment **Tip:** You can delete existing deployment by calling DELETE method. <a id="model_delete"></a> ### Delete model from repository **Tip:** If you want to completely remove your stored model and model metadata, just use a DELETE method. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_delete" target="_blank" rel="noopener no referrer">Delete model from repository</a> <a id="summary"></a> ## 7. Summary and next steps You successfully completed this notebook!. You learned how to use `cURL` calls to store, deploy and score a AutoAI model in WML. ### Authors **Amadeusz Masny**, Python Software Developer in Watson Machine Learning at IBM **Jan Sołtysik**, Intern in Watson Machine Learning at IBM Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
``` import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import warnings warnings.filterwarnings('ignore') os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]="1" import pandas as pd import numpy as np from gtda.time_series import SlidingWindow import matplotlib.pyplot as plt from tensorflow.python.keras.backend import set_session import tensorflow as tf config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True config.log_device_placement = True sess2 = tf.compat.v1.Session(config=config) set_session(sess2) from tensorflow.keras.layers import Dense, MaxPooling1D, Flatten from tensorflow.keras import Input, Model from tensorflow.keras.callbacks import ModelCheckpoint import tensorflow.compat.v1.keras.backend as K from tensorflow.keras.models import load_model from tcn import TCN, tcn_full_summary from tcn import compiled_tcn from tensorflow.keras.utils import to_categorical import csv import random import itertools from keras_flops import get_flops from mango.tuner import Tuner import time import pickle from hardware_utils import * from data_utils import * ``` ## Import Dataset ``` sampling_rate = 100 window_size = 550 stride = 50 f = '/home/nesl/earable/Earable/Activity_Dataset/' #dataset directory X_tr, Y_tr, X_test, Y_test = import_auritus_activity_dataset(dataset_folder = f, use_timestamp=False, shuffle=True, window_size = window_size, stride = stride, return_test_set = True, test_set_size = 300,channels=0) print(X_tr.shape) print(Y_tr.shape) print(X_test.shape) print(Y_test.shape) ``` # Training and NAS ``` dirpath="/home/nesl/Mbed Programs/auritus_tcn/" #hardware program directory - this is where the TCN deployment code is stored device = "NUCLEO_F446RE" #which hardware to use model_name = 'Auritus_HIL'+device+'.hdf5' HIL = True #use real hardware or proxy? quantization = False #use quantization or not? 
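# Note: objective_NN (defined below) consumes these flags as follows. When HIL is True, each
# candidate model is converted, compiled and profiled on the attached board via HIL_controller,
# which returns measured RAM, Flash and latency. When HIL is False, the TFLite file size,
# estimated memory usage and FLOPS are used as a proxy for on-device cost.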
model_epochs = 900 #epochs to train each model for NAS_epochs = 50 #epochs for hyperparameter tuning output_name = 'g_model.tflite' log_file_name = 'TCN_Auritus_'+device+'.csv' if os.path.exists(log_file_name): os.remove(log_file_name) row_write = ['score', '1-accuracy','RAM','Flash','FLOPS','Latency','nb_filters','kernel_size', 'dilations','use_skip_connections'] with open(log_file_name, 'a', newline='') as csvfile: csvwriter = csv.writer(csvfile) csvwriter.writerow(row_write) if os.path.exists(log_file_name[0:-4]+'.p'): os.remove(log_file_name[0:-4]+'.p') def objective_NN(epochs=1000,nb_filters=10,kernel_size=3, dilations=[1, 2, 4, 8, 16, 32, 64, 128, 256], use_skip_connections=True): err = 'inf' input_dim=X_tr.shape[2] model = compiled_tcn(return_sequences=False, num_feat=input_dim, num_classes=Y_tr.shape[1], nb_filters=nb_filters, kernel_size=kernel_size, dilations=dilations, nb_stacks=1, max_len=window_size, use_weight_norm=False, use_skip_connections=use_skip_connections) opt = tf.keras.optimizers.Adam() model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=opt,metrics=['accuracy']) Flops = get_flops(model, batch_size=1) convert_to_tflite_model(model=model,training_data=X_tr,quantization=quantization,output_name=output_name) maxRAM, maxFlash = return_hardware_specs(device) if(HIL==True): convert_to_cpp_model(dirpath) RAM, Flash, Latency, idealArenaSize, errorCode = HIL_controller(dirpath=dirpath, chosen_device=device, window_size=window_size, number_of_channels = input_dim, quantization=quantization) score = -5.0 if(Flash==-1): row_write = [score, err,RAM,Flash,Flops,Latency, nb_filters,kernel_size,dilations,use_skip_connections] print('Design choice:',row_write) with open(log_file_name+'.csv', 'a', newline='') as csvfile: csvwriter = csv.writer(csvfile) csvwriter.writerow(row_write) return score elif(Flash!=-1): checkpoint = ModelCheckpoint(model_name, monitor='val_accuracy', verbose=1, save_best_only=True) model.fit(x=X_tr, y=Y_tr,validation_split=0.1, epochs=epochs,callbacks=[checkpoint],shuffle=True,verbose=1) err = 1-checkpoint.best resource_usage = (RAM/maxRAM) + (Flash/maxFlash) score = -err + 0.01*resource_usage - 0.05*Latency #weigh each component as you like row_write = [score, err,RAM,Flash,Flops,Latency, nb_filters,kernel_size,dilations,use_skip_connections] print('Design choice:',row_write) with open(log_file_name, 'a', newline='') as csvfile: csvwriter = csv.writer(csvfile) csvwriter.writerow(row_write) else: score = -5.0 Flash = os.path.getsize(output_name) RAM = get_model_memory_usage(batch_size=1,model=model) Latency=-1 max_flops = (30e6) if(RAM < maxRAM and Flash<maxFlash): checkpoint = ModelCheckpoint(model_name, monitor='val_accuracy', verbose=1, save_best_only=True) model.fit(x=X_tr, y=Y_tr,validation_split=0.1, epochs=epochs,callbacks=[checkpoint],shuffle=True,verbose=1) err = 1-checkpoint.best resource_usage = (RAM/maxRAM) + (Flash/maxFlash) score = -err + 0.01*resource_usage - 0.05*(Flops/max_flops) #weigh each component as you like row_write = [score, err,RAM,Flash,Flops,Latency, nb_filters,kernel_size,dilations,use_skip_connections] print('Design choice:',row_write) with open(log_file_name, 'a', newline='') as csvfile: csvwriter = csv.writer(csvfile) csvwriter.writerow(row_write) return score import pickle def save_res(data, file_name): pickle.dump( data, open( file_name, "wb" ) ) min_layer = 3 max_layer = 8 a_list = [1,2,4,8,16,32,64,128,256] all_combinations = [] dil_list = [] for r in range(len(a_list) + 1): combinations_object = 
itertools.combinations(a_list, r) combinations_list = list(combinations_object) all_combinations += combinations_list all_combinations = all_combinations[1:] for item in all_combinations: if(len(item) >= min_layer and len(item) <= max_layer): dil_list.append(list(item)) param_dict = { 'nb_filters': range(2,64), 'kernel_size': range(2,16), 'use_skip_connections': [True, False], 'dil_list': dil_list } def objfunc(args_list): objective_evaluated = [] start_time = time.time() for hyper_par in args_list: nb_filters = hyper_par['nb_filters'] kernel_size = hyper_par['kernel_size'] use_skip_connections = hyper_par['use_skip_connections'] dil_list = hyper_par['dil_list'] objective = objective_NN(epochs=model_epochs,nb_filters=nb_filters,kernel_size=kernel_size, dilations=dil_list,use_skip_connections=use_skip_connections,) objective_evaluated.append(objective) end_time = time.time() print('objective:', objective, ' time:',end_time-start_time) return objective_evaluated conf_Dict = dict() conf_Dict['batch_size'] = 1 conf_Dict['num_iteration'] = NAS_epochs conf_Dict['initial_random']= 5 tuner = Tuner(param_dict, objfunc,conf_Dict) all_runs = [] results = tuner.maximize() all_runs.append(results) save_res(all_runs,log_file_name+'.p') ``` # Train the best model ``` input_dim=X_tr.shape[2] model = compiled_tcn(return_sequences=False, num_feat=input_dim, num_classes=Y_tr.shape[1], nb_filters=results['best_params']['nb_filters'], kernel_size=results['best_params']['kernel_size'], dilations=results['best_params']['dilations'], nb_stacks=1, max_len=window_size, use_weight_norm=False, use_skip_connections=results['best_params']['use_skip_connections']) opt = tf.keras.optimizers.Adam() model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=opt,metrics=['accuracy']) checkpoint = ModelCheckpoint(model_name, monitor='val_accuracy', verbose=1, save_best_only=True) model.fit(x=X_tr, y=Y_tr,validation_split=0.1, epochs=model_epochs,callbacks=[checkpoint],shuffle=True,verbose=1) model = load_model(model_name,custom_objects={'TCN': TCN}) test_accu = model.evaluate(x=X_test,y=Y_test)[1] print('Test Accuracy:', test_accu) ``` # Deployment ### Conversion to TFLite ``` convert_to_tflite_model(model=model,training_data=X_tr,quantization=quantization,output_name='g_model.tflite') ``` ### Conversion to C++ ``` convert_to_cpp_model(dirpath) ```
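As an optional sanity check before deploying, you can run the converted TFLite model on a test window and compare its prediction with the Keras model. The sketch below assumes the float (non-quantized) conversion used above:

```
import numpy as np
import tensorflow as tf

# Load the converted model and score one test window with the TFLite interpreter.
interpreter = tf.lite.Interpreter(model_path='g_model.tflite')
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]

sample = X_test[:1].astype(inp['dtype'])
interpreter.set_tensor(inp['index'], sample)
interpreter.invoke()
tflite_pred = np.argmax(interpreter.get_tensor(out['index']), axis=-1)

# Compare against the trained Keras model on the same window.
keras_pred = np.argmax(model.predict(X_test[:1]), axis=-1)
print('TFLite prediction:', tflite_pred, '| Keras prediction:', keras_pred)
```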
``` import pandas as pd from sklearn.preprocessing import MultiLabelBinarizer import sys from colorama import Fore, Back, Style from collections import Counter import json import re from pprint import pprint import re import numpy as np import copy import math from sklearn.cluster import KMeans from scipy.spatial import distance from sklearn import mixture from sklearn.neighbors import NearestNeighbors from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.ensemble import IsolationForest from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import LogisticRegression import time import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline ``` # This json file is already in flat form by using BatFish on the original raw data ``` data = 'datasets/flat-sample/serverProperties.json' props = [] datas = [] # Handling json file input to load as the json object with open(data) as f: json_object = json.load(f) # Extract the property names from the json object props = [] for i, prop in enumerate(json_object[0]): if i > 0: props.append(prop) datas.append([]) # Extract data for i in range(len(json_object)): for j, prop in enumerate(props): datas[j].append(json_object[i][prop]) json_object # The Features props datas ``` # Using Scikit Learn's MultiLabelBinarizer to convert the categorical data ``` mlb = MultiLabelBinarizer() encodedLists = [] frequencyLists = [] uniqueClasses = [] proportion = 0 for i, data in enumerate(datas): # fit_transform calculates the size of each category automatically based on the input data # and then encodes it into the multilabel bit encoding encodedList = mlb.fit_transform(datas[i]) encodedLists.append(encodedList) uniqueClasses.append(mlb.classes_) frequencyList = [0] * len(encodedList[0]) proportion += len(encodedList[0]) * len(encodedList) for e in encodedList: for i in range(len(e)): frequencyList[i] += e[i] frequencyLists.append(frequencyList) # After applying the encoder on original data encodedLists # For every feature in our data frequencyLists uniqueClasses # All the unique classes in our data set mlb.classes_ densityLists = [] normalizedDensityLists = [] aggregatedDensityList = [0] * len(encodedLists[0]) for i in range(len(encodedLists)): densityList = [0] * len(encodedLists[i]) normalizedDensityList = [0] * len(encodedLists[i]) for j in range(len(densityList)): for k in range(len(encodedLists[i][j])): densityList[j] += encodedLists[i][j][k] * frequencyLists[i][k] normalizedDensityList[j] += encodedLists[i][j][k] * frequencyLists[i][k] / float(proportion) aggregatedDensityList[j] += encodedLists[i][j][k] * frequencyLists[i][k] densityLists.append(densityList) normalizedDensityLists.append(normalizedDensityList) densityLists normalizedDensityLists aggregatedDensityList ``` # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ # Below is the same data but in the form of DataFrames. ## I made dataframes for reference so that we can easily visualize and understand our data. ## Not being used in our code in this format. 
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ``` temp_data = [[datas[j][i] for j in range(len(datas))] for i in range(len(datas[0]))] df_data = pd.DataFrame(temp_data, columns=props) ``` # Actual Data in json file ``` df_data temp_encodedList = [[encodedLists[j][i] for j in range(len(encodedLists))] for i in range(len(encodedLists[0]))] df_encodedList = pd.DataFrame(temp_encodedList, columns=props) ``` # Data after applying the MultiLabelBinarizer on the above categorical dataset. ## Presence of 1 indicates that a particular value is present for the device. If 1 is not present then we conclude that the value is absent for the device. ``` df_encodedList temp_densityList = [[densityLists[j][i] for j in range(len(densityLists))] for i in range(len(densityLists[0]))] df_densityList = pd.DataFrame(temp_densityList, columns=props) ``` # Data in the form of DensityList ## Density list is calculated by adding all the values if a particular categorical variable appears. If 1 is present we add this value or ignore the value if 0 is present. ``` df_densityList ``` # AggregatedDensityList ## aggregatedDensityList is calculated by taking sum of a row in DensityList ``` aggregatedDensityList ``` # Concatenated Features ``` concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) df_concatenatedFeatures = pd.DataFrame(concatenated_features) ``` # We get ConcatenatedFeatures by simply combining all the valuee in encodedList row wise. ## Below is a visual of how concatenatedFeatures is calculated from encodedList ``` df_encodedList df_concatenatedFeatures ``` # @@@@@@@@@@@@@@@ # Implementation # @@@@@@@@@@@@@@@ # Statistical Methods Implementation ## Tukey Method ``` def tukey(densityList): q1 = np.percentile(densityList, 25) q3 = np.percentile(densityList, 75) iqr = q3 - q1 lower_distance = q1 - 1.5 * iqr upper_distance = q3 + 1.5 * iqr outliers = [] for i, n in enumerate(densityList): if n < lower_distance or n > upper_distance: outliers.append(i) return outliers ``` ## Z-Score ``` def z_score(densityList): mean = np.mean(densityList) std = np.std(densityList) outliers = [] for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) return outliers ``` ## Modified Z-Score ``` def modified_z_score(densityList): median = np.median(densityList) df = pd.DataFrame() df['a'] = densityList mad = df['a'].mad() outliers = [] for i, n in enumerate(densityList): z = (n - median) / mad if abs(z) >= 1: outliers.append(i) return outliers ``` ## Regression ``` def regression(points): # pointers should be a list of pairs of numbers (tuples) n = len(points) sum_x = 0.0 sum_y = 0.0 sum_xy = 0.0 sum_x2 = 0.0 sum_y2 = 0.0 for i in range(n): x = points[i][0] y = points[i][1] sum_x += x sum_y += y sum_xy += x * y sum_x2 += x * x sum_y2 += y * y a = (sum_y * sum_x2 - sum_x * sum_xy) / (n * sum_x2 - sum_x * sum_x) b = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x * sum_x) return a, b def predict(x, a, b): y = a + b * x return y ``` ## Calculating MSE ``` def mean_squared_error(points, a, b): mse = 0 for i in range(len(points)): prediction = predict(points[i][0], a, b) error = prediction - points[i][1] mse += error * error mse /= len(points) return mse ``` ## Cook's Distance ``` def cooks_distance(points): # points should be a list of pairs of numbers (tuples) a, b = regression(points) outliers = [] s = mean_squared_error(points, a, b) for i in range(len(points)): 
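# Leave-one-out: refit the regression with point i removed and measure how far the fitted predictions shift.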
points_missing = copy.deepcopy(points) del points_missing[i] a_missing, b_missing = regression(points_missing) distance = 0 # print(predict(points[i][0], a, b) - predict(points[i][0], a_missing, b_missing)) for j in range(len(points)): distance += math.pow((predict(points[i][0], a, b) - predict(points[i][0], a_missing, b_missing)), 2) distance /= (3 * s) # print(distance) if distance > 0.05: # outliers.append(points[i]) outliers.append(i) return outliers ``` ## Mahalanobis Distance ``` def mahalanobis_distance(densityLists): vectors = [] for i in range(len(densityLists[0])): vector = [] for j in range(len(densityLists)): vector.append(densityLists[j][i]) vectors.append(vector) # calculate average vector average_vector = [0] * len(densityLists) for vector in vectors: for i in range(len(vector)): average_vector[i] += vector[i] for i in range(len(average_vector)): average_vector[i] /= len(vectors) # calculate mahalanobis distance for each point outliers = [] try: for i, vector in enumerate(vectors): combination = np.vstack((vector, average_vector)) covariance_matrix = np.cov(combination) mahalanobis_dist = distance.mahalanobis(vector, average_vector, covariance_matrix) if mahalanobis_dist > 200: outliers.append(i) except: print('ERROR: matrices not aligned, no mahalanobis distance outliers') pass return outliers ``` # ML Based Techniques ## Inter-Cluster Method ``` #This is the intercluster distance criteria. #In this criteria, the minimum distance between the centroids is used as the parameter. #Optimal value for the weight has to be set. def read_values_inter_cluster_criteria(main_list): debug_flag = 0 l = [] dimensions = len(main_list) for i in range(len(main_list[0])): temp = [] for j in range(dimensions): temp.append(main_list[j][i]) l.append(temp) if(debug_flag == 1): print("list of properties is") print(l) no_clusters = 2 clf = KMeans(n_clusters = no_clusters) clf.fit(l) centroids = clf.cluster_centers_ if(debug_flag == 1): print(" Centroids are") print(centroids) labels = clf.labels_ if(debug_flag == 1): for i in range(len(l)): print("coordinate:", l[i], "label:", labels[i], "centroid:", centroids[labels[i]]) weight = 0.1 if(debug_flag == 1): print("weight is") print(weight) cluster_distances = [] for i in range(len(centroids) ): j = i + 1 while(j < len (centroids)): cluster_distances.append(distance.euclidean(centroids[i], centroids[j])) j = j + 1 if(debug_flag == 1): print("distance between the various clusters is as follows:") print(cluster_distances) print("minimum inter-cluster distance is") min_intercluster_dist = min(cluster_distances) if(debug_flag == 1): print("minimum distance between the clsuters is") print(min_intercluster_dist) #weighing parameter w = weight outliers1 = [] for i in range(len(l)): if(distance.euclidean(l[i], centroids[labels[i]]) > min_intercluster_dist*w ): if(debug_flag == 1): print("outlier detected at index:", i) print("encoded outlier is", l[i]) outliers1.append(i) if(debug_flag == 1): print("outliers by inter cluster criteria are ") print(outliers1) return outliers1 ``` ## Intra-Cluster Method ``` #This is the intracluster distance criteria. # In this criteria, the minimum distance between the centroid and the own cluster elements is used as the parameter # Optimal value for the threshold has to be set. 
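# A point is flagged as an outlier when it lies much farther from its centroid than the nearest
# member of its own cluster (min intra-cluster distance < threshold * the point's own distance to the centroid).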
def read_values_intra_cluster_criteria(main_list): l = [] debug_flag = 0 dimensions = len(main_list) for i in range(len(main_list[0])): temp = [] for j in range(dimensions): temp.append(main_list[j][i]) l.append(temp) no_clusters = 2 clf = KMeans(n_clusters=no_clusters) clf.fit(l) centroids = clf.cluster_centers_ if(debug_flag == 1): print(" Centroids are") print(centroids) labels = clf.labels_ if(debug_flag == 1): for i in range(len(l)): print("coordinate:", l[i], "label:", labels[i], "centroid:", centroids[labels[i]]) threshold = 0.1 if(debug_flag == 1): print("threshold is") print(threshold) points_cluster_dist= [] for i in range(no_clusters): points_cluster_dist.append([]) for i in range(len(l)): points_cluster_dist[labels[i]].append( distance.euclidean(l[i], centroids[labels[i]]) ) outliers2=[] for i in range(len(l)): mini = min(points_cluster_dist[labels[i]]) center_dist = distance.euclidean(l[i], centroids[labels[i]]) if(mini < threshold *center_dist ): if(debug_flag == 1): print("outlier detected at index:", i) print("encoded outlier is", l[i]) outliers2.append(i) if(debug_flag == 1): print("outliers by intra-cluster criteria are") print(outliers2) return outliers2 ``` ## Gaussian Mixture Method ``` def Gaussian(encodedLists): #Gaussian Mixture is used for soft clustering. Insted of assigning points to specific classes it assigns probability. #The n_components parameter in the Gaussian is used to specify the number of Gaussians. concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) clf = mixture.GaussianMixture(n_components=2, covariance_type='full') clf.fit(concatenated_features) clf.means_ Z = -clf.score_samples(np.array(concatenated_features)) return Z ``` ## KNN Method ``` def KNN(encodedLists): concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(concatenated_features) distances, indices = nbrs.kneighbors(concatenated_features) print("indices in KNN are") print(indices) print("distances in KNN are") print(distances) ``` # Random Forest Method ``` def RandomForests(densityList,encodedLists): #First apply an existing outlier detection technique as RandomForests works on supervised data. 
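# Pseudo-labels: points whose aggregated density z-score satisfies |z| >= 1 are labelled 1 (outlier),
# all others 0; the random forest is then trained on the concatenated binarized features with these labels.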
mean = np.mean(densityList) std = np.std(densityList) outliers = [] labels = [] print("In RandomForests method") # print("density list is", densityList) for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) labels.append(1) else: labels.append(0) # print("labels are", labels) concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) indices = np.arange(len(labels)) X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(concatenated_features, labels, indices, test_size=0.33, random_state=42) clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0) clf.fit(X_train, y_train) print("RandomForests predictions are") pred = list(clf.predict(X_test)) print(pred) print("Actual classification is") print(y_test) outliers = [] for i in range(len(pred)): if pred[i] == 1: outliers.append(idx2[i]) return outliers ``` ## Isolation Forest Method ``` def isolationForests(densityList,encodedLists): #First apply an existing outlier detection technique as RandomForests works on supervised data. mean = np.mean(densityList) std = np.std(densityList) outliers = [] labels = [] print("In RandomForests method") # print("density list is", densityList) for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) labels.append(1) else: labels.append(0) print("labels are", labels) concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) indices = np.arange(len(labels)) X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(concatenated_features, labels, indices, test_size=0.33, random_state=42) clf = IsolationForest(max_samples=100) clf.fit(X_train) y_pred_train = list(clf.predict(X_train)) y_pred_test = list(clf.predict(X_test)) print("isolationForests predictions on train data are") print(y_pred_train) print("isolationForests predictions on test data are") print(y_pred_test) outliers = [] for i in range(len(y_pred_test)): if y_pred_test[i] == 1: outliers.append(idx2[i]) return outliers ``` ## Naive Bayes Method ``` def NaiveBayes(densityList,encodedLists): #First apply an existing outlier detection technique as Naive Bayes works on supervised data. #So, first we are using z-score threshold to train the Naive Bayes Classifier. 
t1=time.time() mean = np.mean(densityList) std = np.std(densityList) outliers = [] labels = [] print("In Naive Bayes method") #print("density list is", densityList) for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) labels.append(1) else: labels.append(0) #print("labels are", labels) concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) indices = np.arange(len(labels)) X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(concatenated_features, labels, indices, test_size=0.33, random_state=42) clf = GaussianNB() clf.fit(X_train, y_train) print("Naive Bayes predictions are") pred = list(clf.predict(X_test)) print(pred) print("Actual classification is") print(y_test) print("Time taken by NaiveBayes is") print(time.time()-t1) outliers = [] for i in range(len(pred)): if pred[i] == 1: outliers.append(idx2[i]) return outliers ``` ## Logistic Regression Method ``` def Logistic_Regression(densityList,encodedLists): t1 = time.time() mean = np.mean(densityList) std = np.std(densityList) outliers = [] labels = [] print("In Logistic Regression method") #print("density list is", densityList) for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) labels.append(1) else: labels.append(0) #print("labels are", labels) concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) #print("concateanted feature is") #print(concatenated_features) indices = np.arange(len(labels)) X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(concatenated_features, labels, indices, test_size=0.33, random_state=42) clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial') clf.fit(X_train, y_train) print("Logistic Regression predictions are") pred = list(clf.predict(X_test)) print(pred) print("Actual classification is") print(y_test) print("Time taken by Logistic Regression is") print(time.time()-t1) outliers = [] for i in range(len(pred)): if pred[i] == 1: outliers.append(idx2[i]) return outliers ``` # Calling Methods ## ML Based Methods ``` #Inter-Cluster outliers = read_values_inter_cluster_criteria(densityLists) label = 'Inter-cluster distance method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() #Intra-Cluster outliers = read_values_intra_cluster_criteria(densityLists) label = 'Intra-cluster distance method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() # Gaussian Mixture Model likelihood = Gaussian(encodedLists) print("Likelihood given by G.M.M is\n{}".format(likelihood)) print() plt.figure(figsize=(20,5)) sns.distplot(likelihood) # KNN KNN(encodedLists) print() #Naive Bayes outliers = NaiveBayes(aggregatedDensityList,encodedLists) print('\nOutliers are') print(outliers) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: 
%d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() # Logistic Regression outliers = Logistic_Regression(aggregatedDensityList,encodedLists) print('\nOutliers are') print(outliers) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() # Random Forest outliers = RandomForests(aggregatedDensityList,encodedLists) print('\nOutliers are') print(outliers) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() # Isolation Forest outliers = isolationForests(aggregatedDensityList,encodedLists) print('\nOutliers are') print(outliers) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() ``` ## Statistical Based Methods ``` # Tukey's method. outliers = tukey(aggregatedDensityList) label = 'Tukey\'s method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print() # Z-Score outliers = z_score(aggregatedDensityList) label = 'Z-Score method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print() # Modified Z-Score outliers = modified_z_score(aggregatedDensityList) label = 'Modified Z-Score method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print() # Cook's Method cooksDensityList = [] for i, value in enumerate(aggregatedDensityList): cooksDensityList.append((i, value)) outliers = cooks_distance(cooksDensityList) label = 'Cook\'s distance method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print() # Mahalanobis Method outliers = mahalanobis_distance(densityLists) label = 'Malanobis distance method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print() ```
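The classifier-based methods called above (Naive Bayes, Logistic Regression, Random Forest) all bootstrap their training labels from the same z-score rule on the density list: any point with |z| >= 1 is labelled an outlier before a classifier is fitted on the concatenated one-hot features. A minimal, self-contained sketch of that bootstrapping step on made-up data (the `toy_densities` and `toy_features` values below are purely illustrative):

```python
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

# Illustrative stand-ins for aggregatedDensityList and the concatenated encoded features.
toy_densities = np.array([10.0, 11.0, 9.5, 10.5, 30.0, 10.2, 9.8, 28.0])
toy_features = np.random.RandomState(0).rand(len(toy_densities), 4)

# Bootstrap labels: |z| >= 1 marks a point as an outlier (1), otherwise inlier (0).
z = (toy_densities - toy_densities.mean()) / toy_densities.std()
labels = (np.abs(z) >= 1).astype(int)

# Train on the bootstrapped labels, carrying the original indices along so that
# predicted outliers can be reported by their position in the data set.
indices = np.arange(len(labels))
X_tr, X_te, y_tr, y_te, idx_tr, idx_te = train_test_split(
    toy_features, labels, indices, test_size=0.5, random_state=42)
clf = GaussianNB().fit(X_tr, y_tr)
pred = clf.predict(X_te)
print("Predicted outlier indices:", [idx_te[i] for i, p in enumerate(pred) if p == 1])
```

Note that, since the labels themselves come from the z-score rule, these classifiers can at best approximate that rule on held-out points.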
github_jupyter
import pandas as pd from sklearn.preprocessing import MultiLabelBinarizer import sys from colorama import Fore, Back, Style from collections import Counter import json import re from pprint import pprint import re import numpy as np import copy import math from sklearn.cluster import KMeans from scipy.spatial import distance from sklearn import mixture from sklearn.neighbors import NearestNeighbors from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.ensemble import IsolationForest from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import LogisticRegression import time import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline data = 'datasets/flat-sample/serverProperties.json' props = [] datas = [] # Handling json file input to load as the json object with open(data) as f: json_object = json.load(f) # Extract the property names from the json object props = [] for i, prop in enumerate(json_object[0]): if i > 0: props.append(prop) datas.append([]) # Extract data for i in range(len(json_object)): for j, prop in enumerate(props): datas[j].append(json_object[i][prop]) json_object # The Features props datas mlb = MultiLabelBinarizer() encodedLists = [] frequencyLists = [] uniqueClasses = [] proportion = 0 for i, data in enumerate(datas): # fit_transform calculates the size of each category automatically based on the input data # and then encodes it into the multilabel bit encoding encodedList = mlb.fit_transform(datas[i]) encodedLists.append(encodedList) uniqueClasses.append(mlb.classes_) frequencyList = [0] * len(encodedList[0]) proportion += len(encodedList[0]) * len(encodedList) for e in encodedList: for i in range(len(e)): frequencyList[i] += e[i] frequencyLists.append(frequencyList) # After applying the encoder on original data encodedLists # For every feature in our data frequencyLists uniqueClasses # All the unique classes in our data set mlb.classes_ densityLists = [] normalizedDensityLists = [] aggregatedDensityList = [0] * len(encodedLists[0]) for i in range(len(encodedLists)): densityList = [0] * len(encodedLists[i]) normalizedDensityList = [0] * len(encodedLists[i]) for j in range(len(densityList)): for k in range(len(encodedLists[i][j])): densityList[j] += encodedLists[i][j][k] * frequencyLists[i][k] normalizedDensityList[j] += encodedLists[i][j][k] * frequencyLists[i][k] / float(proportion) aggregatedDensityList[j] += encodedLists[i][j][k] * frequencyLists[i][k] densityLists.append(densityList) normalizedDensityLists.append(normalizedDensityList) densityLists normalizedDensityLists aggregatedDensityList temp_data = [[datas[j][i] for j in range(len(datas))] for i in range(len(datas[0]))] df_data = pd.DataFrame(temp_data, columns=props) df_data temp_encodedList = [[encodedLists[j][i] for j in range(len(encodedLists))] for i in range(len(encodedLists[0]))] df_encodedList = pd.DataFrame(temp_encodedList, columns=props) df_encodedList temp_densityList = [[densityLists[j][i] for j in range(len(densityLists))] for i in range(len(densityLists[0]))] df_densityList = pd.DataFrame(temp_densityList, columns=props) df_densityList aggregatedDensityList concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) df_concatenatedFeatures = pd.DataFrame(concatenated_features) df_encodedList df_concatenatedFeatures def tukey(densityList): q1 = np.percentile(densityList, 25) q3 = 
np.percentile(densityList, 75) iqr = q3 - q1 lower_distance = q1 - 1.5 * iqr upper_distance = q3 + 1.5 * iqr outliers = [] for i, n in enumerate(densityList): if n < lower_distance or n > upper_distance: outliers.append(i) return outliers def z_score(densityList): mean = np.mean(densityList) std = np.std(densityList) outliers = [] for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) return outliers def modified_z_score(densityList): median = np.median(densityList) df = pd.DataFrame() df['a'] = densityList mad = df['a'].mad() outliers = [] for i, n in enumerate(densityList): z = (n - median) / mad if abs(z) >= 1: outliers.append(i) return outliers def regression(points): # pointers should be a list of pairs of numbers (tuples) n = len(points) sum_x = 0.0 sum_y = 0.0 sum_xy = 0.0 sum_x2 = 0.0 sum_y2 = 0.0 for i in range(n): x = points[i][0] y = points[i][1] sum_x += x sum_y += y sum_xy += x * y sum_x2 += x * x sum_y2 += y * y a = (sum_y * sum_x2 - sum_x * sum_xy) / (n * sum_x2 - sum_x * sum_x) b = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x * sum_x) return a, b def predict(x, a, b): y = a + b * x return y def mean_squared_error(points, a, b): mse = 0 for i in range(len(points)): prediction = predict(points[i][0], a, b) error = prediction - points[i][1] mse += error * error mse /= len(points) return mse def cooks_distance(points): # points should be a list of pairs of numbers (tuples) a, b = regression(points) outliers = [] s = mean_squared_error(points, a, b) for i in range(len(points)): points_missing = copy.deepcopy(points) del points_missing[i] a_missing, b_missing = regression(points_missing) distance = 0 # print(predict(points[i][0], a, b) - predict(points[i][0], a_missing, b_missing)) for j in range(len(points)): distance += math.pow((predict(points[i][0], a, b) - predict(points[i][0], a_missing, b_missing)), 2) distance /= (3 * s) # print(distance) if distance > 0.05: # outliers.append(points[i]) outliers.append(i) return outliers def mahalanobis_distance(densityLists): vectors = [] for i in range(len(densityLists[0])): vector = [] for j in range(len(densityLists)): vector.append(densityLists[j][i]) vectors.append(vector) # calculate average vector average_vector = [0] * len(densityLists) for vector in vectors: for i in range(len(vector)): average_vector[i] += vector[i] for i in range(len(average_vector)): average_vector[i] /= len(vectors) # calculate mahalanobis distance for each point outliers = [] try: for i, vector in enumerate(vectors): combination = np.vstack((vector, average_vector)) covariance_matrix = np.cov(combination) mahalanobis_dist = distance.mahalanobis(vector, average_vector, covariance_matrix) if mahalanobis_dist > 200: outliers.append(i) except: print('ERROR: matrices not aligned, no mahalanobis distance outliers') pass return outliers #This is the intercluster distance criteria. #In this criteria, the minimum distance between the centroids is used as the parameter. #Optimal value for the weight has to be set. 
def read_values_inter_cluster_criteria(main_list): debug_flag = 0 l = [] dimensions = len(main_list) for i in range(len(main_list[0])): temp = [] for j in range(dimensions): temp.append(main_list[j][i]) l.append(temp) if(debug_flag == 1): print("list of properties is") print(l) no_clusters = 2 clf = KMeans(n_clusters = no_clusters) clf.fit(l) centroids = clf.cluster_centers_ if(debug_flag == 1): print(" Centroids are") print(centroids) labels = clf.labels_ if(debug_flag == 1): for i in range(len(l)): print("coordinate:", l[i], "label:", labels[i], "centroid:", centroids[labels[i]]) weight = 0.1 if(debug_flag == 1): print("weight is") print(weight) cluster_distances = [] for i in range(len(centroids) ): j = i + 1 while(j < len (centroids)): cluster_distances.append(distance.euclidean(centroids[i], centroids[j])) j = j + 1 if(debug_flag == 1): print("distance between the various clusters is as follows:") print(cluster_distances) print("minimum inter-cluster distance is") min_intercluster_dist = min(cluster_distances) if(debug_flag == 1): print("minimum distance between the clsuters is") print(min_intercluster_dist) #weighing parameter w = weight outliers1 = [] for i in range(len(l)): if(distance.euclidean(l[i], centroids[labels[i]]) > min_intercluster_dist*w ): if(debug_flag == 1): print("outlier detected at index:", i) print("encoded outlier is", l[i]) outliers1.append(i) if(debug_flag == 1): print("outliers by inter cluster criteria are ") print(outliers1) return outliers1 #This is the intracluster distance criteria. # In this criteria, the minimum distance between the centroid and the own cluster elements is used as the parameter # Optimal value for the threshold has to be set. def read_values_intra_cluster_criteria(main_list): l = [] debug_flag = 0 dimensions = len(main_list) for i in range(len(main_list[0])): temp = [] for j in range(dimensions): temp.append(main_list[j][i]) l.append(temp) no_clusters = 2 clf = KMeans(n_clusters=no_clusters) clf.fit(l) centroids = clf.cluster_centers_ if(debug_flag == 1): print(" Centroids are") print(centroids) labels = clf.labels_ if(debug_flag == 1): for i in range(len(l)): print("coordinate:", l[i], "label:", labels[i], "centroid:", centroids[labels[i]]) threshold = 0.1 if(debug_flag == 1): print("threshold is") print(threshold) points_cluster_dist= [] for i in range(no_clusters): points_cluster_dist.append([]) for i in range(len(l)): points_cluster_dist[labels[i]].append( distance.euclidean(l[i], centroids[labels[i]]) ) outliers2=[] for i in range(len(l)): mini = min(points_cluster_dist[labels[i]]) center_dist = distance.euclidean(l[i], centroids[labels[i]]) if(mini < threshold *center_dist ): if(debug_flag == 1): print("outlier detected at index:", i) print("encoded outlier is", l[i]) outliers2.append(i) if(debug_flag == 1): print("outliers by intra-cluster criteria are") print(outliers2) return outliers2 def Gaussian(encodedLists): #Gaussian Mixture is used for soft clustering. Insted of assigning points to specific classes it assigns probability. #The n_components parameter in the Gaussian is used to specify the number of Gaussians. 
concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) clf = mixture.GaussianMixture(n_components=2, covariance_type='full') clf.fit(concatenated_features) clf.means_ Z = -clf.score_samples(np.array(concatenated_features)) return Z def KNN(encodedLists): concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(concatenated_features) distances, indices = nbrs.kneighbors(concatenated_features) print("indices in KNN are") print(indices) print("distances in KNN are") print(distances) def RandomForests(densityList,encodedLists): #First apply an existing outlier detection technique as RandomForests works on supervised data. mean = np.mean(densityList) std = np.std(densityList) outliers = [] labels = [] print("In RandomForests method") # print("density list is", densityList) for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) labels.append(1) else: labels.append(0) # print("labels are", labels) concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) indices = np.arange(len(labels)) X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(concatenated_features, labels, indices, test_size=0.33, random_state=42) clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0) clf.fit(X_train, y_train) print("RandomForests predictions are") pred = list(clf.predict(X_test)) print(pred) print("Actual classification is") print(y_test) outliers = [] for i in range(len(pred)): if pred[i] == 1: outliers.append(idx2[i]) return outliers def isolationForests(densityList,encodedLists): #First apply an existing outlier detection technique as RandomForests works on supervised data. mean = np.mean(densityList) std = np.std(densityList) outliers = [] labels = [] print("In RandomForests method") # print("density list is", densityList) for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) labels.append(1) else: labels.append(0) print("labels are", labels) concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) indices = np.arange(len(labels)) X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(concatenated_features, labels, indices, test_size=0.33, random_state=42) clf = IsolationForest(max_samples=100) clf.fit(X_train) y_pred_train = list(clf.predict(X_train)) y_pred_test = list(clf.predict(X_test)) print("isolationForests predictions on train data are") print(y_pred_train) print("isolationForests predictions on test data are") print(y_pred_test) outliers = [] for i in range(len(y_pred_test)): if y_pred_test[i] == 1: outliers.append(idx2[i]) return outliers def NaiveBayes(densityList,encodedLists): #First apply an existing outlier detection technique as Naive Bayes works on supervised data. 
#So, first we are using z-score threshold to train the Naive Bayes Classifier. t1=time.time() mean = np.mean(densityList) std = np.std(densityList) outliers = [] labels = [] print("In Naive Bayes method") #print("density list is", densityList) for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) labels.append(1) else: labels.append(0) #print("labels are", labels) concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) # print("concateanted feature is") # print(concatenated_features) indices = np.arange(len(labels)) X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(concatenated_features, labels, indices, test_size=0.33, random_state=42) clf = GaussianNB() clf.fit(X_train, y_train) print("Naive Bayes predictions are") pred = list(clf.predict(X_test)) print(pred) print("Actual classification is") print(y_test) print("Time taken by NaiveBayes is") print(time.time()-t1) outliers = [] for i in range(len(pred)): if pred[i] == 1: outliers.append(idx2[i]) return outliers def Logistic_Regression(densityList,encodedLists): t1 = time.time() mean = np.mean(densityList) std = np.std(densityList) outliers = [] labels = [] print("In Logistic Regression method") #print("density list is", densityList) for i, n in enumerate(densityList): z = (n - mean) / std if abs(z) >= 1: outliers.append(i) labels.append(1) else: labels.append(0) #print("labels are", labels) concatenated_features = [] for i in range(len(encodedLists[0])): temp = [] for j in range(len(encodedLists)): temp.extend(encodedLists[j][i]) concatenated_features.append(temp) #print("concateanted feature is") #print(concatenated_features) indices = np.arange(len(labels)) X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(concatenated_features, labels, indices, test_size=0.33, random_state=42) clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial') clf.fit(X_train, y_train) print("Logistic Regression predictions are") pred = list(clf.predict(X_test)) print(pred) print("Actual classification is") print(y_test) print("Time taken by Logistic Regression is") print(time.time()-t1) outliers = [] for i in range(len(pred)): if pred[i] == 1: outliers.append(idx2[i]) return outliers #Inter-Cluster outliers = read_values_inter_cluster_criteria(densityLists) label = 'Inter-cluster distance method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() #Intra-Cluster outliers = read_values_intra_cluster_criteria(densityLists) label = 'Intra-cluster distance method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() # Gaussian Mixture Model likelihood = Gaussian(encodedLists) print("Likelihood given by G.M.M is\n{}".format(likelihood)) print() plt.figure(figsize=(20,5)) sns.distplot(likelihood) # KNN KNN(encodedLists) print() #Naive Bayes outliers = NaiveBayes(aggregatedDensityList,encodedLists) print('\nOutliers are') print(outliers) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' 
% outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() # Logistic Regression outliers = Logistic_Regression(aggregatedDensityList,encodedLists) print('\nOutliers are') print(outliers) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() # Random Forest outliers = RandomForests(aggregatedDensityList,encodedLists) print('\nOutliers are') print(outliers) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() # Isolation Forest outliers = isolationForests(aggregatedDensityList,encodedLists) print('\nOutliers are') print(outliers) print('Number of Outliers: {}'.format(len(outliers))) print() for outlier in outliers: print('Outlier index: %d' % outlier) for i, data in enumerate(datas): print('\t%s: %s' % (props[i], data[outlier])) print() print() # Tukey's method. outliers = tukey(aggregatedDensityList) label = 'Tukey\'s method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print() # Z-Score outliers = z_score(aggregatedDensityList) label = 'Z-Score method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print() # Modified Z-Score outliers = modified_z_score(aggregatedDensityList) label = 'Modified Z-Score method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print() # Cook's Method cooksDensityList = [] for i, value in enumerate(aggregatedDensityList): cooksDensityList.append((i, value)) outliers = cooks_distance(cooksDensityList) label = 'Cook\'s distance method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print() # Mahalanobis Method outliers = mahalanobis_distance(densityLists) label = 'Malanobis distance method outliers: \n' + str(outliers) print(label) print('Number of Outliers: {}'.format(len(outliers))) for outlier in outliers: print("Outlier index: %d" % outlier) for i, data in enumerate(datas): print("\t%s: %s" % (props[i], data[outlier])) print() print()
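One detail in the `cooks_distance` helper above is worth flagging: the inner loop evaluates both predictions at `points[i][0]`, so it sums the same squared difference `len(points)` times instead of comparing fitted values at every point `j`. The textbook definition of Cook's distance for observation `i` is `D_i = sum_j (yhat_j - yhat_j(i))^2 / (p * s^2)`, with `p` the number of regression parameters (2 for a simple line) and `s^2` the mean squared error. A hedged sketch of that formulation, reusing the `regression`, `predict` and `mean_squared_error` helpers defined above (the 0.05 cut-off mirrors the original; a common rule of thumb is `4 / n` instead):

```python
import copy

def cooks_distance_v2(points, threshold=0.05):
    """Cook's distance computed from leave-one-out fits, using the helpers above."""
    a, b = regression(points)
    s = mean_squared_error(points, a, b)  # already the MSE, i.e. s^2
    p = 2                                 # intercept and slope
    outliers = []
    for i in range(len(points)):
        points_missing = copy.deepcopy(points)
        del points_missing[i]
        a_m, b_m = regression(points_missing)
        # Compare the full-data fit and the leave-one-out fit at every point j.
        d = sum((predict(points[j][0], a, b) - predict(points[j][0], a_m, b_m)) ** 2
                for j in range(len(points))) / (p * s)
        if d > threshold:
            outliers.append(i)
    return outliers
```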
0.290176
0.732065
# Explore the dataset In this notebook, we will perform an EDA (Exploratory Data Analysis) on the processed Waymo dataset (data in the `processed` folder). In the first part, you will create a function to display ``` from utils import get_dataset import os import glob import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import matplotlib.image as mpimg import numpy as np from PIL import Image import cv2 import tensorflow as tf paths = glob.glob('data/waymo/training_and_validation/*') i = 0 #filename = os.path.basename(paths) print(paths[i]) dataset = get_dataset(paths[i]) print('-----------------------------------------------------------') print(dataset) ``` print(dataset) ``` < DatasetV1Adapter shapes: { image: (None, None, 3), source_id: (), key: (), filename: (), groundtruth_image_confidences: (None,), groundtruth_verified_neg_classes: (None,), groundtruth_not_exhaustive_classes: (None,), groundtruth_boxes: (None, 4), groundtruth_area: (None,), groundtruth_is_crowd: (None,), groundtruth_difficult: (None,), groundtruth_group_of: (None,), groundtruth_weights: (None,), groundtruth_classes: (None,), groundtruth_image_classes: (None,), original_image_spatial_shape: (2,) }, types: { image: tf.uint8, source_id: tf.string, key: tf.string, filename: tf.string, groundtruth_image_confidences: tf.float32, groundtruth_verified_neg_classes: tf.int64, groundtruth_not_exhaustive_classes: tf.int64, groundtruth_boxes: tf.float32, groundtruth_area: tf.float32, groundtruth_is_crowd: tf.bool, groundtruth_difficult: tf.int64, groundtruth_group_of: tf.bool, groundtruth_weights: tf.float32, groundtruth_classes: tf.int64, groundtruth_image_classes: tf.int64, original_image_spatial_shape: tf.int32 } > ``` ## Write a function to display an image and the bounding boxes Implement the `display_instances` function below. This function takes a batch as an input and display an image with its corresponding bounding boxes. The only requirement is that the classes should be color coded (eg, vehicles in red, pedestrians in blue, cyclist in green). ``` def display_instances(batch): """ This function takes a batch from the dataset and display the image with the associated bounding boxes. この関数は、データセットからバッチを取得し、関連する境界ボックスとともに画像を表示します。 """ # ADD CODE HERE ##### 色指定 # color for different classes colormap = {1:'blue', 2:'green', 4:'red'} ##### サブプロット領域設定。2行×5列、画像サイズ=(20, 10) num_col = 5 num_row = (len(batch) + num_col -1) // num_col f, ax = plt.subplots(num_row, num_col, figsize=(20, 10)) ##### batchのインデックスとデータ分ループ for idx, batch_data in enumerate(batch): ##### 画像データ取り出し img = batch_data["image"] ##### サブプロット領域の位置(x, y)算出 x = idx // num_col y = idx % num_col ##### サブプロット領域に画像をセット ax[x, y].imshow(img) ##### バウンディボックス、クラス取得 gt_boxes = batch_data["groundtruth_boxes"] gt_classes = batch_data["groundtruth_classes"] ##### データごとループ for bb, obj_class in zip(gt_boxes, gt_classes): ##### バウンディボックスのx,y位置取得、スケーリング y1, x1, y2, x2 = bb x1 *= img.shape[0] y1 *= img.shape[1] y2 *= img.shape[0] x2 *= img.shape[1] ##### バウンディボックスの描画データ作成 rec = Rectangle((x1, y1), x2-x1, y2-y1, facecolor='none', edgecolor=colormap[obj_class]) ##### 画像にバウンディボックス描画を追加 ax[x, y].add_patch(rec) plt.tight_layout() plt.show() ``` ## Display 10 images Using the dataset created in the second cell and the function you just coded, display 10 random images with the associated bounding boxes. You can use the methods `take` and `shuffle` on the dataset. 
``` ## STUDENT SOLUTION HERE batch = dataset.shuffle(100).take(10) display_instances(list(batch.as_numpy_iterator())) ``` This display is saved as the following image. ![Display_10_images](00_report_data\Exploratory_Data_Analysis\Display_10_images.PNG) ## Additional EDA In this last part, you are free to perform any additional analysis of the dataset. What else would like to know about the data? For example, think about data distribution. So far, you have only looked at a single file... ``` ##### データセットから100データ分取得 batch = dataset.shuffle(100).take(100) ##### 画像群取得 def get_images(batch): images = [] for idx, batch_data in enumerate(batch): img = batch_data["image"] images.append(img) return images ##### jpg画像保存 def save_jpg(images, save_dir='jpg_images'): for idx, img in enumerate(images): file_dir = save_dir + '/image' + str(idx) + '.jpg' print(type(img)) img = tf.image.encode_jpeg(img, format='rgb') # plt.imshow(img) # plt.show() # mpimg.imsave(file_dir, img) cv2.imwrite(file_dir, img) # mpimg.imsave(f'{save_dir}/{batch["filename"].decode("utf-8")}.jpg', output) ##### main range = (0, 255) save_dir='jpg_images' images = get_images(batch) #save_jpg(images, save_dir) ##### デバッグ #plt.imshow(images[0]) #plt.show() print(type(images[0])) print(len(images)) def pil2cv(image): ''' PIL型 -> OpenCV型 ''' new_image = np.array(image, dtype=np.uint8) if new_image.ndim == 2: # モノクロ pass elif new_image.shape[2] == 3: # カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR) elif new_image.shape[2] == 4: # 透過 new_image = cv2.cvtColor(new_image, cv2.COLOR_RGBA2BGRA) return new_image ##### jpg画像取得 def open_jpg_images(image_dir): images = glob.glob(image_dir) jpg_images = [mpimg.imread(x) for x in images] return jpg_images ##### ヒストグラム表示 def show_histogram(target_type, images, range=(0, 255)): # images = [mpimg.imread(x) for x in images] # images = [cv2.cvtColor(x, cv2.COLOR_RGB2BGR) for x in images] plot_data = [target_type(img) for img in images] plt.hist(plot_data, range=range, bins=20) plt.show() def red_mean(img): return img[...,0].numpy().mean() def green_mean(img): return img[...,1].numpy().mean() def blue_mean(img): return img[...,2].numpy().mean() def bright_value_mean(img): img = pil2cv(img) img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) return img[..., 2].mean() def hue_mean(img): img = pil2cv(img) img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) return img[..., 0].mean() image_dir = 'jpg_images/*.jpg' jpg_images = open_jpg_images(image_dir) show_histogram(red_mean, images) ``` ![red_histogram](00_report_data\Exploratory_Data_Analysis\red_histogram.png) ``` show_histogram(green_mean, images) ``` ![green_histogram](00_report_data\Exploratory_Data_Analysis\green_histogram.png) ``` show_histogram(blue_mean, images) ``` ![blue_histogram](00_report_data\Exploratory_Data_Analysis\blue_histogram.png) ``` show_histogram(bright_value_mean, images) ``` ![bright_value_mean_histogram](00_report_data\Exploratory_Data_Analysis\bright_value_mean_histogram.png) ``` show_histogram(hue_mean, images) ``` ![hue_histogram](00_report_data\Exploratory_Data_Analysis\hue_histogram.png) ``` ##### クラスごとのオブジェクト数調査 ##### クラスごとのオブジェクト数取得 def cnt_object_per_class(dataset): ##### クラスごとのオブジェクト数カウンタ obj_cnt_per_class = {1:0, 2:0, 4:0} for data in dataset.take(20000): for gt_c in data['groundtruth_classes'].numpy(): obj_cnt_per_class[gt_c] += 1 return obj_cnt_per_class # distributing data in bar graph def display_object_per_class(dataset): ##### クラスごとのオブジェクト数取得 obj_cnt_per_class = cnt_object_per_class(dataset) ##### クラス名とオブジェクト数の紐づけ 
obj_per_classes = {'vehicles':obj_cnt_per_class[1], 'pedestrians':obj_cnt_per_class[2],'cyclists':obj_cnt_per_class[4]} classes_name = list(obj_per_classes.keys()) ##### オブジェクト数取得 num_of_object = [obj_per_classes[c] for c in classes_name] ##### グラフ生成 fig = plt.figure(figsize=(10,5)) ##### グラフ設定、表示 plt.bar(classes_name,num_of_object,color=['blue','green','red'],width=0.4) plt.xlabel("classes_name") plt.ylabel("num_of_object") plt.title("distribution of num of object per classes") plt.show() display_object_per_class(dataset) ``` ![object_per_class](00_report_data\Exploratory_Data_Analysis\object_per_class.png)
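One small point about `display_instances` above: the normalized `[ymin, xmin, ymax, xmax]` coordinates are not all scaled by the matching image dimension (`x1` is multiplied by `img.shape[0]`, the height, while `x2` is multiplied by `img.shape[1]`, the width, and similarly for the y values). On square frames this is invisible, but for rectangular frames the conventional scaling pairs x with the width and y with the height. A small sketch of that convention (helper name is my own):

```python
from matplotlib.patches import Rectangle

def box_to_patch(bb, img_shape, color):
    """Turn a normalized [ymin, xmin, ymax, xmax] box into a matplotlib Rectangle.

    img_shape is (height, width, channels): x coordinates scale with the width,
    y coordinates with the height.
    """
    h, w = img_shape[0], img_shape[1]
    ymin, xmin, ymax, xmax = bb
    x1, y1, x2, y2 = xmin * w, ymin * h, xmax * w, ymax * h
    return Rectangle((x1, y1), x2 - x1, y2 - y1, facecolor='none', edgecolor=color)
```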
github_jupyter
from utils import get_dataset import os import glob import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import matplotlib.image as mpimg import numpy as np from PIL import Image import cv2 import tensorflow as tf paths = glob.glob('data/waymo/training_and_validation/*') i = 0 #filename = os.path.basename(paths) print(paths[i]) dataset = get_dataset(paths[i]) print('-----------------------------------------------------------') print(dataset) < DatasetV1Adapter shapes: { image: (None, None, 3), source_id: (), key: (), filename: (), groundtruth_image_confidences: (None,), groundtruth_verified_neg_classes: (None,), groundtruth_not_exhaustive_classes: (None,), groundtruth_boxes: (None, 4), groundtruth_area: (None,), groundtruth_is_crowd: (None,), groundtruth_difficult: (None,), groundtruth_group_of: (None,), groundtruth_weights: (None,), groundtruth_classes: (None,), groundtruth_image_classes: (None,), original_image_spatial_shape: (2,) }, types: { image: tf.uint8, source_id: tf.string, key: tf.string, filename: tf.string, groundtruth_image_confidences: tf.float32, groundtruth_verified_neg_classes: tf.int64, groundtruth_not_exhaustive_classes: tf.int64, groundtruth_boxes: tf.float32, groundtruth_area: tf.float32, groundtruth_is_crowd: tf.bool, groundtruth_difficult: tf.int64, groundtruth_group_of: tf.bool, groundtruth_weights: tf.float32, groundtruth_classes: tf.int64, groundtruth_image_classes: tf.int64, original_image_spatial_shape: tf.int32 } > def display_instances(batch): """ This function takes a batch from the dataset and display the image with the associated bounding boxes. この関数は、データセットからバッチを取得し、関連する境界ボックスとともに画像を表示します。 """ # ADD CODE HERE ##### 色指定 # color for different classes colormap = {1:'blue', 2:'green', 4:'red'} ##### サブプロット領域設定。2行×5列、画像サイズ=(20, 10) num_col = 5 num_row = (len(batch) + num_col -1) // num_col f, ax = plt.subplots(num_row, num_col, figsize=(20, 10)) ##### batchのインデックスとデータ分ループ for idx, batch_data in enumerate(batch): ##### 画像データ取り出し img = batch_data["image"] ##### サブプロット領域の位置(x, y)算出 x = idx // num_col y = idx % num_col ##### サブプロット領域に画像をセット ax[x, y].imshow(img) ##### バウンディボックス、クラス取得 gt_boxes = batch_data["groundtruth_boxes"] gt_classes = batch_data["groundtruth_classes"] ##### データごとループ for bb, obj_class in zip(gt_boxes, gt_classes): ##### バウンディボックスのx,y位置取得、スケーリング y1, x1, y2, x2 = bb x1 *= img.shape[0] y1 *= img.shape[1] y2 *= img.shape[0] x2 *= img.shape[1] ##### バウンディボックスの描画データ作成 rec = Rectangle((x1, y1), x2-x1, y2-y1, facecolor='none', edgecolor=colormap[obj_class]) ##### 画像にバウンディボックス描画を追加 ax[x, y].add_patch(rec) plt.tight_layout() plt.show() ## STUDENT SOLUTION HERE batch = dataset.shuffle(100).take(10) display_instances(list(batch.as_numpy_iterator())) ##### データセットから100データ分取得 batch = dataset.shuffle(100).take(100) ##### 画像群取得 def get_images(batch): images = [] for idx, batch_data in enumerate(batch): img = batch_data["image"] images.append(img) return images ##### jpg画像保存 def save_jpg(images, save_dir='jpg_images'): for idx, img in enumerate(images): file_dir = save_dir + '/image' + str(idx) + '.jpg' print(type(img)) img = tf.image.encode_jpeg(img, format='rgb') # plt.imshow(img) # plt.show() # mpimg.imsave(file_dir, img) cv2.imwrite(file_dir, img) # mpimg.imsave(f'{save_dir}/{batch["filename"].decode("utf-8")}.jpg', output) ##### main range = (0, 255) save_dir='jpg_images' images = get_images(batch) #save_jpg(images, save_dir) ##### デバッグ #plt.imshow(images[0]) #plt.show() print(type(images[0])) 
print(len(images)) def pil2cv(image): ''' PIL型 -> OpenCV型 ''' new_image = np.array(image, dtype=np.uint8) if new_image.ndim == 2: # モノクロ pass elif new_image.shape[2] == 3: # カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR) elif new_image.shape[2] == 4: # 透過 new_image = cv2.cvtColor(new_image, cv2.COLOR_RGBA2BGRA) return new_image ##### jpg画像取得 def open_jpg_images(image_dir): images = glob.glob(image_dir) jpg_images = [mpimg.imread(x) for x in images] return jpg_images ##### ヒストグラム表示 def show_histogram(target_type, images, range=(0, 255)): # images = [mpimg.imread(x) for x in images] # images = [cv2.cvtColor(x, cv2.COLOR_RGB2BGR) for x in images] plot_data = [target_type(img) for img in images] plt.hist(plot_data, range=range, bins=20) plt.show() def red_mean(img): return img[...,0].numpy().mean() def green_mean(img): return img[...,1].numpy().mean() def blue_mean(img): return img[...,2].numpy().mean() def bright_value_mean(img): img = pil2cv(img) img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) return img[..., 2].mean() def hue_mean(img): img = pil2cv(img) img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) return img[..., 0].mean() image_dir = 'jpg_images/*.jpg' jpg_images = open_jpg_images(image_dir) show_histogram(red_mean, images) show_histogram(green_mean, images) show_histogram(blue_mean, images) show_histogram(bright_value_mean, images) show_histogram(hue_mean, images) ##### クラスごとのオブジェクト数調査 ##### クラスごとのオブジェクト数取得 def cnt_object_per_class(dataset): ##### クラスごとのオブジェクト数カウンタ obj_cnt_per_class = {1:0, 2:0, 4:0} for data in dataset.take(20000): for gt_c in data['groundtruth_classes'].numpy(): obj_cnt_per_class[gt_c] += 1 return obj_cnt_per_class # distributing data in bar graph def display_object_per_class(dataset): ##### クラスごとのオブジェクト数取得 obj_cnt_per_class = cnt_object_per_class(dataset) ##### クラス名とオブジェクト数の紐づけ obj_per_classes = {'vehicles':obj_cnt_per_class[1], 'pedestrians':obj_cnt_per_class[2],'cyclists':obj_cnt_per_class[4]} classes_name = list(obj_per_classes.keys()) ##### オブジェクト数取得 num_of_object = [obj_per_classes[c] for c in classes_name] ##### グラフ生成 fig = plt.figure(figsize=(10,5)) ##### グラフ設定、表示 plt.bar(classes_name,num_of_object,color=['blue','green','red'],width=0.4) plt.xlabel("classes_name") plt.ylabel("num_of_object") plt.title("distribution of num of object per classes") plt.show() display_object_per_class(dataset)
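The closing remark of the EDA ("so far, you have only looked at a single file") suggests aggregating statistics over several tfrecord files. A sketch of how the per-class counter above could be extended across files, assuming the `paths`, `get_dataset` and `cnt_object_per_class` definitions from the cells above:

```python
from collections import Counter

total_counts = Counter()
for path in paths[:5]:                 # first few files; widen the slice for a fuller picture
    ds = get_dataset(path)
    for class_id, n in cnt_object_per_class(ds).items():
        total_counts[class_id] += n

# 1: vehicles, 2: pedestrians, 4: cyclists (same mapping as display_object_per_class)
print(dict(total_counts))
```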
0.435421
0.905573
``` import numpy as np import math np.set_printoptions(linewidth=400) item_size = 5 feature_dimension = 3 max_length = 5 epsilon = 1E-10 scores = np.exp(0.01 * np.random.randn(item_size) + 0.2) print('scores:', scores) feature_vectors = np.random.randn(item_size, feature_dimension) print('feature_vectors:', feature_vectors, sep='\n') print('feature_vectors: ',feature_vectors, sep='\n') # 平方和开根号 print('按行计算范数:',np.linalg.norm(feature_vectors, axis=1, keepdims=True), sep='\n') print('按列计算范数:',np.linalg.norm(feature_vectors, axis=0, keepdims=True), sep='\n') # 向量的单位化,操作后向量的l2范数为1;l2归一化 feature_vectors = feature_vectors / np.linalg.norm(feature_vectors, axis=1, keepdims=True) # 行向量的范数,欧几里得范数 print('l2_norm_feature_vectors:', feature_vectors, sep='\n') print('归一化后的l2范数:',np.linalg.norm(feature_vectors, axis=1, keepdims=True), sep='\n') # 如需使用点积计算向量相似度,则必须对向量作归一化处理。处理后点积与余弦相似度等价。https://milvus.io/cn/docs/metric.md similarities = np.dot(feature_vectors, feature_vectors.T) print('similarities:', similarities, sep='\n') kernel_matrix = scores.reshape((item_size, 1)) * similarities * scores.reshape((1, item_size)) print('reshaeped score:', scores.reshape((item_size, 1)) * scores.reshape((1, item_size)), sep='\n') print('kernel_matrix:', kernel_matrix, sep='\n') # 全0矩阵 cis = np.zeros((max_length, item_size)) cis # 分数的平方 di2s = np.copy(np.diag(kernel_matrix)) di2s selected_items = list() selected_item = np.argmax(di2s) print('selected_items BEFORE:', selected_items) selected_items.append(selected_item) print('selected_items AFTER:', selected_items) while len(selected_items) < max_length: k = len(selected_items) - 1 print('k:', k) ci_optimal = cis[:k, selected_item] print('ci_optimal:', ci_optimal, sep='\n') di_optimal = math.sqrt(di2s[selected_item]) print('di_optimal:', di_optimal, sep='\n') elements = kernel_matrix[selected_item, :] print('elements:', elements, sep='\n') eis = (elements - np.dot(ci_optimal, cis[:k, :])) / di_optimal print('eis:', eis, sep='\n') cis[k, :] = eis print('cis:', cis, sep='\n') di2s -= np.square(eis) print('di2s:', di2s, sep='\n') di2s[selected_item] = -np.inf print('di2s:', di2s, sep='\n') selected_item = np.argmax(di2s) print('selected_item:', selected_item, sep='\n') if di2s[selected_item] < epsilon: break selected_items.append(selected_item) print('selected_items:', selected_items, sep='\n') print('scores:', scores) print('selected_items_index:', selected_items) print("selected_items_value:", scores[selected_items]) ```
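The comment above makes the central point of the preprocessing step: to use the dot product as a similarity measure, the feature vectors must first be L2-normalized, after which the dot product coincides with cosine similarity. A quick numerical check of that equivalence:

```python
import numpy as np

rng = np.random.RandomState(0)
a, b = rng.randn(3), rng.randn(3)

# Cosine similarity computed from the raw vectors.
cosine = a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))

# Dot product after L2-normalizing each vector.
a_n, b_n = a / np.linalg.norm(a), b / np.linalg.norm(b)
print(np.isclose(cosine, a_n.dot(b_n)))  # True
```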
github_jupyter
import numpy as np import math np.set_printoptions(linewidth=400) item_size = 5 feature_dimension = 3 max_length = 5 epsilon = 1E-10 scores = np.exp(0.01 * np.random.randn(item_size) + 0.2) print('scores:', scores) feature_vectors = np.random.randn(item_size, feature_dimension) print('feature_vectors:', feature_vectors, sep='\n') print('feature_vectors: ',feature_vectors, sep='\n') # 平方和开根号 print('按行计算范数:',np.linalg.norm(feature_vectors, axis=1, keepdims=True), sep='\n') print('按列计算范数:',np.linalg.norm(feature_vectors, axis=0, keepdims=True), sep='\n') # 向量的单位化,操作后向量的l2范数为1;l2归一化 feature_vectors = feature_vectors / np.linalg.norm(feature_vectors, axis=1, keepdims=True) # 行向量的范数,欧几里得范数 print('l2_norm_feature_vectors:', feature_vectors, sep='\n') print('归一化后的l2范数:',np.linalg.norm(feature_vectors, axis=1, keepdims=True), sep='\n') # 如需使用点积计算向量相似度,则必须对向量作归一化处理。处理后点积与余弦相似度等价。https://milvus.io/cn/docs/metric.md similarities = np.dot(feature_vectors, feature_vectors.T) print('similarities:', similarities, sep='\n') kernel_matrix = scores.reshape((item_size, 1)) * similarities * scores.reshape((1, item_size)) print('reshaeped score:', scores.reshape((item_size, 1)) * scores.reshape((1, item_size)), sep='\n') print('kernel_matrix:', kernel_matrix, sep='\n') # 全0矩阵 cis = np.zeros((max_length, item_size)) cis # 分数的平方 di2s = np.copy(np.diag(kernel_matrix)) di2s selected_items = list() selected_item = np.argmax(di2s) print('selected_items BEFORE:', selected_items) selected_items.append(selected_item) print('selected_items AFTER:', selected_items) while len(selected_items) < max_length: k = len(selected_items) - 1 print('k:', k) ci_optimal = cis[:k, selected_item] print('ci_optimal:', ci_optimal, sep='\n') di_optimal = math.sqrt(di2s[selected_item]) print('di_optimal:', di_optimal, sep='\n') elements = kernel_matrix[selected_item, :] print('elements:', elements, sep='\n') eis = (elements - np.dot(ci_optimal, cis[:k, :])) / di_optimal print('eis:', eis, sep='\n') cis[k, :] = eis print('cis:', cis, sep='\n') di2s -= np.square(eis) print('di2s:', di2s, sep='\n') di2s[selected_item] = -np.inf print('di2s:', di2s, sep='\n') selected_item = np.argmax(di2s) print('selected_item:', selected_item, sep='\n') if di2s[selected_item] < epsilon: break selected_items.append(selected_item) print('selected_items:', selected_items, sep='\n') print('scores:', scores) print('selected_items_index:', selected_items) print("selected_items_value:", scores[selected_items])
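For reuse, the step-by-step selection loop above can be wrapped into a single function. This is a sketch of the same fast greedy MAP procedure with the debug prints removed (function and variable names are my own); calling `dpp_greedy(kernel_matrix, max_length)` should reproduce the `selected_items` list obtained above.

```python
import numpy as np

def dpp_greedy(kernel_matrix, max_length, epsilon=1e-10):
    """Greedy MAP inference for a DPP, as walked through step by step above.

    kernel_matrix: (N, N) DPP kernel L = diag(scores) @ similarities @ diag(scores).
    Returns the indices of the selected items, in selection order.
    """
    item_size = kernel_matrix.shape[0]
    cis = np.zeros((max_length, item_size))
    di2s = np.copy(np.diag(kernel_matrix))
    selected = [int(np.argmax(di2s))]
    while len(selected) < max_length:
        k = len(selected) - 1
        ci_optimal = cis[:k, selected[-1]]
        di_optimal = np.sqrt(di2s[selected[-1]])
        elements = kernel_matrix[selected[-1], :]
        eis = (elements - ci_optimal @ cis[:k, :]) / di_optimal
        cis[k, :] = eis
        di2s -= np.square(eis)
        di2s[selected[-1]] = -np.inf
        next_item = int(np.argmax(di2s))
        if di2s[next_item] < epsilon:
            break
        selected.append(next_item)
    return selected
```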
0.301568
0.320682
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/> # Plaid - Get transactions <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Plaid/Plaid_Get_transactions.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a> **Tags:** #plaid #bank #transactions #snippet **Author:** [Martin Donadieu](https://www.linkedin.com/in/martindonadieu/) ## Input ### Install packages ``` pip install plaid-python ``` ### Create account here : https://plaid.com/ ### Import libraries ``` import os import plaid import naas import IPython.core.display import uuid import json ``` ### Config your variables ``` PLAID_CLIENT_ID = "*************" PLAID_SECRET = "*************" PLAID_ENV = 'sandbox' PLAID_PRODUCTS = ['transactions'] PLAID_COUNTRY_CODES = ['FR'] start_transaction = "2020-09-01" end_transaction = "2020-10-01" ``` ## Model ### Connect to plaid ``` client = plaid.Client(client_id=PLAID_CLIENT_ID, secret=PLAID_SECRET, environment=PLAID_ENV) def create_link_token(): response = client.LinkToken.create( { 'user': { # This should correspond to a unique id for the current user. 'client_user_id': 'user-id', }, 'client_name': "Plaid Quickstart", 'products': PLAID_PRODUCTS, 'country_codes': PLAID_COUNTRY_CODES, 'language': "en", 'redirect_uri': None, } ) return response token = create_link_token() token ``` ### Use Naas callback to get the plaid OAuth token ``` cb_url = naas.callback.add() ``` ### Select Bank connection ``` uid = uuid.uuid4().hex iframe = """ <head> <script src="https://cdn.plaid.com/link/v2/stable/link-initialize.js"></script> </head> <script> const handler_{uid} = Plaid.create({ token: '{GENERATED_LINK_TOKEN}', onSuccess: (public_token, metadata) => { const xhr = new XMLHttpRequest(); xhr.open("POST", "{CALLBACK_URL}", true); xhr.setRequestHeader('Content-Type', 'application/json'); xhr.send(JSON.stringify({ public_token: public_token })); } }); handler_{uid}.open(); </script> """ iframe = iframe.replace('{uid}', uid) iframe = iframe.replace('{CALLBACK_URL}', cb_url.get('url')) iframe = iframe.replace('{GENERATED_LINK_TOKEN}', token.get('link_token')) IPython.core.display.display(IPython.core.display.HTML(iframe)) ``` ### Get back plaid token ``` cb_data = naas.callback.get(cb_url.get('uuid')) cb_data = json.loads(cb_data) public_token = cb_data.get("public_token") public_token ``` ### Exange token ``` exchange_response = client.Item.public_token.exchange(public_token) access_token = exchange_response['access_token'] item_id = exchange_response['item_id'] ``` ## Output ### Show transactions ``` response = client.Transactions.get(access_token, start_date=start_transaction, end_date=end_transaction) transactions = response['transactions'] while len(transactions) < response['total_transactions']: response = client.Transactions.get(access_token, start_date=start, end_date=end, offset=len(transactions) ) transactions.extend(response['transactions']) transaction_df = pd.DataFrame.from_records(transactions) transaction_df ``` ### Save as csv ``` transaction_df.to_csv('transactions.csv') ``` #### If you need more data check the api doc https://plaid.com/docs/
github_jupyter
pip install plaid-python import os import plaid import naas import IPython.core.display import uuid import json PLAID_CLIENT_ID = "*************" PLAID_SECRET = "*************" PLAID_ENV = 'sandbox' PLAID_PRODUCTS = ['transactions'] PLAID_COUNTRY_CODES = ['FR'] start_transaction = "2020-09-01" end_transaction = "2020-10-01" client = plaid.Client(client_id=PLAID_CLIENT_ID, secret=PLAID_SECRET, environment=PLAID_ENV) def create_link_token(): response = client.LinkToken.create( { 'user': { # This should correspond to a unique id for the current user. 'client_user_id': 'user-id', }, 'client_name': "Plaid Quickstart", 'products': PLAID_PRODUCTS, 'country_codes': PLAID_COUNTRY_CODES, 'language': "en", 'redirect_uri': None, } ) return response token = create_link_token() token cb_url = naas.callback.add() uid = uuid.uuid4().hex iframe = """ <head> <script src="https://cdn.plaid.com/link/v2/stable/link-initialize.js"></script> </head> <script> const handler_{uid} = Plaid.create({ token: '{GENERATED_LINK_TOKEN}', onSuccess: (public_token, metadata) => { const xhr = new XMLHttpRequest(); xhr.open("POST", "{CALLBACK_URL}", true); xhr.setRequestHeader('Content-Type', 'application/json'); xhr.send(JSON.stringify({ public_token: public_token })); } }); handler_{uid}.open(); </script> """ iframe = iframe.replace('{uid}', uid) iframe = iframe.replace('{CALLBACK_URL}', cb_url.get('url')) iframe = iframe.replace('{GENERATED_LINK_TOKEN}', token.get('link_token')) IPython.core.display.display(IPython.core.display.HTML(iframe)) cb_data = naas.callback.get(cb_url.get('uuid')) cb_data = json.loads(cb_data) public_token = cb_data.get("public_token") public_token exchange_response = client.Item.public_token.exchange(public_token) access_token = exchange_response['access_token'] item_id = exchange_response['item_id'] response = client.Transactions.get(access_token, start_date=start_transaction, end_date=end_transaction) transactions = response['transactions'] while len(transactions) < response['total_transactions']: response = client.Transactions.get(access_token, start_date=start, end_date=end, offset=len(transactions) ) transactions.extend(response['transactions']) transaction_df = pd.DataFrame.from_records(transactions) transaction_df transaction_df.to_csv('transactions.csv')
0.396185
0.788827
# Preparation ``` !pip install -qU sentence-transformers !pip install -qU wikipedia-api !pip install -qU hazm !pip install -qU clean-text[gpl] !pip install -qU emoji !pip install -q bertopic==0.3.2 !apt-get install xz-utils -qy !mkdir resources !wget -q "https://github.com/sobhe/hazm/releases/download/v0.5/resources-0.5.zip" -P resources !unzip -qq resources/resources-0.5.zip -d resources !rm -rf /content/4ccae468eb73bf6c4f4de3075ddb5336 !rm -rf /content/preproc !rm preprocessing.py utils.py !mkdir -p /content/preproc !git clone https://gist.github.com/4ccae468eb73bf6c4f4de3075ddb5336.git /content/preproc/ !mv /content/preproc/* /content/ !rm -rf /content/preproc from preprocessing import cleaning from utils import num_lines_in_file from IPython import display import nltk import wikipediaapi import numpy as np import pandas as pd import hazm import requests import time from tqdm import tqdm import json from hazm import stopwords_list import torch from sentence_transformers import models, SentenceTransformer, util from bertopic import BERTopic def rtl_print(outputs, font_size="15px", n_to_br=False): outputs = outputs if isinstance(outputs, list) else [outputs] if n_to_br: outputs = [output.replace('\n', '<br/>') for output in outputs] outputs = [f'<p style="text-align: right; direction: rtl; margin-right: 10px; font-size: {font_size};">{output}</p>' for output in outputs] display.display(display.HTML(' '.join(outputs))) def load_st_model(model_name_or_path): word_embedding_model = models.Transformer(model_name_or_path) pooling_model = models.Pooling( word_embedding_model.get_word_embedding_dimension(), pooling_mode_mean_tokens=True, pooling_mode_cls_token=False, pooling_mode_max_tokens=False) model = SentenceTransformer(modules=[word_embedding_model, pooling_model]) return model ``` # Topic Modeling ``` # VoA News # Borrowed from https://jon.dehdari.org/corpora/#persian !gdown https://drive.google.com/uc?id=1mBeSSrEnajB2qxYs67tQbEDWmpRMZ0U0 !unxz voa_fa_2003-2008_orig.txt.xz !head -n 10 voa_fa_2003-2008_orig.txt data = [] voa_corpus_path = '/content/voa_fa_2003-2008_orig.txt' tmp_file = '# File:' tmp_date = '# Date:' tmp_headline = '# Headline:' tmp_caption = '# Caption:' counter = 0 with open(voa_corpus_path, encoding='utf-8') as f: _data = {'url': '', 'date': '', 'headline': '', 'caption': '', 'text': ''} texts = [] for line in tqdm(f, total=num_lines_in_file(voa_corpus_path)): line = line.strip() if line.startswith(tmp_file): sp = line.split(tmp_file) sp = sp[-1].strip() _data['url'] = sp if counter > 0: _data['text'] = '\n'.join(texts).strip() data.append(_data) texts = [] _data = {'url': '', 'date': '', 'headline': '', 'caption': '', 'text': ''} elif line.startswith(tmp_date): sp = line.split(tmp_date) sp = sp[-1].strip() _data['date'] = sp elif line.startswith(tmp_headline): sp = line.split(tmp_headline) sp = sp[-1].strip() _data['headline'] = sp elif line.startswith(tmp_caption): sp = line.split(tmp_caption) sp = sp[-1].strip() _data['caption'] = sp else: texts.append(line) counter += 1 df = pd.DataFrame(data) df = df[['text']] df['text'] = df['text'].apply(lambda t: cleaning(t)) df = df.dropna() df = df.drop_duplicates() df = df.reset_index(drop=True) print(f'We have #{len(df)} news!') df.head() documents = df['text'].sample(frac=0.2).values.tolist() print(f'We have #{len(documents)} news!') _model = load_st_model('m3hrdadfi/bert-fa-base-uncased-wikinli-mean-tokens') _model.save('/content/bert-fa-base-uncased-wikinli-mean-tokens/') model = BERTopic( 
'/content/bert-fa-base-uncased-wikinli-mean-tokens/', stop_words=stopwords_list(), verbose=True) topics, probabilities = model.fit_transform(documents) len(model.get_topics().keys()) model.visualize_distribution(probabilities[0]) idx = np.random.randint(0, len(df)) sample_1 = df.iloc[idx]['text'] rtl_print(f'Document [{idx}]: {sample_1}') topic_id = model.transform(sample_1) predicted_topics = [f'{r[0]} --- Score: {r[1]:.3f}' for r in model.get_topic(topic_id[0][0])] predicted_topics = '\n'.join(predicted_topics) rtl_print(f"Predicted Topics:", font_size='18px') rtl_print(' - - ' * 50) rtl_print(predicted_topics, font_size='14px', n_to_br=True) idx = np.random.randint(0, len(df)) sample_2 = df.iloc[idx]['text'] rtl_print(f'Document [{idx}]: {sample_2}') topic_id = model.transform(sample_2) predicted_topics = [f'{r[0]} --- Score: {r[1]:.3f}' for r in model.get_topic(topic_id[0][0])] predicted_topics = '\n'.join(predicted_topics) rtl_print(f"Predicted Topics:", font_size='18px') rtl_print(' - - ' * 50) rtl_print(predicted_topics, font_size='14px', n_to_br=True) ```
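Beyond inspecting the topics of individual documents, it can help to list the top words of several topics at once. A small sketch using only the calls already used above (`get_topics` and `get_topic`); in BERTopic the id `-1` usually collects outlier documents, so it is skipped here:

```python
# Assumes the fitted `model` and the rtl_print helper from above.
topic_ids = [t for t in model.get_topics().keys() if t != -1]
for topic_id in topic_ids[:5]:
    top_words = [word for word, score in model.get_topic(topic_id)[:5]]
    rtl_print(f"Topic {topic_id}: " + " | ".join(top_words))
```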
github_jupyter
!pip install -qU sentence-transformers !pip install -qU wikipedia-api !pip install -qU hazm !pip install -qU clean-text[gpl] !pip install -qU emoji !pip install -q bertopic==0.3.2 !apt-get install xz-utils -qy !mkdir resources !wget -q "https://github.com/sobhe/hazm/releases/download/v0.5/resources-0.5.zip" -P resources !unzip -qq resources/resources-0.5.zip -d resources !rm -rf /content/4ccae468eb73bf6c4f4de3075ddb5336 !rm -rf /content/preproc !rm preprocessing.py utils.py !mkdir -p /content/preproc !git clone https://gist.github.com/4ccae468eb73bf6c4f4de3075ddb5336.git /content/preproc/ !mv /content/preproc/* /content/ !rm -rf /content/preproc from preprocessing import cleaning from utils import num_lines_in_file from IPython import display import nltk import wikipediaapi import numpy as np import pandas as pd import hazm import requests import time from tqdm import tqdm import json from hazm import stopwords_list import torch from sentence_transformers import models, SentenceTransformer, util from bertopic import BERTopic def rtl_print(outputs, font_size="15px", n_to_br=False): outputs = outputs if isinstance(outputs, list) else [outputs] if n_to_br: outputs = [output.replace('\n', '<br/>') for output in outputs] outputs = [f'<p style="text-align: right; direction: rtl; margin-right: 10px; font-size: {font_size};">{output}</p>' for output in outputs] display.display(display.HTML(' '.join(outputs))) def load_st_model(model_name_or_path): word_embedding_model = models.Transformer(model_name_or_path) pooling_model = models.Pooling( word_embedding_model.get_word_embedding_dimension(), pooling_mode_mean_tokens=True, pooling_mode_cls_token=False, pooling_mode_max_tokens=False) model = SentenceTransformer(modules=[word_embedding_model, pooling_model]) return model # VoA News # Borrowed from https://jon.dehdari.org/corpora/#persian !gdown https://drive.google.com/uc?id=1mBeSSrEnajB2qxYs67tQbEDWmpRMZ0U0 !unxz voa_fa_2003-2008_orig.txt.xz !head -n 10 voa_fa_2003-2008_orig.txt data = [] voa_corpus_path = '/content/voa_fa_2003-2008_orig.txt' tmp_file = '# File:' tmp_date = '# Date:' tmp_headline = '# Headline:' tmp_caption = '# Caption:' counter = 0 with open(voa_corpus_path, encoding='utf-8') as f: _data = {'url': '', 'date': '', 'headline': '', 'caption': '', 'text': ''} texts = [] for line in tqdm(f, total=num_lines_in_file(voa_corpus_path)): line = line.strip() if line.startswith(tmp_file): sp = line.split(tmp_file) sp = sp[-1].strip() _data['url'] = sp if counter > 0: _data['text'] = '\n'.join(texts).strip() data.append(_data) texts = [] _data = {'url': '', 'date': '', 'headline': '', 'caption': '', 'text': ''} elif line.startswith(tmp_date): sp = line.split(tmp_date) sp = sp[-1].strip() _data['date'] = sp elif line.startswith(tmp_headline): sp = line.split(tmp_headline) sp = sp[-1].strip() _data['headline'] = sp elif line.startswith(tmp_caption): sp = line.split(tmp_caption) sp = sp[-1].strip() _data['caption'] = sp else: texts.append(line) counter += 1 df = pd.DataFrame(data) df = df[['text']] df['text'] = df['text'].apply(lambda t: cleaning(t)) df = df.dropna() df = df.drop_duplicates() df = df.reset_index(drop=True) print(f'We have #{len(df)} news!') df.head() documents = df['text'].sample(frac=0.2).values.tolist() print(f'We have #{len(documents)} news!') _model = load_st_model('m3hrdadfi/bert-fa-base-uncased-wikinli-mean-tokens') _model.save('/content/bert-fa-base-uncased-wikinli-mean-tokens/') model = BERTopic( '/content/bert-fa-base-uncased-wikinli-mean-tokens/', 
stop_words=stopwords_list(), verbose=True) topics, probabilities = model.fit_transform(documents) len(model.get_topics().keys()) model.visualize_distribution(probabilities[0]) idx = np.random.randint(0, len(df)) sample_1 = df.iloc[idx]['text'] rtl_print(f'Document [{idx}]: {sample_1}') topic_id = model.transform(sample_1) predicted_topics = [f'{r[0]} --- Score: {r[1]:.3f}' for r in model.get_topic(topic_id[0][0])] predicted_topics = '\n'.join(predicted_topics) rtl_print(f"Predicted Topics:", font_size='18px') rtl_print(' - - ' * 50) rtl_print(predicted_topics, font_size='14px', n_to_br=True) idx = np.random.randint(0, len(df)) sample_2 = df.iloc[idx]['text'] rtl_print(f'Document [{idx}]: {sample_2}') topic_id = model.transform(sample_2) predicted_topics = [f'{r[0]} --- Score: {r[1]:.3f}' for r in model.get_topic(topic_id[0][0])] predicted_topics = '\n'.join(predicted_topics) rtl_print(f"Predicted Topics:", font_size='18px') rtl_print(' - - ' * 50) rtl_print(predicted_topics, font_size='14px', n_to_br=True)
0.424531
0.310975
``` from sklearn import datasets import matplotlib.pyplot as plt iris=datasets.load_iris() X=iris.data y=iris.target iris.target_names import seaborn as sns sns.set(style="ticks") df=sns.load_dataset("iris") sns.pairplot(df,hue='species') df g = sns.PairGrid(df, diag_sharey=False) g.map_lower(sns.kdeplot) g.map_upper(sns.scatterplot) g.map_diag(sns.kdeplot, lw=3) X[0] iris print(iris.feature_names) print(iris.target) print(iris.data.shape) print(iris.target.shape) from sklearn.linear_model import LogisticRegression logreg=LogisticRegression() logreg.fit(X,y) logreg.predict([[1,2,3,4]]) y_pred=logreg.predict(X) y_pred ``` # Accuracy ``` from sklearn import metrics print(metrics.accuracy_score(y,y_pred)) ``` # KNN(K=5) ``` from sklearn.neighbors import KNeighborsClassifier knn=KNeighborsClassifier(n_neighbors=5) knn.fit(X,y) y_pred=knn.predict(X) print(metrics.accuracy_score(y,y_pred)) ``` # KNN(K=1) ``` knn=KNeighborsClassifier(n_neighbors=1) knn.fit(X,y) y_pred=knn.predict(X) print(metrics.accuracy_score(y,y_pred)) m=90 knn=KNeighborsClassifier(n_neighbors=1) knn.fit(X[:m],y[:m]) y_pred=knn.predict(X[m:]) print(metrics.accuracy_score(y[m:],y_pred)) from sklearn.linear_model import LogisticRegression logreg=LogisticRegression() logreg.fit(X[:m],y[:m]) y_pred=logreg.predict(X[m:]) print(metrics.accuracy_score(y[m:],y_pred)) X[:m].shape X[m:].shape y.shape y[:m].shape m from sklearn.cross_validation import train_test_split x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=4) from sklearn.linear_model import LogisticRegression logreg=LogisticRegression() logreg.fit(x_train,y_train) y_predic=logreg.predict(x_test) print(metrics.accuracy_score(y_test,y_predic)) k_range=range(1,30) scores=[] for k in k_range: knn=KNeighborsClassifier(n_neighbors=k) knn.fit(x_train,y_train) y_pred=knn.predict(x_test) scores.append(metrics.accuracy_score(y_test,y_pred)) scores import matplotlib.pyplot as plt plt.plot(k_range,scores) plt.xlabel("value of k for KNN") plt.ylabel("Testign Accuracy") import pandas as pd data=pd.read_csv('Downloads//Advertising.csv',index_col=0) data.head() data.shape data.tail() import seaborn as sns %matplotlib inline sns.pairplot(data,x_vars=['TV','radio','newspaper'],y_vars='sales',size=10,aspect=0.7,kind='scatter') X=data[['TV','radio','newspaper']] y=data['sales'] X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=1) X_train.shape y_train.shape X_test.shape y_test.shape from sklearn.linear_model import LinearRegression linreg=LinearRegression() linreg.fit(X_train,y_train) print(linreg.intercept_) print(linreg.coef_) from sklearn import linear_model reg=linear_model.Ridge(alpha=100) reg.fit(X_train,y_train) print(reg.coef_) print(reg.intercept_) ```
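A portability note on the cell that imports `train_test_split` from `sklearn.cross_validation`: that module was deprecated and later removed, so on recent scikit-learn versions the import has to come from `sklearn.model_selection`. The held-out evaluation it enables is also what exposes the misleading 100% accuracy that K=1 KNN gets on its own training data. A minimal sketch:

```python
from sklearn import datasets, metrics
from sklearn.model_selection import train_test_split  # replaces sklearn.cross_validation
from sklearn.neighbors import KNeighborsClassifier

iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=4)

# Evaluating on held-out data gives an honest accuracy estimate.
knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
print(metrics.accuracy_score(y_test, knn.predict(X_test)))
```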
# Evaluating Word (and Concept) Embeddings In the previous notebooks we have seen how to generate word, knowledge graph and joint (word-concept) embeddings. We also saw that it is easy to explore the resulting embedding spaces using cosine similarity and selecting the *k-nearest neighbours*. In this notebook we look further into how (word) embeddings are evaluated. In particular, we look into the following methods: - **Visual Exploration**: whereby (a subsection of) the embeddings are displayed - **Intrinsic Evaluation**: whereby the embeddings are used to perform a token-based task and the results are compared with a gold standard. + **Word Prediction**: whereby we look into using a test corpus to evaluate the embeddings by defining a word prediction task. - **Extrinsic Evaluation**: whereby a new model is learned (using the embeddings as inputs) to perform a complex task. KG embeddings tend to be evaluated using **graph completion** tasks, which we will also discuss briefly. ## Recommended papers in this area [Schnabel, T., Labutov, I., Mimno, D., & Joachims, T. (2015). Evaluation methods for unsupervised word embeddings. In EMNLP (pp. 298–307). Association for Computational Linguistics.](http://anthology.aclweb.org/D/D15/D15-1036.pdf) Provides a good overview of methods and introduces terminology to refer to different types of evaluations. [Baroni, M., Dinu, G., & Kruszewski, G. (2014). Don’t count, predict! A systematic comparison of context-counting vs. context-predicting semantic vectors. In ACL (pp. 238–247).](http://anthology.aclweb.org/P/P14/P14-1023.pdf) Focuses mostly on *intrinsic* evaluations. Showed that predictive models (like word2vec) produced better results than count models (based on co-occurrence counting). [Levy, O., Goldberg, Y., & Dagan, I. (2015). Improving Distributional Similarity with Lessons Learned from Word Embeddings. Transactions of the Association for Computational Linguistics, 3(0), 211–225.](https://www.transacl.org/ojs/index.php/tacl/article/view/570) Studied how various implementation or optimization 'details' used in predictive models, which were not needed or used in count models affect the performance of the resulting embeddings. Example of such details are: negative sampling, dynamic context windows, subsampling and vector normalization. The paper shows that once such details are taken into account, the difference between count and predictive models is not that large. ``` %cd /content/tutorial !git pull %cd /content/ !git clone https://github.com/HybridNLP2018/tutorial.git ``` ## Visual Exploration Use dimensionality reduction algorithms such as t-SNE and PCA to visualize (a subset) of the embedding space to project points to a 2-D or 3-D space. [Embedding Projector](http://projector.tensorflow.org/) - Pros: - Can give you a sense of whether the model has correctly learned meaningful relations. Especially if you have a small number of pre-categorized words. - Easy to explore the space - Cons: - Subjective: neighbourhoods may look good, but are they? There is no gold standard - Works best for a small subset of the embedding space. But who decides which subset? - resulting projection can be deceiving: what looks close in 3-D space can be far in 300-D space (and vice-versa). ## Intrinsic Evaluation **Intrinsic** evaluations are those where you can use embeddings to perform relatively simple, word-related tasks. Schnabel et al. 
distinguish between: - **Absolute intrinsic**: you have a (human annotated) gold standard for a particular task and use the embeddings to make predictions. - **Comparative intrinsic**: you use the embedding space to present predictions to humans, who then rate them. Mostly used when there is no gold standard available. Tasks: - **Relatedness**: How well do embeddings capture human-perceived word similarity? Datasets typically consist of triples: two words and a similarity score (e.g. between 0.0 and 1.0). Several available datasets, although interpretation of 'word similarity' can vary. - **Synonym detection**: Can embeddings select a synonym for a given word and a set of options? Datasts are n-tuples where the first word is the input word and the other `n-1` words are the options. Only one of the options is a synonym. - **Analogy**: Do embeddings encode relations between words? Datasets are 4-tuples: the first two words define the relation, the third word is the source of the query and the fourth word is the solution. Good embeddings should predict an embedding close to the solution word. - **Categorization**: Can embeddings be clustered into hand-annotated categories? Datasets are word-category pairs. Standard clustering algorithms can then be used to generate k-clusters and the purity of the clusters can be computed. - **Selectional preference**: Can embeddings predict whether a noun-verb pair is more likely to represent a verb-subject or a verb-object relation? E.g. people-eat is more likely to be found as a verb-subject. ### Compute Relatedness Score Swivel comes with a `eval.mk` script that downloads and unzips various relatedness and analogy datasets. The script also compiles an `analogy` executable. It assumes you have a unix environment and tools such as `wget`, `tar`, `unzip` and `egrep`, as well as `make` and a `c++` compiler. For convenience, we have included various relatedness datasets as part of this repo in `eval-datastets/relatedness`. We assume you have generated vectors as part of previous notebooks, which we will test here. ``` import os %ls /content/tutorial/datasamples/relatedness/ %cp /content/umbc/coocs/tlgs_wnscd_5K_ls_f/row_vocab.txt /content/umbc/vec/tlgs_wnscd_5k_ls_f/vocab.txt umbc_5k_vec = '/content/umbc/vec/tlgs_wnscd_5k_ls_f/' umbc_full_vec = '/content/umbc/vec/vecsi_tlgs_wnscd_ls_f_6e_160d/' ``` You can use Swivel's `wordsim.py` to produce metrics for the k-cap embeddings we produced in previous notebooks: ``` !python /content/tutorial/scripts/swivel/wordsim.py --vocab={umbc_5k_vec}vocab.txt \ --embeddings={umbc_5k_vec}vecs.bin \ --word_prefix="lem_" \ /content/tutorial/datasamples/relatedness/*.ws.tab %ls {umbc_full_vec}vocab.txt !python /content/tutorial/scripts/swivel/wordsim.py --vocab=/content/umbc/vec/vecsi_tlgs_wnscd_ls_f_6e_160d/vocab.txt \ --embeddings={umbc_full_vec}vecs.bin \ --word_prefix="lem_" \ /content/tutorial/datasamples/relatedness/*.ws.tab ``` The numbers show that both embedding spaces only have a small coverage of the evaluation datasets. Furthermore, the correlation score achieved is in the range of 0.07 to 0.22, which is very poor, but expected given the size of the corpus. For comparison state-of-the-art results are in the range of 0.65 to 0.8. ### Conclusion for Intrinsic Evaluation Intrinsic evaluations are the most direct way of evaluating (word) embeddings. 
Pros: - they provide a single objective metric that enables easy comparison between different embeddings - there are several readily available evaluation datasets (for English) - if you have an existing, manually crafted, knowledge graph, you can generate your own evaluation datasets Cons: - evaluation datasets are small and can be biased in terms of word selection and annotation - you need to take coverage into account (besides final metric) - existing datasets only support English words (few datasets in other languages, few compound words, few concepts) - tasks are low level and thus somewhat artificial: people care about document classification, but not about word categories or word similarities. ## Word Prediction (plots) This can be seen as a task for intrinsic evaluation, however the task is very close to the original training task used to derive the embeddings in the first place. Recall that *predictive models* (such as `word2vec`), try to minimize the distance between a word embedding and the embeddings of the context words (and that over a whole corpus). ![word2vec diagrams](https://github.com/hybridNLP2018/tutorial/blob/master/images/word2vec_diagrams.png?raw=1) This means that, if we have a **test corpus**, we can use the embeddings to try to predict words based on their contexts. Assuming the test corpus and the training corpus contain similar language we should expect better embeddings to produce better predictions on average. A major advantage of this approach is that we do not need human annotation. Also, we can reuse the tokenization pipeline used for training to produce similar tokens as those in our embedding space. E.g. we can use word-sense-disambiguation to generate a test corpus including lemmas and concepts. The algorithm in pseudo-code is: ``` python similarities = {} for window in corpus: focus_word, context_words = window focus_vector = embedding(focus_word) context_vector = predict_embedding(context_words, focus_word) similarities[focus_word].append(cosine_similarity(focus_vector, context_vector)) return similarities.values().average() ``` The result is a single number that tells you how far the prediction embedding was from the actual word embedding over the whole test corpus. When using cosine similarity this should be a number between -1 and 1. #### Word prediction plots We can also use the intermediate `similarities` dictionary to plot diagrams which can provide further insight. For example, random embeddings result in ![Word prediction plot for random embeddings](https://github.com/hybridNLP2018/tutorial/blob/master/images/Avg_cosine_similarities_for_random_words_at_different_winSizes_recentered.PNG?raw=1) The horizontal axis is the rank of the `focus_word` sorted by their frequency in the training corpus. (For example, frequent words such as 'be' and 'the' would be close to the origin, while infrequent words would be towards the end of the axis. The plot shows that, when words have random embeddings, on average the distance between the prediction for each word and the word embedding is close to 0. These plots can be useful for detecting implementation bugs. 
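To make the evaluation loop above concrete, here is a minimal sketch, not part of the original tooling, assuming the embeddings are available as a plain `{token: numpy array}` dictionary and the test corpus as a list of token lists; the names `embeddings`, `test_corpus` and `window_size` are illustrative. The predicted embedding for a focus word is approximated here by the mean of its context vectors.

``` python
import numpy as np

def avg_prediction_similarity(embeddings, test_corpus, window_size=2):
    """Average cosine similarity between each focus-word vector and the
    mean vector of its context words (a simple stand-in for predict_embedding)."""
    sims = []
    for tokens in test_corpus:                      # one tokenised sentence or line
        for i, focus in enumerate(tokens):
            if focus not in embeddings:
                continue
            lo, hi = max(0, i - window_size), i + window_size + 1
            ctx = [embeddings[t] for j, t in enumerate(tokens[lo:hi], start=lo)
                   if j != i and t in embeddings]
            if not ctx:
                continue
            f = embeddings[focus]
            c = np.mean(ctx, axis=0)
            sims.append(float(np.dot(f, c) /
                              (np.linalg.norm(f) * np.linalg.norm(c) + 1e-12)))
    return float(np.mean(sims)) if sims else 0.0
```

Grouping the per-word similarities by corpus frequency rank is what produces the diagnostic plots discussed here.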
For example, when we were implementing the `CogitoPrep` utility for counting co-occurrences for lemmas and concepts, we generated the following plot: ![Buggy embeddings](https://github.com/hybridNLP2018/tutorial/blob/master/images/correlationbug-avg_token_cosine_similarity_skipgram_10.PNG?raw=1) This showed that we were learning to predict frequent words and some non-frequent words, but that we were not learning most non-frequent words correctly. After fixing the bug, we got the following plot: ![uncentered](https://github.com/hybridNLP2018/tutorial/blob/master/images/uncentered-avg_token_cosine_similarity_skipgram_4.PNG?raw=1) This shows that now we were able to learn embeddings that improved word prediction across the whole vocabulary. But it also showed that prediction for the most frequent words lagged behind more uncommon words. After applying some vector normalization techniques to Swivel and re-centering the vectors (we noticed that the centroid of all the vocabulary embeddings was not the origin), we got: ![recentered](https://github.com/hybridNLP2018/tutorial/blob/master/images/recentered-es10k-avg_token_cosine_similarity_average_rowcol__skipgram__harmonic__5.PNG?raw=1) This shows better overall prediction. ### Conclusion for Word Prediction Pros: - provides a single objective metric - does not require human annotation (although it may requiring pre-processing of the test corpus) - allows to re-use the tokenization steps used during embedding creation. - can be used to generate plots, which can provide insights about implementation or representation issues Cons: - there are no standard test corpora - can be slow to generate the metric for large test corpus. We recommend balancing the size of the test corpus to maximise the vocabulary coverage, while minimising the time required to process the corpus. ## Extrinsic Evaluation In Extrinsic Evaluations, we have a more complex task we are interested in (e.g. text classification, text translation, image captioning), whereby we can use embeddings as a way to represent words (or tokens). Assuming we have: - a model architecture and - a corpus for training and evaluation (for which the embeddings provide adequate coverage), we can then train the model using different embeddings and evaluate its overall performance. The idea is that better embeddings will make it easier for the model to learn the overall task.
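As a minimal illustration of this setup, and not the tutorial's own code, the sketch below averages word vectors into fixed-size document features and scores a scikit-learn classifier on top of them; `embeddings`, `texts`, `labels` and `dim` are hypothetical inputs standing in for your own data.

``` python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

def doc_vector(tokens, embeddings, dim):
    """Average the vectors of the tokens covered by the embedding vocabulary."""
    vecs = [embeddings[t] for t in tokens if t in embeddings]
    return np.mean(vecs, axis=0) if vecs else np.zeros(dim)

def extrinsic_score(embeddings, texts, labels, dim):
    """texts: list of token lists; labels: list of class labels."""
    X = np.vstack([doc_vector(t, embeddings, dim) for t in texts])
    clf = LogisticRegression(max_iter=1000)
    return cross_val_score(clf, X, labels, cv=5).mean()
```

Better embeddings should translate into a higher cross-validated score for the same downstream model and corpus.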
# Building Your Predictor

The next step after preparing and importing your data via `Getting_Data_Ready.ipynb` is to build your first model. The overall process for this is:

* Setup
* Create a Predictor
* Deploy a Predictor
* Obtain a Forecast

To get started, simply execute the cells below:

## Setup

Import the standard Python libraries that are used in this lesson.

```
import boto3
from time import sleep
import subprocess
import pandas as pd
import json
import time
```

The last part of the setup process is to validate that your account can communicate with Amazon Forecast; the cell below does just that.

```
session = boto3.Session(region_name='us-east-1')
forecast = session.client(service_name='forecast')
forecastquery = session.client(service_name='forecastquery')
```

## Create a Predictor

In the previous notebook your data was imported into Forecast; here we will once again define your dataset information and then start building your model, or predictor.

The forecast horizon is the number of time points to predict into the future. For weekly data, a value of 12 means 12 weeks. Our example uses hourly data and we forecast the next day, so we set it to 24.

```
project = 'electric_power_forecastdemo' # This should be the same as your previous notebook
predictorName = project + '_prophet_algo'
forecastHorizon = 24
algorithmArn = 'arn:aws:forecast:::algorithm/Prophet'
datasetGroupArn = "arn:aws:forecast:us-east-1:325928439752:dataset-group/util_power_forecastdemo_dsg" # Fill in the quotes from the output of the previous notebook.

create_predictor_response = forecast.create_predictor(
    PredictorName=predictorName,
    AlgorithmArn=algorithmArn,
    ForecastHorizon=forecastHorizon,
    PerformAutoML=False,
    PerformHPO=False,
    EvaluationParameters={"NumberOfBacktestWindows": 1,
                          "BackTestWindowOffset": 24},
    InputDataConfig={"DatasetGroupArn": datasetGroupArn},
    FeaturizationConfig={"ForecastFrequency": "H",
                         "Featurizations": [
                             {"AttributeName": "target_value",
                              "FeaturizationPipeline": [
                                  {"FeaturizationMethodName": "filling",
                                   "FeaturizationMethodParameters": {"frontfill": "none",
                                                                     "middlefill": "zero",
                                                                     "backfill": "zero"}}
                              ]}
                         ]}
)

predictorArn = create_predictor_response['PredictorArn']
```

Check the status of the predictor. When the status changes from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to the next steps. Depending on data size, model selection and hyperparameters, it can take 10 minutes to more than one hour to become **ACTIVE**.

```
while True:
    predictorStatus = forecast.describe_predictor(PredictorArn=predictorArn)['Status']
    print(predictorStatus)
    if predictorStatus != 'ACTIVE' and predictorStatus != 'CREATE_FAILED':
        sleep(30)
    else:
        break
```

### Get Error Metrics

```
# predictorArn = "arn:aws:forecast:us-east-1:325928439752:predictor/electric_power_forecastdemo_prophet_algo"
forecast.get_accuracy_metrics(PredictorArn=predictorArn)
```

## Create a Forecast

Now create a forecast using the model that was trained.

```
forecastName = project + '_prophet_algo_forecast'
create_forecast_response = forecast.create_forecast(ForecastName=forecastName,
                                                    PredictorArn=predictorArn)
forecastArn = create_forecast_response['ForecastArn']
```

Check the status of the forecast process. When the status changes from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to the next steps. Depending on data size, model selection and hyperparameters, it can take 10 minutes to more than one hour to become **ACTIVE**.
```
while True:
    forecastStatus = forecast.describe_forecast(ForecastArn=forecastArn)['Status']
    print(forecastStatus)
    if forecastStatus != 'ACTIVE' and forecastStatus != 'CREATE_FAILED':
        sleep(30)
    else:
        break
```

### Get Forecast

Once created, the forecast results are ready and you can view them.

```
print(forecastArn)
print()
forecastResponse = forecastquery.query_forecast(
    ForecastArn=forecastArn,
    Filters={"item_id": "client_12"}
)
print(forecastResponse)
```

## Next Steps

Now that your forecast has been created, use the ARN that was printed above to evaluate it in `Evaluating_Your_Predictor.ipynb`
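Before moving on, here is a small optional sketch, not part of the original walkthrough, that reshapes the response above into a pandas DataFrame for inspection or plotting. It assumes the response carries its predictions under `Forecast -> Predictions`, keyed by quantile (for example `p10`, `p50`, `p90`); adjust the keys if your response differs.

```
import pandas as pd

# Hypothetical post-processing of forecastResponse from the cell above
predictions = forecastResponse['Forecast']['Predictions']
frames = []
for quantile, points in predictions.items():
    s = pd.DataFrame(points)                      # expected columns: Timestamp, Value
    s['Timestamp'] = pd.to_datetime(s['Timestamp'])
    frames.append(s.set_index('Timestamp')['Value'].rename(quantile))

forecast_df = pd.concat(frames, axis=1)
print(forecast_df.head())
# forecast_df.plot() would show the quantile bands over time
```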
# Exercise session nº 5 --- # Furrow Constriction in Animal Cell Cytokinesis __*Sacha Ichbiah, 21/02/22, ENS Paris*__ This subject is extracted from : > Hervé Turlier et al., *Furrow Constriction in Animal Cell Cytokinesis*, Biophysical Journal, 2014. \ > https://doi.org/10.1016/j.bpj.2013.11.014 Cytokinesis is the process of physical cleavage at the end of cell division; it proceeds by ingression of an actomyosin furrow at the equator of the cell. Its failure leads to multinucleated cells and is a possible cause of tumorigenesis. Despite its ubiquity in developmental biology, its precise description and understanding have challenged biologists and physicists. In this paper, the authors propose a model based on a minimal geometry and scaling arguments that gives a physical interpretation of the process appearing during cytokinesis. It notably demonstrates that because of the cytoplasm incompressibility, the cytokinesis leads to a competition between the furrow line tension and the cell poles' surface tension. This competition sets a threshold for cytokinesis completion, and explains cytokinesis dynamics. <img src="Images/Cytokinesis.png" alt="drawing" width="800"/> During these session, we will derive the equations of this scaling model of furrow constriction, and we will integrate these equations to study constriction dynamics. We will show that it allows to have a cytokinesis duration independant of cell size, which has been observed in C-Elegans. --- ## I - The Scaling Model The geometry of the dividing cell is described by the apposition of two spherical caps, parametrized by an angle $\theta$ as shown on the left sketch. The volume of a spherical cap (in blue) is : $\mathcal{V}_{sc}(r,h) = \dfrac{\pi}{3} h^2 (3r - h)$, and its area : $\mathcal{A}_{sc}(r,h)=2\pi r h $ (right sketch). <img src="Images/Spherical_cap_model_2.png" alt="drawing" width="800"/> #### **Question 1 :** > Noting that the cytoplasmic is an incompressible fluid, establish that $R_0 = R F(\theta)$ **Correction** : Noting that $h = R(1-\text{cos}(\theta))$, we have : $\begin{align} \frac{V}{2} & = \frac{4}{3} \pi R^3 - V_{sc}(R) \newline &= \frac{\pi R^3}{3}(4 - (1-\text{cos}(\theta))^2 (3R - R(1-\text{cos}(\theta))) \newline &= \frac{\pi R^3}{3}(2 + 3\text{cos}(\theta) - \text{cos}^3(\theta)) \newline &= \frac{2\pi R^3}{3}(1 + \frac{3}{2}\text{cos}(\theta) - \frac{1}{2}\text{cos}^3(\theta)) \newline \end{align} $ The incompressibility gives us $V = V_0 = \frac{4}{3} \pi R_0^3$, thus $R_0 = R.F(\theta)$ with $F(\theta)=1 + \frac{3}{2}\text{cos}(\theta) - \frac{1}{2}\text{cos}^3(\theta)$ We define a dimensionless parameter $\kappa$ to express the competition between the mean contractile surface tension at the furrow and the tension at the cell poles: $\kappa = \dfrac{\gamma}{2R_0N^a_0}$. The polar contractility tends to reduce the surface $A_p = 2\pi R^2 (1+\text{cos}(\theta))$ of each cell poles, whereas the line tension tends to reduce the contractile ring circumference $r_f$. These effects are captured by a simple mechanical energy $\mathcal{E} = 2\pi r_f \gamma + 2 A_p N^a_0$. #### **Question 2 :** > Rescale the energy $\mathcal{E}$ by an energy $\mathcal{E}_0 = 4 \pi R_0^2 N^a_0$ to make it only depend on $\theta$ and $\kappa$. 
**Correction** : As $r_f = R\text{sin}(\theta)$, we have : $\begin{align} \mathcal{E} &= 2\pi \gamma \frac{R_0}{F(\theta)^{1/3}} \text{sin}(\theta) + 4 \pi \frac{R_0^2}{F(\theta)^{2/3}} (1+\text{cos}(\theta)) N^a_0 \newline \Rightarrow \frac{\mathcal{E}}{4 \pi R_0^2 N^a_0} &= \frac{\gamma}{2 R_0 N^a_0} \frac{\text{sin}(\theta)}{F(\theta)^{1/3}} + \frac{1+\text{cos}(\theta)}{F(\theta)^{2/3}} \newline \Rightarrow \frac{\mathcal{E}}{\mathcal{E}_0} &= \kappa \frac{ \text{sin}(\theta)}{F(\theta)^{1/3}} + \frac{1+\text{cos}(\theta)}{F(\theta)^{2/3}} \end{align} $ ## II - Mechanical Equilibrium The local minimum of the energy gives the equilibrium configuration of the cell. To find this minimum, we will use a library doing symbolic calculus in Python called sympy. This will allow us to compute the derivatives effortlessly. ### Symbolic Computation with Sympy *(In french "Calcul Formel")* We will use sympy, a library that allows to do symbolic computation. Analytical results are always the best, but sometimes the equations does not lead to beautiful simplifications. If we are interested in the numerical result of the equations, we can use sympy to work on the analytical expression directly, obtain derivatives, etc.. before evaluating them on real values. There are three main functions that we will use in sympy, that we will present briefly. If interested, the best symbolic calculus tools are present in Wolfram Mathematica, which is under license. #### a) Defining expression with symbols and trigonometric functions, and obtain derivatives : ``` #!pip install sympy import sympy from sympy import symbols, diff,lambdify, simplify from sympy import cos, sin import numpy as np from scipy.optimize import minimize from scipy.optimize import fsolve,root import matplotlib.pyplot as plt from tqdm import tqdm #Symbols are unknown variables, that we aim to replace with real values in the end. We define them with the function symbols : a,b,c = symbols("a b c") #We can then define a symbolic expression, containing or not trigonometric functions (among many other possibilities !) E = a**2 + a*b + cos(c) #And obtain its derivatives with respect to any variables, eg a : First_derivative = E.diff(a) Second_derivative = First_derivative.diff(a) First_derivative, Second_derivative ``` #### b) Substituting variables and evaluating symbolic expressions : ``` # We can replace symbols with real variables with the method subs : print("c = pi gives :",E.subs([(c,np.pi)])) print("Subs method : ",E.subs([(a,2),(b,1),(c,0)]) ) #We can also transform a symbolic expression into a lambda function #This is a faster process than subs if we need to evaluate the function on many points : f = lambdify((a,b,c),E,"numpy") print("Lambify method : ",f(2,1,0)) #We can combine both to replace certain variables before creating a lambda function with the remaining variables : g = lambdify(a,E.subs([(b,1),(c,0)])) print("Subs and lambdify combined :",g(2)) #Short benchmarking from time import time values_evaluated = np.linspace(0,np.pi,1000) t1 = time() g = lambdify(a,E.subs([(b,2),(c,0.2)])) g(values_evaluated) t2 = time() for value in values_evaluated : E.subs([(a,value),(b,2),(c,0.2)]) t3 = time() print("Time with lambdify :",round((t2-t1),4)) print("Time with subs :",round((t3-t2),4)) ``` ### The equilibrium configuration during cytokinesis Let's go back to the initial problem. Our goal is to study the properties of the normalized energy $\overline{\mathcal{E}} = \mathcal{E}/\mathcal{E}_0$. 
``` x, k = symbols("x k") F = 1 +1.5*cos(x) - 0.5*(cos(x))**3 energy = k * sin(x)/(F**(1/3)) + (1+ cos(x))/(F**(2/3)) #We see that there is no simplification easily given by sympy : print(simplify(energy),'\n') #We can replace the values of k and x : print(energy.subs([(x,np.pi/3),(k,.5)])) energy ``` Now that we have implemented our energy in sympy, we can automatically obtain the derivatives with the diff engine. We see that the analytical formulas are quite long and obtaining by hand the derivatives would be both painful and prone to errors. #### **Question 3 :** > Obtain the expression of the first and second derivatives of the energy $\overline{\mathcal{E}}$ with respect to theta (i.e x) with the diff function : **Correction** : ``` first_derivative = diff(energy,x) second_derivative = diff(first_derivative,x) ``` #### **Question 4 :** > Plot the energy profile $\overline{\mathcal{E}}(\theta), \theta \in [0,\dfrac{\pi}{2}]$ for $\kappa \in \{0.0,0.1,0.2,0.3,0.4,0.5\}$. What do you observe ? **Correction** : We see that at a moment, for a value $\kappa_c \in [0.4,0.5]$, the local minimum disappears. It means that there is no value $\theta >0$ where the cell is in a stable configuration. It gives a threshold from incomplete constriction (stable regime) towards full division (unstable regime). ``` plt.figure(figsize = (15,25)) vals_theta = np.linspace(0,np.pi/2,10000) for k_val in np.linspace(0,0.5,6): e = lambdify(x, energy.subs(k,k_val), "numpy") plt.plot(vals_theta,e(vals_theta),label = k_val.round(2)) plt.legend() ``` #### **Question 5 :** > Starting with $\theta = \pi/4$, find the angle giving the local energy minimum for each k in values_k. Plot the equilibrium angle $\theta_{min}$, and the value of the derivatives of the energy $\left. \dfrac{\partial \mathcal{E}}{\partial \theta}\right\rvert_{\theta_{min}}$, $\left. \dfrac{\partial^2 \mathcal{E}}{\partial \theta^2}\right\rvert_{\theta_{min}}$, $\left (\left. \dfrac{\partial^2 \mathcal{E}}{\partial \theta^2}\right\rvert_{\theta_{min}}\right)^2$ at this angle for each k. **Correction** : ``` e = lambdify((x,k), energy, "numpy") d = lambdify((x,k), first_derivative, "numpy") s = lambdify((x,k), second_derivative, "numpy") npoints = 10001 B = [] Derivatives = [] Second_Derivatives =[] Solutions = [] values_k = np.linspace(0, 1, npoints) eps = 1e-3 for j,k_val in enumerate(tqdm(values_k)) : #print(j) f = lambda x : e(x,k_val) g = lambda x : d(x,k_val) h = lambda x : s(x,k_val) sols = minimize(fun = f,x0=(np.pi/4),method = "SLSQP",bounds=[(0,np.pi/2)]) assert (sols.success) min_theta = sols['x'] if g(min_theta)<eps : B.append(k_val) Derivatives.append(g(min_theta)) Second_Derivatives.append(h(min_theta)) Solutions.append(min_theta) Derivatives=np.array(Derivatives) Second_Derivatives=np.array(Second_Derivatives) Solutions=np.array(Solutions) fig,ax = plt.subplots(1,4,figsize = (21,3)) ax[0].plot(values_k, Solutions*180/np.pi) ax[0].set_xlabel("k") ax[0].set_ylabel("minimum found") ax[1].plot(values_k, Derivatives) ax[1].set_xlabel("k") ax[1].set_ylabel("derivative value at minimum") ax[2].plot(values_k, Second_Derivatives) ax[2].set_xlabel("k") ax[2].set_ylabel("second derivative value at minimum") ax[3].plot(values_k, Second_Derivatives**2) ax[3].set_xlabel("k") ax[3].set_ylabel("square of second derivative value at minimum") ``` #### **Question 6 :** > Estimate the value $k_c$ where this local minimum disappears, and its associated angle $\theta_c$. Compute the values of the first two derivatives $\left. 
\dfrac{\partial \mathcal{E}}{\partial \theta}\right\rvert_{\theta_c} $ and $\left. \dfrac{\partial^2 \mathcal{E}}{\partial \theta^2}\right\rvert_{\theta_c} $. **Correction** ``` k_crit=values_k[np.argmin(Second_Derivatives**2)] g = lambda x : d(x,k_crit) h = lambda x : s(x,k_crit) theta_c = fsolve(g,np.pi/4) print("Critical point :",(k_crit,theta_c[0]*180/np.pi)) print("First derivative :",g(theta_c),"Second derivative :", h(theta_c)) ``` In this rescaled energy, the disappearance of the local minimum $\theta > 0$ when k is increased above some threshold defines a critical point $(\kappa_c, \theta_c)$. This critical point, where the final cell jumps from an incomplete constriction to a full division, is given by : $0 = \left. \dfrac{\partial \mathcal{E}}{\partial \theta}\right\rvert_{\theta_c} $ and $0 = \left. \dfrac{\partial^2 \mathcal{E}}{\partial \theta^2}\right\rvert_{\theta_c} $ This is a first order phase transition. #### **Question 7 :** > Plot the equilibrium angles and draw the shape of the cells for $\kappa \in \{0,0.2,0.4,0.6\}$ **Correction** ``` fig,ax = plt.subplots(1,5,figsize =(25,5)) colors = ['tab:blue','tab:orange','tab:green','tab:red'] R0 = 1 for j,idx_val in enumerate([0,2000,4000,6000]) : theta_sol = Solutions[idx_val] k_value = values_k[idx_val] R = R0/((F.subs(x,theta_sol[0]))**(1/3)) e = lambdify((x), energy.subs(k,k_value), "numpy") theta_values_k = np.linspace(theta_sol, 2*np.pi-theta_sol,100) circle_x = R*np.cos(theta_values_k) circle_y = R*np.sin(theta_values_k) ax[0].plot(vals_theta*180/np.pi,e(vals_theta)) ax[0].scatter(theta_sol*180/np.pi,e(theta_sol),s = 180) ax[0].set_ylabel("Energy") ax[0].set_xlabel("Angle value") ax[0].set_xlim(-5,95) ax[j+1].plot(circle_x-R*np.cos(theta_sol),circle_y,color=colors[j],linewidth=5) ax[j+1].plot(R*np.cos(theta_sol)-circle_x,circle_y,color=colors[j],linewidth=5) ax[j+1].set_title("Equilibrium angle value :" + str((theta_sol[0]*180/np.pi).round(2))) ax[j+1].set_xlim(-2,2) ax[j+1].set_ylim(-2,2) ax[j+1].set_aspect('equal')#, adjustable='box') ``` ## III - Dynamics We now want to study the furrow constriction dynamics, i.e the temporal evolution of $\dfrac{r_f}{R_0}$. We will establish these dynamics by expressing the derivative of this quantity with respect to $\theta$. As before, we will use the symbolic computation library sympy to evaluate numerical quantities. To establish the dynamic equation, we note that the power of active effects is exactly dissipated by viscous cell deformations. The viscous dissipation is made of two contributions, the stretching of the poles and the constriction of the ring, which we estimate in scaling. The volume of acto-myosin in the poles is $V_p = 2A_p e_p$ and in the ring $V_f = 2\pi r_f w e_f$, where $w$ and $e_f$ are the width and thickness of the contractile ring. (We remind that the surface of each cell pole writes : $A_p = 2\pi R^2 (1+\text{cos}(\theta))$). The value $e_p \approx e_0$ and the ring thickness $e_f$ reach a steady-state value that depends on turnover. 
This yields the viscous dissipated power : $\begin{align} P_d &= \dfrac{1}{2} \eta \left[ V_p \left(\dfrac{1}{R} \dfrac{dr_f}{dt} \right)^2 + V_f \left( \dfrac{1}{r_f} \dfrac{dr_f}{dt}\right)^2 \right] \newline P_d &= \dfrac{1}{2} \eta \left[ e_f 4 \pi R^2 (1+\text{cos}\theta) \dfrac{1}{R^2} \left(\dfrac{dr_f}{dt} \right)^2 + 2\pi w e_f r_f \dfrac{1}{r_f^2} \left( \dfrac{dr_f}{dt}\right)^2 \right] \newline &\approx \dfrac{1}{2} \eta \left[ 4 e_0 \pi(1+\text{cos}\theta) \left(\dfrac{dr_f}{dt} \right)^2 + 2\pi w e_f\dfrac{1}{r_f} \left( \dfrac{dr_f}{dt}\right)^2 \right] \newline &= \left(\dfrac{dr_f}{dt} \right)^2 \dfrac{1}{2} \eta \left[ 4 \pi e_0 (1+\text{cos}\theta) + \dfrac{4\pi}{2} w e_f\dfrac{F(\theta)^{1/3}}{R_0 \sin \theta} \right] \newline &= \left(\dfrac{dr_f}{dt} \right)^2 4 \pi e_0 \eta \left[ (1+\text{cos}\theta) + \dfrac{1}{2 R_0} w \dfrac{e_f}{e_0} \dfrac{F(\theta)^{1/3}}{\sin \theta} \right] \newline &= \left(\dfrac{dr_f}{dt} \right)^2 4 \pi e_0 \eta \left[ (1+\text{cos}\theta) + \lambda \dfrac{F(\theta)^{1/3}}{\sin \theta} \right] \newline \end{align} $ The balance of mechanical and dissipated powers yields : $\dfrac{d \mathcal{E}}{dt} + P_d = 0$ Besides : $ \dfrac{1}{\mathcal{E}_0} \dfrac{d\mathcal{E}}{dt} = \dfrac{\partial \mathcal{E}/\mathcal{E}_0}{\partial \theta} \dfrac{\partial \theta}{\partial r_f} \dfrac{d r_f}{d_t} = \dfrac{\partial \mathcal{E}/\mathcal{E}_0}{\partial \theta} \left(\dfrac{\partial r_f}{\partial \theta}\right)^{-1} \dfrac{d r_f}{d_t} $ And, with $T_a=\frac{\eta e_0}{N^a_0}$ : $\dfrac{1}{\mathcal{E}_0} P_d = \left(\dfrac{dr_f}{dt} \right)^2 \dfrac{4\pi e_0 \eta}{4 \pi R_0^2 N^a_0} \left[ (1+\text{cos}\theta) + \dfrac{\lambda}{2} \dfrac{F(\theta)^{1/3}}{\sin \theta} \right] = \left(\dfrac{dr_f}{dt} \right)^2 \dfrac{T_a}{ R_0^2} \left[ (1+\text{cos}\theta) + \dfrac{\lambda}{2} \dfrac{F(\theta)^{1/3}}{\sin \theta} \right] $ We have thus : $ \begin{align} \dfrac{\partial \mathcal{E}/\mathcal{E}_0}{\partial \theta} \left(\dfrac{\partial r_f}{\partial \theta}\right)^{-1} \dfrac{d r_f}{d_t} &= - \left(\dfrac{dr_f}{dt} \right)^2 \dfrac{T_a}{ R_0^2} \left[ (1+\text{cos}\theta) + \dfrac{\lambda}{2} \dfrac{F(\theta)^{1/3}}{\sin \theta} \right] \newline \dfrac{\partial \mathcal{E}/\mathcal{E}_0}{\partial \theta} \left(\dfrac{\partial r_f/R_0}{\partial \theta}\right)^{-1} &= - \dfrac{dr_f}{dt} \dfrac{T_a}{ R_0} \left[ (1+\text{cos}\theta) + \dfrac{\lambda}{2} \dfrac{F(\theta)^{1/3}}{\sin \theta} \right] \newline \dfrac{dr_f}{dt} \dfrac{T_a}{ R_0} &= - \dfrac{\partial \mathcal{E}/\mathcal{E}_0}{\partial \theta} \left(\dfrac{\partial r_f/R_0}{\partial \theta}\right)^{-1} \left[ (1+\text{cos}\theta) + \dfrac{\lambda}{2} \dfrac{F(\theta)^{1/3}}{\sin \theta} \right]^{-1} = -\mathcal{H}(\theta,\kappa,\lambda) \end{align} $ We will compute numerical the values of this function $\mathcal{H}$ to obtain the evolution of the furrow radius $r_f$ #### **Question 8 :** > From the last equation, express the angle temporal variation $\dot \theta$. 
**Correction** : We have $\dfrac{r_f}{R_0} = \sin(\theta) F(\theta)^{-1/3}$, thus $\dfrac{d r_f/R_0}{dt} = \dot \theta \left(\dfrac{\partial r_f/R_0}{\partial \theta}\right) = \dot \theta \dfrac{\partial \left(\sin (\theta) F(\theta)^{-1/3}\right)}{\partial \theta} = \dot \theta \left(\sin (\theta) F(\theta)^{-1/3}\right)'$ Eventually : $T_a \dot \theta =- \mathcal{H}(\theta, \kappa, \lambda)\left(\dfrac{\partial r_f/R_0}{\partial \theta}\right)^{-1} = - \dfrac{\partial \mathcal{E}/\mathcal{E}_0}{\partial \theta} \left(\dfrac{\partial r_f/R_0}{\partial \theta}\right)^{-2} \left[ (1+\text{cos}\theta) + \dfrac{\lambda}{2} \dfrac{F(\theta)^{1/3}}{\sin \theta} \right]^{-1} $ #### **Question 9 :** > Compute numerically $\dot \theta$ with sympy, and integrate the evolution of $\theta$, $r_f$ in time with a forward-euler-scheme for $t \in [0,15]$, starting with $\theta(0)=\pi/2$, with $\lambda = 1$ and $\kappa \in \{0.1,0.25,0.4,0.5,0.75,1\}$. Check that it is compatible with the previous results obtained from the static analysis. **Correction** : At each timestep, we have : $ \begin{align} \theta(t+dt) &= \theta(t) + \dot \theta (t) dt \newline r_f(t+dt) &= sin(\theta(t+dt)) F(\theta(t+dt))^{-1/3} R_0 \end{align} $ ``` x, k, l = symbols("x k l") F = 1 +1.5*cos(x) - 0.5*(cos(x))**3 energy = k * sin(x)/(F**(1/3)) + (1+ cos(x))/(F**(2/3)) r_f = sin(x)*(F**(-1/3)) dr_f = diff(r_f,x) first_derivative = diff(energy,x) H = ((1+cos(x)) + l*(F**(1/3))/sin(x))**(-1)*first_derivative/dr_f dtheta = - H/dr_f dtheta = dtheta.subs(l,0.1) theta0 = np.pi/2 R0 = 1 for k_value in [0.1,0.25,0.4,0.5,0.75,1]: expr_dtheta = dtheta.subs(k,k_value) func_dtheta = lambdify(x,expr_dtheta,"numpy") func_rf = lambdify(x,r_f,"numpy") npoints = 1000 timepoints = np.linspace(0,15,npoints) Thetas = np.zeros(npoints) Rf = np.zeros(npoints) dt = timepoints[1]-timepoints[0] for j,t in enumerate(timepoints) : if j == 0 : Thetas[0] = theta0 Rf[0] = func_rf(Thetas[0])*R0 continue Thetas[j] = Thetas[j-1] + dt*func_dtheta(Thetas[j-1]) Rf[j] = func_rf(Thetas[j])*R0 plt.plot(timepoints,Rf,label = k_value) plt.title("Constriction completion or failure") plt.xlabel("time t/Ta") plt.ylabel("furrow radius r_f/R_0") plt.legend() ``` #### **Question 10 :** > Determine the cytokinesis duration with $R_0 \in \{0.5,1,2,4\}$. Show that in case of cytokinesis completion $\lambda = 1, \kappa = 0.75$, the initial cell radius R0 has no impact on the cytokinesis time. **Correction** : ``` theta0 = np.pi/2 k_value = 0.75 for R0 in [0.5,1,2,4]: expr_dtheta = dtheta.subs(k,k_value) func_dtheta = lambdify(x,expr_dtheta,"numpy") func_rf = lambdify(x,r_f,"numpy") npoints = 1000 timepoints = np.linspace(0,6,npoints) Thetas = np.zeros(npoints) Rf = np.zeros(npoints) dt = timepoints[1]-timepoints[0] for j,t in enumerate(timepoints) : if j == 0 : Thetas[0] = theta0 Rf[0] = func_rf(Thetas[0])*R0 continue Thetas[j] = Thetas[j-1] + dt*func_dtheta(Thetas[j-1]) Rf[j] = func_rf(Thetas[j])*R0 plt.plot(timepoints,Rf,label ="R0: " +str(R0)) plt.title("Cytokinesis time is independent of the cell initial radius") plt.xlabel("time t/Ta") plt.ylabel("furrow radius r_f/R_0") plt.legend() ``` ## Conclusion Using both numerical and scaling analyses, the authors rationalized the physical mechanisms governing ring constriction in the cytokinesis of animal cells. 
A gradient of surface contractility from the poles to the equator is sufficient to drive cytokinesis and to reproduce its main features: formation of the contractile ring, cortical flow toward the equator, and furrow constriction. This model provides quantitative criteria for cytokinesis completion and propose a general framework for interpreting and characterizing constriction failure. The authors furthermore demonstrate the physical mechanism leading to the independence of cytokinesis duration on the initial cell size in embryos. The thin shell model described in the paper studied can be generalized in 3D. The numerical integration of the equations describing cortex dynamics is done via finite-element methods. The results gives a biology accurate (and beautiful !) view of the cortex actin flows and of the cytokinesis dynamics. For more information, see : > H. Borja Da Rocha et al., *A viscous active shell theory of the cell cortex*, 2021 \ https://arxiv.org/abs/2110.12089 <img src="Images/Thin_shell_model_of_cytokinesis.png" alt="drawing" width="800"/>
<!--NAVIGATION--> <a href="https://colab.research.google.com/github/masdeseiscaracteres/ml_course/blob/master/exercises/06_boosted_trees.ipynb" target="_parent"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Abrir en Google Colab" title="Abrir y ejecutar en Google Colab"/></a> # Boosted Trees Vamos a analizar el funcionamiento de los métodos de Gradient Boosting de `sklearn` mediante ejemplos ilustrativos. Contenidos: 1. Un problema de clasificación 2. Un problema de regresión ## 0. Configuración del entorno Lo primero es cargar las biblotecas, funciones y datos necesarios. ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap %matplotlib inline cm = plt.cm.RdBu cm_bright = ListedColormap(['#FF0000', '#0000FF']) import warnings warnings.filterwarnings('ignore') import requests import os def download_if_missing(url, filename): if not os.path.exists(filename): os.makedirs(os.path.dirname(filename), exist_ok=True) r = requests.get(url, allow_redirects=True) with open(filename, 'wb') as f: f.write(r.content) download_if_missing("https://github.com/masdeseiscaracteres/ml_course/raw/master/exercises/data/diabetes.csv", "./data/diabetes.csv") download_if_missing("https://github.com/masdeseiscaracteres/ml_course/raw/master/exercises/data/kc_house_data.csv", "./data/kc_house_data.csv") ``` ## 1. Un problema de clasificación Utilizaremos el [Pima Indians Diabetes dataset](https://www.kaggle.com/uciml/pima-indians-diabetes-database). ### Ejercicio Cargue los datos almacenados en el fichero *diabetes.csv*. ### Solución Cargamos los datos: ``` data = pd.read_csv('./data/diabetes.csv', sep=',', decimal='.') print(data.shape) data.head() ``` Nos aseguramos de que los tipos de datos son correctos: ``` data.dtypes ``` Miramos la frecuencia de cada una de las clases: ``` data['Outcome'].value_counts() ``` ### Análisis exploratorio básico #### Ejercicio Represente el histograma de cada variable separando por clases. #### Solución ``` # Pintamos histogramas para cada clase plt.figure(figsize=(20,8)) idx_0 = data['Outcome'] == 0 idx_1 = data['Outcome'] == 1 for i, feature in enumerate(data.columns.drop(['Outcome'])): plt.subplot(2, 4, i+1) plt.hist(data.ix[idx_0,feature],density=1, bins=20, alpha=0.6, label='y=0') plt.hist(data.ix[idx_1,feature],density=1, bins=20, facecolor='red', alpha=0.6, label='y=1') plt.legend() plt.title(feature) plt.show() ``` #### Ejercicio A la vista de los histogramas anteriores, ¿cómo de separable crees que es el problema? #### Solución Los histogramas para cada una de las clases se solapan considerablemente. Parece que las clases serán más difíciles de separar que en el notebook 05. ### Preparación de los conjuntos de datos Construyamos nuestro conjunto de test y de entrenamiento simulando una situación real. ``` from sklearn.model_selection import train_test_split # preparamos los datos features = data.columns.drop(['Outcome']) X = data[features].values y = data['Outcome'].values X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y, shuffle = True, test_size = 0.2, random_state=0) print('Datos train: ', X_train.shape) print('Datos test: ', X_test.shape) print(np.unique(y_train,return_counts=True)) print(np.unique(y_test, return_counts=True)) ``` ### Decision trees Probemos primero con un árbol de decisión sencillo #### Ejercicio Entrena un árbol de decisión sencillo, y muestra las prestaciones para el conjunto de test. 
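Como referencia adicional (no incluida en el notebook original), conviene conocer la tasa de acierto de un clasificador trivial que siempre predice la clase mayoritaria: cualquier modelo posterior debería superar con claridad ese valor. El siguiente esbozo reutiliza el DataFrame `data` cargado arriba.

```
# Proporción de cada clase y accuracy de un clasificador trivial (clase mayoritaria)
print(data['Outcome'].value_counts(normalize=True))

from sklearn.dummy import DummyClassifier
from sklearn.model_selection import cross_val_score

dummy = DummyClassifier(strategy='most_frequent')
baseline = cross_val_score(dummy, data.drop(columns=['Outcome']), data['Outcome'], cv=5).mean()
print('Accuracy de referencia (clase mayoritaria): %.3f' % baseline)
```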
#### Solución ``` from sklearn.model_selection import GridSearchCV from sklearn.tree import DecisionTreeClassifier max_depth = range(1, 15) param_grid = {'max_depth': max_depth} n_folds = 5 clf = DecisionTreeClassifier(random_state=0) grid = GridSearchCV(clf, param_grid=param_grid, cv=n_folds, return_train_score=True) grid.fit(X_train, y_train) print("best mean cross-validation score: {:.3f}".format(grid.best_score_)) print("best parameters: {}".format(grid.best_params_)) scores_test = np.array(grid.cv_results_['mean_test_score']) scores_train = np.array(grid.cv_results_['mean_train_score']) plt.plot(max_depth, scores_test, '-o', label='Validación') plt.plot(max_depth, scores_train, '-o', label='Entrenamiento') plt.xlabel('max_depth', fontsize=16) plt.ylabel('{}-fold accuracy'.format(n_folds)) plt.legend(loc='best') plt.show() best_max_depth = grid.best_params_['max_depth'] tree_model = DecisionTreeClassifier(max_depth=best_max_depth) tree_model.fit(X_train,y_train) print("Train: ", tree_model.score(X_train, y_train)) print("Test: ", tree_model.score(X_test, y_test)) ``` #### Ejercicio Representa el árbol entrenado #### Solución Pintemos el árbol, a ver qué variables nos salen más relevantes: ``` from sklearn.tree import export_graphviz import graphviz from sklearn.tree import export_graphviz import graphviz tree_dot = export_graphviz(tree_model, out_file=None, feature_names=features, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(tree_dot) graph ``` Parece un problema complicado de clasificar, porque la profundidad del árbol óptima es 2. El algoritmo está bien entrenado porque la diferencia entre train y test es pequeña. Si queremos mejorar prestaciones tendremos que acudir a un modelo más complejo. ### Boosted Trees El entrenamiento del algoritmo de boosting requiere fijar tres parámetros libres: - Número de iteraciones. - Tasa de aprendizaje ($\alpha$). - Los parámetros propios del árbol: habitualmente su complejidad controlada por el parámetro `max_depth`. Se podría hacer una búsqueda sobre los tres parámetros conjuntamente mediante `GridSearchCV`, sin embargo, es muy costoso computacionalmente, con lo que es más sencillo aplicar una optimización secuencial: se prueban distintos valores de los parámetros libres, se fijan los óptimos y se busca sobre el resto. 
``` from sklearn.ensemble import GradientBoostingClassifier Niterations = [25, 50, 75, 100, 125, 150, 175, 200, 300] learningRate = [0.5, 0.1, 0.05, 0.01] # max_depth = [2, 3, 4, 5] # mantenemos max_depth estático: max_depth=2 n_folds = 5 param_grid = {'n_estimators': Niterations, 'learning_rate': learningRate} clf = GradientBoostingClassifier(random_state=0, max_depth=2) grid = GridSearchCV(clf, param_grid=param_grid, cv=n_folds) grid.fit(X_train, y_train) print("best mean cross-validation score: {:.3f}".format(grid.best_score_)) print("best parameters: {}".format(grid.best_params_)) ``` Representemos el error que estamos cometiendo para los distintos valores de los parámetros libres ``` bt = GradientBoostingClassifier(random_state=0, max_depth=2, **grid.best_params_) bt.fit(X_train, y_train) error = 1-grid.cv_results_['mean_test_score'].reshape(len(learningRate), len(Niterations)) colors = ['r', 'b', 'g', 'k', 'm'] for i, lr in enumerate(learningRate): plt.plot(Niterations, error[i,:], colors[i] + '--o', label='lr = %g'%lr) plt.legend() plt.xlabel('# iteraciones') plt.ylabel('{}-fold CV Error'.format(n_folds)) plt.title('train: %0.3f\ntest: %0.3f'%(bt.score(X_train, y_train), bt.score(X_test, y_test))) plt.grid() plt.show() ``` Las prestaciones no son mucho mejores que con respecto a un árbol sencillo. Como el coste de entrenamiento de este conjunto no es muy grande, replicaremos el análisis anterior aumentando la complejidad de los árboles. ``` Niterations = [25, 50, 75, 100, 125, 150, 175, 200, 300] learningRate = [0.5, 0.1, 0.05, 0.01] # mantenemos max_depth estático: max_depth=3 n_folds = 5 param_grid = {'n_estimators': Niterations, 'learning_rate': learningRate} clf = GradientBoostingClassifier(random_state=0, max_depth=n_folds) grid = GridSearchCV(clf, param_grid=param_grid, cv = 3) grid.fit(X_train, y_train) print("best mean cross-validation score: {:.3f}".format(grid.best_score_)) print("best parameters: {}".format(grid.best_params_)) bt = GradientBoostingClassifier(random_state=0, max_depth=3, **grid.best_params_) bt.fit(X_train, y_train) error = 1-grid.cv_results_['mean_test_score'].reshape(len(learningRate), len(Niterations)) colors = ['r', 'b', 'g', 'k', 'm'] for i,lr in enumerate(learningRate): plt.plot(Niterations, error[i,:], colors[i] + '--o', label='lr = %g'%lr) plt.legend() plt.xlabel('# iteraciones') plt.ylabel('{}-fold CV Error'.format(n_folds)) plt.title('train: %0.3f\ntest: %0.3f' % (bt.score(X_train, y_train), bt.score(X_test, y_test))) plt.grid() plt.show() ``` En general, cuanto más complejo es el problema, menor es la tasa de aprendizaje requerida y mayor el número de iteraciones que necesita el algoritmo. Parece que podemos ir un poco más allá, disminuyamos un poco más la tasa de aprendizaje. 
```
Niterations = [25, 50, 75, 100, 125, 150, 175, 200, 300]
learningRate = [0.5, 0.1, 0.05, 0.01, 0.005]
# keep max_depth fixed: max_depth=3
n_folds = 5

param_grid = {'n_estimators': Niterations, 'learning_rate': learningRate}

clf = GradientBoostingClassifier(random_state=0, max_depth=3)
grid = GridSearchCV(clf, param_grid=param_grid, cv=n_folds)
grid.fit(X_train, y_train)

print("best mean cross-validation score: {:.3f}".format(grid.best_score_))
print("best parameters: {}".format(grid.best_params_))

bt = GradientBoostingClassifier(random_state=0, max_depth=3, **grid.best_params_)
bt.fit(X_train, y_train)

error = 1-grid.cv_results_['mean_test_score'].reshape(len(learningRate), len(Niterations))

colors = ['r', 'b', 'g', 'k', 'm']
for i, lr in enumerate(learningRate):
    plt.plot(Niterations, error[i,:], colors[i] + '--o', label='lr = %g'%lr)
plt.legend()
plt.xlabel('# iteraciones')
plt.ylabel('{}-fold CV Error'.format(n_folds))
plt.title('train: %0.3f\ntest: %0.3f' % (bt.score(X_train, y_train), bt.score(X_test, y_test)))
plt.grid()
plt.show()
```

#### Exercise

Plot the feature importances.

#### Solution

```
importances = bt.feature_importances_
importances = importances / np.max(importances)
indices = np.argsort(importances)[::-1]

plt.figure(figsize=(10,10))
plt.barh(range(X_train.shape[1]), importances[indices])
plt.yticks(range(X_train.shape[1]), features[indices])
plt.show()
```

We can also plot how the target variable depends on each feature.

```
# note: in recent scikit-learn versions, partial dependence plotting lives in sklearn.inspection
from sklearn.ensemble.partial_dependence import plot_partial_dependence

fig, ax = plot_partial_dependence(bt, X_train, indices, feature_names=features, percentiles=(0.0, 1.0), n_cols=4)
fig.set_size_inches(15, 10)
plt.show()
```

## 2. A regression problem

Let's load the `kc_house_data` dataset:

```
# load the data
house_data = pd.read_csv("./data/kc_house_data.csv")

# drop the id and date columns
house_data = house_data.drop(['id','date'], axis=1)

# convert the square-feet variables into square meters
feetFeatures = ['sqft_living','sqft_lot','sqft_above','sqft_basement','sqft_living15','sqft_lot15']
house_data[feetFeatures] = house_data[feetFeatures].apply(lambda x: x * 0.3048 * 0.3048)

# rename the columns
house_data.columns = ['price','bedrooms','bathrooms','sqm_living','sqm_lot','floors','waterfront','view','condition',
                      'grade','sqm_above','sqm_basement','yr_built','yr_renovated','zip_code','lat','long',
                      'sqm_living15','sqm_lot15']

# add new variables
house_data['years'] = pd.Timestamp('today').year - house_data['yr_built']
#house_data['bedrooms_squared'] = house_data['bedrooms'].apply(lambda x: x**2)
#house_data['bed_bath_rooms'] = house_data['bedrooms']*house_data['bathrooms']
house_data['sqm_living'] = house_data['sqm_living'].apply(lambda x: np.log(x))
house_data['price'] = house_data['price'].apply(lambda x: np.log(x))
#house_data['lat_plus_long'] = house_data['lat']*house_data['long']

# convert the DataFrame to the format scikit-learn expects
data = house_data.values

y = data[:,0:1]   # first column, price
X = data[:,1:]    # the remaining columns
feature_names = house_data.columns[1:]

# split the data into training (75%) and test (25%)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state = 2)

print('Datos entrenamiento: ', X_train.shape)
print('Datos test: ', X_test.shape)
```

Since this problem has many more samples, and to avoid making the training process too long, we will pick the free parameters in a fairly greedy, sequential way.
WARNING: this step can take a while ...

```
from sklearn.ensemble import GradientBoostingRegressor

Niterations = [500, 1000, 1500, 2000]
learningRate = [0.1, 0.05]
# keep max_depth fixed: max_depth=3
n_folds = 5

param_grid = {'n_estimators': Niterations, 'learning_rate': learningRate}

clf = GradientBoostingRegressor(random_state=0, max_depth=3)
grid = GridSearchCV(clf, param_grid=param_grid, cv=n_folds)
grid.fit(X_train, y_train)

print("best mean cross-validation score: {:.3f}".format(grid.best_score_))
print("best parameters: {}".format(grid.best_params_))

bt = GradientBoostingRegressor(random_state=0, max_depth=3, **grid.best_params_)
bt.fit(X_train, y_train)

error = 1-grid.cv_results_['mean_test_score'].reshape(len(learningRate), len(Niterations))

colors = ['r', 'b', 'g', 'k', 'm']
for i, lr in enumerate(learningRate):
    plt.plot(Niterations, error[i,:], colors[i] + '--o', label='lr = %g' % lr)
plt.legend()
plt.xlabel('# iteraciones')
plt.ylabel('{}-fold CV Error'.format(n_folds))
plt.title('train: %0.3f\ntest: %0.3f' % (bt.score(X_train, y_train), bt.score(X_test, y_test)))
plt.grid()
plt.show()
```

### Importance and dependence

```
importances = bt.feature_importances_
importances = importances / np.max(importances)
indices = np.argsort(importances)[::-1]

plt.figure(figsize=(10,10))
plt.barh(range(X_train.shape[1]), importances[indices])
plt.yticks(range(X_train.shape[1]), feature_names[indices])
plt.show()
```

Finally, let's draw the so-called *partial dependence plots*:

```
# note: in recent scikit-learn versions, partial dependence plotting lives in sklearn.inspection
from sklearn.ensemble.partial_dependence import plot_partial_dependence

fig, ax = plot_partial_dependence(bt, X_train, indices, feature_names=feature_names, percentiles=(0.0, 1.0), n_cols=5)
fig.set_size_inches(20, 20)
plt.show()
```

## Next steps

What else could we do? Tune more carefully all the parameters that control the complexity of the trees and the *boosting* process. This would surely require a lot of compute time, and you have to know how to do it and which algorithms to use. This is where the art of *machine learning* lies:

1. Use more efficient *boosted trees* libraries such as [xgBoost](https://xgboost.readthedocs.io/en/latest/), [LightGBM](https://lightgbm.readthedocs.io/en/latest/) or [catBoost](https://catboost.ai/) (see the sketch below).
2. Understand the nature of the problem (the business), in order to engineer new *features* that are relevant to the goal you are trying to achieve.
3. Try to get more examples.
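As a pointer for the first item, here is a minimal sketch of what the last search could look like with XGBoost's scikit-learn wrapper. It assumes `xgboost` is installed and reuses `X_train`, `y_train`, `X_test` and `y_test` from the regression problem above; the grid values are purely illustrative and not tuned.

```
# Sketch only: same idea as the GradientBoostingRegressor search above, using XGBoost.
from xgboost import XGBRegressor
from sklearn.model_selection import GridSearchCV

param_grid = {
    'n_estimators': [500, 1000, 2000],   # illustrative values
    'learning_rate': [0.1, 0.05],
    'max_depth': [3],
}

xgb = XGBRegressor(random_state=0)
grid = GridSearchCV(xgb, param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)

print("best mean cross-validation score: {:.3f}".format(grid.best_score_))
print("best parameters: {}".format(grid.best_params_))
print("test R^2: {:.3f}".format(grid.best_estimator_.score(X_test, y_test)))
```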
```
import pandas as pd
import numpy as np
import datetime
%matplotlib inline

import psycopg2
from sqlalchemy import create_engine

import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.ticker as ticker
import matplotlib.mlab as mlab
import matplotlib.gridspec as gridspec
import matplotlib.lines as mlines
import matplotlib.transforms as mtransforms
import seaborn as sns
plt.style.use('seaborn-whitegrid')

from scipy import stats

# Hide warning messages in notebook
import warnings
warnings.filterwarnings("ignore")
```

## DATA SHOWING AVERAGE LAND TEMPERATURES AND AVERAGE EARTHQUAKE MAGNITUDES FOR COUNTRIES TURKEY, IRAN, GREECE AND PAKISTAN DURING YEARS 2000 - 2013

## LAND TEMPERATURE DATA

```
# Import GlobalLandTemperatures_ByCountry.csv file
raw_Global_data = pd.read_csv(r'../csv-files/output_data/GlobalLandTemperatures.csv', error_bad_lines=False)
raw_Global_data.head()

# YEARS 2000 - 2013
GlobalLand_data = raw_Global_data[(raw_Global_data['dt'] > '1999-12-31')]
GlobalLand_data['Year'] = pd.DatetimeIndex(GlobalLand_data['dt']).year
GlobalLand_data.head()

GlobalLand_data.reset_index(inplace = True)
GlobalLand_data.head()

GlobalLand_renamed = GlobalLand_data.rename(columns={"index": "index_la"})
GlobalLand_renamed.head()

GlobalLand_clean = GlobalLand_renamed.drop(["AverageTemperatureUncertainty","index_la","dt"], axis=1)
GlobalLand_clean.head()

GlobalLand_clean['Country'] = GlobalLand_clean['Country'].str.upper()
GlobalLand_clean['Country'] = GlobalLand_clean['Country'].replace({'UNITED STATES': 'USA'})

Global_Temp = GlobalLand_clean.dropna(subset = ["Country", "AverageTemperature"])
Global_Temp = Global_Temp.set_index('Country')

GlobalLand_bycountry = Global_Temp.loc[["TURKEY", "IRAN", "GREECE", "PAKISTAN"]]
GlobalLand_bycountry.head()
```

### CLEAN LAND TEMPERATURE DATAFRAME

```
# MEAN LAND TEMPERATURE FOR COUNTRIES TURKEY, IRAN, GREECE, PAKISTAN
# GROUPED BY YEAR
Global_Temp_final = GlobalLand_bycountry.groupby(['Year','Country']).mean()
Global_final = pd.DataFrame(Global_Temp_final)
Global_final.head()

# convert Celsius to Fahrenheit
def f(x):
    x = x * 1.8 + 32
    return float(x)

Global_final['AverageTemperature'] = Global_final['AverageTemperature'].apply(f)
Global_final.head()
```

## EARTHQUAKE DATA

```
# Import earthquakes.csv file
raw_Earthquakes_data = pd.read_csv(r'../csv-files/output_data/Earthquakes.csv', error_bad_lines=False)
raw_Earthquakes_data.head()

raw_Earthquakes_data['Year'] = pd.DatetimeIndex(raw_Earthquakes_data['date']).year
raw_Earthquakes_data.head()

# YEARS 2000 - 2013
Earthquakes_data = raw_Earthquakes_data[(raw_Earthquakes_data['Year'] < 2014)]
Earthquakes_data.head()

Earthquakes_data.reset_index(inplace = True)
Earthquakes_data.head()

Earthquakes_renamed = Earthquakes_data.rename(columns={"index": "index_la", "mag": "AverageMagnitude", "country": "Country"})
Earthquakes_renamed.head()

Earthquakes_clean = Earthquakes_renamed.drop(["index_la", "date", "latitude", "longitude", "depth", "net", "updated", "place", "depthError"], axis=1)
Earthquakes_clean.head()

Earthquakes_clean['Country'] = Earthquakes_clean['Country'].str.upper()
Earthquakes_clean['Country'] = Earthquakes_clean['Country'].replace({'US': 'USA'})
Earthquakes_clean.head()

Earthquakes_Mag = Earthquakes_clean.dropna(subset = ["Country", "AverageMagnitude"])
Earthquakes_Mag.head()

Earthquakes_Mag = Earthquakes_Mag.set_index('Country')
Earthquakes_Mag.head()

Earthquakes_bycountry = Earthquakes_Mag.loc[["TURKEY", "IRAN", "GREECE", "PAKISTAN"]]
Earthquakes_bycountry.head()
```

### CLEAN EARTHQUAKE MAGNITUDE DATAFRAME

```
# MEAN EARTHQUAKE MAGNITUDES FOR COUNTRIES TURKEY, IRAN, GREECE, PAKISTAN
# GROUPED BY YEAR
Earthquakes_final = Earthquakes_bycountry.groupby(['Year','Country']).mean()
Earthquakes_final = pd.DataFrame(Earthquakes_final)
Earthquakes_final.head()
```

## FINAL DATAFRAME

```
# MERGED DATAFRAME CONTAINING AVERAGE LAND TEMPERATURES AND AVERAGE EARTHQUAKE MAGNITUDE VALUES PER YEAR
# FOR COUNTRIES TURKEY, IRAN, GREECE AND PAKISTAN
# DURING YEARS 2000-2013
Earthquake_Temp = pd.merge(Earthquakes_final, Global_final, on=["Country", "Year"])
#Earthquake_Temp.reset_index(inplace=True)
Earthquake_Temp.head()
```

## CSV FILES

```
Earthquake_Temp.to_csv(r'../csv-files/output_data/Earthquake_Temp0.csv')

EarthTemp_TURKEY = Earthquake_Temp.loc[["TURKEY"]]
EarthTemp_TURKEY.to_csv(r'../csv-files/output_data/Earth_Turkey1.csv')
EarthTemp_TURKEY

EarthTemp_IRAN = Earthquake_Temp.loc[["IRAN"]]
EarthTemp_IRAN.to_csv(r'../csv-files/output_data/Earth_Iran2.csv')
EarthTemp_IRAN

EarthTemp_GREECE = Earthquake_Temp.loc[["GREECE"]]
EarthTemp_GREECE.to_csv(r'../csv-files/output_data/Earth_Greece3.csv')
EarthTemp_GREECE

EarthTemp_PAKISTAN = Earthquake_Temp.loc[["PAKISTAN"]]
EarthTemp_PAKISTAN.to_csv(r'../csv-files/output_data/Earth_Pakistan4.csv')
EarthTemp_PAKISTAN
```
# Churn Clustering

```
import sys
sys.path.append("..")

from pyspark.sql import DataFrameReader
from pyspark.sql import SparkSession
from pyspark.ml.feature import IndexToString, Normalizer, StringIndexer, VectorAssembler, VectorIndexer
from pyspark.ml.clustering import KMeans, BisectingKMeans
from pyspark.ml.evaluation import ClusteringEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml import Pipeline

from helpers.path_translation import translate_to_file_string
```

## Select the churn file

```
inputFile = translate_to_file_string("../data/churn.csv")
```

## Create the Spark Session

```
# create a SparkSession
spark = (SparkSession
         .builder
         .appName("ChurnDecisionTree")
         .getOrCreate())

# create a DataFrame using an inferred schema
df = spark.read.option("header", "true") \
               .option("inferSchema", "true") \
               .option("delimiter", ";") \
               .csv(inputFile)
```

## Data Preparation

### Transform labels into indices

```
labelIndexer = StringIndexer().setInputCol("LEAVE").setOutputCol("label").fit(df)
collegeIndexer = StringIndexer().setInputCol("COLLEGE").setOutputCol("COLLEGE_NUM").fit(df)
satIndexer = StringIndexer().setInputCol("REPORTED_SATISFACTION").setOutputCol("REPORTED_SATISFACTION_NUM").fit(df)
usageIndexer = StringIndexer().setInputCol("REPORTED_USAGE_LEVEL").setOutputCol("REPORTED_USAGE_LEVEL_NUM").fit(df)
changeIndexer = StringIndexer().setInputCol("CONSIDERING_CHANGE_OF_PLAN").setOutputCol("CONSIDERING_CHANGE_OF_PLAN_NUM").fit(df)
```

### Build the feature vector

```
featureCols = df.columns.copy()
featureCols.remove("LEAVE")
featureCols.remove("COLLEGE")
featureCols.remove("REPORTED_SATISFACTION")
featureCols.remove("REPORTED_USAGE_LEVEL")
featureCols.remove("CONSIDERING_CHANGE_OF_PLAN")
featureCols = featureCols + ["COLLEGE_NUM","REPORTED_SATISFACTION_NUM","REPORTED_USAGE_LEVEL_NUM","CONSIDERING_CHANGE_OF_PLAN_NUM"]
```

### Build the feature Vector Assembler

```
assembler = VectorAssembler(outputCol="features", inputCols=list(featureCols))
```

### Build a featureIndexer

Automatically identify categorical features and index them. Features with > 5 distinct values are treated as continuous.

```
featureIndexer = VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=6)
```

## Build KMeans

```
km = KMeans(seed=12345, featuresCol="indexedFeatures", predictionCol="prediction")
```

### Build a parameter grid

```
paramGrid = ParamGridBuilder().addGrid(km.k, [2, 3, 4, 5, 6, 7, 8, 9, 10]) \
                              .addGrid(km.maxIter, [10, 100]) \
                              .build()
```

### Build a pipeline

```
pipeline = Pipeline(stages = [labelIndexer, collegeIndexer, satIndexer, usageIndexer, changeIndexer, assembler, featureIndexer, km])
```

## Build an evaluator

```
evaluator = ClusteringEvaluator()
```

## Build the Cross Validator

```
cv = CrossValidator(estimator=pipeline, evaluator=evaluator, estimatorParamMaps=paramGrid, numFolds=2, parallelism=2)
```

## Train the Model

```
cvModel = cv.fit(df)
```

## Find the best model

```
kmModel = cvModel.bestModel.stages[7]
print(kmModel.explainParams())

centers = kmModel.clusterCenters()
print("Cluster Centers: ")
for center in centers:
    print(center)
```

## Test the model

```
predictions = cvModel.transform(df)
silhouette = evaluator.evaluate(predictions)
print("Silhouette with squared euclidean distance = ", silhouette)
```

## Bisecting k-means

Build the Bisecting KMeans

```
bkm = BisectingKMeans(k=2, seed=12345, featuresCol="indexedFeatures", predictionCol="prediction")
```

Param grid for the bisecting variant

```
paramGridBkm = ParamGridBuilder().addGrid(bkm.k, [2, 3, 4, 5, 6, 7, 8, 9, 10]) \
                                 .addGrid(bkm.maxIter, [10, 100]) \
                                 .build()
```

Pipeline for the bisecting variant

```
pipelineBkm = Pipeline(stages = [labelIndexer, collegeIndexer, satIndexer, usageIndexer, changeIndexer, assembler, featureIndexer, bkm])
```

Build the cross validation

```
cvbkm = CrossValidator(estimator=pipelineBkm, evaluator=evaluator, estimatorParamMaps=paramGridBkm, numFolds=2, parallelism=2)
```

## Train the Model

```
cvModebkml = cvbkm.fit(df)
```

### Find the best model

```
kmModelbkm = cvModebkml.bestModel.stages[7]
print(kmModelbkm.explainParams())

centers = kmModelbkm.clusterCenters()
print("Cluster Centers: ")
for center in centers:
    print(center)
```

### Test the model

```
predictions = cvModebkml.transform(df)
silhouette = evaluator.evaluate(predictions)
print("Silhouette with squared euclidean distance = ", silhouette)

spark.stop()
```
## ROADMAP FOR MULTI-CLASS SENTIMENT ANALYSIS WITH DEEP LEARNING

### A practical guide to creating increasingly accurate models

<br>

(This blog assumes some familiarity with deep learning)

Sentiment analysis quickly gets difficult as we increase the number of classes. For this blog, we'll have a look at what difficulties you might face and how to get around them when you try to solve such a problem. Instead of prioritizing theoretical rigor, I'll focus on how to practically apply some ideas on a toy dataset and how to edge yourself out of a rut. I'll be using **Keras** throughout.

As a disclaimer, I'd say it's unwise to throw the most powerful model at your problem at first glance. Traditional natural language processing methods work surprisingly well on most problems, and your initial analysis of the dataset can be built upon with deep learning. However, this blog aims to be a refresher for deep learning techniques _exclusively_ and an implementational baseline or a general flowchart for hackathons or competitions. Theory throughout this post will either be oversimplified or absent, to avoid losing the attention of the casual reader.

## The problem
---
We'll analyze a fairly simple dataset I recently came across, which can be downloaded [from here](https://github.com/ad71/multi-class-sentiment-analysis/blob/master/data/data.zip). <br>
About 50 thousand people were asked to respond to a single question,

>"What is one recent incident that made you happy?".

Their responses were tabulated and their reason of happiness was categorized into seven broad classes like 'affection', 'bonding', 'leisure', etc. Additionally, we also know whether the incident happened within 24 hours of the interview or not. <br>
This problem is quite different from your regular positive-negative classification because even though there are seven classes, all the responses are inherently happy and differentiating between them might be quite difficult even for humans. <br>
Before we start, [this is where](https://github.com/ad71/multi-class-sentiment-analysis) you'll find the complete notebook for this blog as well as all the discussed architectures in separate files if you want to tinker with them yourself. You are free to use whatever you find there, however you like, no strings attached.
```
import numpy as np
import pandas as pd

import nltk
import gensim
from gensim.models.doc2vec import TaggedDocument
from gensim.models.word2vec import Word2Vec
from gensim.scripts.glove2word2vec import glove2word2vec

from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.utils import class_weight
from sklearn.preprocessing import scale
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer

import tensorflow as tf
import tensorflow_hub as hub

from keras import backend as K
from keras.engine import Layer
from keras.models import Sequential, Model, load_model
from keras.layers import Input, Dense, LSTM, GRU, LeakyReLU, Dropout
from keras.layers import CuDNNLSTM, CuDNNGRU, Embedding, Bidirectional
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from keras.optimizers import Adam
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

import bokeh.plotting as bp
from bokeh.models import HoverTool, BoxSelectTool
from bokeh.plotting import figure, show, output_notebook

import matplotlib.pyplot as plt
%matplotlib inline
```

## The dataset
---
Let's see what we're working with.

```
df = pd.read_csv('D:/Datasets/mc-sent/p_train.csv', low_memory=False)
df.head()
```

Here's what each column means:

- `id` is just a unique id for each sentence
- `period` is the period during which the interviewee had their experience, which can be either during the last 24 hours (`24h`) or the last 3 months (`3m`)
- `response` is the response of the interviewee and the most important independent variable
- `n` is the number of sentences in the response, and
- `sentiment` is our target variable

```
labels = df[['id', 'sentiment']]
classes = sorted(labels.sentiment.unique())
classes
```

## Preprocessing
---
To keep the first model simple, we'll go ahead and drop the `n` column. We'll see soon that it doesn't matter anyway. <br>
We'll also drop the `id` column because that's just a random number <br>
<br>
...or is it? <br>
(cue vsauce music) <br>
<br>
Assuming anything about the data beforehand will almost always mislead our model. For example, it might be possible that while collecting the data, the ids were assigned serially and it just so happened that every fifth observation was taken in a park full of people, where the predominant cause of happiness was `exercise` or `nature`. This is probably useless in the real world, but insights like these might win you a hackathon. We'll keep it to track if our shuffles are working correctly, but we won't be using it for training our models. <br>
And we'll obviously drop the `sentiment` column as it is the target variable.

```
df.drop(['n', 'sentiment'], axis=1, inplace=True)
```

Usually with these problems, the classes are not always balanced, but we'll worry about that later. First, we want to get a simple model up and running to compare our future models with. <br>
Let's quickly convert our categories into one-hot arrays before proceeding further.

```
label_to_cat = dict()
for i in range(len(classes)):
    dummy = np.zeros((len(classes),), dtype='int8')
    dummy[i] = 1
    label_to_cat[classes[i]] = dummy

cat_to_label = dict()
for k, v in label_to_cat.items():
    cat_to_label[tuple(v)] = k

y = np.array([label_to_cat[label] for label in labels.sentiment])
y[:5]
```

Converting the response column to lowercase.
```
df.response = df.response.apply(str.lower)
df.head()
```

All the steps up to here are dataset-independent. We would have to go through the same preprocessing steps for our test set as well as all the other models we'll try, regardless of architecture. <br>
<br>

## Postprocessing
---
Our first few models will follow the traditional approach of doing a lot of work ourselves and gradually move on to higher and higher levels of abstraction. However, the _preprocessing_ step will be common across all pipelines. <br>
<br>
Neural networks cannot process strings, let alone strings of arbitrary size, so we first split them at punctuations and spaces after lowercasing the sentence. This is called tokenization (well... it's a bit more complicated than what I just said). <br>
We'll use the `word_tokenize` function from `nltk` to help us with this.

```
def tokenize(df):
    df['tokens'] = df['response'].map(lambda x: nltk.word_tokenize(x))

tokenize(df)
df.head()
```

Stopwords are words that appear way too frequently in the English language to be actually meaningful, like 'a', 'an', 'the', 'there', etc. `nltk.corpus` has a handy `stopwords` function that enumerates these. We could do a stopword removal pass during tokenization, but I decided against it as it might affect the context. The stopword corpus includes 'not', a negation that can flip the emotion of the passage. Moreover, phrases like 'To be or not to be' would be entirely removed. We could make our own corpus of stopwords, but the performance would hardly improve as our dataset is pretty small already. So we drop the idea and move on.

Once we have the tokens, we don't need the original responses, because our model can't make any sense of them anyway.

```
df.drop(['response'], axis=1, inplace=True)
```

It's a great time now to separate a part of the training set into the validation set, to make sure we aren't cheating. As the data is unstructured, a random shuffle will work just fine.

```
df_train, df_val, y_train, y_val = train_test_split(df, y, test_size=0.15, random_state=42)
```

Remove the random-seed parameter if you want a new permutation every run.

```
print(df_train.shape, y_train.shape)
print(df_val.shape, y_val.shape)
```

## Embeddings
---
There is just one more problem. Neural networks work on strictly numerical data and still can't make sense of the tokens in our dataset. We need to find a way to represent each word as a vector, somehow. <br>
Let's take a little detour. <br>
Suppose we want to differentiate between _pop_ and _metal_. What are some properties we can use to describe these genres? <br>
Let's use percussion, electric guitar, acoustic guitar, synth, happiness, sadness, anger and complexity as the features to describe each genre. The vector for _pop_ might look something like <br>

$$ (0.5\ \ 0.2\ \ 0.5\ \ 1.0\ \ 0.8\ \ 0.5\ \ 0.2\ \ 0.3) $$

and the one for _metal_ might look like <br>

$$ (0.9\ \ 0.9\ \ 0.3\ \ 0.1\ \ 0.4\ \ 0.5\ \ 0.8\ \ 0.7) $$

So if we want to classify _heavy-metal_, its vector might be <br>

$$ (1.0\ \ 1.0\ \ 0.0\ \ 0.1\ \ 0.1\ \ 0.5\ \ 1.0\ \ 0.9) $$

These vectors can be plotted in an 8-dimensional space, and the euclidean distance (`np.linalg.norm`) between _metal_ and _heavy-metal_ (0.529) will be smaller than the euclidean distance between _pop_ and _metal_ (1.476), for example. <br>
Similarly, we can encode every single word in our corpus in some way, to form a vector. We have algorithms that can train a model to generate an n-dimensional vector for each word.
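As a quick sanity check on those numbers, here is a tiny NumPy sketch; the vectors are the made-up genre vectors from this section, not anything learned from data.

```
import numpy as np

# made-up genre vectors from the paragraph above
pop = np.array([0.5, 0.2, 0.5, 1.0, 0.8, 0.5, 0.2, 0.3])
metal = np.array([0.9, 0.9, 0.3, 0.1, 0.4, 0.5, 0.8, 0.7])
heavy_metal = np.array([1.0, 1.0, 0.0, 0.1, 0.1, 0.5, 1.0, 0.9])

print(np.linalg.norm(metal - heavy_metal))  # ~0.529, close together
print(np.linalg.norm(pop - metal))          # ~1.476, far apart
```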
We have no way of interpreting (that I know of) what features were selected or what the numbers in the vectors actually mean, but we'll see that they work anyway and similar words huddle up together. <br>
`gensim` provides a handy tool that can train a set of embeddings according to your corpus, but we have to 'Tag' them first, as the model accepts a vector of `TaggedDocument` objects.

```
def tag_sentences(sentences, label):
    tagged = []
    for index, sentence in enumerate(sentences):
        tag = f'{label}_{index}'    # build the tag from the prefix and the index
        tagged.append(TaggedDocument(sentence, [tag]))
    return tagged

vector_train_corpus = tag_sentences(df_train.tokens, 'TRAIN')
vector_val_corpus = tag_sentences(df_val.tokens, 'TEST')
```

A tagged vector looks like this.

```
vector_train_corpus[1]
```

The `Word2Vec` module can train a dictionary of embeddings, given a vector of `TaggedDocument` objects.

```
# note: in gensim >= 4.0 the argument is called vector_size instead of size
embeddings = Word2Vec(size=200, min_count=3)
embeddings.build_vocab([sentence.words for sentence in vector_train_corpus])
embeddings.train([sentence.words for sentence in vector_train_corpus],
                 total_examples=embeddings.corpus_count,
                 epochs=embeddings.epochs)
```

Let's see if our embeddings are any good.

```
embeddings.wv.most_similar('exercise')
```

We learnt some good correlations to 'exercise' like 'cardio' and 'workout', but the rest aren't good enough. Anyway, this will do for now.

### Visualizing the embeddings

We cannot directly visualize high-dimensional data. To see if our embeddings actually carry useful information, we need to reduce the dimensionality to 2 somehow. There are two extremely useful techniques, **PCA** (principal component analysis) and **t-SNE** (t-distributed stochastic neighbor embedding), that do just this: flatten high-dimensional data into the best possible representation in the specified number of lower dimensions. <br>
t-SNE is a probabilistic method and takes a while to run, but we'll try both methods for the 2000 most common words in our embeddings.

### PCA

```
vectors = [embeddings[word] for word in list(embeddings.wv.vocab.keys())[:2000]]

pca = PCA(n_components=2, random_state=42)
pca_vectors = pca.fit_transform(vectors)

reduced_df = pd.DataFrame(pca_vectors, columns=['dim_1', 'dim_2'])
reduced_df['words'] = list(embeddings.wv.vocab.keys())[:2000]
```

Bokeh is an extremely useful library for interactive plots which has flown under the radar of quite a lot of people for a long time.

```
output_notebook()

b_figure = bp.figure(plot_width=700, plot_height=600, tools='pan, wheel_zoom, box_zoom, reset, hover, previewsave')
b_figure.scatter(x='dim_1', y='dim_2', source=reduced_df)

hovertool = b_figure.select(dict(type=HoverTool))
hovertool.tooltips = {'word': '@words'}
show(b_figure)
```

### t-SNE

```
tsne = TSNE(n_components=2, n_iter=300, verbose=1, random_state=42)
tsne_vectors = tsne.fit_transform(vectors)

reduced_df = pd.DataFrame(tsne_vectors, columns=['dim_1', 'dim_2'])
reduced_df['words'] = list(embeddings.wv.vocab.keys())[:2000]

output_notebook()

b_figure = bp.figure(plot_width=700, plot_height=600, tools='pan, wheel_zoom, box_zoom, reset, hover, previewsave')
b_figure.scatter(x='dim_1', y='dim_2', source=reduced_df)

hovertool = b_figure.select(dict(type=HoverTool))
hovertool.tooltips = {'word': '@words'}
show(b_figure)
```

t-SNE usually does a better job showing more separated clusters, while PCA just bunched everything up in the middle in this example. However, performance is dataset dependent and it never hurts to try both.
## Dense networks
---
For our first model, we'll try a very common approach to binary sentiment classification, for which we first need to calculate the `Tf-Idf` score of each word in our corpus.

Tf-idf stands for 'term frequency - inverse document frequency'. If you haven't heard of it, all it does is assign a weight to each word based on the frequency of its appearance in a corpus. Words that appear often, like 'the', 'when' and 'very', will have a low score, and the rarer ones, like 'tremendous', 'undergraduate' and 'publication', which might actually help us classify a sentence, will have a higher score. This is a simple heuristic in order to better understand our data. It is corpus specific and we can train one for the embedding vectors we generated.

The `TfidfVectorizer` class from `sklearn` makes quick work of it and we can fit one to our vectors as follows.

```
gen_tfidf = TfidfVectorizer(analyzer=lambda x: x, min_df=3)
matrix = gen_tfidf.fit_transform([sentence.words for sentence in vector_train_corpus])
tfidf_map = dict(zip(gen_tfidf.get_feature_names(), gen_tfidf.idf_))
len(tfidf_map)
```

The `min_df` parameter is a threshold for the minimum frequency. In this case, we do not want to track the `tf-idf` score of a word that appears less than three times in our corpus. <br>
<br>
Now, for every `response` object, we will create a vector of size 200 (the same dimension as our embedding vectors). This is our sentence-level embedding. We will take the average of the embedding vectors of each token in each response and weight it by the `tf-idf` score of each word. The embedding for the sentence "I went out for dinner" can be calculated as follows.

![title](images/encoding2.jpg)

The `encode_sentence` function adds up the vector of each token in a sentence, weighted by its tf-idf score, and generates a vector of length 200 for each response.

```
def encode_sentence(tokens, emb_size):
    _vector = np.zeros((1, emb_size))
    length = 0
    for word in tokens:
        try:
            _vector += embeddings.wv[word].reshape((1, emb_size)) * tfidf_map[word]
            length += 1
        except KeyError:
            # skip words missing from the embedding vocabulary or the tf-idf map
            continue
    if length > 0:
        _vector /= length
    return _vector

x_train = scale(np.concatenate([encode_sentence(ele, 200) for ele in map(lambda x: x.words, vector_train_corpus)]))
x_val = scale(np.concatenate([encode_sentence(ele, 200) for ele in map(lambda x: x.words, vector_val_corpus)]))

print(x_train.shape, x_val.shape)
```

Let's build a simple two layer dense net. This is just to check if we have done everything correctly up to this point. <br>
Let's call this our zero'th model. A dense net on sequential data without transformations is a joke anyway, right?

```
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=200))
model.add(Dense(7, activation='softmax'))
model.compile(optimizer=Adam(lr=1e-3, decay=1e-6), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

model.fit(x_train, y_train, epochs=10, verbose=1)

score = model.evaluate(x_val, y_val, verbose=1)
score
```

We get a loss of 1.41 and a validation accuracy of 0.46. This exact same model manages to get a validation score of about 0.8 on binary sentiment analysis, but given the difference in complexity, hopefully you weren't expecting much. <br>
<br>
Throwing in another dense layer doesn't help either.
```
model = Sequential()
model.add(Dense(256, activation='relu', input_dim=200))
model.add(Dense(64, activation='relu'))
model.add(Dense(7, activation='softmax'))
model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

model.fit(x_train, y_train, epochs=10, verbose=1)

score = model.evaluate(x_val, y_val, verbose=1)
score
```

Unsurprisingly, the results are still pretty bad, as dense layers cannot capture temporal correlations. <br>

## Recurrent networks
---
A recurrent network using LSTM or GRU cells will surely solve the problem, but upon reading the documentation of `keras.layers.LSTM` you'll realize it expects an input batch shape of `(batch_size, timesteps, data_dim)`. Obviously it would want some data along the dimension of time as well, but our encoded vectors have a shape of `(batch_size, data_dim)`. <br>
For our case, `timesteps` refers to the tokens. Instead of averaging out the vectors of each response, we want to keep them as they are. To fit our RNN, we can create a new way of encoding our tokens. We will ignore the tf-idf scores altogether and expect the LSTM to find out whatever useful features it needs for itself over the epochs. <br>
<br>
There is just _one_ more problem. LSTMs expect same-sized inputs for each sample, i.e. they want all the sentences to have exactly the same number of words, which we will call the _sequence length_. <br>
To see what we're working with, here's a scatter-plot of the distribution of token lengths in our training set.

```
lengths = [len(token) for token in df_train.tokens]
plt.scatter(lengths, range(len(lengths)), alpha=0.2);
print(np.mean(lengths), np.max(lengths))
```

The longest response turned out to be 1349 words long, but the mean length was about 21 words. You can do broadly two things here: set the sequence length equal to the number of words in the longest response you have found (but you don't know how long the longest response in the test set might be, and you might have to truncate anyway), or keep your sequence length close to the mean but just large enough to not lose much data. We'll see better ways of handling long responses later.

Once we decide our sequence length, longer responses will be truncated and shorter responses will be padded with a vector of zeros (or a vector of the means along the transverse axis, but zeros work just fine). <br>
For now, I'll use a sequence length of 80. No specific reason.

```
def encode_sentence_lstm(tokens, emb_size):
    vec = np.zeros((80, 200))
    for i, word in enumerate(tokens):
        if i > 79:
            break
        try:
            vec[i] = embeddings.wv[word].reshape((1, emb_size))
        except KeyError:
            continue
    return vec

x_train = np.array([encode_sentence_lstm(ele, 200) for ele in map(lambda x: x.words, vector_train_corpus)])
x_train.shape

x_val = np.array([encode_sentence_lstm(ele, 200) for ele in map(lambda x: x.words, vector_val_corpus)])
x_val.shape
```

We're done here. <br>
Finally we can build our first recurrent neural network. I'll use the `CuDNNLSTM` class, which is astronomically faster than the `LSTM` class if you're on a GPU. `LSTM` is so much slower that I don't have the patience to benchmark it for you. <br>
Additionally, let's use the functional API of keras instead of the `.add` syntax for a change. It is a lot more flexible. This is our __actual__ baseline model.
```
input_tensor = Input(shape=(80, 200))
x = CuDNNLSTM(256, return_sequences=False)(input_tensor)
x = Dense(64, activation='relu')(x)
output_tensor = Dense(7, activation='softmax')(x)

model = Model(inputs=[input_tensor], outputs=[output_tensor])
model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

model.fit(x_train, y_train, epochs=10, verbose=1)

score = model.evaluate(x_val, y_val, verbose=1)
score
```

The loss now is 0.57 and the validation accuracy is 0.855, which is a great improvement, just as we expected. <br>
<br>

### keras.layers.Bidirectional

In the current state, our model can just remember the past. It might benefit from a bit of context, maybe read a full phrase before sending an output to the next layer. <br>
For example, "It was hilarious to see" and "It was hilarious to see how bad it was" mean very different things. <br>
A bidirectional recurrent neural network (BRNN) overcomes this difficulty by propagating once in the forward direction and once in the backward direction and weighting them appropriately. I don't expect the score to increase much, as sentiment analysis doesn't really need this structure. Machine translation or handwriting recognition can make better use of bidirectional layers, but it never hurts to try. In keras, you can just wrap your existing layer in `Bidirectional`. <br>
However, Bidirectional LSTMs tend to overfit a bit, so I'll validate after each epoch, just to measure how much impact a bidirectional layer can potentially have. It's a bit unfair to the previous models, but there won't be much improvement anyway.

```
input_tensor = Input(shape=(80, 200))
x = Bidirectional(CuDNNLSTM(256, return_sequences=False))(input_tensor)
x = Dense(64, activation='relu')(x)
output_tensor = Dense(7, activation='softmax')(x)

model = Model(inputs=[input_tensor], outputs=[output_tensor])
model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=10, verbose=1)
```

The best validation accuracy was 0.8640 at the end of epoch 5, a 1% improvement. It's not much, but we'll take it.

### keras.layers.Embedding
---
There is a slightly less stupid way of doing this. We can just add a keras `Embedding` layer and skip dealing with gensim altogether. All the document-tagging, vector-building and training will be taken care of by keras. We can skip tokenization as well, as the `Tokenizer` class in keras tokenizes everything in the way `Embedding` likes. You can rerun this notebook up to the preprocessing section, so that your dataframe looks like this

```
df.head()
```

Shuffle the data

```
df_train, df_val, y_train, y_val = train_test_split(df, y, test_size=0.15, random_state=42)

t = Tokenizer()
t.fit_on_texts(df_train.response)
vocab_size = len(t.word_index) + 1
vocab_size

encoded_train_set = t.texts_to_sequences(df_train.response)
len(encoded_train_set)

df_train['tokens'] = encoded_train_set
df_train.drop(['response'], axis=1, inplace=True)
df_train.head()

y_train[:5]
```

These are our new tokens, which are obviously not all the same length, so we'll quickly pad them with zeros. `pad_sequences` is a handy function to do just this.

```
SEQ_LEN = 80
padded_train = pad_sequences(encoded_train_set, maxlen=SEQ_LEN, padding='post')
train_docs = [list(doc) for doc in padded_train]
df_train['tokens'] = train_docs
df_train.head()
```

We'll be using this two layer RNN extensively to benchmark different approaches.
The `Embedding` layer takes in a vocabulary size, the length of each word-vector, the input sequence length and a boolean that tells it whether it should train itself. We set this to false if we're using embeddings from someone else, unless we're transfer-learning, or training from scratch.

```
input_tensor = Input(shape=(SEQ_LEN,), dtype='int32')
e = Embedding(vocab_size, 300, input_length=SEQ_LEN, trainable=True)(input_tensor)
x = Bidirectional(CuDNNLSTM(128, return_sequences=True))(e)
x = Bidirectional(CuDNNLSTM(64, return_sequences=False))(x)
x = Dense(64, activation='relu')(x)
output_tensor = Dense(7, activation='softmax')(x)

model = Model(input_tensor, output_tensor)
model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
```

`df_train.tokens` returns a list, but we need a numpy array of numpy arrays as our training set

```
x_train = np.array([np.array(token) for token in df_train.tokens])
x_train.shape

model.fit(x_train, y_train, epochs=10, verbose=1)

encoded_val_set = t.texts_to_sequences(df_val.response)
len(encoded_val_set)

df_val['tokens'] = encoded_val_set
padded_val = pad_sequences(encoded_val_set, maxlen=SEQ_LEN, padding='post')
val_vectors = [list(doc) for doc in padded_val]
df_val.tokens = val_vectors
df_val.head()

x_val = np.array([np.array(token) for token in df_val.tokens])
print(x_val.shape, y_val.shape)

score = model.evaluate(x_val, y_val, verbose=1)
score
```

Our validation score is good. With half the work, we managed to get a slightly better model than the previous one, or is it because we have two LSTM layers this time? The influences are compounded and it might not work out so well for the test set. <br>
There is just one problem. If you train your own embeddings on a dataset this small, you're likely to not generalize well on the test set. Your real world accuracy might plummet further if you plan to use that model in production. <br>
To prevent this, you need to train on a larger dataset, but the 6 million parameters will soon be 6 billion parameters. Besides, it might not be easy to collect more data if you're solving a problem for a company.

## Pre-trained embeddings
---
Let's face it. Nobody trains their own embeddings nowadays, unless your model needs to understand domain-specific language. If you take somebody's model, tweak it and call it your own, you'll have better results in less time. <br>
Using pre-trained models is part of transfer learning, where you try to create a ripoff of a great model to suit your dataset. More specifically, there are two very commonly used open source embeddings that will outperform self-trained embeddings 95 out of 100 times. There's nothing special about them, they're just high dimensional vectors trained on huge datasets, on hardware more powerful than anything you'll ever own, and they give _the best_ results for most NLP tasks. <br>
(Spoiler: No they don't. _Even_ better embeddings were released last year. We'll get to that.)

### GloVe

**Glo**bal **Ve**ctors for word representation is a suite of word embeddings trained on 6 billion tokens with a vocabulary of 400 thousand words. These embeddings can be downloaded [here](https://nlp.stanford.edu/projects/glove/). <br>
From here onwards, we will use the keras `Embedding` layer as it is easier to work with. We'll also use the keras `Tokenizer` class as it works well with `Embedding`. <br>
There is a major difference between `keras.preprocessing.text.Tokenizer` and `nltk.word_tokenize`.
`Tokenizer` returns a list of numbers assigned according to word frequency, instead of a list of words, and internally maintains a vocabulary dictionary that maps words to numbers. Restart your kernel and rerun up to the preprocessing section if you're running out of memory. Now is a good time to split our dataset into training and validation sets. We shouldn't be training the tokenizer on data we aren't allowed to see. ``` df_train, df_val, y_train, y_val = train_test_split(df, y, test_size=0.15, random_state=42) t = Tokenizer() t.fit_on_texts(df_train.response) vocab_size = len(t.word_index) + 1 vocab_size encoded_train_set = t.texts_to_sequences(df_train.response) len(encoded_train_set) df_train['tokens'] = encoded_train_set df_train.drop(['response'], axis=1, inplace=True) df_train.head() y_train[:5] ``` We'll `pad_sequences` just like last time. ``` SEQ_LEN = 80 padded_train = pad_sequences(encoded_train_set, maxlen=SEQ_LEN, padding='post') train_docs = [list(doc) for doc in padded_train] df_train['tokens'] = train_docs df_train.head() ``` We'll use `gensim` to generate a dictionary of embeddings from the downloaded data; however, the file you downloaded isn't in the format `gensim` likes. Thankfully, there's a workaround for this by gensim themselves. The `glove2word2vec` function converts the file into the word2vec format. We'll save this file in the same directory as the original. ``` glove_input = 'D:/Datasets/embeddings/GloVe-6B/glove.6B.300d.txt' word2vec_output = 'D:/Datasets/embeddings/GloVe-6B/glove.6B.300d.txt.word2vec' glove2word2vec(glove_input, word2vec_output) embedding_index = gensim.models.KeyedVectors.load_word2vec_format('D:/Datasets/embeddings/GloVe-6B/glove.6B.300d.txt.word2vec', binary=False) ``` We just want embeddings for words that are actually in our corpus. Filter out the unwanted words and count the number of words that we don't have embeddings for. ``` embedding_matrix = np.zeros((vocab_size, 300)) count = 0 for word, i in t.word_index.items(): try: embedding_vector = embedding_index[word] embedding_matrix[i] = embedding_vector except KeyError: count += 1 count embedding_matrix.shape ``` We still don't have everything we need. For multi-class classification, tracking the accuracy alone is often misleading, especially if you have a class imbalance. You can trivially get 90% accuracy on a dataset that has 90 positive samples and 10 negative samples by just predicting the mode, but the model will be pretty useless. We should track the __F1 score__ as well. If you know what _precision_ and _recall_ are, you probably know what an _f1-score_ is. <br> <br> __Precision__ measures how many positive-predicted samples were actually positive. <br> __Recall__ measures how many actual positive samples were predicted to be positive. <br> <br> The _F1 score_ is the harmonic mean of the two, which serves as a great metric for tracking your model's progress. <br> <br> Unfortunately, the native f1-score metric of keras was removed in version 2.0, so we have to write our own. Keras accuracy metrics expect vectors of target classes and predicted classes.
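For reference, in terms of true positives (TP), false positives (FP) and false negatives (FN): $\text{precision} = \frac{TP}{TP + FP}$, $\text{recall} = \frac{TP}{TP + FN}$ and $F_1 = 2 \cdot \frac{\text{precision} \cdot \text{recall}}{\text{precision} + \text{recall}}$. The functions below compute batch-wise approximations of these with keras backend ops, adding `K.epsilon()` to the denominators to avoid division by zero.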
``` def recall(y_true, y_pred): true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_pos = K.sum(K.round(K.clip(y_true, 0, 1))) _recall = true_pos / (possible_pos + K.epsilon()) return _recall def precision(y_true, y_pred): true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1))) _precision = true_pos / (predicted_pos + K.epsilon()) return _precision def f1(y_true, y_pred): p = precision(y_true, y_pred) r = recall(y_true, y_pred) return 2 * ((p * r) / (p + r + K.epsilon())) ``` We can finally build our model using the `Embedding` class. The weights will be initialized using `embedding_matrix` and `trainable` will be set to False. Setting `trainable` to True usually gives slightly better results at the expense of ~6 million more trainable variables (corpus dependent). Suit yourself. As an aside, I will intentionally leave out GRUs throughout this notebook as LSTMs almost always work better in practice. But you can try them out yourself. Just replace `LSTM` with `GRU`, or `CuDNNLSTM` with `CuDNNGRU` if you're on a GPU. ``` input_tensor = Input(shape=(SEQ_LEN,), dtype='int32') e = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=SEQ_LEN, trainable=False)(input_tensor) x = Bidirectional(CuDNNLSTM(128, return_sequences=True))(e) x = Bidirectional(CuDNNLSTM(64, return_sequences=False))(x) x = Dense(64, activation='relu')(x) output_tensor = Dense(7, activation='softmax')(x) model = Model(input_tensor, output_tensor) model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy', f1]) model.summary() ``` `df_train.tokens` returns a list, but we need a numpy array of numpy arrays as our training set. ``` x_train = np.array([np.array(token) for token in df_train.tokens]) x_train.shape model.fit(x_train, y_train, epochs=10, verbose=1) ``` Let's validate our model. We'll go through the exact same preprocessing steps as for our training set. ``` encoded_val_set = t.texts_to_sequences(df_val.response) len(encoded_val_set) df_val['tokens'] = encoded_val_set padded_val = pad_sequences(encoded_val_set, maxlen=SEQ_LEN, padding='post') val_vectors = [list(doc) for doc in padded_val] df_val.tokens = val_vectors df_val.head() x_val = np.array([np.array(token) for token in df_val.tokens]) print(x_val.shape, y_val.shape) score = model.evaluate(x_val, y_val, verbose=1) score ``` The validation score this time is 0.88, but pre-trained embeddings will almost certainly generalize better to the test set or real-world data, and handle anomalies more effectively. ### Word2Vec Google released their pre-trained Word2Vec embeddings a few years ago. They were trained on the __Google News__ corpus of about 3 billion tokens. You can download the vectors [here](https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing) <br> Let's split the dataset.
``` df_train, df_val, y_train, y_val = train_test_split(df, y, test_size=0.15, random_state=42) t = Tokenizer() t.fit_on_texts(df_train.response) vocab_size = len(t.word_index) + 1 vocab_size encoded_train_set = t.texts_to_sequences(df_train.response) len(encoded_train_set) df_train['tokens'] = encoded_train_set df_train.drop(['response'], axis=1, inplace=True) df_train.head() y_train[:5] SEQ_LEN = 80 padded_train = pad_sequences(encoded_train_set, maxlen=SEQ_LEN, padding='post') train_vectors = [list(doc) for doc in padded_train] df_train.tokens = train_vectors lengths = [len(doc) for doc in train_vectors] np.mean(lengths) ``` This time, the downloaded file is good enough for `gensim` to import it directly. The model and everything else are exactly the same as above, and we'll still be tracking the F1-score. ``` embeddings_index = gensim.models.KeyedVectors.load_word2vec_format('D:/Datasets/embeddings/Word2Vec/GoogleNews-vectors-negative300.bin', binary=True) embedding_matrix = np.zeros((vocab_size, 300)) count = 0 for word, i in t.word_index.items(): try: embedding_vector = embeddings_index[word] embedding_matrix[i] = embedding_vector except KeyError: count += 1 count embedding_matrix.shape input_tensor = Input(shape=(SEQ_LEN,), dtype='int32') e = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=SEQ_LEN, trainable=False)(input_tensor) x = Bidirectional(CuDNNLSTM(128, return_sequences=True))(e) x = Bidirectional(CuDNNLSTM(64, return_sequences=False))(x) x = Dense(128, activation='relu')(x) output = Dense(7, activation='softmax')(x) model = Model(input_tensor, output) model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy', f1]) model.summary() x_train = np.array([np.array(token) for token in df_train.tokens]) x_train.shape model.fit(x_train, y_train, epochs=10, verbose=1) encoded_val_set = t.texts_to_sequences(df_val.response) len(encoded_val_set) df_val['tokens'] = encoded_val_set df_val.head() padded_val = pad_sequences(encoded_val_set, maxlen=SEQ_LEN, padding='post') val_vectors = [list(doc) for doc in padded_val] df_val.tokens = val_vectors df_val.head() lengths = [len(doc) for doc in val_vectors] np.mean(lengths) x_val = np.array([np.array(token) for token in df_val.tokens]) print(x_val.shape, y_val.shape) score = model.evaluate(x_val, y_val, verbose=1) score ``` The validation score is 0.879 this time, which is such a small difference from the previous model that we can't objectively say which one is better. Word2Vec is usually slightly better than GloVe on most NLP applications, but this time it wasn't. ## Debugging Over the last few models, our validation score has parked itself at about 0.88, which leads us to ask: is this the best accuracy we can reach? Our training accuracies have almost always surpassed 95%; are we overfitting? Or are we underfitting? Maybe adding more layers interspersed with Dropout layers or other regularization will help? <br> <br> For multi-class classification, if you have flatlined, the answer to these questions is almost always no. This is where you should have a look at your dataset. Plot all charts that you think might be useful and try to gain some insights. Maybe plotting the confusion matrix for our last model will help. ``` y_pred = model.predict(x_val, verbose=1) print(y_pred.shape, y_val.shape) ``` The confusion matrix cannot handle one-hot vectors, so let's convert them into integer classes.
``` y_pred_class = np.array([np.argmax(x) for x in y_pred]) y_val_class = np.array([np.argmax(x) for x in y_val]) print(y_pred_class.shape, y_val_class.shape) c = confusion_matrix(y_val_class, y_pred_class) classes = [v for k, v in cat_to_label.items()] plt.figure(figsize=(20, 20)) plt.imshow(c, interpolation='nearest', cmap='jet') plt.colorbar() ticks = np.arange(len(classes)) plt.xticks(ticks, classes, rotation=45) plt.yticks(ticks, classes) plt.ylabel('True class') plt.xlabel('Predicted class') plt.tight_layout() ``` It classified 'achievement' and 'affection' pretty accurately, was horrible at classifying 'nature' and 'exercise', and was pretty bad at everything else. Our model was also somewhat confused between 'achievement' and 'enjoy_the_moment', which, if you think about it, would be the case even for a human sometimes. <br> Right now, our model is basically an affection classifier. The large discrepancy between accuracies of different classes is what stands out, and it only means one thing: class imbalance. Let's plot a pie chart to see how bad it is. ``` plt.figure(figsize=(7, 7)) plt.pie(labels.sentiment.value_counts(), labels=classes); ``` Turns out, it's pretty bad! ``` labels.sentiment.value_counts() ``` The smallest class, 'exercise', has about 3.5% the number of samples as the largest class, 'achievement'. <br> Ideally you would want the exact same number of samples for all classes in your training set. In practice, a little variance doesn't hurt. <br> <br> ## Sampling To overcome this problem, there are a few things we can do, the first being sampling. To balance our datasets, we can __oversample__ instances of the minority class or __undersample__ instances of the majority class. <br> Both come with their disadvantages, however, which are more prominent in datasets with a greater imbalance, like ours. <br> __Oversampling__ the minority overfits the model because of the high duplication, while __undersampling__ might leave crucial information out. A more powerful sampling method, __SMOTE__, artificially generates new instances of the minority class by interpolating between neighboring minority samples, but this still doesn't eliminate overfitting. <br> <br> We won't try undersampling, as it would leave our training set with about 4500 samples, which is too small even for binary classification. <br> Let's try oversampling. We'll not make the number of samples exactly equal, but bring it within the same ballpark. We'll start afresh. ``` df = pd.read_csv('D:/Datasets/mc-sent/p_train.csv', low_memory=False) df.head() ``` We need to first split our training and validation sets. Since we normally wouldn't augment our test set, we shouldn't augment our validation set either. ``` df, df_val = train_test_split(df, test_size=0.15, random_state=42) labels = df[['id', 'sentiment']] classes = sorted(labels.sentiment.unique()) classes ``` Let's separate the dataframes by sentiment. ``` dfs = [] for sentiment in classes: df_temp = df.where(df.sentiment == sentiment) df_temp.dropna(axis=0, inplace=True) dfs.append(df_temp) ls = [len(df) for df in dfs] dfs[0].head() ls ``` `pd.concat([df] * int(max(ls) / len(df)), ignore_index=True)` generates a new dataframe with `df` replicated the required number of times. <br> We can write a one-liner to generate a list of augmented dataframes. ``` new_dfs = [pd.concat([df]*int(max(ls)/len(df)), ignore_index=True) for df in dfs] new_ls = [len(df) for df in new_dfs] new_ls ``` The new classes look pretty balanced.
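As an aside, the `pd.concat` trick only replicates each class in whole multiples, so the counts end up close but not equal. If you wanted exact counts, `sklearn.utils.resample` can sample each class with replacement up to a target size. A hedged sketch (not run in this notebook), reusing the `dfs` list built above:

```
from sklearn.utils import resample

# Sketch: upsample every per-class dataframe to the size of the largest class
target = max(len(d) for d in dfs)
upsampled = [resample(d, replace=True, n_samples=target, random_state=42) for d in dfs]
balanced_df = pd.concat(upsampled, ignore_index=True)
balanced_df.sentiment.value_counts()
```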
Let's concatenate everything into one large dataframe ``` df = pd.concat(new_dfs, ignore_index=True) labels = df[['id', 'sentiment']] print(df.shape, len(labels)) classes = sorted(labels.sentiment.unique()) classes plt.figure(figsize=(7, 7)) plt.pie(labels.sentiment.value_counts(), labels=classes); ``` Looks good. We just have to try preventing overfitting. ``` df.drop(['n', 'sentiment'], axis=1, inplace=True) label_to_cat = dict() for i in range(len(classes)): dummy = np.zeros((len(classes),), dtype='int8') dummy[i] = 1 label_to_cat[classes[i]] = dummy cat_to_label = dict() for k, v in label_to_cat.items(): cat_to_label[tuple(v)] = k y = np.array([label_to_cat[label] for label in labels.sentiment]) y[:5] df.response = df.response.apply(str.lower) df.head() ``` Let's shuffle the dataset. ``` df_train, _, y_train, _ = train_test_split(df, y, test_size=0, random_state=42) print(df_train.shape, y_train.shape) print(df_val.shape) ``` We'll use the GoogleNews Word2Vec model to train on this set. All the steps are exactly the same. ``` t = Tokenizer() t.fit_on_texts(df_train.response) vocab_size = len(t.word_index) + 1 vocab_size encoded_train_set = t.texts_to_sequences(df_train.response) len(encoded_train_set) df_train['tokens'] = encoded_train_set df_train.drop(['response'], axis=1, inplace=True) df_train.head() y_train[:5] SEQ_LEN = 80 padded_train = pad_sequences(encoded_train_set, maxlen=SEQ_LEN, padding='post') train_vectors = [list(doc) for doc in padded_train] df_train.tokens = train_vectors np.mean([len(doc) for doc in train_vectors]) embeddings_index = gensim.models.KeyedVectors.load_word2vec_format('D:/Datasets/embeddings/Word2Vec/GoogleNews-vectors-negative300.bin', binary=True) embedding_matrix = np.zeros((vocab_size, 300)) count = 0 for word, i in t.word_index.items(): try: embedding_vector = embeddings_index[word] embedding_matrix[i] = embedding_vector except KeyError: count += 1 count embedding_matrix.shape input_tensor = Input(shape=(SEQ_LEN,), dtype='int32') e = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=SEQ_LEN, trainable=False)(input_tensor) x = Bidirectional(CuDNNLSTM(128, return_sequences=True))(e) x = Bidirectional(CuDNNLSTM(64, return_sequences=False))(x) x = Dense(128, activation='relu')(x) output = Dense(7, activation='softmax')(x) model = Model(input_tensor, output) ``` This will take longer to train, so let's validate after each epoch and save a checkpoint each time our validation score increases. We just need to prepare our validation set. ``` df_val.head() val_labels = df_val[['id', 'sentiment']] df_val.drop(['n', 'sentiment'], axis=1, inplace=True) df_val.response = df_val.response.str.lower() df_val.head() encoded_val_set = t.texts_to_sequences(df_val.response) np.mean([len(doc) for doc in encoded_val_set]) df_val['tokens'] = encoded_val_set df_val.drop(['response'], axis=1, inplace=True) padded_val = pad_sequences(encoded_val_set, maxlen=SEQ_LEN, padding='post') val_vectors = [list(doc) for doc in padded_val] df_val.tokens = val_vectors df_val.head() np.mean([len(doc) for doc in val_vectors]) x_val = np.array([np.array(token) for token in df_val.tokens]) x_val.shape y_val = np.array([np.array(label_to_cat[label]) for label in val_labels.sentiment]) y_val.shape y_val[:5] ``` The `ModelCheckpoint` callback expects a file path, and a metric to monitor. `save_best_only` was set to True to save us some disk space. 
<br> Additionally, I have also set the optimizer's `decay` to $10^{-6}$, which gradually lowers the learning rate after every update, as our model will overfit pretty quickly. ``` checkpoint = ModelCheckpoint('D:/Datasets/mc-sent/models/w2v_balanced_v1.hdf5', monitor='val_acc', save_best_only=True, mode='max', verbose=1) model.compile(optimizer=Adam(lr=1e-3, decay=1e-6), loss='categorical_crossentropy', metrics=['accuracy', f1]) model.summary() x_train = np.array([np.array(token) for token in df_train.tokens]) x_train.shape model.fit(x_train, y_train, validation_data=[x_val, y_val], callbacks=[checkpoint], epochs=10, verbose=1) ``` Training accuracy reached 99.16%, but validation accuracy didn't cross 90%. Though this is the best result we got so far, we definitely did overfit. Using the same dataset, we'll now try to create a bigger model, but with more regularization, in an attempt to reduce overfitting. Additionally, let's use `LeakyReLU` activations. <br> ![ml-cheatsheet.readthedocs.io](images/lrelu.jpg) <br> If you use `LeakyReLU` as an activation function of a layer in keras, using `model.save()` later will give you this error (at the time of writing this blog): <br> `AttributeError: 'LeakyReLU' object has no attribute '__name__'` <br> To fix this, you will have to use `LeakyReLU` as a layer. <br> We'll use LeakyReLU with `alpha = 0.1`, and `Dropout` will be used for regularization. ``` input_tensor = Input(shape=(SEQ_LEN,), dtype='int32') e = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=SEQ_LEN, trainable=False)(input_tensor) x = Bidirectional(CuDNNLSTM(128, return_sequences=True))(e) x = Bidirectional(CuDNNLSTM(64, return_sequences=False))(x) x = Dense(256)(x) x = LeakyReLU(alpha=0.1)(x) x = Dropout(0.6)(x) x = Dense(128)(x) x = LeakyReLU(alpha=0.1)(x) x = Dropout(0.5)(x) x = Dense(64)(x) x = LeakyReLU(alpha=0.1)(x) x = Dropout(0.4)(x) output_tensor = Dense(7, activation='softmax')(x) model = Model(input_tensor, output_tensor) checkpoint = ModelCheckpoint('D:/Datasets/mc-sent/models/w2v_balanced_v1.hdf5', monitor='val_acc', save_best_only=True, mode='max', verbose=1) model.compile(optimizer=Adam(lr=1e-3, decay=1e-6), loss='categorical_crossentropy', metrics=['accuracy', f1]) model.summary() model.fit(x_train, y_train, validation_data=[x_val, y_val], callbacks=[checkpoint], epochs=10, verbose=1) ``` Our validation accuracy did not change much even though training accuracy crossed 98%. The regularized model isn't doing any better either; we overfit again due to the imbalance. Let's plot the confusion matrix for this model to see if anything changed. If we run `model.predict` now, we'll use the `model` object that was trained for the complete 10 epochs, not the one that gave us the highest validation accuracy. To use the best one, we need to load it from our last checkpoint file. We also have to define what custom objects we have used; for example, `load_model` doesn't know what `f1` means.
``` model = load_model('D:/Datasets/mc-sent/models/w2v_balanced_v1.hdf5', custom_objects={'f1': f1}) y_pred = model.predict(x_val, verbose=1) print(y_pred.shape, y_val.shape) y_pred_class = np.array([np.argmax(x) for x in y_pred]) y_val_class = np.array([np.argmax(x) for x in y_val]) c = confusion_matrix(y_val_class, y_pred_class) classes = [v for k, v in cat_to_label.items()] plt.figure(figsize=(20, 20)) plt.imshow(c, interpolation='nearest', cmap='jet') plt.colorbar() ticks = np.arange(len(classes)) plt.xticks(ticks, classes, rotation=45) plt.yticks(ticks, classes) plt.ylabel('True class') plt.xlabel('Predicted class') plt.tight_layout() ``` The confusion matrix is hardly any different, so our model overfit after all. The imbalance in this dataset is proving to be too difficult to combat. <br> But there's another, perhaps less stupid way of dealing with imbalance that we haven't tried yet. ## Cost-sensitive learning In this method, we penalize misclassifications differently. Misclassifications of the minority classes are penalized more heavily than those of the majority class, which means the loss is weighted differently for each class. Such a penalty system may induce the model to pay more attention to the minority classes. <br> Concretely, we calculate a class weight dictionary and feed it to the `.fit` method during training, and keras modifies the loss function accordingly. Scikit-learn has a handy function to calculate class weights. ``` df = pd.read_csv('D:/Datasets/mc-sent/p_train.csv', low_memory=False) df.head() df, df_val = train_test_split(df, test_size=0.15, random_state=42) labels = df[['id', 'sentiment']] classes = sorted(labels.sentiment.unique()) classes class_weights = class_weight.compute_class_weight('balanced', np.unique(sorted(labels.sentiment)), labels.sentiment) class_weights ``` We need to convert this into an enumerated dictionary for keras to be able to parse it. ``` class_weight_dict = dict(enumerate(class_weights)) class_weight_dict ``` We can pass this dictionary to keras to change its loss function accordingly.
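To make that concrete: with one-hot targets, the per-sample loss becomes $-w_c \log \hat{p}_c$ instead of $-\log \hat{p}_c$, where $c$ is the true class and $w_c$ is its weight, so mistakes on rare classes cost more. The 'balanced' heuristic used above sets $w_c = \frac{N}{K \cdot n_c}$, where $N$ is the total number of samples, $K$ the number of classes and $n_c$ the number of samples in class $c$.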
``` print(df.shape, labels.shape) print(df_val.shape) df.drop(['n', 'sentiment'], axis=1, inplace=True) label_to_cat = dict() for i in range(len(classes)): dummy = np.zeros((len(classes),), dtype='int8') dummy[i] = 1 label_to_cat[classes[i]] = dummy cat_to_label = dict() for k, v in label_to_cat.items(): cat_to_label[tuple(v)] = k y = np.array([label_to_cat[label] for label in labels.sentiment]) df.response = df.response.apply(str.lower) df.head() df_train = df.copy() y_train = y.copy() print(df_train.shape, y_train.shape) print(df_val.shape) t = Tokenizer() t.fit_on_texts(df_train.response) vocab_size = len(t.word_index) + 1 vocab_size encoded_train_set = t.texts_to_sequences(df_train.response) len(encoded_train_set) df_train['tokens'] = encoded_train_set df_train.drop(['response'], axis=1, inplace=True) df_train.head() y_train[:5] SEQ_LEN = 80 padded_train = pad_sequences(encoded_train_set, maxlen=SEQ_LEN, padding='post') train_docs = [list(doc) for doc in padded_train] df_train['tokens'] = train_docs df_train.head() embeddings_index = gensim.models.KeyedVectors.load_word2vec_format('D:/Datasets/embeddings/Word2Vec/GoogleNews-vectors-negative300.bin', binary=True) embedding_matrix = np.zeros((vocab_size, 300)) count = 0 for word, i in t.word_index.items(): try: embedding_vector = embeddings_index[word] embedding_matrix[i] = embedding_vector except KeyError: count += 1 count embedding_matrix.shape ``` We'll use the same model as above, but this time, we'll set `trainable` to True in the embedding layer. ``` input_tensor = Input(shape=(SEQ_LEN,), dtype='int32') e = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=SEQ_LEN, trainable=True)(input_tensor) x = Bidirectional(CuDNNLSTM(128, return_sequences=True))(e) x = Bidirectional(CuDNNLSTM(64, return_sequences=False))(x) x = Dense(256)(x) x = LeakyReLU(alpha=0.1)(x) x = Dropout(0.6)(x) x = Dense(128)(x) x = LeakyReLU(alpha=0.1)(x) x = Dropout(0.5)(x) x = Dense(64)(x) x = LeakyReLU(alpha=0.1)(x) x = Dropout(0.4)(x) output_tensor = Dense(7, activation='softmax')(x) model = Model(input_tensor, output_tensor) df_val.head() val_labels = df_val[['id', 'sentiment']] df_val.drop(['n', 'sentiment'], axis=1, inplace=True) df_val.response = df_val.response.str.lower() df_val.head() encoded_val_set = t.texts_to_sequences(df_val.response) np.mean([len(doc) for doc in encoded_val_set]) df_val['tokens'] = encoded_val_set df_val.drop(['response'], axis=1, inplace=True) padded_val = pad_sequences(encoded_val_set, maxlen=SEQ_LEN, padding='post') val_vectors = [list(doc) for doc in padded_val] df_val.tokens = val_vectors df_val.head() np.mean([len(doc) for doc in val_vectors]) x_val = np.array([np.array(token) for token in df_val.tokens]) x_val.shape y_val = np.array([np.array(label_to_cat[label]) for label in val_labels.sentiment]) y_val.shape y_val[:5] checkpoint = ModelCheckpoint('D:/Datasets/mc-sent/models/w2v_balanced_v3.hdf5', monitor='val_acc', save_best_only=True, mode='max', verbose=1) model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy', f1]) model.summary() x_train = np.array([np.array(token) for token in df_train.tokens]) x_train.shape ``` Pass the class-weight dictionary we built via the `class_weight` parameter when calling `fit`. ``` model.fit(x_train, y_train, validation_data=[x_val, y_val], callbacks=[checkpoint], class_weight=class_weight_dict, epochs=15, verbose=1) ``` We've finally hit almost 91% validation accuracy! <br> There's one last thing I want us to try.
## ELMo Embeddings These embeddings, released by [Allen NLP](https://allennlp.org/elmo) last year, can be used at the sentence level through the `default` output of the hub module, which maps each sentence to a single 1024-dimensional vector; that's how we'll use them here. As per the inventors, > ELMo is a deep contextualized word representation that models both complex characteristics of word use, and how these uses vary across linguistic contexts. The word vectors are learned functions of the internal states of a deep bidirectional language model (biLM), which is pre-trained on a large text corpus. <br> These embeddings are available through the tensorflow hub API. Since we take the sentence-level output, we don't need to tokenize anything ourselves. Make sure your dataframe has the following columns. ``` df_train.head() x_train = np.array([np.array(sentence) for sentence in df_train.response]) x_train[:5] y_train[:5] ``` We'll have to write our own class inheriting the `Layer` class from keras and define a few mandatory functions. ``` class ELMo(Layer): def __init__(self, **kwargs): self.dimensions = 1024 self.trainable = False # set trainable to False super(ELMo, self).__init__(**kwargs) def build(self, input_shape): self.elmo = hub.Module('https://tfhub.dev/google/elmo/2', trainable=self.trainable, name='{}_module'.format(self.name)) self.trainable_weights += K.tf.trainable_variables(scope="^{}_module/.*".format(self.name)) super(ELMo, self).build(input_shape) def call(self, x, mask=None): result = self.elmo(K.squeeze(K.cast(x, tf.string), axis=1), as_dict=True, signature='default',)['default'] return result def compute_mask(self, inputs, mask=None): return K.not_equal(inputs, '--PAD--') def compute_output_shape(self, input_shape): return (input_shape[0], self.dimensions) df_val.head() x_val = np.array([np.array(sentence) for sentence in df_val.response]) x_val.shape val_labels = df_val[['id', 'sentiment']] y_val = np.array([label_to_cat[label] for label in val_labels.sentiment]) x_val[:5] y_val[:5] ``` We'll be using the same architecture as before, but let's drop the fancy activation function this time. ``` input_tensor = Input(shape=(1,), dtype='string') e = ELMo()(input_tensor) x = Dense(256, activation='relu')(e) x = Dropout(0.6)(x) x = Dense(128, activation='relu')(x) x = Dropout(0.5)(x) x = Dense(64, activation='relu')(x) x = Dropout(0.4)(x) output_tensor = Dense(7, activation='softmax')(x) model = Model(input_tensor, output_tensor) checkpoint = ModelCheckpoint('D:/Datasets/mc-sent/models/w2v_balanced_elmo_v1.hdf5', monitor='val_acc', save_best_only=True, mode='max', verbose=1) model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy', f1]) model.summary() model.fit(x_train, y_train, batch_size=8, validation_data=[x_val, y_val], callbacks=[checkpoint], class_weight=class_weight_dict, epochs=5, verbose=1) ``` ## Ensemble __Hard voting:__ Let's say we have ten prediction vectors from ten different models for a single row in the test set. We take the argmax of each vector, and predict the mode of those ten values. This method is usually preferred as the prediction vectors do not interact with each other and correlation is minimal. Majority wins, and the classifiers that differ are silenced. <br> __Soft voting:__ In this, the ten prediction vectors are added together element-wise and the argmax of the resulting vector is returned as the prediction. This takes interactions into consideration, and hence total accuracy is not a lot greater than individual accuracies, but the ensemble is less likely to predict false positives. It's a jury, where every opinion matters.
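Neither voting scheme is implemented in this notebook, but here is a minimal numpy sketch of both, assuming `all_preds` is a hypothetical list of `(n_samples, 7)` softmax outputs collected from several trained models:

```
# all_preds: hypothetical list of (n_samples, 7) softmax outputs, one array per model

def hard_vote(all_preds):
    # argmax per model, then the most frequent class per sample
    votes = np.stack([p.argmax(axis=1) for p in all_preds], axis=1)  # (n_samples, n_models)
    return np.array([np.bincount(row, minlength=7).argmax() for row in votes])

def soft_vote(all_preds):
    # element-wise sum of the probability vectors, then argmax
    return np.sum(all_preds, axis=0).argmax(axis=1)
```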
# Temperature forecast and history data example (Turku Artukainen) Using Python 3.6 FMI open data https://en.ilmatieteenlaitos.fi/open-data ``` import datetime import pandas as pd import re import requests import xml.etree.ElementTree as et from io import StringIO api_url = 'https://opendata.fmi.fi/wfs' ``` ## Forecast data ``` def get_fmidata_multipointcoverage(parameters): r = requests.get(f'{api_url}?{parameters}') # XML root and namespaces root = et.fromstring(r.text) namespaces = dict([node for _, node in et.iterparse(StringIO(r.text), events=['start-ns'])]) # Extract name list names = list(map(lambda f: f.attrib['name'] ,root.findall('.//swe:field', namespaces))) # Extract Unix timestamps timestamps = re.split(r'\s+',root.find('.//gmlcov:positions', namespaces).text)[3:-1:3] # Convert Unix timestamps to datetimes with Helsinki timezone datetimeindex = pd.to_datetime(sorted(timestamps*len(names)), unit='s') datetimeindex = datetimeindex.tz_localize(tz='UTC').tz_convert('Europe/Helsinki') # Extract data values = re.split(r'\s+',root.find('.//gml:doubleOrNilReasonTupleList', namespaces).text)[1:-1] # Get URL for and print property explanations property_url = root.find('.//om:observedProperty', namespaces).attrib[ '{http://www.w3.org/1999/xlink}href'] print(f'Properties: {property_url}') # Create and return DataFrame df = pd.DataFrame({ 'name': names*len(timestamps), 'value': values}, index=datetimeindex) return df # Get geoids from https://www.geonames.org geoid = 660972 # Turku, Artukainen # List of stored queries https://ilmatieteenlaitos.fi/tallennetut-kyselyt query = 'fmi::forecast::hirlam::surface::point::multipointcoverage' df = get_fmidata_multipointcoverage(f'request=getFeature&storedquery_id={query}&geoid={geoid}') temperature = df[df['name']=='Temperature'] temperature ``` ## History data ``` def get_fmidata_simple(parameters): r = requests.get(f'{api_url}?{parameters}') # XML root and namespaces root = et.fromstring(r.text) namespaces = dict([node for _, node in et.iterparse(StringIO(r.text), events=['start-ns'])]) # Extract datetimes, names and values datetimes = list(map(lambda f: f.text, root.findall('.//BsWfs:Time', namespaces))) names = list(map(lambda f: f.text, root.findall('.//BsWfs:ParameterName', namespaces))) values = list(map(lambda f: f.text, root.findall('.//BsWfs:ParameterValue', namespaces))) # Convert to Helsinki timezone datetimeindex = pd.to_datetime(datetimes).tz_convert('Europe/Helsinki') # Create and return DataFrame df = pd.DataFrame({ 'name': names, 'value': values}, index=datetimeindex ) return df # Get geoids from https://www.geonames.org geoid2 = 660972 # Turku, Artukainen # Set time interval and timestep end = datetime.datetime.utcnow().replace(second=0, microsecond=0) start = end - datetime.timedelta(days=1) timestep = 60 # minutes query2 = 'fmi::observations::weather::simple' df2 = get_fmidata_simple(f'request=getFeature&storedquery_id={query2}&geoid={geoid2}&starttime={start}&endtime={end}&timestep={timestep}') history = df2[df2['name']=='t2m'] history ```
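The `value` column comes back as strings, so if you want to plot or aggregate the observations you need to convert it first. A small sketch (assuming matplotlib is installed; the column and variable names are the ones created above):

```
import matplotlib.pyplot as plt

# Convert the string values to floats and plot the last 24 hours of t2m observations
t2m = history['value'].astype(float)
t2m.plot(title='Turku Artukainen, t2m (last 24 h)')
plt.ylabel('t2m')
plt.show()
```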
0.395601
0.850655
# TensorBoard Tutorial

## Getting Started

Attribution: [Neptune.ai](https://neptune.ai/blog/tensorboard-tutorial)

### Useful Resources

- https://www.tensorflow.org/tensorboard
- https://github.com/tensorflow/tensorboard
- https://tensorboard.dev/

## Key Concepts

### Summary Ops: How TensorBoard gets data from TensorFlow

The first step in using TensorBoard is acquiring data from your TensorFlow run. For this, you need [summary ops](https://www.tensorflow.org/api_docs/python/tf/summary). Summary ops are ops, just like [`tf.matmul`](https://www.tensorflow.org/api_docs/python/tf/linalg/matmul) and [`tf.nn.relu`](https://www.tensorflow.org/api_docs/python/tf/nn/relu), which means they take in tensors, produce tensors, and are evaluated from within a TensorFlow graph. However, summary ops have a twist: the tensors they produce contain serialized protobufs, which are written to disk and sent to TensorBoard. To visualize the summary data in TensorBoard, you evaluate the summary op, retrieve the result, and then write that result to disk using a `tf.summary.FileWriter`. A full explanation, with examples, is in [the tutorial](https://www.tensorflow.org/get_started/summaries_and_tensorboard).

The supported summary ops include:

* [`tf.summary.scalar`](https://www.tensorflow.org/api_docs/python/tf/summary/scalar)
* [`tf.summary.image`](https://www.tensorflow.org/api_docs/python/tf/summary/image)
* [`tf.summary.audio`](https://www.tensorflow.org/api_docs/python/tf/summary/audio)
* [`tf.summary.text`](https://www.tensorflow.org/api_docs/python/tf/summary/text)
* [`tf.summary.histogram`](https://www.tensorflow.org/api_docs/python/tf/summary/histogram)

### Tags: Giving names to data

When you make a summary op, you also give it a `tag`. The tag is basically a name for the data recorded by that op, and is used to organize the data in the frontend. The scalar and histogram dashboards organize data by tag, and group the tags into folders according to a directory-like hierarchy. If you have a lot of tags, we recommend grouping them with slashes.

### Event Files & LogDirs: How TensorBoard loads the data

`tf.summary.FileWriter`s take summary data from TensorFlow and write it to a specified directory, known as the `logdir`. Specifically, the data is written to an append-only record dump that will have "tfevents" in the filename. TensorBoard reads data from a full directory and organizes it into the history of a single TensorFlow execution.

Why does it read the whole directory, rather than an individual file? You might have been using [supervisor.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/training/supervisor.py) to run your model, in which case, if TensorFlow crashes, the supervisor will restart it from a checkpoint. When it restarts, it will start writing to a new events file, and TensorBoard will stitch the various event files together to produce a consistent history of what happened.

### Runs: Comparing different executions of your model

You may want to visually compare multiple executions of your model; for example, suppose you've changed the hyperparameters and want to see if the model converges faster. TensorBoard enables this through different "runs". When TensorBoard is passed a `logdir` at startup, it recursively walks the directory tree rooted at `logdir` looking for subdirectories that contain tfevents data. Every time it encounters such a subdirectory, it loads it as a new `run`, and the frontend will organize the data accordingly.
For example, here is a well-organized TensorBoard log directory, with two runs, "run1" and "run2".
```
/some/path/mnist_experiments/
/some/path/mnist_experiments/run1/
/some/path/mnist_experiments/run1/events.out.tfevents.1456525581.name
/some/path/mnist_experiments/run1/events.out.tfevents.1456525585.name
/some/path/mnist_experiments/run2/
/some/path/mnist_experiments/run2/events.out.tfevents.1456525385.name

tensorboard --logdir /some/path/mnist_experiments
```

#### Logdir & Logdir_spec (Legacy Mode)

You may also pass a comma-separated list of log directories, and TensorBoard will watch each directory. You can also assign names to individual log directories by putting a colon between the name and the path, as in
```
tensorboard --logdir_spec name1:/path/to/logs/1,name2:/path/to/logs/2
```

_This flag (`--logdir_spec`) is discouraged and can usually be avoided_. TensorBoard walks log directories recursively; for finer-grained control, prefer using a symlink tree. _Some features may not work when using `--logdir_spec` instead of `--logdir`._

Install these libraries - I'm using a `virtualenv` with Python 3.7
```
!pip install tensorflow
!pip install matplotlib
!pip install tensorboard
!pip install scikit-learn
```

With TensorBoard installed, you can now load it into your notebook. Note that you can use it in a Jupyter Notebook or Google's Colab.
```
%load_ext tensorboard
```

Once that is done, you have to set a log directory. This is where TensorBoard will store all the logs. It will read from these logs in order to display the various visualizations.
```
log_folder = 'logs'

import datetime
log_folder = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
```

### How to run TensorBoard

Running TensorBoard involves just one line of code. In this section you'll see how to do this.

Let's now walk through an example where you will use TensorBoard to visualize model metrics. For that purpose, you need to build a simple image classification model.
```
import tensorflow as tf

mnist = tf.keras.datasets.mnist

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_test = X_train / 255.0, X_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')])

model.compile(optimizer='sgd',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```

Next, load in the TensorBoard notebook extension and create a variable pointing to the log folder.
```
%load_ext tensorboard
log_folder = 'logs'
```

### TensorBoard Callback

The next step is to specify the TensorBoard callback during the model's fit method. In order to do that, you first have to import the TensorBoard callback. This callback is responsible for logging events such as activation histograms, metrics summary plots, profiling and training graph visualizations.
```
from tensorflow.keras.callbacks import TensorBoard

callbacks = [TensorBoard(log_dir=log_folder,
                         histogram_freq=1,
                         write_graph=True,
                         write_images=True,
                         update_freq='epoch',
                         profile_batch=2,
                         embeddings_freq=1)]
```

With that in place, you can now create the TensorBoard callback and specify the log directory using `log_dir`. The TensorBoard callback also takes other parameters:

- `histogram_freq` is the frequency at which to compute activation and weight histograms for layers of the model. Setting this to 0 means that histograms will not be computed.
  In order for this to work, you have to set the validation data or the validation split.
- `write_graph` dictates if the graph will be visualized in TensorBoard
- `write_images`, when set to True, visualizes the model weights as an image in TensorBoard
- `update_freq` determines how losses and metrics are written to TensorBoard. When set to an integer, say 100, losses and metrics are logged every 100 batches. When set to `'batch'`, the losses and metrics are written after every batch. When set to `'epoch'`, they are written after every epoch
- `profile_batch` determines which batches will be profiled. By default, the second batch is profiled. You can also profile a range of batches, for example batches 5 to 10, with `profile_batch='5,10'`. Setting `profile_batch` to 0 disables profiling
- `embeddings_freq` is the frequency at which the embedding layers will be visualized. Setting this to zero means that the embeddings will not be visualized

The next item is to fit the model and pass in the callback.
```
model.fit(X_train, y_train,
          epochs=10,
          validation_split=0.2,
          callbacks=callbacks)
```

Open TensorBoard by visiting http://localhost:6006
```
%tensorboard --logdir logs
```

#### TensorBoard scalars

The Scalars tab shows changes in the loss and metrics over the epochs. It can be used to track other scalar values such as learning rate and training speed.

#### TensorBoard images

This dashboard has images that show the weights. Adjusting the slider displays the weights at various epochs.

#### TensorBoard graphs

This tab shows your model's layers. You can use this to check whether the architecture of the model looks as intended.

#### TensorBoard distributions

The Distributions tab shows the distribution of tensors. For example, for a dense layer you can see the distribution of the weights and biases over each epoch.

#### TensorBoard histograms

The Histograms tab shows the distribution of tensors over time. For example, for dense_1 you can see the distribution of the biases over each epoch.

### Plot training examples with TensorBoard

You can use the TensorFlow Image Summary API to visualize training images. This is especially useful when working with image data like in this case.

Now, create a new log directory for the images as shown below and create a file writer pointing to the directory.
```
logdir = "logs/train_data/"
file_writer = tf.summary.create_file_writer(logdir)
```

Earlier (in the "How to run TensorBoard" section), you specified that the image shape was 28 by 28. This is important when reshaping the images before writing them to TensorBoard. You also need to specify the channel to be 1 because the images are grayscale. Afterward, you use `file_writer` to write the images to TensorBoard. In this example, the 20 images at indices 10 to 29 will be written to TensorBoard.
```
import numpy as np

with file_writer.as_default():
    images = np.reshape(X_train[10:30], (-1, 28, 28, 1))
    tf.summary.image("20 Digits", images, max_outputs=25, step=0)
```

Apart from visualizing image tensors, you can also visualize actual images in TensorBoard. In order to illustrate that, you need to convert the MNIST tensors to images using Matplotlib. After that, you need to use `tf.summary.image` to plot the images in TensorBoard.

Start by clearing the logs; alternatively you can use timestamped log folders.
After that, specify the log directory and create a file writer with `tf.summary.create_file_writer` that will be used to write the images to TensorBoard.
```
!rm -rf logs

import io
import matplotlib.pyplot as plt

class_names = ['Zero','One','Two','Three','Four','Five','Six','Seven','Eight','Nine']

logdir = "logs/plots/"
file_writer = tf.summary.create_file_writer(logdir)
```

Next, create a grid that will hold the images. In this case, the grid will hold 36 digits.
```
def image_grid():
    figure = plt.figure(figsize=(12,8))
    for i in range(36):
        plt.subplot(6, 6, i + 1)
        plt.xlabel(class_names[y_train[i]])
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(X_train[i], cmap=plt.cm.coolwarm)
    return figure

figure = image_grid()
```

Now convert the digits into a single image to visualize them in TensorBoard.
```
def plot_to_image(figure):
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    plt.close(figure)
    buf.seek(0)
    digit = tf.image.decode_png(buf.getvalue(), channels=4)
    digit = tf.expand_dims(digit, 0)
    return digit
```

The next step is to use the writer and `plot_to_image` to display the images on TensorBoard.
```
with file_writer.as_default():
    tf.summary.image("MNIST Digits", plot_to_image(figure), step=0)

%tensorboard --logdir logs/plots
```

## Log confusion matrix to TensorBoard

Using the same example, you can log the confusion matrix for all epochs. First, define a function that will return a Matplotlib figure holding the confusion matrix.
```
import itertools

def plot_confusion_matrix(cm, class_names):
    figure = plt.figure(figsize=(8, 8))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Accent)
    plt.title("Confusion matrix")
    plt.colorbar()
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=45)
    plt.yticks(tick_marks, class_names)

    cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)

    threshold = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        color = "white" if cm[i, j] > threshold else "black"
        plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return figure
```

Next, clear the previous logs, define the log directory for the confusion matrix, and create a writer variable for writing into the log folder.
```
!rm -rf logs
logdir = "logs"
file_writer_cm = tf.summary.create_file_writer(logdir)
```

The step that follows is to create a function that makes predictions with the model and logs the confusion matrix as an image. After that, use `file_writer_cm` to write the confusion matrix to the log directory.
```
from tensorflow import keras
from sklearn import metrics

def log_confusion_matrix(epoch, logs):
    predictions = model.predict(X_test)
    predictions = np.argmax(predictions, axis=1)

    cm = metrics.confusion_matrix(y_test, predictions)
    figure = plot_confusion_matrix(cm, class_names=class_names)
    cm_image = plot_to_image(figure)

    with file_writer_cm.as_default():
        tf.summary.image("Confusion Matrix", cm_image, step=epoch)
```

This will be followed by the definition of the TensorBoard callback and the `LambdaCallback`. The `LambdaCallback` will log the confusion matrix on every epoch. Finally, fit the model using these two callbacks. Since you've already fitted the model before, it would be advisable to restart your runtime and ensure that you are fitting the model just once.
``` callbacks = [ TensorBoard(log_dir=log_folder, histogram_freq=1, write_graph=True, write_images=True, update_freq='epoch', profile_batch=2, embeddings_freq=1), keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix) ] model.fit(X_train, y_train, epochs=10, validation_split=0.2, callbacks=callbacks) ``` Now run TensorBoard and check the confusion matrix on the Images tab. ``` %tensorboard --logdir logs ```
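Beyond what the Keras callback logs for you, the summary ops described in the Key Concepts section can also be called directly, which is handy for quantities Keras does not track, such as a custom learning-rate schedule. A minimal sketch, assuming TensorFlow 2.x as used above; the log directory name and the exponentially decaying value are illustrative placeholders, not part of the tutorial's model:
```
custom_logdir = 'logs/custom_scalars'
writer = tf.summary.create_file_writer(custom_logdir)

with writer.as_default():
    for step in range(100):
        # A made-up decaying value standing in for a real schedule
        tf.summary.scalar('learning_rate', 0.1 * (0.95 ** step), step=step)
writer.flush()

%tensorboard --logdir logs/custom_scalars
```
The scalar then appears in the Scalars tab under its tag name, following the tag and folder convention described earlier.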
<a href="https://colab.research.google.com/github/matheuspercario/TT007-AnaliseDadosPython/blob/master/exercicios/Aula6-strings/strings.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Strings - Python suporta strings como um tipo de dado básico. - Stings são objetos imutáveis. Não podem ser alteradas após sua criação. Quando um método que altera uma string é chamado; na verdade este retorna uma nova string. ## Strings formatadas ### Tipos de Apresentação Você viu a formatação básica de strings com f-strings. Quando você especifica um espaço reservado para um valor em uma f-string, Python assume que o valor deve ser exibido como uma string, a menos que você especifique outro tipo. Em alguns casos, o tipo é obrigatório. Por exemplo, vamos formatar o valor de ponto flutuante 17.489 arredondado para a posição dois centésimos: ``` f'{17.489:.2f}' ``` Python oferece suporte a precisão apenas para valores de ponto flutuante e **Decimal**. - A formatação é dependente do tipo - se você tentar usar **.2f** para formatar uma string como **'hello'**, ocorre um **ValueError**. - O tipo de apresentação **f** no especificador de formato **.2f** é necessário. - Indica que tipo que está sendo formatado para que o Python possa determinar se as outras informações de formatação são permitidas para esse tipo. - A lista completa com dos tipos pode ser acessada em: https://docs.python.org/3/library/string.html#formatspec #### Inteiros O tipo de apresentação **d** formata valores inteiros como strings ``` f'{10:d}' ``` #### Caracteres O tipo de apresentação **c** formata um código de caractere inteiro como o caractere correspondente ``` f'{65:c} {97:c}' ``` O tipo de apresentação **s** é o padrão. - Se você especificar **s** explicitamente, o valor a formatar deve ser uma variável que faz referência a uma string, - uma expressão que produz uma string - ou um literal de string, como em f'{"hello":s}. - Se você não especificar um tipo de apresentação, como em {7}, os valores não string, como o inteiro 7, são convertidos em strings: ``` f'{"hello":s} {7}' ``` #### Valores de ponto flutuante e Decimal Você usou o tipo de apresentação **f** para formatar valores de ponto flutuante e decimais. - Para valores extremamente grandes, ou pequenos, a notação exponencial (científica) pode ser usada para formatar os valores com o tipo de apresentação **e** ou (**E**). ``` from decimal import Decimal f'{Decimal("10000000000000000000000000.0"):.3f}' f'{Decimal("10000000000000000000000000.0"):.3e}' ``` ** Larguras e alinhamento de campo Por padrão, o Python alinha os números à direita e à esquerda outros valores, como strings - Os resultados abaixo estão entre colchetes ([]) para que você possa ver como os valores se alinham no campo: ``` f'[{27:10d}]' f'[{3.5:10f}]' f'[{"hello":10}]' ``` Por padrão, Python formata valores flutuantes com seis dígitos de precisão para à direita do ponto decimal. - Para valores com menos caracteres do que o campo largura, as posições restantes dos caracteres são preenchidas com espaços. - Valores com mais caracteres do que a largura do campo, use quantas posições de caracteres forem necessárias. 
#### Especificando explicitamente o alinhamento à esquerda e à direita em um campo Lembre-se de que você pode especificar o alinhamento à esquerda e à direita com **<** e **>**: ``` f'[{27:<15d}]' f'[{3.5:<15f}]' f'[{"hello":>15}]' ``` #### Centralizando um valor em um campo Além disso, você pode centralizar valores: ``` f'[{27:^7d}]' f'[{3.5:^7.1f}]' f'[{"hello":^7}]' ``` # **Exercício 01** Exiba em linhas separadas o seu nome à direita, ao centro e alinhado à esquerda em um campo de 10 caracteres. Coloque cada resultado entre colchetes para que você possa ver oo alinhamento resulta mais claramente. ``` print(f'Alinhado à direita \t:\t [{"Matheus":>10}]') print(f'Alinhado à esquerda \t:\t [{"Matheus":<10}]') print(f'Alinhado ao centro \t:\t [{"Matheus":^10}]') ``` #### Formatação Numérica Existem vários recursos de formatação numérica. - Por exemplo, àss vezes é desejável forçar o sinal em um número positivo ``` f'[{27:+10d}]' ``` O + antes da largura do campo especifica que um número positivo deve ser precedido por um **+**. Um número negativo sempre começa com um **-**. - Para preencher os caracteres restantes do campo com **0**s em vez de espaços, coloque um 0 antes da largura do campo (e depois do + se houver): ``` f'[{27:+010d}]' ``` ##### Usando um espaço onde um sinal + apareceria Um espaço indica que os números positivos devem mostrar um caractere de espaço na posição do sinal. - Isso é útil para alinhar valores positivos e negativos para fins de exibição: ``` print(f'{27:d}\n{27: d}\n{-27: d}') ``` #### Agrupando dígitos Você pode formatar números com separadores de milhares usando uma vírgula (,), da seguinte maneira: ``` f'{12345678:,d}' f'{123456.78:,.2f}' ``` ### O Método de string *format* As f-strings do Python foram adicionadas à linguagem na versão 3.6. - Antes disso, a formatação era feita usando o método __*format*__. - Mostramos o método de formatação aqui porque você encontrará em código escrito antes do Python 3.6. - Você verá frequentemente o método de formatação no Python documentação e em muitos livros e artigos Python escritos antes que as f-strings fossem introduzidas. Você chama o formato do método em uma string de formato contendo espaços reservados com chaves ({}), possivelmente com especificadores de formato. Você passa para o método os valores a serem formatados. Vamos formate o valor flutuante 17.489 arredondado para a posição dos centésimos: ``` '{:.2f}'.format(17.489) ``` Em um espaço reservado, se houver um especificador de formato, você o precede por dois-pontos (:), como nas strings f. O resultado da chamada de formato é uma nova string contendo os resultados formatados. #### Multiplos marcadores Uma string de formato pode conter vários marcadores de posição, caso em que o método do formato os argumentos correspondem aos marcadores da esquerda para a direita: ``` '{} {}'.format('Amanda', 'Cyan') ``` #### Argumentos de referência por número de posição A string de formato pode fazer referência a argumentos específicos por sua posição no formato lista de argumentos do método, começando com a posição 0: ``` '{0} {0} {1}'.format('Happy', 'Birthday') ``` #### Referenciando argumentos de palavras-chave Você pode fazer referência a argumentos de palavra-chave por suas chaves nos marcadores de posição: ``` '{first} {last}'.format(first='Amanda', last='Gray') '{last} {last}'.format(first='Amanda', last='Gray') ``` ### Concatenando e repetindo strings Lembre-se que já usamos o operador **+** para concatenar strings e o operador **\*** para repetir strings. 
Você também pode realizar essas operações com atribuições aumentadas. - Strings são imutáveis, então cada operação atribui um novo objeto de string à variável: ``` s1 = 'happy' s2 = 'birthday' s1 += ' ' + s2 s1 symbol = '>' symbol *= 5 symbol ``` # **Exercício 02** Use o operador **+=** para concatenar seu nome e sobrenome. Então use o operador **\*=** para criar uma barra de asteriscos com o mesmo número de caracteres que seu nome completo e exibir a barra acima e abaixo do seu nome. ``` name = 'Matheus' middle = 'Percário' last = 'Bruder' name += ' ' + middle name += ' ' + last ast = '*' ast *= len(name) print(f'{ast}\n{name}\n{ast}') ``` ### Métodos de string ![image.png](attachment:image.png) ``` sentence = 'to be or not to be that is the question' ``` O método de string **count** retorna o número de vezes que seu argumento ocorre na string em qual o método é chamado: ``` sentence.count('to') ``` ![image.png](attachment:image.png) #### Localizando uma substring em uma string O método de string **index** procura uma substring dentro de uma string e retorna o primeiro índice em em que a substring é encontrada; caso contrário, ocorre um **ValueError**: ``` sentence.index('be') ``` O método de string **rindex** executa a mesma operação que **index**, mas pesquisa a partir do final da string e retorna o último índice no qual a substring foi encontrada; caso contrário, um ocorre um erro **ValueError** ``` sentence.rindex('be') ``` ![image.png](attachment:image.png) #### Removendo espaços em branco de strings Existem vários métodos de string para remover espaços em branco das extremidades de uma string. - Cada um retorna uma nova string deixando a original inalterada. > Strings são imutáveis, então cada método que parece modificar uma string retorna uma nova. #### Removendo espaços em branco à esquerda e à direita Vamos usar o método de string **strip** para remover os espaços em branco à esquerda e à direita de uma string: - o método **lstrip** remove somente os espaços da esquerda. - o método **rstrip** remoce somente os espaços da direita. ``` sentence = '\t \n This is a test string. \t\t \n' print(sentence) sentence.strip() sentence ``` ![image.png](attachment:image.png) # <font color=red> Agrupar métodos</font> ### Alterando entre maiúsculas e mínusculas Nos capítulos anteriores, você usou métodos de string inferior e superior para converter strings para todas as minúsculas ou todas as letras maiúsculas. Você também pode alterar a capitalização de uma string com métodos capitalize e título. **Quadrilha, Carlos Drummond de Andrade** João amava Teresa que amava Raimundo<br> que amava Maria que amava Joaquim que amava Lili<br> que não amava ninguém.<br> João foi pra os Estados Unidos, Teresa para o convento,<br> Raimundo morreu de desastre, Maria ficou para tia,<br> Joaquim suicidou-se e Lili casou com J. Pinto Fernandes<br> que não tinha entrado na história.<br> # **Exercício 03** Considerando a **Quadrilha** como string de entrada. Escreva um programa para: - Contar quantas vezes aparece ‘amava’. - Encontrar o primeiro e o último índice em que aparece ‘amava’. - Encontrar todos os índices em que aparece ‘amava’. - Substituir ‘amava’ por ‘treinava’. - Indique quais linhas começam com ‘que’. ``` quadrilha = """João amava Teresa que amava Raimundo que amava Maria que amava Joaquim que amava Lili que não amava ninguém. João foi pra os Estados Unidos, Teresa para o convento, Raimundo morreu de desastre, Maria ficou para tia, Joaquim suicidou-se e Lili casou com J. 
Pinto Fernandes que não tinha entrado na história.""" # Contar quantas vezes aparece ‘amava’. print(f'quadrilha.count("amava") = {quadrilha.count("amava")}') # Encontrar o primeiro e o último índice em que aparece ‘amava’. print(f'Primeiro índice = {quadrilha.index("amava")}') print(f'Último índice = {quadrilha.rindex("amava")}') quadrilha.index("amava", 0, len(quadrilha)) len(quadrilha) # Encontrar todos os índices em que aparece ‘amava’. indices = [] idx = 0 while True: idx = quadrilha.index('amava', idx + 1) indices.append(idx) if idx == quadrilha.rindex('amava'): break print(indices) # Substituir ‘amava’ por ‘treinava’. quadrilha = quadrilha.replace('amava', 'treinava') # print(quadrilha) # Indique quais linhas começam com ‘que’. for idx, linha in enumerate(quadrilha.splitlines()): if linha[0] == 'q' and linha[1] == 'u' and linha[2] == 'e': # print(linha) print(f'Linha [{idx}] começa com "que"') ``` ## Tokens - Quando lemos uma sentença, nosso cérebro quebra a sentença em palavras, ou tokens, cada qual com um significado. - Este processo é chamado tokenização. - Interpretadores e compiladores realizam tokenização para quebrar as sentenças em palavras-chave, operadores, variaveis, etc. - Tokens são separados por delimitadores, tipicamente espaços em branco. - espaço, tab, nova linha e retorno de carro. # **Exercício 04** Ainda considerando a **Quadrilha**, escreva scripts para: - Obter uma lista com todas a palavras do texto - Separar o texto onde exista ‘,’. - Substitua os espaços por ‘__’. - Obter uma lista onde conste cada palavra existente no texto, uma única vez, juntamente com sua frequência. ``` quadrilha = """João amava Teresa que amava Raimundo que amava Maria que amava Joaquim que amava Lili que não amava ninguém. João foi pra os Estados Unidos, Teresa para o convento, Raimundo morreu de desastre, Maria ficou para tia, Joaquim suicidou-se e Lili casou com J. Pinto Fernandes que não tinha entrado na história.""" # Obter uma lista com todas a palavras do texto palavras = quadrilha.split() palavras # Separar o texto onde exista ‘,’. quadrilha.split(',') # Substitua os espaços por ‘__’. new_quadrilha = quadrilha.replace(" ", "__") new_quadrilha # Obter uma lista onde conste cada palavra existente no texto, uma única vez, juntamente com sua frequência. frequencias = [] palavras_processadas = [] palavras = quadrilha.split() # preprocessamento for p in palavras: if p[-1] in ",.:;?!": palavras_processadas.append(p[:-1]) else: palavras_processadas.append(p) # Determinando frequencias for palavra in palavras_processadas: freq = palavras_processadas.count(palavra) t = (palavra, freq) if t not in frequencias: frequencias.append(t) # Printando resultado for p, f in frequencias: print(f'{p:^14} -> {f}') ``` # Expressões regulares Uma expressão regular é um padrão de texto usado para encontrar strings que combinam com este padrão. O módulo **re**, fornece meios para o processamento de expressões regulares em Python https://docs.python.org/3/library/re.html - As expressões regulares podem ajudá-lo a extrair dados de texto não estruturado, como publicações em mídias sociais. - Elas também são importantes para garantir que os dados estejam no formato correto antes você tenta processar Validando Dados Antes de trabalhar com dados de texto, você costuma usar expressões regulares para validar os dados. Por exemplo, você pode verificar se: • Um CEP dos EUA consiste em cinco dígitos (como 02215) ou cinco dígitos seguidos por um hífen e mais quatro dígitos (como 02215-4775). 
• Um sobrenome de string contém apenas letras, espaços, apóstrofos e hifens. • Um endereço de e-mail contém apenas os caracteres permitidos na ordem permitida. • O número do Seguro Social dos EUA contém três dígitos, um hífen, dois dígitos, um hífen e quatro dígitos, e segue outras regras sobre os números específicos que pode ser usado em cada grupo de dígitos. Raramente você precisará criar suas próprias expressões regulares para itens comuns como esses. Sites como - https://regex101.com - http://www.regexlib.com - https://www.regular-expressions.info e outros oferecem repositórios de expressões regulares existentes que você pode copiar e usar. Muitos sites como esses também fornecem interfaces nas quais você pode testar expressões regulares para determinar se eles atenderão às suas necessidades ### Outros usos de expressões regulares Além de validar dados, as expressões regulares costumam ser usadas para: - Extrair dados do texto (às vezes conhecido como raspagem) - por exemplo, localizar todos URLs em uma página da web. [Você pode preferir ferramentas como BeautifulSoup, XPath e lxml.] - Limpar dados - Por exemplo, removendo dados desnecessários, removendo dados duplicados, tratar dados incompletos, correção de erros de digitação, garantia de formatos de dados consistentes, lidar com outliers e muito mais. - Transforme os dados em outros formatos - por exemplo, reformatando os dados que foram coletados como valores separados por tabulação ou por espaço em valores separados por vírgula (CSV) para um aplicativo que exige que os dados estejam no formato CSV. ### O módulo _re_ e a função _fullmatch_ Para usar expressões regulares, importe o módulo re da biblioteca padrão do Python: ``` import re ``` Uma das funções de expressão regular mais simples é a função **fullmatch**, que verifica se o string inteira em seu segundo argumento corresponde ao padrão em seu primeiro argumento. ``` pattern = '02215' 'Match' if re.fullmatch(pattern, '02215') else 'No match' 'Match' if re.fullmatch(pattern, '51220') else 'No match' ``` ### Classes de caracteres e sequências especiais Uma classe de caracteres especifica um grupo de caracteres em uma string. - Os metacaracteres ‘[’ e ‘]’ denotam uma **classe de expressões regulares**. - Exemplo, a expressão regular "[abc]" identifica as letras a, b, c. - Pode-se usar o caractere ‘-’ para indicar intervalos de caracteres. - Ex. "[a-d]" é idêntico à "[abcd]". ``` 'Valid' if re.fullmatch('[A-Z][a-z]', 'Ae') else 'Invalid' ``` - O metacaractere ‘^’ quando colocado junto a uma classe, este indica a negação da classe. - Ex. “[^a-c]” – todo caractere exceto os caracteres a,b,c ``` 'Match' if re.fullmatch('[^a-z]', 'A') else 'No match' 'Match' if re.fullmatch('[^a-z]', 'a') else 'No match' ``` - O caractere ‘|’ pode ser visto como ‘ou’. - Usado para testar a combinação de uma dentre as possíveis expressões. - Ex. com|org|edu - Adicinando ‘r’ antes da expressão regular, diz ao interpretador para considerar o ‘\’ como um caractere. - Assim r"\n" significa dois caracteres, o ‘\’ e o ‘n’. Classes de caracteres combinam, exatamente, com um caractere. - Metacaracteres de repetição (quantificadores) são usados para especificar o número de repetições que se deseja. **?** combina zero ou uma ocorrências da expressão que o precede. <br> **+** combina uma ou mais ocorrências da expressão que o precede. <br> **\*** combina zero ou mais ocorrências da expressão que a precede. <br> **{ n }** combina exatamente **n** ocorrências da expressão que o precede. 
<br> **{m,n}** combina **entre m e n** ocorrências da expressão que o precede. <br> #### Os quantificadores * e + ``` 'Valid' if re.fullmatch('[A-Z][a-z]+', 'Wally') else 'Invalid' 'Valid' if re.fullmatch('[A-Z][a-z]*', 'Wally') else 'Invalid' 'Valid' if re.fullmatch('[A-Z][a-z]+', 'E') else 'Invalid' # Invalid, pois 'E' precisa fazer parte das duas classes e não apenas uma. 'Valid' if re.fullmatch('[A-Z][a-z]*', 'E') else 'Invalid' 'Valid' if re.fullmatch('E{1}', 'E') else 'Invalid' ``` A expressão regular **labell?ed** corresponde à **labelled** (grafia do inglês britânico) e **labeled** (a grafia do inglês americano), mas não a palavra incorreta **labellled**. - Em cada trecho abaixo, os primeiros cinco caracteres literais na expressão regular (**label**) correspondem ao primeiro cinco caracteres dos segundos argumentos. Então **l?** indica que pode haver **zero ou um caractere l** antes dos caracteres **ed** restantes. ``` 'Match' if re.fullmatch('labell?ed', 'labelled') else 'No match' 'Match' if re.fullmatch('labell?ed', 'labeled') else 'No match' 'Match' if re.fullmatch('labell?ed', 'labellled') else 'No match' ``` ![image.png](attachment:image.png) ``` 'Valid' if re.fullmatch(r'\d{5}', '02215') else 'Invalid' 'Valid' if re.fullmatch(r'\d{5}', '9876') else 'Invalid' ``` Você pode combinar **pelo menos n ocorrências** de uma subexpressão com o quantificador **{n,}**. A seguinte expressão regular corresponde a strings contendo pelo menos três dígitos: ``` 'Match' if re.fullmatch(r'\d{3,}', '123') else 'No match' 'Match' if re.fullmatch(r'\d{3,}', '123456789') else 'No match' ``` # **Exercício 05** Crie e teste uma expressão regular que corresponda a um endereço de rua que consiste uma ou duas palavras, de um ou mais caracteres e um número com um ou mais dígitos. Os tokens devem ser separados por um espaço, como em **Paschoal Marmo 1888**. ``` import re endereço = 'Paschoal Marmo 1888' endereço = endereço.split() #print(endereço) flag = 0 # validando endereço for e in endereço: if re.fullmatch('[A-Z][a-z]+', e) or re.fullmatch('[0-9]+', e): flag += 1 # testando validação if flag == len(endereço): for e in endereço: print(f'{e}', end=' ') print(f'é um endereço válido.') ``` ### Substituindo substrings O módulo **_re_** fornece a função **sub** para substituir padrões em uma string com base em padrões. Por padrão, a função **sub** substitui todas as ocorrências de um padrão com o texto de substituição que você especificar. Vamos converter uma string delimitada por tabulação em delimitada por vírgulas: ``` re.sub(r'\t', ', ', '1\t2\t3\t4') ``` A função **sub** recebe três argumentos obrigatórios: - o padrão para combinar (o caractere de tabulação '\t') - o texto de substituição (',') e - a string a ser pesquisada ('1\t2\t3\t4') A palavra-chave **_count_** pode ser usada para especificar o máximo número de substituições: ``` re.sub(r'\t', ', ', '1\t2\t3\t4', count=2) ``` ### Dividindo strings A função **split** divide um string em **_tokens_**, usando uma expressão regular para especificar o delimitador, e retorna uma lista de strings. 
Considere dividir uma string em tokens dividindo-a onde ocorrer **uma vírgula seguida de zero ou mais espaços em branco** - **\s** é a classe de caractere de espaço em branco e **\*** indica zero ou mais ocorrências da subexpressão anterior: ``` re.split(r',\s*', '1, 2, 3,4, 5,6,7,8') ``` Use o argumento de palavra-chave **maxsplit** para especificar o número máximo de divisões: ``` re.split(r',\s*', '1, 2, 3,4, 5,6,7,8', maxsplit=2) ``` # **Exercício 06** Substitua cada ocorrência de um ou mais (+) caracteres de tabulação adjacentes na a seguinte string por uma vírgula e um espaço: **'A\tB\t\tC\t\t\tD'** ``` import re re.sub(r'\t+', ', ', 'A\tB\t\tC\t\t\tD') ``` # **Exercício 07** Use uma expressão regular e a função **split** para dividir a seguinte string eliminando o caracteres **$**. ``` import re re.split(r'\$+', 'Matheu&$$E$a nicolli vao$$$$passear no bosqu3$ hoj$e.') ``` ### Outras funções de pesquisa Anteriormente, usamos a função **fullmatch** para determinar se uma string inteira correspondia a um expressão regular. - Existem várias outras funções de pesquisa. Aqui, discutimos o funções de pesquisa, **search**, **match**, **findall** e **finditer** #### A função search - Encontra a primeira substring correspondente em qualquer lugar em uma string. - Retorna um objeto **match** (do tipo **SRE_Match**) que contém a substring correspondente. - O método **group** do objeto **match** retorna a substring: ``` result = re.search('Python', 'Python é legal') result.group() if result else 'not found' ``` Você pode pesquisar uma correspondência apenas no início de uma string com a função **match**. ``` testStrings = [ "2x+5y","7y-3z" ] expressions = [ "2x\+5y|7y-3z","[0-9][a-zA-Z0-9_].[0-9][yz]","\d\w-\d\w" ] ``` O operador **.** combina com qualquer caractere. ``` for expression in expressions: for testString in testStrings: if re.match( expression, testString ): print(expression, "matches", testString) ``` Muitas funções do módulo de referência recebem um argumento opcional de palavra-chave **flags** que muda como expressões regulares são combinadas. - Por exemplo, as correspondências diferenciam maiúsculas de minúsculas por padrão, mas pode-se usar a constante **IGNORECASE** do módulo **re**, para realizar uma pesquisa que não diferencia maiúsculas de minúsculas: ``` result3 = re.search('Sam', 'SAM WHITE', flags=re.IGNORECASE) result3.group() if result3 else 'not found' result3.group() ``` - O metacaractere ‘^’ indica que a expressão que o sucede deve aparecer no início da string; - “^OLA” – combina com strings que começam com OLA. - O metacaractere ‘\$’ indica que a expressão que o precede deve aparecer no final da string; - Ex. “AVA\$” – combina com strings que terminam com AVA. ``` result = re.search('^Python', 'Python is fun') result.group() if result else 'not found' result = re.search('^fun', 'Python is fun') result = re.search('Python$', 'Python is fun') result.group() if result else 'not found' result = re.search('fun$', 'Python is fun') result.group() if result else 'not found' ``` #### Encontrar todas as correspondências em uma string A função **findall** encontra todos as substring correspondentes em uma string e retorna uma lista das substrings. Vamos extrair todos os números de telefone de uma string. 
Para simplificar, vamos supor que os números de telefone tenham o formato #####-####: ``` contact = 'Wally White, Home: 55555-1234, Work: 55555-4321' re.findall(r'\d{5}-\d{4}', contact) ``` A função **finditer** funciona como **findall**, no entanto retorna um iterável dos objetos correspondentes. - Para um grande número de correspondências, **finditer** pode economizar memória porque retorna uma correspondência por vez - enquanto **findall** retorna todas as correspondências de uma vez ``` for phone in re.finditer(r'\d{5}-\d{4}', contact): print(phone.group()) ``` Você pode usar os metacaracteres parênteses - **( )** - para capturar substrings de uma correspondência. - Por exemplo, vamos capturar como substrings separadas o nome e o endereço de e-mail no texto da string: ``` text = 'Charlie Cyan, e-mail: demo1@unicamp.com' pattern = r'([A-Z][a-z]+ [A-Z][a-z]+), e-mail: (\w+@\w+\.\w{3})' ``` Vamos considerar a expressão regular: - **'([A-Z][a-z]+ [A-Z][a-z]+)'** corresponde a duas palavras separadas por um espaço. -Cada palavra deve ter a letra inicial maiúscula. - **', e-mail:'** contém caracteres literais que correspondem a eles próprios. - **(\w+@\w+\.\w{3})** corresponde a um endereço de e-mail simples consistindo em um ou mais caracteres alfanuméricos (\w+), o caractere @, um ou mais caracteres alfanuméricos (\w+), um ponto (\.) e três caracteres alfanuméricos (\w{3}). - O ponto deve ser precedido de \ pois é um metacaractere de expressão regular que corresponde um caractere. ``` result = re.search(pattern, text) ``` O método **groups** do objeto **match** reorna uma tupla das substrings capturadas: ``` result.groups() ``` O método **group** do objeto **match** retorna a correspondência inteira como uma única string: ``` result.group() ``` Você pode acessar cada substring capturada passando um número inteiro para o método **group**. - As substrings capturados são numerados a partir de 1 (ao contrário dos índices de lista, que começam em 0) ``` result.group(1) result.group(2) ``` # **Exercício 08** Crie uma expressão regular para obter o nome, o telefone e o email da senteça: Albert Antstein, phone: 123-4567, e-mail: albert@bug2bug.com ``` text = 'Albert Antstein, phone: 123-4567, e-mail: albert@bug2bug.com' pattern = r'([A-Z][a-z]+ [A-Z][a-z]+), phone: (\d{3}-\d{4}), e-mail: (\w+@\w+\.\w{3})' result = re.search(pattern, text) result.groups() result.group() ``` # Introdução à ciência de dados: pandas, expressões regulares e pré-processamento de dados Os dados nem sempre vêm em formulários prontos para análise. - Podem, por exemplo, estar no formato errado, incorreto ou mesmo ausente. - Cientistas de dados podem gastar até 75% de seu tempo preparando dados antes de começarem seus estudos. O pré-processamento de dados também é chamado de *data munging* ou *data wrangling*. Duas das etapas mais importantes na manipulação de dados são a limpeza e transformação de dados dados nos formatos ideais para seus sistemas de banco de dados e software analítico. Exemplos de limpeza de dados são: - excluir observações com valores ausentes, - substituir valores razoáveis ​​por valores ausentes, - excluir observações com valores ruins, - substituir valores razoáveis ​​por valores ruins, - lançando outliers (embora às vezes você queira mantê-los), - eliminação de duplicatas (embora às vezes as duplicatas sejam válidas), - lidar com dados inconsistentes, - e mais. 
Você provavelmente já está pensando que a limpeza de dados é um processo difícil e confuso, onde você poderia facilmente tomar decisões erradas que afetariam negativamente seus resultados. - Você está correto! Quando você chegar aos estudos de caso de ciência de dados nos capítulos posteriores, verá esses dados ciência é mais uma ciência empírica, como a medicina, e menos uma ciência teórica, como física Teórica. As ciências empíricas baseiam suas conclusões em observações e experiências. Por exemplo, muitos medicamentos que efetivamente resolvem problemas médicos hoje foram desenvolvido pela observação dos efeitos que as primeiras versões desses medicamentos tiveram em animais de laboratório e eventualmente humanos, e gradualmente refinando ingredientes e dosagens. Os dados de ações os cientistas tomam pode variar por projeto, com base na qualidade e natureza dos dados e ser afetados pela organização em evolução e padrões profissionais. Algumas transformações de dados comuns incluem:] • remover dados e recursos desnecessários (falaremos mais sobre os recursos nos dados estudos de caso de ciências), • combinando recursos relacionados, • dados de amostragem para obter um subconjunto representativo (veremos no caso da ciência de dados estudos que a amostragem aleatória é particularmente eficaz para isso e diremos por quê), • padronizar formatos de dados, • agrupamento de dados, • e mais. É sempre bom manter seus dados originais. Mostraremos exemplos simples de limpeza e transformação de dados usando **Pandas Series** e **DataFrames**. ### Limpando seus dados Valores de dados inválidos e valores ausentes podem afetar significativamente a análise de dados. Alguns cientistas de dados aconselham contra quaisquer tentativas de inserir "valores razoáveis". Em vez disso, eles defendem marcando claramente os dados ausentes e deixando para o pacote de análise de dados lidar com o questão. Vamos considerar um hospital que registra as temperaturas dos pacientes (e provavelmente outros sinais) quatro vezes por dia. Suponha que os dados consistam em um nome e quatro valores reais, tal como: ['Brown, Sue', 98.6, 98.4, 98.7, 0.0] Note que a última temperatura esta faltando e foi registrada como 0,0, talvez devido ao mau funcionamento do sensor. A média dos três primeiros valores é 98,57, o que está próximo do normal. Contudo, se você calcular a temperatura média incluindo o valor ausente para o qual 0,0 foi substituído, a média é de apenas 73,93, resultado claramente questionável. Certamente, os médicos iriam não quero tomar medidas corretivas drásticas neste paciente - é crucial "obter os dados corretos". Uma maneira comum de limpar os dados é substituir um valor razoável pelo ausente temperatura, como a média das outras leituras do paciente. Se tivéssemos feito isso acima, então a temperatura média do paciente permaneceria 98,57 - uma média muito mais provável temperatura, com base nas outras leituras. #### Validação de dados Vamos começar criando uma **Serie** de códigos postais de cinco dígitos a partir de um dicionário de nome da cidade / codigo de 5 dígitos - Inserimos intencionalmente um CEP inválido para Miami: ``` import pandas as pd zips = pd.Series({'Boston': '02215', 'Miami': '3310'}) zips ``` Embora *zips* pareça uma matriz bidimensional, na verdade é unidimensional. - A segundo coluna ”representa os valores do CEP da série (dos valores do dicionário), e a - “primeira coluna” representa seus índices (das chaves do dicionário). 
Podemos usar expressões regulares com **Pandas** para validar dados. - O atributo **str** de uma **Serie** fornece processamento de strings e vários métodos de expressão regular. - Vamos usar o método **match** do atributo **str** para verificar se cada CEP é válido: ``` zips.str.match(r'\d{5}') ``` O método **match** aplica a expressão regular **\d{5}** a _cada_ elemento da série, verificando se o elemento é composto de exatamente cinco dígitos. - Você não precisa fazer um laço explicitamente em todos os códigos postais. - Este é outro exemplo de estilo de programação com iteração interna em vez de externa. O método retorna uma nova **Serie** contendo **True** para cada elemento válido. Nesse caso, o CEP de Miami não corresponde, então seu elemento é **False**. Existem várias maneiras de lidar com dados inválidos. Uma é pegá-lo em sua fonte e interagir com a fonte para corrigir o valor. Isso nem sempre é possível. Por exemplo, o os dados podem estar vindo de sensores de alta velocidade na Internet das Coisas. Nesse caso, nós não seria capaz de corrigi-lo na fonte, portanto, poderíamos aplicar técnicas de limpeza de dados. No caso do CEP de Miami inválido de 3310, podemos procurar por CEPs de Miami começando com 3310. Existem dois - 33101 e 33109 - e poderíamos escolher um deles. Às vezes, em vez de combinar um valor inteiro com um padrão, você vai querer saber se um valor contém uma substring que corresponda ao padrão. Neste caso, use o método contém em vez de correspondência. Vamos criar uma série de strings, cada uma contendo uma cidade dos EUA, estado e CEP, em seguida, determine se cada string contém uma substring que corresponde ao padrão **'[A-Z]{2}'** (um espaço, seguido por duas letras maiúsculas, seguidas por um espaço): ``` cities = pd.Series(['Boston, MA 02215', 'Miami, FL 33101']) cities ``` Não especificamos os valores do índice, portanto, a Serie usa índices iniciado em zero por padrão. - O trecho a seguir usa **contains** para mostrar que ambos os elementos da série contêm substrings que correspondem a **'[A-Z]{2}'**. - Depois, **match** é usado para mostrar que nenhum valor do elemento corresponde a esse padrão em sua totalidade, porque cada um possui outros caracteres em seu valor completo. ``` cities.str.contains(r' [A-Z]{2} ') cities.str.match(r' [A-Z]{2} ') ```
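Following up on the validation above, the boolean Series returned by `str.match` can be used directly to isolate and clean the offending rows. A minimal sketch, assuming the `zips` Series defined earlier; the replacement value `'33101'` is only one of the candidate Miami codes mentioned above, chosen here purely for illustration:
```
import pandas as pd

zips = pd.Series({'Boston': '02215', 'Miami': '3310'})

# Boolean mask: True where the value looks like a five-digit ZIP code
valid = zips.str.match(r'\d{5}')

# Inspect the invalid entries before deciding how to handle them
print(zips[~valid])

# One possible cleaning step: fall back to a chosen replacement where invalid
zips_clean = zips.where(valid, other='33101')
print(zips_clean)
```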
github_jupyter
f'{17.489:.2f}' f'{10:d}' f'{65:c} {97:c}' f'{"hello":s} {7}' from decimal import Decimal f'{Decimal("10000000000000000000000000.0"):.3f}' f'{Decimal("10000000000000000000000000.0"):.3e}' f'[{27:10d}]' f'[{3.5:10f}]' f'[{"hello":10}]' f'[{27:<15d}]' f'[{3.5:<15f}]' f'[{"hello":>15}]' f'[{27:^7d}]' f'[{3.5:^7.1f}]' f'[{"hello":^7}]' print(f'Alinhado à direita \t:\t [{"Matheus":>10}]') print(f'Alinhado à esquerda \t:\t [{"Matheus":<10}]') print(f'Alinhado ao centro \t:\t [{"Matheus":^10}]') f'[{27:+10d}]' f'[{27:+010d}]' print(f'{27:d}\n{27: d}\n{-27: d}') f'{12345678:,d}' f'{123456.78:,.2f}' '{:.2f}'.format(17.489) '{} {}'.format('Amanda', 'Cyan') '{0} {0} {1}'.format('Happy', 'Birthday') '{first} {last}'.format(first='Amanda', last='Gray') '{last} {last}'.format(first='Amanda', last='Gray') s1 = 'happy' s2 = 'birthday' s1 += ' ' + s2 s1 symbol = '>' symbol *= 5 symbol name = 'Matheus' middle = 'Percário' last = 'Bruder' name += ' ' + middle name += ' ' + last ast = '*' ast *= len(name) print(f'{ast}\n{name}\n{ast}') sentence = 'to be or not to be that is the question' sentence.count('to') sentence.index('be') sentence.rindex('be') sentence = '\t \n This is a test string. \t\t \n' print(sentence) sentence.strip() sentence quadrilha = """João amava Teresa que amava Raimundo que amava Maria que amava Joaquim que amava Lili que não amava ninguém. João foi pra os Estados Unidos, Teresa para o convento, Raimundo morreu de desastre, Maria ficou para tia, Joaquim suicidou-se e Lili casou com J. Pinto Fernandes que não tinha entrado na história.""" # Contar quantas vezes aparece ‘amava’. print(f'quadrilha.count("amava") = {quadrilha.count("amava")}') # Encontrar o primeiro e o último índice em que aparece ‘amava’. print(f'Primeiro índice = {quadrilha.index("amava")}') print(f'Último índice = {quadrilha.rindex("amava")}') quadrilha.index("amava", 0, len(quadrilha)) len(quadrilha) # Encontrar todos os índices em que aparece ‘amava’. indices = [] idx = 0 while True: idx = quadrilha.index('amava', idx + 1) indices.append(idx) if idx == quadrilha.rindex('amava'): break print(indices) # Substituir ‘amava’ por ‘treinava’. quadrilha = quadrilha.replace('amava', 'treinava') # print(quadrilha) # Indique quais linhas começam com ‘que’. for idx, linha in enumerate(quadrilha.splitlines()): if linha[0] == 'q' and linha[1] == 'u' and linha[2] == 'e': # print(linha) print(f'Linha [{idx}] começa com "que"') quadrilha = """João amava Teresa que amava Raimundo que amava Maria que amava Joaquim que amava Lili que não amava ninguém. João foi pra os Estados Unidos, Teresa para o convento, Raimundo morreu de desastre, Maria ficou para tia, Joaquim suicidou-se e Lili casou com J. Pinto Fernandes que não tinha entrado na história.""" # Obter uma lista com todas a palavras do texto palavras = quadrilha.split() palavras # Separar o texto onde exista ‘,’. quadrilha.split(',') # Substitua os espaços por ‘__’. new_quadrilha = quadrilha.replace(" ", "__") new_quadrilha # Obter uma lista onde conste cada palavra existente no texto, uma única vez, juntamente com sua frequência. 
frequencias = [] palavras_processadas = [] palavras = quadrilha.split() # preprocessamento for p in palavras: if p[-1] in ",.:;?!": palavras_processadas.append(p[:-1]) else: palavras_processadas.append(p) # Determinando frequencias for palavra in palavras_processadas: freq = palavras_processadas.count(palavra) t = (palavra, freq) if t not in frequencias: frequencias.append(t) # Printando resultado for p, f in frequencias: print(f'{p:^14} -> {f}') import re pattern = '02215' 'Match' if re.fullmatch(pattern, '02215') else 'No match' 'Match' if re.fullmatch(pattern, '51220') else 'No match' 'Valid' if re.fullmatch('[A-Z][a-z]', 'Ae') else 'Invalid' 'Match' if re.fullmatch('[^a-z]', 'A') else 'No match' 'Match' if re.fullmatch('[^a-z]', 'a') else 'No match' 'Valid' if re.fullmatch('[A-Z][a-z]+', 'Wally') else 'Invalid' 'Valid' if re.fullmatch('[A-Z][a-z]*', 'Wally') else 'Invalid' 'Valid' if re.fullmatch('[A-Z][a-z]+', 'E') else 'Invalid' # Invalid, pois 'E' precisa fazer parte das duas classes e não apenas uma. 'Valid' if re.fullmatch('[A-Z][a-z]*', 'E') else 'Invalid' 'Valid' if re.fullmatch('E{1}', 'E') else 'Invalid' 'Match' if re.fullmatch('labell?ed', 'labelled') else 'No match' 'Match' if re.fullmatch('labell?ed', 'labeled') else 'No match' 'Match' if re.fullmatch('labell?ed', 'labellled') else 'No match' 'Valid' if re.fullmatch(r'\d{5}', '02215') else 'Invalid' 'Valid' if re.fullmatch(r'\d{5}', '9876') else 'Invalid' 'Match' if re.fullmatch(r'\d{3,}', '123') else 'No match' 'Match' if re.fullmatch(r'\d{3,}', '123456789') else 'No match' import re endereço = 'Paschoal Marmo 1888' endereço = endereço.split() #print(endereço) flag = 0 # validando endereço for e in endereço: if re.fullmatch('[A-Z][a-z]+', e) or re.fullmatch('[0-9]+', e): flag += 1 # testando validação if flag == len(endereço): for e in endereço: print(f'{e}', end=' ') print(f'é um endereço válido.') re.sub(r'\t', ', ', '1\t2\t3\t4') re.sub(r'\t', ', ', '1\t2\t3\t4', count=2) re.split(r',\s*', '1, 2, 3,4, 5,6,7,8') re.split(r',\s*', '1, 2, 3,4, 5,6,7,8', maxsplit=2) import re re.sub(r'\t+', ', ', 'A\tB\t\tC\t\t\tD') import re re.split(r'\$+', 'Matheu&$$E$a nicolli vao$$$$passear no bosqu3$ hoj$e.') result = re.search('Python', 'Python é legal') result.group() if result else 'not found' testStrings = [ "2x+5y","7y-3z" ] expressions = [ "2x\+5y|7y-3z","[0-9][a-zA-Z0-9_].[0-9][yz]","\d\w-\d\w" ] for expression in expressions: for testString in testStrings: if re.match( expression, testString ): print(expression, "matches", testString) result3 = re.search('Sam', 'SAM WHITE', flags=re.IGNORECASE) result3.group() if result3 else 'not found' result3.group() result = re.search('^Python', 'Python is fun') result.group() if result else 'not found' result = re.search('^fun', 'Python is fun') result = re.search('Python$', 'Python is fun') result.group() if result else 'not found' result = re.search('fun$', 'Python is fun') result.group() if result else 'not found' contact = 'Wally White, Home: 55555-1234, Work: 55555-4321' re.findall(r'\d{5}-\d{4}', contact) for phone in re.finditer(r'\d{5}-\d{4}', contact): print(phone.group()) text = 'Charlie Cyan, e-mail: demo1@unicamp.com' pattern = r'([A-Z][a-z]+ [A-Z][a-z]+), e-mail: (\w+@\w+\.\w{3})' result = re.search(pattern, text) result.groups() result.group() result.group(1) result.group(2) text = 'Albert Antstein, phone: 123-4567, e-mail: albert@bug2bug.com' pattern = r'([A-Z][a-z]+ [A-Z][a-z]+), phone: (\d{3}-\d{4}), e-mail: (\w+@\w+\.\w{3})' result = re.search(pattern, text) 
result.groups() result.group() import pandas as pd zips = pd.Series({'Boston': '02215', 'Miami': '3310'}) zips zips.str.match(r'\d{5}') cities = pd.Series(['Boston, MA 02215', 'Miami, FL 33101']) cities cities.str.contains(r' [A-Z]{2} ') cities.str.match(r' [A-Z]{2} ')
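The frequency-counting cell above calls `list.count()` once per word, which is quadratic in the number of words. As a rough alternative sketch (reusing the notebook's variable names, with a shortened excerpt of the poem; `collections.Counter` from the standard library is the only addition), the same word/frequency pairs can be produced in a single pass:

```
# Single-pass word frequencies with collections.Counter instead of repeated list.count().
from collections import Counter

quadrilha = """João amava Teresa que amava Raimundo
que amava Maria que amava Joaquim que amava Lili
que não amava ninguém."""  # shortened excerpt of the text used above

palavras_processadas = [p.strip(",.:;?!") for p in quadrilha.split()]
frequencias = Counter(palavras_processadas)

for palavra, freq in frequencias.most_common():
    print(f'{palavra:^14} -> {freq}')
```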
# Alternative p-values This notebook explores the use of the empirical p-value as an alternative to p-values calculated using the gamma hurdle model. Empirical p-values as used here refers roughly to the proportion of values greater than the observed value. ``` import concurrent.futures import matplotlib.pyplot as plt import numpy import pandas import scipy.sparse import tqdm import hetmech.hetmat import hetmech.degree_group import hetmech.degree_weight %matplotlib inline hetmat = hetmech.hetmat.HetMat('../../data/hetionet-v1.0.hetmat/') metapaths = ['DaGbC', 'SpDpS', 'SEcCrCtD', 'CiPCiCtD'] bins = numpy.linspace(0, 1, 101) bin_counts = {metapath: pandas.DataFrame() for metapath in metapaths} metapath = 'DaGbC' def matrix_to_dgp(matrix, dwpc_scaler, source_degree_to_ind, target_degree_to_ind): rows = dict() if scipy.sparse.issparse(matrix): matrix = matrix.toarray() for source_degree, row_inds in source_degree_to_ind.items(): row_matrix = matrix[row_inds, :] for target_degree, col_inds in target_degree_to_ind.items(): if source_degree == 0 or target_degree == 0: continue slice_matrix = row_matrix[:, col_inds] values = numpy.arcsinh(slice_matrix / dwpc_scaler) rows[(source_degree, target_degree)] = values.flatten().tolist() return rows def metapath_to_full_dgp(hetmat, metapath): # _, _, pc_matrix = hetmech.degree_weight.dwpc(hetmat, metapath, dense_threshold=0.7, damping=0.0) _, _, dwpc_matrix = hetmech.degree_weight.dwpc(hetmat, metapath, dense_threshold=0.7, damping=0.5) scaler = dwpc_matrix.mean() source_degree_to_ind, target_degree_to_ind = hetmech.degree_group.metapath_to_degree_dicts(hetmat, metapath) perm_dgp = None for name, permat in tqdm.tqdm(hetmat.permutations.items()): _, _, matrix = hetmech.degree_weight.dwpc(permat, metapath, damping=0.5, dense_threshold=0.7) degree_to_dgp = matrix_to_dgp(matrix, scaler, source_degree_to_ind, target_degree_to_ind) if perm_dgp is None: perm_dgp = degree_to_dgp.copy() else: for degree_combo, dgp_list in perm_dgp.items(): dgp_list.extend(degree_to_dgp[degree_combo]) return perm_dgp def empirical_p(permuted_values, observed_value): # Empirical p-value is adjusted to minimize misestimation. More conservative so that p!=0. 
# (sum(abs(avgdiff) > abs(obsdiff)) + 1) / (length(avgdiff) + 1) # http://doi.org/10.2202/1544-6115.1585 if isinstance(permuted_values, list): permuted_values = numpy.array(permuted_values) if isinstance(observed_value, list) or isinstance(observed_value, numpy.ndarray): return numpy.array([empirical_p(permuted_values, val) for val in observed_value]) return float(sum(permuted_values >= observed_value) + 1) / (len(permuted_values) + 1) %%time dwpc_df = pandas.DataFrame(hetmech.degree_group.dwpc_to_degrees(hetmat, metapath, ignore_zeros=False)) dwpc_df = dwpc_df[(dwpc_df['source_degree'] != 0) & (dwpc_df['target_degree'] != 0)] perm_dgp = metapath_to_full_dgp(hetmat, metapath) dwpc_df['degree_combo'] = dwpc_df.apply( lambda row: tuple(row[-4:-2]), axis=1, ) %%time dwpc_sample = dwpc_df.sample(10000) rows = dwpc_sample[['degree_combo', 'dwpc']].to_dict('index') with concurrent.futures.ProcessPoolExecutor(6) as executor: for index, row in tqdm.tqdm(rows.items()): row['p_value'] = executor.submit(empirical_p, perm_dgp[row['degree_combo']], row['dwpc']) rows_df = pandas.DataFrame.from_dict(rows, orient='index') rows_df['unscaled_p'] = rows_df['p_value'].apply(lambda x: x.result()) plt.title(f"empirical p-values of {metapath}") heights, bins, _ = plt.hist(rows_df['unscaled_p'], bins=bins, density=True, label='hetionet') plt.legend() plt.ylim((0, heights[0:-1].max())); ``` ## Permutation ``` perms = hetmat.permutations.copy() permat_1 = perms.pop('001') %%time dwpc_df = pandas.DataFrame(hetmech.degree_group.dwpc_to_degrees(permat_1, metapath, ignore_zeros=False)) dwpc_df = dwpc_df[(dwpc_df['source_degree'] != 0) & (dwpc_df['target_degree'] != 0)] def metapath_to_full_dgp(hetmat, metapath): # _, _, pc_matrix = hetmech.degree_weight.dwpc(hetmat, metapath, dense_threshold=0.7, damping=0.0) _, _, dwpc_matrix = hetmech.degree_weight.dwpc(hetmat, metapath, dense_threshold=0.7, damping=0.5) scaler = dwpc_matrix.mean() source_degree_to_ind, target_degree_to_ind = hetmech.degree_group.metapath_to_degree_dicts(hetmat, metapath) perm_dgp = None for name, permat in tqdm.tqdm(perms.items()): _, _, matrix = hetmech.degree_weight.dwpc(permat, metapath, damping=0.5, dense_threshold=0.7) degree_to_dgp = matrix_to_dgp(matrix, scaler, source_degree_to_ind, target_degree_to_ind) if perm_dgp is None: perm_dgp = degree_to_dgp.copy() else: for degree_combo, dgp_list in perm_dgp.items(): dgp_list.extend(degree_to_dgp[degree_combo]) return perm_dgp perm_dgp = metapath_to_full_dgp(permat_1, metapath) dwpc_df['degree_combo'] = dwpc_df.apply( lambda row: tuple(row[-4:-2]), axis=1, ) %%time dwpc_sample = dwpc_df.sample(10000) rows = dwpc_sample[['degree_combo', 'dwpc']].to_dict('index') with concurrent.futures.ProcessPoolExecutor(6) as executor: for index, row in tqdm.tqdm(rows.items()): row['p_value'] = executor.submit(empirical_p, perm_dgp[row['degree_combo']], row['dwpc']) perm_df = pandas.DataFrame.from_dict(rows, orient='index') perm_df['unscaled_p'] = perm_df['p_value'].apply(lambda x: x.result()) plt.title(f"empirical p-values of {metapath}") heights, bins, _ = plt.hist(perm_df['unscaled_p'], bins=bins, density=True, label='permat_1') plt.legend() plt.ylim((0, heights[0:-1].max())); plt.title(f"empirical p-values of {metapath}") heights, bins, _ = plt.hist(perm_df['unscaled_p'], bins=bins, density=True, label='permat_1', alpha=0.5) heights, bins, _ = plt.hist(rows_df['unscaled_p'], bins=bins, density=True, label='hetionet', alpha=0.5) plt.legend() plt.ylim((0, 1.1 * heights[0:-1].max())); ```
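The adjusted formula in `empirical_p` — (number of permuted values ≥ observed + 1) / (N + 1), following the reference cited above — is easy to sanity-check on synthetic data. A minimal standalone sketch (the normal draws and the observed value are invented stand-ins for a degree combination's permuted values and a scaled DWPC):

```
import numpy

rng = numpy.random.default_rng(0)
permuted = rng.normal(size=10_000)   # stand-in for one degree combination's permuted values
observed = 3.0                       # stand-in for an observed, arcsinh-scaled DWPC

p = (numpy.sum(permuted >= observed) + 1) / (len(permuted) + 1)
print(f'empirical p-value: {p:.4f}')

# Even a value far beyond every permuted value gets p = 1/(N+1), never exactly 0.
print((numpy.sum(permuted >= 100.0) + 1) / (len(permuted) + 1))
```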
```
## Definitions and Framing

* **Supervised Machine Learning:** Using known data to generate useful predictions on unseen data.

  Label $y$: The target variable that we are trying to predict, for example 'spam' or 'not spam'.

  Features: Something about the data that is used to represent the data, that is later fed into a model. Complicated machine learning models can have multiple features $x_1, x_2, \ldots, x_n$. So this is a vector $\mathbf{x}$.

  Model: Maps unseen data to predictions of label $y$. It is defined by internal parameters that are learned using a training set of labeled data. Labeled data can be represented as $(\mathbf{x}, y)$. In unlabeled data, $y$ is not known and is predicted by the model.

* **Models** A model defines the relationship between the features and the label. There are two key phases in the life of a model: <br>

  Training: The phase where the model is trained, or learned. You show the model a number of examples of labeled data, and allow the model to learn the relationship between the features and the label. In other words, it is learning the values of the parameters in the model. (Settings fixed before training, such as the learning rate, are called hyperparameters, in contrast to the parameters that the model learns.)

  Inference: The phase where the model is used to generate labels $y'$ given features $\mathbf{x}$.

* **Regression vs. Classification** A regression predicts continuous values while a classification predicts discrete values.

## Descending into ML

Topics covered: Linear Regression, Training and Loss

* The $L_2$ loss is defined as
$$
\begin{align}
L_2 \textrm{ Loss } = \sum\limits_i (y_i - prediction(\mathbf{x}_i))^2
\end{align}
$$
where we sum over all data points $i$ in the training set.

* The convention in machine learning is to represent the linear model as $y' = w_1 x_1 + b$ instead of the more traditional $y = mx + b$. We could easily generalize this regression from one feature to multiple features, and the prediction would be given by $y' = w_1 x_1 + w_2 x_2 + w_3 x_3 + b$, where all the $w_i$'s are weights and all the $x_i$'s are features. The process of fitting the model is called training and the process of using the trained model to make a prediction is called inference.

* A commonly used loss function is the mean squared error (MSE), but this is neither the only loss function nor the best or most practical loss function for all cases.

## Reducing Loss

* One of the popular ways to find the minimum of the loss function is to use Gradient Descent. We calculate a gradient at each point and move in the direction of decreasing loss. The step size that we use as we advance along the negative gradient is called the learning rate. This has to be chosen carefully: in multi-dimensional problems, too large a learning rate can cause the code to become unstable.

* We can also get trapped in local minima if the space is *non-convex*. There is a whole sprawling field of non-convex optimization.

* There are two important flavors of gradient descent:

  **Stochastic Gradient Descent:** In theory, while using gradient descent, we need to calculate the gradient of the loss function considering all examples. However, in practice this is found to be excessive and computationally expensive. We therefore select only one example at random and calculate the gradient of the loss function considering only that one example. This is called stochastic gradient descent. Although this might require more steps to reach the optimum, overall there is usually less computation when dealing with very large data sets.
  Gradient calculations can be very expensive. Mathematically, we want to calculate
$$
\begin{align}
w_{n+1} = w_n - \eta \nabla Q(w_n)
\end{align}
$$
where $\eta$ is the learning rate, $Q_i(w)$ is the $L_2$ loss for the $i$-th example, and $Q(w) = \frac{1}{N}\sum_i Q_i(w)$ is the average loss over all $N$ examples. What we do instead, in stochastic gradient descent, is to first randomly pick a particular example $k$, calculate its loss $Q_k(w)$, and then update $w$ using
$$
\begin{align}
w_{n+1} = w_n - \eta \nabla Q_k(w_n)
\end{align}
$$
and perform this iteratively until the stopping criterion is reached. When there are multiple parameters to optimize, the gradient is of course a vector, and so is $\mathbf{w} = (w_1, w_2, \ldots)$, and we proceed in the direction of steepest descent in steps of size $\eta$.

  **Mini-batch Gradient Descent:** This is very similar to stochastic gradient descent except that instead of taking only one data point, we take batches of, say, 10 or 100 of them. Especially when datasets contain duplicates, an enormous dataset often carries little more information than a merely very large one. Mini-batch gradient descent exploits this fact and works very well in practice. Typical batch sizes range between 10 and 1000. Stochastic gradient descent is the extreme case of mini-batch gradient descent with batch size 1.

* The algorithm to train the model in this case is iterative: we start with some initial guesses for the parameters, compute the loss function for those values, update the values of the parameters through some scheme (with the goal of moving in the direction of lower loss), calculate the loss for the updated values of the parameters, and proceed iteratively until we achieve convergence. Convergence is usually assumed when the loss falls below some threshold value, or when the loss function starts changing extremely slowly.

## First steps with TensorFlow

* TensorFlow is a computational framework that allows you to build machine learning models. We can use lower-level APIs, defining models as a series of mathematical operations, or we can use predefined higher-level APIs such as `tf.estimator`. These architectures can be linear regressors or neural networks.

* TensorFlow consists of the following two components:
  1. A graph protocol buffer
  2. A runtime that executes the distributed graph

* The **graph protocol buffer**, or protobuf, takes data structures written in a text file and then generates classes in Python (or another language) that allow you to load, save and interact with the data in a user-friendly way. In this sense the protobuf and the runtime are akin to Python code and a Python interpreter.

* Because TensorFlow is built from APIs at various levels of abstraction, we have a choice of level. In general, I should choose the layer that offers the highest level of abstraction. Of course, the highest layers are also less flexible, so for some special modeling cases, if I need more flexibility, I can drop one rung lower in the API.

### `tf.estimator` API

* The `tf.estimator` API is one of the highest-level APIs and provides a lot of prepackaged tools. Below we show some code to exemplify its use. In the example below, we are going to estimate the median housing price based on just one input feature. The data is from the 1990 California housing census data.
Available [here](https://developers.google.com/machine-learning/crash-course/california-housing-data-description) First we perform some imports ```python import math from matplotlib import cm from matplotlib import gridspec import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn import metrics import tensorflow as tf ``` First, let us load the data ```python california_housing_dataframe = pd.read_csv('https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv', sep=',') ``` We first want to randomize the data so that there are no unknown artifacts while performing the stochastic gradient descent. We also want to scale the value of `median_house_value` to be in the units of thousands so that we can use a reasonable and familiar learning rate. ```python california_housing_dataframe = california_housing_dataframe.reindex( np.random.permutation(california_housing_dataframe.index)) california_housing_dataframe['median_house_value'] /= 1000 ``` ```
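Before moving on to `tf.estimator`, the update rule above can be illustrated without TensorFlow at all. A minimal NumPy sketch of mini-batch gradient descent for the linear model $y' = w_1 x_1 + b$ with an MSE loss, on synthetic data (every name and number here is invented for illustration):

```python
import numpy as np

# Minimal mini-batch SGD sketch for the linear model y' = w1*x1 + b with MSE loss.
rng = np.random.default_rng(42)
x = rng.uniform(-1, 1, size=1000)
y = 3.0 * x + 0.5 + rng.normal(scale=0.1, size=x.shape)   # synthetic "labeled data"

w, b = 0.0, 0.0
eta, batch_size = 0.1, 32                                  # learning rate and mini-batch size

for step in range(2000):
    idx = rng.integers(0, len(x), size=batch_size)         # sample a mini-batch
    xb, yb = x[idx], y[idx]
    err = (w * xb + b) - yb                                 # prediction error on the batch
    grad_w = 2.0 * np.mean(err * xb)                        # d(MSE)/dw
    grad_b = 2.0 * np.mean(err)                             # d(MSE)/db
    w -= eta * grad_w                                       # w_{n+1} = w_n - eta * grad
    b -= eta * grad_b

print(f'w = {w:.3f}, b = {b:.3f}')   # should end up close to 3.0 and 0.5
```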
``` import cobra from cobra.flux_analysis import gapfill import glob import time import sys # Read in model_path names model_paths = glob.glob('../models/*.xml') # Check for duplicate models: there are none. len(model_paths) == len(set(model_paths)) model_paths[1] # Check each model for mass and charge balance with '.check_mass_balance()' t = time.time() bal_rxn_ids = [] imbal_charge_rxn_ids = [] imbal_mass_rxn_ids = [] imbal_charge_mass_rxn_ids = [] error_rxn_ids = [] bal_enuf_rxn_ids = [] index = 0 for model_path in model_paths: sys.stdout.write('\r'+ str(index)) sys.stdout.flush() model_x = cobra.io.read_sbml_model(model_path) # Make model specific element list elem_list = [] for metabolite in model_x.metabolites: elems = dict.keys(metabolite.elements) elem_list = elem_list + elems elem_set = set(elem_list) # Make model specific exchange rxn list exchange_ids = [x.id for x in model.boundary] # exchange_ids = [] # for exchange_rxn in model_x.exchanges: # ex_id = exchange_rxn.id # exchange_ids.append(ex_id) for rxn in model_x.reactions: rxn_id = rxn.id if not rxn_id in exchange_ids: #rxn_id = model_x.reactions[936].id imbalcharge = False rxn_mass_dict = model_x.reactions.get_by_id(rxn_id).check_mass_balance() rxn_imbalance_elems = set(dict.keys(rxn_mass_dict)) if bool(rxn_mass_dict) == False: #print "balanced" bal_rxn_ids.append(rxn_id) elif bool(rxn_mass_dict) == True: if set(['charge']).issubset(rxn_imbalance_elems) == True: #print "Charge imbalance" rxn_imbalance_elems.remove('charge') imbalcharge = True if (rxn_imbalance_elems != set()) & (rxn_imbalance_elems.issubset(elem_set) == True): #print "Mass imbalance" if imbalcharge == False: imbal_mass_rxn_ids.append(rxn_id) elif imbalcharge == True: imbal_charge_mass_rxn_ids.append(rxn_id) elif rxn_imbalance_elems == set(): #print "Mass balanced" imbal_charge_rxn_ids.append(rxn_id) else: #print "Error" error_rxn_ids.append(rxn_id) else: #print "ERROR with determining if rxn is balanced" error_rxn_ids.append(rxn_id) bal_enuf_rxn_ids = list(set(bal_rxn_ids + imbal_charge_rxn_ids)) index = index + 1 elapsed = time.time() - t print "Time to complete:", elapsed/60, "mins" #print bal_rxn_ids #print imbal_charge_rxn_ids print "Mass Imbalanced Reactions" print "Total:", len(imbal_mass_rxn_ids) print "Set:", len(list(set(imbal_mass_rxn_ids))) print list(set(imbal_mass_rxn_ids)) print "Charge and Mass Imbalanced Reactions" print "Total:", len(imbal_charge_mass_rxn_ids) print "Set:", len(list(set(imbal_charge_mass_rxn_ids))) print list(set(imbal_charge_mass_rxn_ids)) print "Error Reactions" print "Total:", len(error_rxn_ids) print "Set:", len(list(set(error_rxn_ids))) print list(set(error_rxn_ids)) #print bal_enuf_rxn_ids # Which models have a problem and what is their problem for rxn10124_c? 
t = time.time() index = 0 rxn_id = 'rxn10124_c' # Issue rxn imbalanced = {} for model_path in model_paths: sys.stdout.write('\r'+ str(index)) sys.stdout.flush() model_x = cobra.io.read_sbml_model(model_path) if rxn_id in set([reaction.id for reaction in model_x.reactions]): # Make model specific element list if the model is worth looking at elem_list = [] for metabolite in model_x.metabolites: elems = dict.keys(metabolite.elements) elem_list = elem_list + elems elem_set = set(elem_list) # Check mass balance rxn = model_x.reactions.get_by_id(rxn_id) if len(list(rxn.check_mass_balance())) > 0: model_id = [model_path.replace("models/","").replace(".xml","")] imbalanced[model_id[0]] = rxn.check_mass_balance() index = index + 1 elapsed = time.time() - t print "Time to complete:", elapsed/60, "mins" print " " imbalanced # Check for mass balance with FBA t = time.time() counter = 0 freemass_set = set() for model_path in model_paths: sys.stdout.write('\r'+ str(counter)) sys.stdout.flush() model = cobra.io.read_sbml_model(model_path) freemass = checkFreeMass(model) freemass_set |= set(freemass) counter += 1 elapsed = time.time() - t print "Time to complete:", elapsed/60, "mins" # There are no metabolites being generated; this also accounts for energy generating loops. freemass_set # Identify gapfilled reactions def findGapFilled(model): gapfilled = [] for index in model.reactions: if len(list(index.genes)) == 0: if not index in model.boundary: gapfilled.append(index.id) if len(gapfilled) > 0: print(str(len(gapfilled)) + ' reactions not associated with genes') return gapfilled # Checks which metabolites are generated for free def checkFreeMass(raw_model): with raw_model as model: for index in model.boundary: model.reactions.get_by_id(index.id).lower_bound = 0. demand_metabolites = [x.reactants[0].id for x in model.demands if len(x.reactants) > 0] + [x.products[0].id for x in model.demands if len(x.products) > 0] free = [] for index in model.metabolites: if index.id in demand_metabolites: continue else: demand = model.add_boundary(index, type='demand') model.objective = demand obj_val = model.slim_optimize(error_value=0.) 
if obj_val > 1e-8: free.append([index.id, obj_val]) model.remove_reactions([demand]) if len(free) > 0: print(str(len(free)) + ' metabolites are generated for free') return(free) # Check for mass and charge balance in reactions def checkBalance(raw_model, exclude=[]): with raw_model as model: imbalanced = {} mass_imbal = 0 charge_imbal = 0 elem_set = set() for metabolite in model.metabolites: try: elem_set |= set(metabolite.elements.keys()) except: pass if not type(exclude) is list: exclude = [exclude] for index in model.reactions: if index in model.boundary or index.id in exclude: continue else: try: test = index.check_mass_balance() except ValueError: continue if len(list(test)) > 0: imbalanced[index.id] = test if 'charge' in test.keys(): charge_imbal += 1 if len(set(test.keys()).intersection(elem_set)) > 0: mass_imbal += 1 if mass_imbal != 0: print(str(mass_imbal) + ' reactions are mass imbalanced') if charge_imbal != 0: print(str(charge_imbal) + ' reactions are charge imbalanced') return(imbalanced) # Identifies blocked reactions, 1% cutoff for fraction of optimum def blockedReactions(model): fva = flux_variability_analysis(model, fraction_of_optimum=0.01) noflux = (fva["maximum"].abs() < 1e-8) & (fva["minimum"].abs() < 1e-8) blocked = noflux[noflux==True].index.tolist() if noflux.sum() != 0: print(str(noflux.sum()) + ' reactions are blocked') return blocked # Checks the quality of models by a couple metrics and returns problems def checkQuality(model, exclude=[]): start_time = time.time() if model.name != None: model_name = model.name else: model_name = 'model' gaps = findGapFilled(model) freemass = checkFreeMass(model) balance = checkBalance(model, exclude) blocked = blockedReactions(model) duration = int(round(time.time() - start_time)) print('Took ' + str(duration) + ' seconds to analyze ' + model_name) return gaps, freemass, balance, blocked ```
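For reference, the shape of what `Reaction.check_mass_balance()` returns can be seen on a toy reaction that is not part of the models above: an empty dict when balanced, otherwise a dict of element (and charge) differences. The metabolite formulas here are invented purely for illustration:

```
import cobra

a = cobra.Metabolite('a_c', formula='C6H12O6', charge=0, compartment='c')
b = cobra.Metabolite('b_c', formula='C3H6O3', charge=0, compartment='c')

rxn = cobra.Reaction('toy_rxn')
rxn.add_metabolites({a: -1.0, b: 1.0})   # A -> B loses three carbons
print(rxn.check_mass_balance())          # e.g. {'C': -3.0, 'H': -6.0, 'O': -3.0}

rxn.add_metabolites({b: 1.0})            # coefficients combine, so this makes A -> 2 B
print(rxn.check_mass_balance())          # {} once mass is balanced
```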
## <u><b>Heart Disease Prediction<b><u> Given many features we have to predict chances of heart disease 1 -> Person has heart disease, 0 -> Person has no heart disease Dataset link - https://www.kaggle.com/ronitf/heart-disease-uci ### Exploratory Data Analysis (EDA) ``` #Importing pandas.Using it for data analysis and data manipulation import pandas as pd #Reading the dataset df = pd.read_csv("heart.csv") #Checking head and examining df.head(10) #Checking tail and examining df.tail(10) #Mean,count, and standard deviation is checked df.describe() #Data types and non-null count is displayed for all features. We have balanced data set here df.info() #Number of rows and columns df.shape #Column names df.columns #Checking if any feature has null values. We have balanced dataset here df.isnull().any() ``` ### Data Visualization ``` import seaborn as sns import matplotlib.pyplot as plt #Count Plot sns.countplot(x="target", data=df) #Density Plot df.plot(kind='density', subplots=True, layout=(8,2), sharex=False , figsize =(18,20)) #Histogram df.hist(figsize = (12,10),color='cyan') #Box plot df.plot(kind='box', subplots=True, layout=(7,2), sharex=False,sharey=False ,figsize =(18,18), color='red') #Correlation correlation = df.corr() correlation #KDE plot x = df['thalach'] sns.kdeplot(x, shade=True, color='g') #Heat map plt.figure(figsize=(18,12)) plt.title('Correlation Heatmap plot') a = sns.heatmap(correlation, square=True, annot=True) a.set_xticklabels(a.get_xticklabels()) a.set_yticklabels(a.get_yticklabels()) plt.show() #Distance Plot x = df['age'] sns.displot(x, bins=15,color='orange') #Pair-Plot feature = ['trestbps', 'age', 'chol', 'oldpeak', 'target','thalach' ] sns.pairplot(df[feature], kind='scatter', diag_kind='hist') #Distance Plot sns.displot(df['trestbps'],bins = 10,kde=True,rug=True,color='g') #Category Plot plt.figure(figsize=(8,8)) sns.catplot(x='chol', kind = 'box', data = df,color='r') #Kernel Density Estimation plt.figure(figsize=(8,8)) sns.kdeplot(data=df['thalach'],color='maroon') ``` ### Feature Engineering and Feature Selection ``` categorical = [] continous= [] for column in df.columns: if len(df[column].unique()) <= 10: categorical.append(column) else: continous.append(column) categorical.remove('target') categorical continous dataset = pd.get_dummies(df, columns = categorical) cols = ['cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach', 'exang'] X = df[cols] y = dataset.target ``` ### Modelling ``` df.columns X.head(10) X.columns y.head(10) ``` <b> 1. Logistic Regression<b> ``` #Splitting into Train and Test dataset from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) #Using this class to make my print statement look bold class color: BOLD = '\033[1m' from sklearn.linear_model import LogisticRegression from sklearn import metrics from sklearn.metrics import accuracy_score import numpy as np from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler accuracies = {} logreg = make_pipeline(StandardScaler(), LogisticRegression()) logreg.fit(X_train, y_train) accuracy = accuracy_score(y_test, logreg.predict(X_test)) accuracy_rounded = round(accuracy*100,2) accuracies['Logistic Regression'] = accuracy_rounded print(color.BOLD + "Accuracy of Logistic regression is ", accuracy_rounded,'%') ``` <b> 2. 
K-Nearest Neighbors<b> ``` from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors = 6) knn.fit(X_train, y_train) accuracy = accuracy_score(y_test, knn.predict(X_test)) accuracy_rounded = round(accuracy*100,2) accuracies['K-Nearest Neighbors'] = accuracy_rounded print(color.BOLD + "Accuracy of K-Nearest Neigbors is ", accuracy_rounded,'%') #to find best k value scoreList = [] for i in range(1,20): knn2 = KNeighborsClassifier(n_neighbors = i) knn2.fit(X_train, y_train) scoreList.append(knn2.score(X_test, y_test)) plt.plot(range(1,20), scoreList) plt.xticks(np.arange(1,20,1)) plt.xlabel("K value") plt.ylabel("Score") plt.show() ``` As per above diagram, K value is best when we choose <b>6,14,18<b> <b> 3. Support Vector Machines<b> ``` from sklearn.svm import SVC, LinearSVC svc = SVC() svc.fit(X_train, y_train) accuracy = accuracy_score(y_test, svc.predict(X_test)) accuracy_rounded = round(accuracy*100,2) accuracies['Support Vector Machines(SVC)'] = accuracy_rounded print(color.BOLD + "Accuracy of Support Vector Machines(SVC) is ", accuracy_rounded,'%') ``` <b> 4. Naive Bayes<b> ``` from sklearn.naive_bayes import GaussianNB GNB = GaussianNB() GNB.fit(X_train, y_train) accuracy = accuracy_score(y_test, GNB.predict(X_test)) accuracy_rounded = round(accuracy*100,2) accuracies['Naive Bayes'] = accuracy_rounded print(color.BOLD + "Accuracy of Naive Bayes is ", accuracy_rounded,'%') ``` <b> 5. Decision Tree<u> ``` from sklearn.tree import DecisionTreeClassifier decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train, y_train) accuracy = accuracy_score(y_test, decision_tree.predict(X_test)) accuracy_rounded = round(accuracy*100,2) accuracies['Decision Tree'] = accuracy_rounded print(color.BOLD + "Accuracy of Decision Tree is ", accuracy_rounded,'%') ``` <b> 6. 
Random Forest <b> ``` from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier() classifier.fit(X_train, y_train) model_accuracy = accuracy_score(y_test, classifier.predict(X_test)) accuracy_rounded = round(model_accuracy*100,2) accuracies['Random Forest'] = accuracy_rounded print(color.BOLD + "Accuracy of Random Forest is ", accuracy_rounded,'%') #Accuracies for all the models accuracies ``` ### <b>Comparing Models<b> ``` colors = ["purple", "green", "orange", "magenta","#CFC60E","#0FBBAE"] sns.set_style("whitegrid") plt.figure(figsize=(18,10)) plt.yticks(np.arange(0,100,10)) plt.ylabel("Accuracy %",fontsize=20) plt.xlabel("Algorithms",fontsize=20) sns.barplot(x=list(accuracies.keys()), y=list(accuracies.values()), palette=colors) plt.show() ``` ### Model Evaluation and Accuracy Measurement ``` # Predicted values for the test dataset y_head_lr = logreg.predict(X_test) knn3 = KNeighborsClassifier(n_neighbors = 10) knn3.fit(X_train, y_train) y_head_knn = knn3.predict(X_test) y_head_svm = svc.predict(X_test) y_head_nb = GNB.predict(X_test) y_head_dtc = decision_tree.predict(X_test) y_head_rf = classifier.predict(X_test) #Measuring Accuracy using confusion matrix for all the algorithms from sklearn.metrics import confusion_matrix cm_lr = confusion_matrix(y_test,y_head_lr) cm_knn = confusion_matrix(y_test,y_head_knn) cm_svm = confusion_matrix(y_test,y_head_svm) cm_nb = confusion_matrix(y_test,y_head_nb) cm_dtc = confusion_matrix(y_test,y_head_dtc) cm_rf = confusion_matrix(y_test,y_head_rf) #Plotting confusion matrix for all the algorithms plt.figure(figsize=(24,12)) plt.suptitle("Confusion Matrices",fontsize=28) plt.subplots_adjust(wspace = 0.4, hspace= 0.4) plt.subplot(2,3,1) plt.title("Logistic Regression Confusion Matrix",fontsize=20) sns.heatmap(cm_lr,annot=True,cmap="Blues",fmt="d",cbar=True, annot_kws={"size": 24}) plt.subplot(2,3,2) plt.title("K Nearest Neighbors Confusion Matrix",fontsize=20) sns.heatmap(cm_knn,annot=True,cmap="BuPu",fmt="d",cbar=True, annot_kws={"size": 24}) plt.subplot(2,3,3) plt.title("Support Vector Machines Confusion Matrix",fontsize=20) sns.heatmap(cm_svm,annot=True,cmap="Greens",fmt="d",cbar=True, annot_kws={"size": 24}) plt.subplot(2,3,4) plt.title("Naive Bayes' Confusion Matrix",fontsize=20) sns.heatmap(cm_nb,annot=True,cmap="YlGnBu",fmt="d",cbar=True, annot_kws={"size": 24}) plt.subplot(2,3,5) plt.title("Decision Tree Classifier Confusion Matrix",fontsize=20) sns.heatmap(cm_dtc,annot=True,cmap="icefire",fmt="d",cbar=True, annot_kws={"size": 24}) plt.subplot(2,3,6) plt.title("Random Forest Confusion Matrix",fontsize=20) sns.heatmap(cm_rf,annot=True,cmap="flare",fmt="d",cbar=True, annot_kws={"size": 24}) plt.show() #Exporting model using joblib library import joblib joblib.dump(logreg,"hdp_model.pkl") ```
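Once exported, the saved pipeline can be loaded back and used for a single prediction. A small sketch (the patient values below are invented; the column order follows the `cols` list used to build `X`):

```
import joblib
import pandas as pd

model = joblib.load("hdp_model.pkl")

# One made-up patient record, in the same feature order used for training
patient = pd.DataFrame(
    [[2, 130, 250, 0, 1, 160, 0]],
    columns=['cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach', 'exang'],
)
print(model.predict(patient))         # 1 -> heart disease, 0 -> no heart disease
print(model.predict_proba(patient))   # class probabilities from the logistic regression
```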
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from src.mnist import load_mnist_data # Load all the MNIST digit data as binary and convert it to np.array
from src.Model import Model # Custom deep model class
from PIL import Image # Pixel manipulation
!pip install requests
```

### Training the model from the *Model* class
#### This model comes from the custom *Model* class, which is implemented using only numpy.
#### We will train it to recognize the digits of the MNIST dataset with the following configuration:
#### - **2** hidden layers of **128** dimensions
#### - **0.035** learning rate
#### - **sigmoid** activation in all layers
#### - **MSE** cost function
#### - **2** epochs (you can use more if you want, but 2 already gives good accuracy); each epoch goes through **42 thousand** training examples

## Comments on training:
##### There were earlier experiments using two (or more, but those would take too long on my machine) hidden layers of 64 dimensions, another time with 16 dimensions, and finally 48. At first I noticed that, with sigmoid, 64 to 128 dimensions always found a path that learned quickly enough: with 50 epochs at lr = 0.035 it rapidly reached 92% accuracy, and with a few more epochs it reached 97% (*on training data*). However, I worry that with so many dimensions the model may over-specialize. I say this because further down in this notebook there is a section where we can watch what the model guesses on a *custom* example, a 28x28 image of a digit I drew in **Microsoft Paint**, and in those cases the 128-dimension model gets things wrong that a 16-dimension model would not, even though the 16-dimension model has lower accuracy (91%); it seemed to generalize better. I imagine that with fewer dimensions in the hidden layer the model is forced to learn what matters most, instead of "keeping in mind" specific characteristics of the training data. Besides that, the models with more layers of small dimensions took a long time to improve accuracy. Example, a model with:
- 3 hidden layers with dimensions (20,16,20)
- *hyperbolic tangent* ("tanh") activation in all layers
- lr = 0.02
- MSE cost
- 500 epochs of 40 thousand examples

This model reached an accuracy of 88%, far inferior in both training speed and correctness. Still, this tanh model generalized decently, coming close to the model we are going to use (2 hidden layers of 128 dims). Of course this was just me drawing in Paint and testing, but it makes you wonder: if that model had kept training, would it have generalized better?

```
# training_data: the 42 thousand examples used by the model.
# dimensions: the dimensions used to train the model (784,128,128,10)
all_data = load_mnist_data()
split = 42000
normalize_term = np.amax(all_data["inputs" ])
print("Normalizing Term:", normalize_term)
print("Number of examples:", len(all_data["inputs"]))
print("Image dimension: ", len(all_data["inputs"][0]))
print("Number of digits: ", len(all_data["outputs"][0]))
# We train with 42 thousand examples
train_data = {
    "inputs" : all_data["inputs" ][:split]/normalize_term,
    "outputs": all_data["outputs"][:split]
}
# The remaining 28 thousand examples the model has not seen will be our test data
test_data = {
    "inputs" : all_data["inputs" ][split:]/normalize_term,
    "outputs": all_data["outputs"][split:]
}
test_data_being_used = test_data
```

## Training the model
```
# Model with the configuration mentioned above
#model = Model((784,128,128,10), activation="sigmoid", verbose=0, wr=(-0.5,0.5))
"""If you want to start from scratch, comment out the next line and uncomment the previous one"""
model = Model.load("./models/model_128x128")
"""You can re-run this cell several times to train the model with different epochs or learning rates"""
learning_rate = 0.035
# Feel free to increase the epochs; with 50 it reaches about 98% accuracy
epochs = 0
model_filename = "model_128x128"
print("\n> Model Started Training...\n")
model.train( train_data["inputs"], train_data["outputs"], lr = learning_rate, epochs=epochs, shuffle=True, autosave=False)
# If you already have a saved model, load it like this
#model = Model.load("./models/" + model_filename)
print("> Done.")
print(f"{np.std(train_data['inputs'])=}")
model.print()
model.save("./models/" + model_filename)
print("> model saved in: ",model_filename)
```

### Function used to make predictions on the MNIST set with the model
*Calling **test_prediction** will show the **image**, the correct **label**, and the model's **decision**.*
```
def test_prediction(index, data, model:Model):
    current_image = data["inputs"][index]
    y_predict = model.predict(current_image)[0]
    prediction = (y_predict == y_predict.max()).astype(int)
    guess = list(prediction).index(1)
    label = data["outputs"][index]
    ground_truth = list(label).index(1)
    # Option to skip providing the correct label, for when we use Paint-made images
    if len(label) < 10:
        label = "made on paint"
        ground_truth = " paint"
    print("Label: ", label)
    print("Prediction: ", prediction)
    plt.gray()
    plt.title("Model thinks it is: " + str(guess) + "\nGround truth: " + str(ground_truth))
    plt.imshow( current_image.reshape((28, 28)) * 255, interpolation='nearest')
    plt.xticks([])
    plt.yticks([0])
    plt.show()
```

## Function to test accuracy on the remaining 28 thousand digits the model has not seen yet
### It also returns all the wrongly predicted indices
```
def get_acurracy(testing_data):
    predicts = list()
    for y in testing_data["inputs"]:
        y_predict = model.predict(y)[0]
        prediction = (y_predict == y_predict.max()).astype(int)
        predicts.append(prediction)
    return model.accuracy(predictions=predicts, outputs=testing_data["outputs"], verbose=0)
```

## Array of indices where the model was wrong
#### Accuracy: about **97%** on the 28 thousand test examples the model has never seen
```
accuracy,wrong_indexes = get_acurracy(test_data_being_used)
print( "Number of examples:",len(test_data_being_used["inputs"]))
print(f"Indices where it is wrong: {wrong_indexes}\n")
print("Accuracy: ",accuracy,'\n' )
```

### Pick an index in the range [0, 28000); about 97.9% will be correct
#### *The idea is to see the image and the model's prediction*
```
# Pick an index in the range [0, 28000); about 97% will be correct
index = 27049
test_prediction(index,test_data_being_used,model)
```

## Some of the images the model got wrong
#### *Just swap in these indices to see examples where the model failed*
```
# Some of the wrong indices: 27749, 21073, 27839, 26522, 21558 (using the 28k testing_data)
index_errado = 21077
test_prediction(index_errado,test_data_being_used,model)
```

# Testing with new data generated by drawing in Paint
### This function helps grab the pixels as an np.array
```
def get_pixels(filename):
    im = Image.open(filename, "r")
    pixels = list(im.getdata())
    mono_channel_pixels = [rgb[2] for rgb in pixels[0:784]]
    return np.array(mono_channel_pixels)/255.0
```

## Now **edit** the file *digit28x28.png* in **Paint** and **run** the cell below again to refresh the model's prediction
### Remember to keep the 28x28 format
```
test_pixels = get_pixels("digit28x28.png")
test_prediction(0,{"inputs":[test_pixels],"outputs":[[1]]},model)
```
github_jupyter
0.355999
0.922831
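The notebook above trains a numpy-only MLP (784-128-128-10, sigmoid activations, MSE cost) through the custom `src.Model` class, whose internals are not shown there. As a rough, self-contained sketch of what such a sigmoid forward pass looks like (the weight shapes, initialization and stand-in input here are assumptions for illustration, not the actual `Model` code):

```
# Minimal, assumed sketch of a sigmoid MLP forward pass with the notebook's 784-128-128-10 layout.
import numpy as np

rng = np.random.default_rng(0)
dims = (784, 128, 128, 10)                      # same architecture as the notebook
weights = [rng.uniform(-0.5, 0.5, (dims[i], dims[i + 1])) for i in range(len(dims) - 1)]
biases  = [np.zeros(dims[i + 1]) for i in range(len(dims) - 1)]

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def forward(x):
    """Run one 784-long input vector through the sigmoid MLP and return 10 activations."""
    a = x
    for W, b in zip(weights, biases):
        a = sigmoid(a @ W + b)
    return a

x = rng.random(784)                             # stand-in for one normalized MNIST image
y = np.zeros(10); y[3] = 1                      # one-hot stand-in label
out = forward(x)
mse = np.mean((out - y) ** 2)                   # the MSE cost the notebook trains against
print("predicted digit:", out.argmax(), "mse:", round(float(mse), 4))
```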
# Predicting Boston Housing Prices ## Using XGBoost in SageMaker (Deploy) _Deep Learning Nanodegree Program | Deployment_ --- As an introduction to using SageMaker's High Level Python API we will look at a relatively simple problem. Namely, we will use the [Boston Housing Dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) to predict the median value of a home in the area of Boston Mass. The documentation for the high level API can be found on the [ReadTheDocs page](http://sagemaker.readthedocs.io/en/latest/) ## General Outline Typically, when using a notebook instance with SageMaker, you will proceed through the following steps. Of course, not every step will need to be done with each project. Also, there is quite a lot of room for variation in many of the steps, as you will see throughout these lessons. 1. Download or otherwise retrieve the data. 2. Process / Prepare the data. 3. Upload the processed data to S3. 4. Train a chosen model. 5. Test the trained model (typically using a batch transform job). 6. Deploy the trained model. 7. Use the deployed model. In this notebook we will be skipping step 5, testing the model. We will still test the model but we will do so by first deploying the model and then sending the test data to the deployed model. ## Step 0: Setting up the notebook We begin by setting up all of the necessary bits required to run our notebook. To start that means loading all of the Python modules we will need. ``` %matplotlib inline import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.datasets import load_boston import sklearn.model_selection ``` In addition to the modules above, we need to import the various bits of SageMaker that we will be using. ``` import sagemaker from sagemaker import get_execution_role from sagemaker.amazon.amazon_estimator import get_image_uri from sagemaker.predictor import csv_serializer # This is an object that represents the SageMaker session that we are currently operating in. This # object contains some useful information that we will need to access later such as our region. session = sagemaker.Session() # This is an object that represents the IAM role that we are currently assigned. When we construct # and launch the training job later we will need to tell it what IAM role it should have. Since our # use case is relatively simple we will simply assign the training job the role we currently have. role = get_execution_role() ``` ## Step 1: Downloading the data Fortunately, this dataset can be retrieved using sklearn and so this step is relatively straightforward. ``` boston = load_boston() ``` ## Step 2: Preparing and splitting the data Given that this is clean tabular data, we don't need to do any processing. However, we do need to split the rows in the dataset up into train, test and validation sets. ``` # First we package up the input data and the target variable (the median value) as pandas dataframes. This # will make saving the data to a file a little easier later on. X_bos_pd = pd.DataFrame(boston.data, columns=boston.feature_names) Y_bos_pd = pd.DataFrame(boston.target) # We split the dataset into 2/3 training and 1/3 testing sets. X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X_bos_pd, Y_bos_pd, test_size=0.33) # Then we split the training set further into 2/3 training and 1/3 validation sets. 
X_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33) ``` ## Step 3: Uploading the training and validation files to S3 When a training job is constructed using SageMaker, a container is executed which performs the training operation. This container is given access to data that is stored in S3. This means that we need to upload the data we want to use for training to S3. We can use the SageMaker API to do this and hide some of the details. ### Save the data locally First we need to create the train and validation csv files which we will then upload to S3. ``` # This is our local data directory. We need to make sure that it exists. data_dir = '../data/boston' if not os.path.exists(data_dir): os.makedirs(data_dir) # We use pandas to save our train and validation data to csv files. Note that we make sure not to include header # information or an index as this is required by the built in algorithms provided by Amazon. Also, it is assumed # that the first entry in each row is the target variable. pd.concat([Y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False) pd.concat([Y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False) ``` ### Upload to S3 Since we are currently running inside of a SageMaker session, we can use the object which represents this session to upload our data to the 'default' S3 bucket. Note that it is good practice to provide a custom prefix (essentially an S3 folder) to make sure that you don't accidentally interfere with data uploaded from some other notebook or project. ``` prefix = 'boston-xgboost-deploy-hl' val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix) train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix) ``` ## Step 4: Train the XGBoost model Now that we have the training and validation data uploaded to S3, we can construct our XGBoost model and train it. We will be making use of the high level SageMaker API to do this which will make the resulting code a little easier to read at the cost of some flexibility. To construct an estimator, the object which we wish to train, we need to provide the location of a container which contains the training code. Since we are using a built in algorithm this container is provided by Amazon. However, the full name of the container is a bit lengthy and depends on the region that we are operating in. Fortunately, SageMaker provides a useful utility method called `get_image_uri` that constructs the image name for us. To use the `get_image_uri` method we need to provide it with our current region, which can be obtained from the session object, and the name of the algorithm we wish to use. In this notebook we will be using XGBoost however you could try another algorithm if you wish. The list of built in algorithms can be found in the list of [Common Parameters](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). ``` # As stated above, we use this utility method to construct the image name for the training container. container = get_image_uri(session.boto_region_name, 'xgboost') # Now that we know which container to use, we can construct the estimator object. 
xgb = sagemaker.estimator.Estimator(container, # The name of the training container role, # The IAM role to use (our current role in this case) train_instance_count=1, # The number of instances to use for training train_instance_type='ml.m4.xlarge', # The type of instance ot use for training output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix), # Where to save the output (the model artifacts) sagemaker_session=session) # The current SageMaker session ``` Before asking SageMaker to begin the training job, we should probably set any model specific hyperparameters. There are quite a few that can be set when using the XGBoost algorithm, below are just a few of them. If you would like to change the hyperparameters below or modify additional ones you can find additional information on the [XGBoost hyperparameter page](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html) ``` xgb.set_hyperparameters(max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, objective='reg:linear', early_stopping_rounds=10, num_round=200) ``` Now that we have our estimator object completely set up, it is time to train it. To do this we make sure that SageMaker knows our input data is in csv format and then execute the `fit` method. ``` # This is a wrapper around the location of our train and validation data, to make sure that SageMaker # knows our data is in csv format. s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv') s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv') xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}) ``` ## Step 5: Test the trained model We will be skipping this step for now. We will still test our trained model but we are going to do it by using the deployed model, rather than setting up a batch transform job. ## Step 6: Deploy the trained model Now that we have fit our model to the training data, using the validation data to avoid overfitting, we can deploy our model and test it. Deploying is very simple when we use the high level API, we need only call the `deploy` method of our trained estimator. **NOTE:** When deploying a model you are asking SageMaker to launch an compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for. In other words **If you are no longer using a deployed endpoint, shut it down!** ``` xgb_predictor = xgb.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge') ``` ## Step 7: Use the model Now that our model is trained and deployed we can send the test data to it and evaluate the results. Here, because our test data is so small, we can send it all using a single call to our endpoint. If our test dataset was larger we would need to split it up and send the data in chunks, making sure to accumulate the results. ``` # We need to tell the endpoint what format the data we are sending is in xgb_predictor.content_type = 'text/csv' xgb_predictor.serializer = csv_serializer Y_pred = xgb_predictor.predict(X_test.values).decode('utf-8') # predictions is currently a comma delimited string and so we would like to break it up # as a numpy array. Y_pred = np.fromstring(Y_pred, sep=',') ``` To see how well our model works we can create a simple scatter plot between the predicted and actual values. 
If the model was completely accurate the resulting scatter plot would look like the line $x=y$. As we can see, our model seems to have done okay but there is room for improvement. ``` plt.scatter(Y_test, Y_pred) plt.xlabel("Median Price") plt.ylabel("Predicted Price") plt.title("Median Price vs Predicted Price") ``` ## Delete the endpoint Since we are no longer using the deployed model we need to make sure to shut it down. Remember that you have to pay for the length of time that your endpoint is deployed so the longer it is left running, the more it costs. ``` xgb_predictor.delete_endpoint() ``` ## Optional: Clean up The default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook. ``` # First we will remove all of the files contained in the data_dir directory !rm $data_dir/* # And then we delete the directory itself !rmdir $data_dir ```
github_jupyter
0.493897
0.992192
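The SageMaker notebook above judges the deployed XGBoost model only by a scatter plot of predicted versus actual prices. A few numeric regression metrics make runs easier to compare; the sketch below is illustrative only, with made-up stand-ins for `Y_test` and `Y_pred`:

```
# Illustrative only: numeric error metrics for a regression model's test predictions.
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

y_test = np.array([24.0, 21.6, 34.7, 33.4, 36.2, 28.7])   # made-up actual median prices
y_pred = np.array([25.1, 20.2, 33.0, 35.5, 34.8, 27.9])   # made-up predictions

rmse = np.sqrt(mean_squared_error(y_test, y_pred))  # penalizes large errors more heavily
mae  = mean_absolute_error(y_test, y_pred)          # average absolute error
r2   = r2_score(y_test, y_pred)                     # fraction of variance explained

print(f"RMSE={rmse:.2f}  MAE={mae:.2f}  R^2={r2:.3f}")
```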
``` %matplotlib inline # Import dependencies. import matplotlib.pyplot as plt # Set the x-axis to a list of strings for each month. x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] # Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month. y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09] # Create the plot plt.plot(x_axis, y_axis) # Create the plot with ax.plt() fig, ax = plt.subplots() ax.plot(x_axis, y_axis) # Create the plot with ax.plt() fig = plt.figure() ax = fig.add_subplot() ax.plot(x_axis, y_axis) # Create the plot. plt.plot(x_axis, y_axis) plt.show() # Create the plot and add a label for the legend. plt.plot(x_axis, y_axis, label='Boston') # Create labels for the x and y axes. plt.xlabel("Date") plt.ylabel("Fare($)") # Set the y limit between 0 and 45. plt.ylim(0, 45) # Create a title. plt.title("PyBer Fare by Month") # Add the legend. plt.legend() # Create the plot. plt.plot(x_axis, y_axis, marker="*", color="blue", linewidth=2, label='Boston') # Create labels for the x and y axes. plt.xlabel("Date") plt.ylabel("Fare($)") # Set the y limit between 0 and 45. plt.ylim(0, 45) # Create a title. plt.title("PyBer Fare by Month") # Add a grid. plt.grid() # Add the legend. plt.legend() # Create the plot. plt.plot(x_axis, y_axis, marker="*", color="blue", linewidth=2, label='Boston') # Create labels for the x and y axes. plt.xlabel("Date") plt.ylabel("Fare($)") # Set the y limit between 0 and 45. plt.ylim(0, 45) # Create a title. plt.title("PyBer Fare by Month") # Add a grid. plt.grid() # Add the legend. plt.legend() # Set the x-axis to a list of strings for each month. x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] # Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month. y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09] # Create the plot plt.bar(x_axis, y_axis) # Create the plot. plt.bar(x_axis, y_axis, color="green", label='Boston') # Create labels for the x and y axes. plt.xlabel("Date") plt.ylabel("Fare($)") # Create a title. plt.title("PyBer Fare by Month") # Add the legend. plt.legend() # Create the plot plt.barh(x_axis, y_axis) # Invert the y-axis data. plt.barh(x_axis, y_axis) plt.gca().invert_yaxis() plt.barh(x_axis, y_axis, color="magenta", label='Boston') # Create labels for the x and y axes. plt.xlabel("Date") plt.ylabel("Fare($)") # Create a title. plt.title("PyBer Fare by Month") # Add the legend. plt.legend() plt.gca().invert_yaxis() # Set the x-axis to a list of strings for each month. x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] # Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month. y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09] # Create the plot with ax.plt() fig, ax = plt.subplots() ax.bar(x_axis, y_axis) # Create the plot with ax.plt() fig, ax = plt.subplots() ax.barh(x_axis, y_axis) # Set the x-axis to a list of strings for each month. x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] # Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month. 
y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09] plt.plot(y_axis, x_axis, 'o',color='red',label="Chicago") plt.legend() plt.xlabel("Date") plt.ylabel("Fare($)") # Create a title. plt.title("PyBer Fare by Month") plt.xlim(0, 45) plt.gca().invert_yaxis() # Build the larger marker sizes before using them in the scatter plot. y_axis_larger = [] for data in y_axis: y_axis_larger.append(data*3) plt.scatter(x_axis, y_axis, s=y_axis_larger) plt.scatter(x_axis, y_axis, s = [i * 3 for i in y_axis]) fig, ax = plt.subplots() ax.scatter(x_axis, y_axis, s=y_axis) plt.pie(y_axis, labels=x_axis) plt.show() explode_values = (0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0) plt.pie(y_axis, explode=explode_values, labels=x_axis, autopct='%.1f%%') # Assign 12 colors, one for each month. colors = ["slateblue", "magenta", "lightblue", "green", "yellowgreen", "greenyellow", "yellow", "orange", "gold", "indianred", "tomato", "mistyrose"] explode_values = (0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0) plt.subplots(figsize=(8, 8)) plt.pie(y_axis, explode=explode_values, colors=colors, labels=x_axis, autopct='%.1f%%') plt.show() fig, ax = plt.subplots() ax.pie(y_axis,labels=x_axis) plt.show() ```
github_jupyter
0.761982
0.926304
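The matplotlib walkthrough above plots the same monthly fares as line, bar, scatter and pie charts. As a small optional extension (not part of the original exercise), the same data can be drawn as an annotated bar chart in the object-oriented `fig, ax` style the notebook already introduces:

```
# Optional extension: the same monthly fares as a bar chart with the fare printed above each bar.
import matplotlib.pyplot as plt

x_axis = ["Jan", "Feb", "Mar", "April", "May", "June",
          "July", "Aug", "Sept", "Oct", "Nov", "Dec"]
y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04,
          43.82, 10.56, 11.85, 27.90, 20.71, 20.09]

fig, ax = plt.subplots(figsize=(10, 5))
bars = ax.bar(x_axis, y_axis, color="skyblue", label="Boston")
for bar, fare in zip(bars, y_axis):
    # Place the fare value just above each bar.
    ax.annotate(f"${fare:.2f}", (bar.get_x() + bar.get_width() / 2, fare),
                ha="center", va="bottom", fontsize=8)
ax.set_xlabel("Date")
ax.set_ylabel("Fare($)")
ax.set_title("PyBer Fare by Month")
ax.legend()
plt.show()
```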
## Deploying two modules on IoT Edge Device In this notebook, we deploy two modules -- image-capture-od and people-detection-service -- on IoT Edge. ``` %reload_ext autoreload %autoreload 2 %matplotlib inline import time from dotenv import set_key, get_key, find_dotenv # get the .env file where all the variables are stored env_path = find_dotenv(raise_error_if_not_found=True) resource_group = get_key(env_path, 'resource_group') iot_hub_name = get_key(env_path, 'iot_hub_name') device_id = get_key(env_path, 'device_id') acr_name = get_key(env_path, 'acr_name') acr_login_server = get_key(env_path, 'acr_login_server') acr_password = get_key(env_path, 'acr_password') img1_location = get_key(env_path, 'img1_location') img2_location = get_key(env_path, 'img2_location') # deploy two modules module1_name = 'image-capture-od' module2_name = 'people-detection-service' # Based on the deployment_template.json file, we insert necessary information. file = open('./deployment_annotation_template.json') contents = file.read() contents = contents.replace('__REGISTRY_SERVER_NAME', acr_login_server) contents = contents.replace('__REGISTRY_PASSWORD', acr_password) contents = contents.replace('__REGISTRY_USER_NAME', acr_name) contents = contents.replace('__MODULE1_NAME', module1_name) contents = contents.replace('__MODULE2_NAME', module2_name) contents = contents.replace('__REGISTRY_IMAGE1_LOCATION', img1_location) contents = contents.replace('__REGISTRY_IMAGE2_LOCATION', img2_location) with open('./deployment.json', 'wt', encoding='utf-8') as output_file: output_file.write(contents) # Push the deployment JSON to the IOT Hub !az iot edge set-modules --device-id $device_id \ --hub-name $iot_hub_name \ --content deployment.json \ -g $resource_group ``` When you run the `docker ps` command on the edge device, you should see four containers running: `edgeAgent`, `edgeHub`, and two other containers named `module1_name` and `module2_name`. ``` # restart the daemon !sudo systemctl restart iotedge time.sleep(20) # Wait 20 seconds for iotedge to restart # restart the daemon again !sudo systemctl restart iotedge !docker ps ``` ### Verify Deployment Succeeded ``` dbe_name = get_key(env_path, 'dbe_name') dbe_ip_address = get_key(env_path, 'dbe_ip_address') ``` 1. In the Azure portal `Data Box Edge / Data Box Gateway` resource (the resource name is `dbe_name`, which can be obtained by executing the previous cell), please click `Modules`. It should show four containers running. 2. Open a browser from a computer and type in the address `http://<dbe_ip_address>:5012/`; you should see a video stream playing. The image below shows a sample frame of the streamed video. The `dbe_ip_address` is obtained by executing the previous cell. ![stream_video_frame](video_stream_frame.png) Next we will proceed with notebook [04_DeployOnIOTedge.ipynb](./04_DeployOnIOTedge.ipynb).
github_jupyter
0.268654
0.614914
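The IoT Edge notebook above fills in its deployment template with a chain of `str.replace` calls. A dictionary-driven loop does the same job and fails loudly if a placeholder is missing; the sketch below reuses the notebook's template path and placeholder names, but the values are dummies rather than the real registry credentials:

```
# Assumed, illustrative variant of the notebook's template filling, driven by a dict of placeholders.
replacements = {
    "__REGISTRY_SERVER_NAME": "myregistry.azurecr.io",        # dummy value
    "__REGISTRY_PASSWORD": "<acr-password>",                   # dummy value
    "__REGISTRY_USER_NAME": "myregistry",                      # dummy value
    "__MODULE1_NAME": "image-capture-od",
    "__MODULE2_NAME": "people-detection-service",
    "__REGISTRY_IMAGE1_LOCATION": "myregistry.azurecr.io/image-capture-od:latest",
    "__REGISTRY_IMAGE2_LOCATION": "myregistry.azurecr.io/people-detection-service:latest",
}

with open("./deployment_annotation_template.json") as f:
    contents = f.read()

# Catch typos in placeholder names before writing anything out.
missing = [key for key in replacements if key not in contents]
if missing:
    raise ValueError(f"Placeholders not found in template: {missing}")

for key, value in replacements.items():
    contents = contents.replace(key, value)

with open("./deployment.json", "wt", encoding="utf-8") as output_file:
    output_file.write(contents)
```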
``` import sys sys.path.append("/mnt/home/TF_NEW/tf-transformers/src/") # Install tf-transformers from github import datasets import json import glob import tensorflow as tf import numpy as np from tf_transformers.data import TFWriter, TFReader, TFProcessor from tf_transformers.models import AlbertModel from tf_transformers.tasks import Classification_Model from tf_transformers.core import optimization, SimpleTrainer from tf_transformers.losses import cross_entropy_loss from transformers import AlbertTokenizer ``` ### Load Tokenizer ``` # Load HuggingFace Tokenizer tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2") ``` ### Load the RTE dataset (GLUE) from Huggingface datasets ``` examples = datasets.load_from_disk("/mnt/home/PRE_MODELS/HuggingFace_models/datasets/glue/rte/") train_examples = examples["train"] for item in train_examples: print(item) break max_seq_length=128 def parse_train(): result = {} for f in train_examples: input_ids_s1 = [tokenizer.cls_token] + tokenizer.tokenize(f['sentence1'])[: max_seq_length-2] + [tokenizer.sep_token] # -2 to add CLS and SEP input_ids_s1 = tokenizer.convert_tokens_to_ids(input_ids_s1) input_type_ids_s1 = [0] * len(input_ids_s1) # 0 for s1 input_ids_s2 = tokenizer.tokenize(f['sentence2'])[: max_seq_length-1] + [tokenizer.sep_token] # -1 to add SEP input_ids_s2 = tokenizer.convert_tokens_to_ids(input_ids_s2) input_type_ids_s2 = [1] * len(input_ids_s2) # 1 for s2 input_ids = input_ids_s1 + input_ids_s2 input_type_ids = input_type_ids_s1 + input_type_ids_s2 input_mask = [1] * len(input_ids) # attention mask result = {} result['input_ids'] = input_ids result['input_mask'] = input_mask result['input_type_ids'] = input_type_ids result['labels'] = f['label'] yield result # Let's write using TFWriter # Use TFProcessor for smaller data schema = { "input_ids": ("var_len", "int"), "input_mask": ("var_len", "int"), "input_type_ids": ("var_len", "int"), "labels": ("var_len", "int"), } tfrecord_train_dir = '../../OFFICIAL_TFRECORDS/glue/alberta/rte/train' tfrecord_filename = 'rte' tfwriter = TFWriter(schema=schema, file_name=tfrecord_filename, model_dir=tfrecord_train_dir, tag='train', overwrite=True ) tfwriter.process(parse_fn=parse_train()) ``` ### Read TFRecords using TFReader ``` # Read Data schema = json.load(open("{}/schema.json".format(tfrecord_train_dir))) all_files = glob.glob("{}/*.tfrecord".format(tfrecord_train_dir)) tf_reader = TFReader(schema=schema, tfrecord_files=all_files) x_keys = ['input_ids', 'input_type_ids', 'input_mask'] y_keys = ['labels'] batch_size = 32 train_dataset = tf_reader.read_record(auto_batch=True, keys=x_keys, batch_size=batch_size, x_keys = x_keys, y_keys = y_keys, shuffle=True, drop_remainder=True ) for (batch_inputs, batch_labels) in train_dataset.take(1): print(batch_inputs, batch_labels) ``` ### Load Albert V2 Model ``` # Let's load the Albert model tf.keras.backend.clear_session() model_layer, model, config = AlbertModel(model_name='albert_base_v2', is_training=True, use_dropout=False ) model.load_checkpoint("/mnt/home/PRE_MODELS/LegacyAI_models/checkpoints/albert-base-v2/") # model_layer -> Legacylayer inherited from tf.keras.Layer # model -> legacyModel inherited from tf.keras.Model ``` ### Load Classification Model ``` tf.keras.backend.clear_session() classification_layer = Classification_Model(model=model, num_classes=2, use_all_layers=True, is_training=True) classification_model = classification_layer.get_model() # Delete to save up memory del model del model_layer del classification_layer ``` ### Define Loss Loss function is simple.
* labels: 1D (batch_size) # class indices * logits: 2D (batch_size x num_classes) **Joint loss** - We minimze loss over each hidden layer . ``` def loss_fn(labels, logits): loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=tf.squeeze(labels, axis=1))) return loss def joint_loss(y_true_dict, y_pred_dict): layer_loss = [] for class_logits in y_pred_dict['class_logits']: loss = loss_fn(y_true_dict['labels'], class_logits) layer_loss.append(loss) return tf.reduce_mean(layer_loss) ``` ### Define Optimizer ``` train_data_size = 3500 learning_rate = 2e-5 steps_per_epoch = int(train_data_size / batch_size) EPOCHS = 3 num_train_steps = steps_per_epoch * EPOCHS warmup_steps = int(0.1 * num_train_steps) # creates an optimizer with learning rate schedule optimizer_type = 'adamw' optimizer, learning_rate_fn = optimization.create_optimizer(learning_rate, steps_per_epoch * EPOCHS, warmup_steps, optimizer_type) ``` ### Train Using Keras :-) - ```compile2``` allows you to have directly use model outputs as well batch dataset outputs into the loss function, without any further complexity. Note: For ```compile2```, loss_fn must be None, and custom_loss_fn must be active. Metrics are not supprted for time being. ``` # # Compile keras_loss_fn = {'class_logits': joint_loss} classification_model.compile2(optimizer=optimizer, loss=None, custom_loss=keras_loss_fn) # Change steps per epoch to large value/ ignore it completely to train # on full dataset history = classification_model.fit(train_dataset, epochs=2, steps_per_epoch=10) ``` ### Train using SimpleTrainer (part of tf-transformers) ``` history = SimpleTrainer(model = classification_model, optimizer = optimizer, loss_fn = joint_loss, dataset = train_dataset.repeat(EPOCHS+1), # This is important epochs = EPOCHS, num_train_examples = train_data_size, batch_size = batch_size, steps_per_call=100, gradient_accumulation_steps=None) ``` ### Save Models You can save models as checkpoints using ```.save_checkpoint``` attribute, which is a part of all ```LegacyModels``` ``` model_save_dir = "../../OFFICIAL_MODELS/glue/rte/albert" classification_model.save_checkpoint(model_save_dir) ``` ### Parse validation data We use ```TFProcessor``` to create validation data, because dev data is small ``` dev_examples = examples['validation'] def parse_dev(): result = {} for f in dev_examples: input_ids_s1 = [tokenizer.cls_token] + tokenizer.tokenize(f['sentence1'])[: max_seq_length-2] + [tokenizer.sep_token] # -2 to add CLS and SEP input_ids_s1 = tokenizer.convert_tokens_to_ids(input_ids_s1) input_type_ids_s1 = [0] * len(input_ids_s1) # 0 for s1 input_ids_s2 = tokenizer.tokenize(f['sentence2'])[: max_seq_length-1] + [tokenizer.sep_token] # -1 to add SEP input_ids_s2 = tokenizer.convert_tokens_to_ids(input_ids_s2) input_type_ids_s2 = [1] * len(input_ids_s2) input_ids = input_ids_s1 + input_ids_s2 input_type_ids = input_type_ids_s1 + input_type_ids_s2 input_mask = [1] * len(input_ids) # 1 for s2 result = {} result['input_ids'] = input_ids result['input_mask'] = input_mask result['input_type_ids'] = input_type_ids result['labels'] = f['label'] yield result tf_processor = TFProcessor() dev_dataset = tf_processor.process(parse_fn=parse_dev()) x_keys = ['input_ids', 'input_type_ids', 'input_mask'] y_keys = ['labels'] dev_dataset = tf_processor.auto_batch(dev_dataset, shuffle=False, x_keys=x_keys, y_keys=y_keys, batch_size=32, drop_remainder=False) ``` ### Evaluate dev dataset - Accuracy ``` num_hidden_layers = 12 predictions_per_layer = {i:[] 
for i in range(num_hidden_layers)} original_labels = [] for (batch_inputs, batch_labels) in dev_dataset: model_outputs = classification_model(batch_inputs)['class_logits'] for i in range(num_hidden_layers): predictions_per_layer[i].append(tf.argmax(model_outputs[i], axis=1).numpy()) original_labels.append(batch_labels['labels'].numpy()) from sklearn.metrics import accuracy_score, f1_score eval_metrics = {} for i in range(num_hidden_layers): acc = accuracy_score(np.hstack(predictions_per_layer[i]), np.hstack(original_labels)) eval_metrics[i] = acc print(i, eval_metrics[i]) with open('eval_rte.json', 'w') as f: json.dump(eval_metrics, f) ```
github_jupyter
0.593845
0.695577
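The joint loss used above averages a sparse softmax cross-entropy over the class logits of every hidden layer. To make that definition concrete outside of TensorFlow, here is a plain-numpy illustration with made-up logits and labels (batch of 8, 2 RTE classes, 12 layers); it is not the tf-transformers implementation:

```
# Illustrative only: per-layer sparse softmax cross-entropy averaged into a "joint" loss.
import numpy as np

def sparse_softmax_xent(labels, logits):
    """Mean cross-entropy for integer labels against (batch, num_classes) logits."""
    shifted = logits - logits.max(axis=1, keepdims=True)          # numerical stability
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -log_probs[np.arange(len(labels)), labels].mean()

rng = np.random.default_rng(0)
labels = rng.integers(0, 2, size=8)                               # batch of 8, 2 classes
per_layer_logits = [rng.normal(size=(8, 2)) for _ in range(12)]   # one logits array per layer

joint = np.mean([sparse_softmax_xent(labels, logits) for logits in per_layer_logits])
print("per-layer mean (joint) loss:", round(float(joint), 4))
```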
``` import openpnm as op import numpy as np import matplotlib.pyplot as plt ``` # Generate a Layered Network In order to see a network with a noticable difference in permeability values in each principle direction, we'll make a network consisting of two distinct layers then stitch them together. The process of stitching is explained in more detail [here](/examples/notebooks/networks/manipulation/stitching_and_merging_networks_together.ipynb). ``` pn = op.network.Cubic(shape=[8, 8, 8], spacing=30e-6) pn2 = op.network.Cubic(shape=[24, 24, 8], spacing=10e-6) ``` ## Apply Labels to Each Network In order to make it easier to find pore indices after stitching, it's helpful to apply unique and descriptive labels to the networks first. Labels are discussed detail [here](/examples/notebooks/tutorials/using_and_creating_labels.ipynb). ``` pn['pore.coarse'] = True pn['throat.coarse'] = True pn2['pore.fine'] = True pn2['throat.fine'] = True ``` Now we can stitch the two network together, but first we must translate the fine network along the z-axis so the networks are not overlapping. Stiching is described in more detail [here](.../tutorials/manipulation/stitching_and_merging_networks_together.ipynb). ``` pn2['pore.coords'] += [0, 0, 8*30e-6] op.topotools.stitch(network=pn, donor=pn2, P_network=pn.pores('top'), P_donor=pn2.pores('bottom')) ``` Let's visualize the network to see if it stitched as expected. Quick plotting is described in more detail [here](/examples/notebooks/io/quick_plotting_networks.ipynb). ``` # NBVAL_IGNORE_OUTPUT fig = op.topotools.plot_connections(pn) ``` # Create other objects needed for simulation ## Generate one Geometry for each layer ``` np.random.seed(0) Ps = pn.pores('fine') Ts = pn.throats('fine') geo2 = op.geometry.StickAndBall(network=pn, pores=Ps, throats=Ts) Ps = pn.pores('coarse') Ts = pn.throats(['coarse', 'stitched']) geo1 = op.geometry.StickAndBall(network=pn, pores=Ps, throats=Ts) ``` Let's visualize the pore sizes using histograms: ``` # NBVAL_IGNORE_OUTPUT fig = plt.hist(geo1['pore.diameter'], bins=25, density=True, edgecolor='k', alpha=0.5) fig = plt.hist(geo2['pore.diameter'], bins=25, density=True, edgecolor='k', alpha=0.5) ``` ## Create a Phase and 2 Physics objects ``` air = op.phases.Air(network=pn) phys1 = op.physics.Standard(network=pn, phase=air, geometry=geo1) phys2 = op.physics.Standard(network=pn, phase=air, geometry=geo2) ``` Let's visualize the hydraulic conductances using histograms. 
Note that the values are logged, to better visualize the distributions: ``` # NBVAL_IGNORE_OUTPUT fig = plt.hist(np.log10(phys1['throat.hydraulic_conductance']), bins=25, density=True, edgecolor='k', alpha=0.5) fig = plt.hist(np.log10(phys2['throat.hydraulic_conductance']), bins=25, density=True, edgecolor='k', alpha=0.5) ``` # Run a StokesFlow simulation in perpendicular directions ``` sf_x = op.algorithms.StokesFlow(network=pn, phase=air) Pin = 1.0 Pout = 0.0 sf_x.set_value_BC(pores=pn.pores('left'), values=1.0) sf_x.set_value_BC(pores=pn.pores('right'), values=0.0) sf_x.run() ``` Let's make sure the pressure field is applied in the correct direction: ``` # NBVAL_IGNORE_OUTPUT fig = op.topotools.plot_coordinates(pn, color=sf_x['pore.pressure'], size_by=pn['pore.diameter'], markersize=50) sf_z = op.algorithms.StokesFlow(network=pn, phase=air) Pin = 1.0 Pout = 0.0 sf_z.set_value_BC(pores=pn.pores(['top', 'coarse'], mode='and'), values=Pin) sf_z.set_value_BC(pores=pn.pores(['top', 'fine'], mode='and'), values=Pout) sf_z.run() ``` Again, let's visualize the pressure field: ``` # NBVAL_IGNORE_OUTPUT fig = op.topotools.plot_coordinates(pn, color=sf_z['pore.pressure'], size_by=pn['pore.diameter'], markersize=50) ``` The presure in the coarse network is essentially constant since it's much more permeable than the fine layer. ## Determine the Permeability Coefficient for each direction ``` L = 8 * 30e-6 A = (8 * 30e-6)**2 + (8 * 24)*(10e-6)**2 mu = air['pore.viscosity'][0] Q = sf_x.rate(pores=pn.pores('left'), mode='group') Kx = Q*L*mu/(A*(Pin - Pout)) print('The permeability coefficient is:', Kx) A = (8 * 30e-6)**2 L = (8 * 30e-6) + (8 * 10e-6) mu = air['pore.viscosity'][0] Q = sf_z.rate(pores=pn.pores(['top', 'coarse'], mode='and'), mode='group') Kz = Q*L*mu/(A*(Pin - Pout)) print('The permeability coefficient is:', Kz) Kx/Kz ``` The permeability coefficient for the direction through the 'fine' layer is about 2x lower than the direction parallel to it.
github_jupyter
0.979803
``` """ Module containng custom Keras models and layers required for FlowNet architecture. """ try: import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import backend as K except Exception as e: raise Exception("Error occured while importing dependency packages. More details:\n",e) __author__ = "Manu Jayadharan" __copyright__ = "Copyright 2020, FlowNet" __credits__ = ["Manu Jayadharan"] __license__ = "" __version__ = "0.1.0" __maintainer__ = "Manu Jayadharan" __email__ = "manu.jayadharan@pitt.edu" __status__ = "Development" class ForwardModel(tf.keras.Model): """ Model to construct FNN (Forward Neural Network) using custom Keras layers. Subclass of tf.keras.Model """ def __init__(self, space_dim=1, time_dep=False, output_dim=1, n_hid_lay=3, n_hid_nrn=20, act_func = "tanh", rhs_func = None): """ space_dim (int) -> Dimension of the space Omega where the PDE is defined. time_dep (bool) -> True if the problem is time dependent. output_dim (int) -> Dimension of the range of the solution to PDE. n_hid_layer (int) -> Number of hidden layers in the neural network. n_hid_nrn (int) -> Number of neurons in each hidden layer of the NN. act_func (string) -> Activation functions for each of the hidden layers. Has to be one of the members of keras.activations: could be one of {"tanh", "sigmoid", "elu", "relu", "exponential"} """ super(ForwardModel, self).__init__() #Defining class atributes self.space_dim = space_dim self.time_dep = time_dep self.output_dim = output_dim self.n_hid_lay = n_hid_lay self.n_hid_nrn = n_hid_nrn #Block of hidden layers self.hidden_block = [keras.layers.Dense( self.n_hid_nrn, activation=act_func, name="dense_"+str(i+1) ) for i in range(n_hid_lay)] #Final output layer self.final_layer = keras.layers.Dense(self.output_dim, name="final_layer") #Defining the rhs of PDE: P(u,delu) = f(x,t) if rhs_func != None: self.rhs_function = rhs_func else: self.rhs_function = lambda x: 0 def findGrad(self,func,input_space): """ Find gradient with respect to the domain Omega of the PDE. (tensor, tensor) -> Keras.Lambda layer arguments: ---------- func (tf tensor): function represented by tf tensor structure (Usually of size: data_size x dim_output_previous_layer). The func is usually the final output (solution u) coming out of a hidden layer input_space: argument with respect to which we need the partial derrivatives of func. Usually a list of input arguments representing the space dimension. Output: Keras.Lambda layer. Note that output of such a lambda layer will be a list of tensors with each element giving partial derrivative wrt to each element in argm. See tf.Keras.Lambda and tf.gradients for more details. """ try: return keras.layers.Lambda(lambda z: [tf.gradients(z[0],x_i, unconnected_gradients='zero') for x_i in z[1] ]) ([func, input_space]) except Exception as e: raise Exception("Error occured in finding the time derrivative lambda layer of type {} as follows: \n{}".format(type(e)),e) def findTimeDer(self,func,input_time): """ (tensor, tensor) -> Keras.Lambda layer arguments: ---------- func (tf tensor): function represented by tf tensor structure (Usually of size: data_size x dim_output_previous_layer). The func is usually the final output (solution u) coming out of a hidden layer input_time: TensorFlow tensor. This should be the element of the input list which corresponds to the time dimension. Used only if the problem is time_dependent. Output: Keras.Lambda layer. 
Note that output of such a lambda layer will be a tensor of size m x 1 representing the time derrivative of output func. See tf.Keras.Lambda and tf.gradients for more details. """ assert (self.time_dep), "Tried taking time derrivative even though the problem is not time dependent." try: return keras.layers.Lambda(lambda z: tf.gradients(z[0],z[1], unconnected_gradients='zero') [0]) ([func, input_time]) except Exception as e: raise Exception("Error occured in find gradient lambda layer of type {} as follows: \n{} ".format(type(e)),e) def findLaplace(self,first_der,input_space): """ (tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the laplacian of the solution to pde. arguments: ---------- first_der (tf tensor): function represented by tf tensor structure (Usually of size: data_size x dim_output_previous_layer). The func is input_space: argument with respect to which we need the partial derrivatives of func. Usually a list of input arguments representing the space dimension. Output: Keras.Lambda layer. This lambda layer outputs the laplacian of solution function u. See tf.Keras.Lambda and tf.gradients for more details. """ try: # list containng diagonal entries of hessian matrix. Note that tf.gradients #returns a list of tensors and hence thats why we have a [0] at the end of #the tf.gradients fucntion as tf.gradients(func,argm) [0] del_sq_layer = keras.layers.Lambda( lambda z: [ tf.gradients(z[0][i], z[1][i], unconnected_gradients='zero') [0] for i in range(len(z[1])) ] ) ([first_der,input_space]) return sum(del_sq_layer) except Exception as e: raise Exception("Error occured in find laplacian lambda layer of type {} as follows: \n{}".format(type(e)),e) #final layer representing the lhs P(x,t) of PDE P(x,t)=0 def findPdeLayer(self, laplacian, input_arg, time_der=0): """ (tensor, tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the actual pde P(u,delu,x,t) such that P(u,delu,x,t)=0. arguments: ---------- laplacian (tf tensor): laplacian with respect to space dim . input_arg: list of inputs corresponding to both space and time dimension. Last elemetn of the list corresponds to the temporal dimension. Output: Keras.Lambda layer. This lambda layer outputs the PDE P(u,delu, x,t). See tf.Keras.Lambda and tf.gradients for more details. """ try: # return keras.layers.Lambda(lambda z: z[0] - z[1] - tf.sin(z[2][0]+z[2][1]) - # 2*z[2][2]*tf.sin(z[2][0]+z[2][1])) ([time_der, laplacian, input_arg]) return keras.layers.Lambda(lambda z: z[0] - z[1] - self.rhs_function(input_arg)) ([time_der, laplacian, input_arg]) except Exception as e: raise Exception("Error occured in finding pde lambda layer of type {} as follows: \n{}".format(type(e)),e) def get_config(self): #getting basic config using the parent model class base_config = super().get_config() return {**base_config, "space_dim": self.space_dim, "time_dep": self.time_dep, "output_dim": self.output_dim, "n_hid_lay": self.n_hid_lay, "n_hid_nrn": self.n_hid_nrn, "act_func": self.act_func } def from_config(self, config, custom_objects): super().from_config(config) def call(self, inputs, training=False): """ Call function which wll be used while training, prediciton and evaluation of the ForwardModel. arguments: ---------- inputs (list of tensors) -> last element of the list corresponds to temporal diimension if self.time_dep = True. If possible, always feed the data from the data processing method in flowDataProcess module. training (bool) -> True if calling the function for training. False for prediction and evaluation. 
Value of triainng will be automatically taken care of by Keras. Note that inputs should always be given as a list with the last element of the list representing the dimension corresponding to time. """ if self.time_dep: try: assert(len(inputs) > 1) input_space = inputs[:-1] input_time = inputs[-1] except Exception as e: raise Exception("Error occured while separating spacial and temporal data from inputs,\ make sure that spacio-temporal data is being used to for training and \ x=[space_dim1,..,space_dimn,time_dim]. More details on error below:\n", type(e), e) else: input_space = inputs #concatening all the input data (space and time dimensions) making it #read to be passed to the hidden layers hidden_output = keras.layers.concatenate(inputs) #hidden layers for layer_id in range(self.n_hid_lay): hidden_output = self.hidden_block[layer_id] (hidden_output) #output layer, this is typically the solution function output_layer = self.final_layer(hidden_output) if training: #pde specific layers grad_layer = self.findGrad(output_layer, input_space) laplace_layer = self.findLaplace(grad_layer, input_space) if self.time_dep: time_der_layer = self.findTimeDer(output_layer, input_time) else: time_der_layer=0 pde_layer = self.findPdeLayer(laplace_layer, inputs, time_der_layer) return output_layer, pde_layer elif not training: #only outputting the function value if not tranining. return output_layer class Poission(ForwardModel): """ Doc string goes here """ def __init__(self, space_dim=1, perm_tensor=None, output_dim=1, n_hid_lay=3, n_hid_nrn=20, act_func = "tanh", rhs_func = None): """ talk about super initialization """ super().__init__(space_dim=space_dim, time_dep=False, output_dim=output_dim, n_hid_lay=n_hid_lay, n_hid_nrn=n_hid_nrn, act_func = act_func, rhs_func = rhs_func) self._perm_tensor = perm_tensor if perm_tensor else tf.eye(space_dim) #final layer representing the lhs P(x) of PDE P(x)=0 def findPdeLayer(self, laplacian, input_arg): """ (tensor, tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the actual pde P(u,delu,x,t) such that P(u,delu,x,t)=0. arguments: ---------- laplacian (tf tensor): laplacian with respect to space dim . input_arg: list of inputs corresponding to both space and time dimension. Last elemetn of the list corresponds to the temporal dimension. Output: Keras.Lambda layer. This lambda layer outputs the PDE P(u,delu, x,t). See tf.Keras.Lambda and tf.gradients for more details. """ try: return keras.layers.Lambda(lambda z: -z[0] - self.rhs_function(input_arg)) ([laplacian, input_arg]) except Exception as e: raise Exception("Error occured in finding pde lambda layer of type {} as follows: \n{}".format(type(e)),e) def call(self, inputs, training=False): """ Call function which wll be used while training, prediciton and evaluation of the ForwardModel. arguments: ---------- inputs (list of tensors) -> last element of the list corresponds to temporal diimension if self.time_dep = True. If possible, always feed the data from the data processing method in flowDataProcess module. training (bool) -> True if calling the function for training. False for prediction and evaluation. Value of triainng will be automatically taken care of by Keras. Note that inputs should always be given as a list with the last element of the list representing the dimension corresponding to time. 
""" if self.time_dep: input_space = inputs #concatening all the input data (space and time dimensions) making it #read to be passed to the hidden layers hidden_output = keras.layers.concatenate(inputs) #hidden layers for layer_id in range(self.n_hid_lay): hidden_output = self.hidden_block[layer_id] (hidden_output) #output layer, this is typically the solution function output_layer = self.final_layer(hidden_output) if training: #pde specific layers grad_layer = self.findGrad(output_layer, input_space) laplace_layer = self.findLaplace(grad_layer, input_space) pde_layer = self.findPdeLayer(laplace_layer, inputs) return output_layer, pde_layer elif not training: #only outputting the function value if not tranining. return output_layer class MixedDif(ForwardModel): """ Doc string goes here """ def __init__(self, space_dim=1, perm_tensor=None, output_dim=1, n_hid_lay=3, n_hid_nrn=20, act_func = "tanh", rhs_func = None, n_xtr_hid_lay=1): """ talk about super initialization """ super().__init__(space_dim=space_dim, time_dep=True, output_dim=output_dim, n_hid_lay=n_hid_lay, n_hid_nrn=n_hid_nrn, act_func = act_func, rhs_func = rhs_func) self._perm_tensor = perm_tensor if perm_tensor else tf.eye(space_dim) self.n_xtr_hid_lay = n_xtr_hid_lay self.__pressure_dim = 1 # Defining final output layers for pressure and velocity #pressure self.__pres_final_layer = keras.layers.Dense(self.__pressure_dim, name="pressure_out_layer") self.__vel_final_layer = keras.layers.Dense(space_dim, name = "velocity_out_layer") #Block of extra hidden layers for velocity self.xtra_hidden_block = [keras.layers.Dense( self.n_hid_nrn, activation=act_func, name="dense_xtra_"+str(i+1) ) for i in range(n_xtr_hid_lay)] def findDivLayer(self,vel_vector,input_space): """ (tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the divergence of the velocity vector. arguments: ---------- vel_vector (tf tensor): predicted velocity function represented by tf tensor structure (Usually of size: data_size x space_dim). input_space: argument with respect to which we need the partial derrivatives of func. Usually a list of input arguments representing the space dimension. Output: Keras.Lambda layer. This lambda layer outputs the laplacian of solution function u. See tf.Keras.Lambda and tf.gradients for more details. """ try: # list containng diagonal entries of the gradient #on velocity vector. Note that tf.gradients #returns a list of tensors and hence thats why we have a [0] at the end of #the tf.gradients fucntion as tf.gradients(func,argm) [0] grad_diag= keras.layers.Lambda( lambda z: [ tf.gradients(z[0][:,i], z[1][i], unconnected_gradients='zero') [0] for i in range(len(z[1])) ] ) ([vel_vector,input_space]) return sum(grad_diag) except Exception as e: raise Exception("Error occured in finding divergence of velocity lambda layer of type {} as follows: \n{}".format(type(e)),e) #layer representing the scalar equation in the pde, PDE P_scalar(x,t)=0. #This is typically something of the form p_t - div(u) -f =0 def findPdeLayer_1(self, div_vel, input_arg, time_der=0): """ (tensor, tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the actual pde P(u,delu,x,t) such that P(u,delu,x,t)=0. arguments: ---------- div_vel(tf tensor): divergence of velocity vector with respect to space dim . input_arg: list of inputs corresponding to both space and time dimension. Last elemetn of the list corresponds to the temporal dimension. time_der (tf tensor): derrivative of the pressure function with respect to time. 
Output: Keras.Lambda layer. This lambda layer outputs the PDE P(u,delu, x,t). See tf.Keras.Lambda and tf.gradients for more details. """ try: # return keras.layers.Lambda(lambda z: z[0] - z[1] - tf.sin(z[2][0]+z[2][1]) - # 2*z[2][2]*tf.sin(z[2][0]+z[2][1])) ([time_der, laplacian, input_arg]) return keras.layers.Lambda(lambda z: z[0] - z[1] - self.rhs_function(input_arg)) ([time_der, div_vel, input_arg]) except Exception as e: raise Exception("Error occured in finding pressure pde lambda layer of type {} as follows: \n{}".format(type(e)),e) #layer representing the vector equation in the pde, PDE PDE_vector(x,t)=0. #This is typically something of the form grad(p) + u = 0 def findPdeLayer_2(self, grad_p, vel_vect): """ (tensor, tensor) -> Keras.Lambda layer Returns list of lambda layer to find the vector PDE= 0 in the mixed form. arguments: ---------- grad_p(list of tf tensors): gradient of p with respect so spacedim. each element of the list corresponds to the partial with respect to different variable in spacedim.. vel_vect(tensor): velocity vector coming from teh final xtra hidden layer. This has shape, num_instances*vel_dim. note that vel_dim = space_dim. Output: list of Keras.Lambda layer. This lambda layer outputs a list representing the vector equation grad(p) + u =0. """ try: return keras.layers.Lambda(lambda z: [z[0][i] + z[1][:,i] for i in range(len(grad_p))]) ([grad_p, vel_vect]) except Exception as e: raise Exception("Error occured in finding vector pde lambda layer of type {} as follows: \n{}".format(type(e)),e) def call(self, inputs, training=False): """ Call function which wll be used while training, prediciton and evaluation of the ForwardModel. arguments: ---------- inputs (list of tensors) -> last element of the list corresponds to temporal diimension if self.time_dep = True. If possible, always feed the data from the data processing method in flowDataProcess module. training (bool) -> True if calling the function for training. False for prediction and evaluation. Value of triainng will be automatically taken care of by Keras. Note that inputs should always be given as a list with the last element of the list representing the dimension corresponding to time. """ if self.time_dep: try: assert(len(inputs) > 1) input_space = inputs[:-1] input_time = inputs[-1] except Exception as e: raise Exception("Error occured while separating spacial and temporal data from inputs,\ make sure that spacio-temporal data is being used to for training and \ x=[space_dim1,..,space_dimn,time_dim]. 
More details on error below:\n", type(e), e) else: input_space = inputs #concatening all the input data (space and time dimensions) making it #read to be passed to the hidden layers hidden_output = keras.layers.concatenate(inputs) #hidden layers for layer_id in range(self.n_hid_lay): hidden_output = self.hidden_block[layer_id] (hidden_output) # #output layer, this is typically the solution function # output_layer = self.final_layer(hidden_output) #pressure output layer pres_output_layer = self.__pres_final_layer(hidden_output) #velocity output layer for layer_id in range(self.n_xtr_hid_lay): hidden_output = self.xtra_hidden_block[layer_id] (hidden_output) vel_output_layer = self.__vel_final_layer(hidden_output) if training: #pde specific layers #finding gradient of pressure with respect to space dim grad_layer = self.findGrad(pres_output_layer, input_space) #finding divergence of the velocity layer with respect to space dim div_layer = self.findDivLayer(vel_output_layer, input_space) if self.time_dep: time_der_layer = self.findTimeDer(pres_output_layer, input_time) else: time_der_layer=0 #PDE_1 = 0 the scalar equation of the form p_t - div(u) -f = 0 pde_layer_1 = self.findPdeLayer(div_layer, inputs, time_der_layer) # pde_layer_2 = self.findPdeLayer_2(grad_layer, vel_output_layer) # return pres_output_layer, pde_layer_1, pde_layer_2 return pres_output_layer, pde_layer_1 elif not training: #only outputting the function value if not tranining. # return pres_output_layer, vel_output_layer return pres_output_layer ```
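To make the intended use of the module a little more concrete, here is a short hypothetical usage sketch (it is not part of the module above, and the import path is an assumption): it builds a `ForwardModel` for a 2-D time-dependent problem and runs a plain forward pass on random collocation points. The `training=True` branch additionally returns the PDE-residual layer, but because it relies on `tf.gradients` it should be run in graph mode (for example inside `tf.function` or a Keras training loop) rather than eagerly, so only the inference path is shown.

```
# Hypothetical usage sketch; assumes the module above is importable (e.g. as flownet_model).
import tensorflow as tf
# from flownet_model import ForwardModel   # assumed module/file name

model = ForwardModel(space_dim=2, time_dep=True, output_dim=1,
                     n_hid_lay=3, n_hid_nrn=20, act_func="tanh")

# Inputs are a list [x, y, t] with the temporal dimension last, as the docstrings require.
n_points = 64
x = tf.random.uniform((n_points, 1))
y = tf.random.uniform((n_points, 1))
t = tf.random.uniform((n_points, 1))

u_hat = model([x, y, t], training=False)   # predicted solution values, shape (n_points, 1)
print(u_hat.shape)
```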
github_jupyter
""" Module containng custom Keras models and layers required for FlowNet architecture. """ try: import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import backend as K except Exception as e: raise Exception("Error occured while importing dependency packages. More details:\n",e) __author__ = "Manu Jayadharan" __copyright__ = "Copyright 2020, FlowNet" __credits__ = ["Manu Jayadharan"] __license__ = "" __version__ = "0.1.0" __maintainer__ = "Manu Jayadharan" __email__ = "manu.jayadharan@pitt.edu" __status__ = "Development" class ForwardModel(tf.keras.Model): """ Model to construct FNN (Forward Neural Network) using custom Keras layers. Subclass of tf.keras.Model """ def __init__(self, space_dim=1, time_dep=False, output_dim=1, n_hid_lay=3, n_hid_nrn=20, act_func = "tanh", rhs_func = None): """ space_dim (int) -> Dimension of the space Omega where the PDE is defined. time_dep (bool) -> True if the problem is time dependent. output_dim (int) -> Dimension of the range of the solution to PDE. n_hid_layer (int) -> Number of hidden layers in the neural network. n_hid_nrn (int) -> Number of neurons in each hidden layer of the NN. act_func (string) -> Activation functions for each of the hidden layers. Has to be one of the members of keras.activations: could be one of {"tanh", "sigmoid", "elu", "relu", "exponential"} """ super(ForwardModel, self).__init__() #Defining class atributes self.space_dim = space_dim self.time_dep = time_dep self.output_dim = output_dim self.n_hid_lay = n_hid_lay self.n_hid_nrn = n_hid_nrn #Block of hidden layers self.hidden_block = [keras.layers.Dense( self.n_hid_nrn, activation=act_func, name="dense_"+str(i+1) ) for i in range(n_hid_lay)] #Final output layer self.final_layer = keras.layers.Dense(self.output_dim, name="final_layer") #Defining the rhs of PDE: P(u,delu) = f(x,t) if rhs_func != None: self.rhs_function = rhs_func else: self.rhs_function = lambda x: 0 def findGrad(self,func,input_space): """ Find gradient with respect to the domain Omega of the PDE. (tensor, tensor) -> Keras.Lambda layer arguments: ---------- func (tf tensor): function represented by tf tensor structure (Usually of size: data_size x dim_output_previous_layer). The func is usually the final output (solution u) coming out of a hidden layer input_space: argument with respect to which we need the partial derrivatives of func. Usually a list of input arguments representing the space dimension. Output: Keras.Lambda layer. Note that output of such a lambda layer will be a list of tensors with each element giving partial derrivative wrt to each element in argm. See tf.Keras.Lambda and tf.gradients for more details. """ try: return keras.layers.Lambda(lambda z: [tf.gradients(z[0],x_i, unconnected_gradients='zero') for x_i in z[1] ]) ([func, input_space]) except Exception as e: raise Exception("Error occured in finding the time derrivative lambda layer of type {} as follows: \n{}".format(type(e)),e) def findTimeDer(self,func,input_time): """ (tensor, tensor) -> Keras.Lambda layer arguments: ---------- func (tf tensor): function represented by tf tensor structure (Usually of size: data_size x dim_output_previous_layer). The func is usually the final output (solution u) coming out of a hidden layer input_time: TensorFlow tensor. This should be the element of the input list which corresponds to the time dimension. Used only if the problem is time_dependent. Output: Keras.Lambda layer. 
Note that output of such a lambda layer will be a tensor of size m x 1 representing the time derrivative of output func. See tf.Keras.Lambda and tf.gradients for more details. """ assert (self.time_dep), "Tried taking time derrivative even though the problem is not time dependent." try: return keras.layers.Lambda(lambda z: tf.gradients(z[0],z[1], unconnected_gradients='zero') [0]) ([func, input_time]) except Exception as e: raise Exception("Error occured in find gradient lambda layer of type {} as follows: \n{} ".format(type(e)),e) def findLaplace(self,first_der,input_space): """ (tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the laplacian of the solution to pde. arguments: ---------- first_der (tf tensor): function represented by tf tensor structure (Usually of size: data_size x dim_output_previous_layer). The func is input_space: argument with respect to which we need the partial derrivatives of func. Usually a list of input arguments representing the space dimension. Output: Keras.Lambda layer. This lambda layer outputs the laplacian of solution function u. See tf.Keras.Lambda and tf.gradients for more details. """ try: # list containng diagonal entries of hessian matrix. Note that tf.gradients #returns a list of tensors and hence thats why we have a [0] at the end of #the tf.gradients fucntion as tf.gradients(func,argm) [0] del_sq_layer = keras.layers.Lambda( lambda z: [ tf.gradients(z[0][i], z[1][i], unconnected_gradients='zero') [0] for i in range(len(z[1])) ] ) ([first_der,input_space]) return sum(del_sq_layer) except Exception as e: raise Exception("Error occured in find laplacian lambda layer of type {} as follows: \n{}".format(type(e)),e) #final layer representing the lhs P(x,t) of PDE P(x,t)=0 def findPdeLayer(self, laplacian, input_arg, time_der=0): """ (tensor, tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the actual pde P(u,delu,x,t) such that P(u,delu,x,t)=0. arguments: ---------- laplacian (tf tensor): laplacian with respect to space dim . input_arg: list of inputs corresponding to both space and time dimension. Last elemetn of the list corresponds to the temporal dimension. Output: Keras.Lambda layer. This lambda layer outputs the PDE P(u,delu, x,t). See tf.Keras.Lambda and tf.gradients for more details. """ try: # return keras.layers.Lambda(lambda z: z[0] - z[1] - tf.sin(z[2][0]+z[2][1]) - # 2*z[2][2]*tf.sin(z[2][0]+z[2][1])) ([time_der, laplacian, input_arg]) return keras.layers.Lambda(lambda z: z[0] - z[1] - self.rhs_function(input_arg)) ([time_der, laplacian, input_arg]) except Exception as e: raise Exception("Error occured in finding pde lambda layer of type {} as follows: \n{}".format(type(e)),e) def get_config(self): #getting basic config using the parent model class base_config = super().get_config() return {**base_config, "space_dim": self.space_dim, "time_dep": self.time_dep, "output_dim": self.output_dim, "n_hid_lay": self.n_hid_lay, "n_hid_nrn": self.n_hid_nrn, "act_func": self.act_func } def from_config(self, config, custom_objects): super().from_config(config) def call(self, inputs, training=False): """ Call function which wll be used while training, prediciton and evaluation of the ForwardModel. arguments: ---------- inputs (list of tensors) -> last element of the list corresponds to temporal diimension if self.time_dep = True. If possible, always feed the data from the data processing method in flowDataProcess module. training (bool) -> True if calling the function for training. False for prediction and evaluation. 
Value of triainng will be automatically taken care of by Keras. Note that inputs should always be given as a list with the last element of the list representing the dimension corresponding to time. """ if self.time_dep: try: assert(len(inputs) > 1) input_space = inputs[:-1] input_time = inputs[-1] except Exception as e: raise Exception("Error occured while separating spacial and temporal data from inputs,\ make sure that spacio-temporal data is being used to for training and \ x=[space_dim1,..,space_dimn,time_dim]. More details on error below:\n", type(e), e) else: input_space = inputs #concatening all the input data (space and time dimensions) making it #read to be passed to the hidden layers hidden_output = keras.layers.concatenate(inputs) #hidden layers for layer_id in range(self.n_hid_lay): hidden_output = self.hidden_block[layer_id] (hidden_output) #output layer, this is typically the solution function output_layer = self.final_layer(hidden_output) if training: #pde specific layers grad_layer = self.findGrad(output_layer, input_space) laplace_layer = self.findLaplace(grad_layer, input_space) if self.time_dep: time_der_layer = self.findTimeDer(output_layer, input_time) else: time_der_layer=0 pde_layer = self.findPdeLayer(laplace_layer, inputs, time_der_layer) return output_layer, pde_layer elif not training: #only outputting the function value if not tranining. return output_layer class Poission(ForwardModel): """ Doc string goes here """ def __init__(self, space_dim=1, perm_tensor=None, output_dim=1, n_hid_lay=3, n_hid_nrn=20, act_func = "tanh", rhs_func = None): """ talk about super initialization """ super().__init__(space_dim=space_dim, time_dep=False, output_dim=output_dim, n_hid_lay=n_hid_lay, n_hid_nrn=n_hid_nrn, act_func = act_func, rhs_func = rhs_func) self._perm_tensor = perm_tensor if perm_tensor else tf.eye(space_dim) #final layer representing the lhs P(x) of PDE P(x)=0 def findPdeLayer(self, laplacian, input_arg): """ (tensor, tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the actual pde P(u,delu,x,t) such that P(u,delu,x,t)=0. arguments: ---------- laplacian (tf tensor): laplacian with respect to space dim . input_arg: list of inputs corresponding to both space and time dimension. Last elemetn of the list corresponds to the temporal dimension. Output: Keras.Lambda layer. This lambda layer outputs the PDE P(u,delu, x,t). See tf.Keras.Lambda and tf.gradients for more details. """ try: return keras.layers.Lambda(lambda z: -z[0] - self.rhs_function(input_arg)) ([laplacian, input_arg]) except Exception as e: raise Exception("Error occured in finding pde lambda layer of type {} as follows: \n{}".format(type(e)),e) def call(self, inputs, training=False): """ Call function which wll be used while training, prediciton and evaluation of the ForwardModel. arguments: ---------- inputs (list of tensors) -> last element of the list corresponds to temporal diimension if self.time_dep = True. If possible, always feed the data from the data processing method in flowDataProcess module. training (bool) -> True if calling the function for training. False for prediction and evaluation. Value of triainng will be automatically taken care of by Keras. Note that inputs should always be given as a list with the last element of the list representing the dimension corresponding to time. 
""" if self.time_dep: input_space = inputs #concatening all the input data (space and time dimensions) making it #read to be passed to the hidden layers hidden_output = keras.layers.concatenate(inputs) #hidden layers for layer_id in range(self.n_hid_lay): hidden_output = self.hidden_block[layer_id] (hidden_output) #output layer, this is typically the solution function output_layer = self.final_layer(hidden_output) if training: #pde specific layers grad_layer = self.findGrad(output_layer, input_space) laplace_layer = self.findLaplace(grad_layer, input_space) pde_layer = self.findPdeLayer(laplace_layer, inputs) return output_layer, pde_layer elif not training: #only outputting the function value if not tranining. return output_layer class MixedDif(ForwardModel): """ Doc string goes here """ def __init__(self, space_dim=1, perm_tensor=None, output_dim=1, n_hid_lay=3, n_hid_nrn=20, act_func = "tanh", rhs_func = None, n_xtr_hid_lay=1): """ talk about super initialization """ super().__init__(space_dim=space_dim, time_dep=True, output_dim=output_dim, n_hid_lay=n_hid_lay, n_hid_nrn=n_hid_nrn, act_func = act_func, rhs_func = rhs_func) self._perm_tensor = perm_tensor if perm_tensor else tf.eye(space_dim) self.n_xtr_hid_lay = n_xtr_hid_lay self.__pressure_dim = 1 # Defining final output layers for pressure and velocity #pressure self.__pres_final_layer = keras.layers.Dense(self.__pressure_dim, name="pressure_out_layer") self.__vel_final_layer = keras.layers.Dense(space_dim, name = "velocity_out_layer") #Block of extra hidden layers for velocity self.xtra_hidden_block = [keras.layers.Dense( self.n_hid_nrn, activation=act_func, name="dense_xtra_"+str(i+1) ) for i in range(n_xtr_hid_lay)] def findDivLayer(self,vel_vector,input_space): """ (tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the divergence of the velocity vector. arguments: ---------- vel_vector (tf tensor): predicted velocity function represented by tf tensor structure (Usually of size: data_size x space_dim). input_space: argument with respect to which we need the partial derrivatives of func. Usually a list of input arguments representing the space dimension. Output: Keras.Lambda layer. This lambda layer outputs the laplacian of solution function u. See tf.Keras.Lambda and tf.gradients for more details. """ try: # list containng diagonal entries of the gradient #on velocity vector. Note that tf.gradients #returns a list of tensors and hence thats why we have a [0] at the end of #the tf.gradients fucntion as tf.gradients(func,argm) [0] grad_diag= keras.layers.Lambda( lambda z: [ tf.gradients(z[0][:,i], z[1][i], unconnected_gradients='zero') [0] for i in range(len(z[1])) ] ) ([vel_vector,input_space]) return sum(grad_diag) except Exception as e: raise Exception("Error occured in finding divergence of velocity lambda layer of type {} as follows: \n{}".format(type(e)),e) #layer representing the scalar equation in the pde, PDE P_scalar(x,t)=0. #This is typically something of the form p_t - div(u) -f =0 def findPdeLayer_1(self, div_vel, input_arg, time_der=0): """ (tensor, tensor, tensor) -> Keras.Lambda layer Returns lambda layer to find the actual pde P(u,delu,x,t) such that P(u,delu,x,t)=0. arguments: ---------- div_vel(tf tensor): divergence of velocity vector with respect to space dim . input_arg: list of inputs corresponding to both space and time dimension. Last elemetn of the list corresponds to the temporal dimension. time_der (tf tensor): derrivative of the pressure function with respect to time. 
Output: Keras.Lambda layer. This lambda layer outputs the PDE P(u,delu, x,t). See tf.Keras.Lambda and tf.gradients for more details. """ try: # return keras.layers.Lambda(lambda z: z[0] - z[1] - tf.sin(z[2][0]+z[2][1]) - # 2*z[2][2]*tf.sin(z[2][0]+z[2][1])) ([time_der, laplacian, input_arg]) return keras.layers.Lambda(lambda z: z[0] - z[1] - self.rhs_function(input_arg)) ([time_der, div_vel, input_arg]) except Exception as e: raise Exception("Error occured in finding pressure pde lambda layer of type {} as follows: \n{}".format(type(e)),e) #layer representing the vector equation in the pde, PDE PDE_vector(x,t)=0. #This is typically something of the form grad(p) + u = 0 def findPdeLayer_2(self, grad_p, vel_vect): """ (tensor, tensor) -> Keras.Lambda layer Returns list of lambda layer to find the vector PDE= 0 in the mixed form. arguments: ---------- grad_p(list of tf tensors): gradient of p with respect so spacedim. each element of the list corresponds to the partial with respect to different variable in spacedim.. vel_vect(tensor): velocity vector coming from teh final xtra hidden layer. This has shape, num_instances*vel_dim. note that vel_dim = space_dim. Output: list of Keras.Lambda layer. This lambda layer outputs a list representing the vector equation grad(p) + u =0. """ try: return keras.layers.Lambda(lambda z: [z[0][i] + z[1][:,i] for i in range(len(grad_p))]) ([grad_p, vel_vect]) except Exception as e: raise Exception("Error occured in finding vector pde lambda layer of type {} as follows: \n{}".format(type(e)),e) def call(self, inputs, training=False): """ Call function which wll be used while training, prediciton and evaluation of the ForwardModel. arguments: ---------- inputs (list of tensors) -> last element of the list corresponds to temporal diimension if self.time_dep = True. If possible, always feed the data from the data processing method in flowDataProcess module. training (bool) -> True if calling the function for training. False for prediction and evaluation. Value of triainng will be automatically taken care of by Keras. Note that inputs should always be given as a list with the last element of the list representing the dimension corresponding to time. """ if self.time_dep: try: assert(len(inputs) > 1) input_space = inputs[:-1] input_time = inputs[-1] except Exception as e: raise Exception("Error occured while separating spacial and temporal data from inputs,\ make sure that spacio-temporal data is being used to for training and \ x=[space_dim1,..,space_dimn,time_dim]. 
More details on error below:\n", type(e), e) else: input_space = inputs #concatening all the input data (space and time dimensions) making it #read to be passed to the hidden layers hidden_output = keras.layers.concatenate(inputs) #hidden layers for layer_id in range(self.n_hid_lay): hidden_output = self.hidden_block[layer_id] (hidden_output) # #output layer, this is typically the solution function # output_layer = self.final_layer(hidden_output) #pressure output layer pres_output_layer = self.__pres_final_layer(hidden_output) #velocity output layer for layer_id in range(self.n_xtr_hid_lay): hidden_output = self.xtra_hidden_block[layer_id] (hidden_output) vel_output_layer = self.__vel_final_layer(hidden_output) if training: #pde specific layers #finding gradient of pressure with respect to space dim grad_layer = self.findGrad(pres_output_layer, input_space) #finding divergence of the velocity layer with respect to space dim div_layer = self.findDivLayer(vel_output_layer, input_space) if self.time_dep: time_der_layer = self.findTimeDer(pres_output_layer, input_time) else: time_der_layer=0 #PDE_1 = 0 the scalar equation of the form p_t - div(u) -f = 0 pde_layer_1 = self.findPdeLayer(div_layer, inputs, time_der_layer) # pde_layer_2 = self.findPdeLayer_2(grad_layer, vel_output_layer) # return pres_output_layer, pde_layer_1, pde_layer_2 return pres_output_layer, pde_layer_1 elif not training: #only outputting the function value if not tranining. # return pres_output_layer, vel_output_layer return pres_output_layer
0.851459
0.484807
## 19.3.2 Open MongoDB compass to visually see what each python commands does You'll see below screen - > ![first screen](mongo_img/compass/c1.JPG) ### STEP 1: Create a DB ``` import pymongo client = pymongo.MongoClient("mongodb://localhost:27017/") client import pymongo client_cloud = pymongo.MongoClient("mongodb+srv://test:test@cluster0.he6iz.mongodb.net/myFirstDatabase?retryWrites=true&w=majority") db = client_cloud.test DEFAULT_CONNECTION_URL = "mongodb://localhost:27017/" DB_NAME = "iNeuron" # Establish a connection with mongoDB client = pymongo.MongoClient(DEFAULT_CONNECTION_URL) # Create a DB dataBase = client[DB_NAME] db_1 = client["sudhanshu_kumar"] ``` --- ### Paste the default URL in highlighted area of the Compass tool as shown below and click on connect default URL for local system:- ``` mongodb://localhost:27017/ ``` > ![new connection](mongo_img/compass/c2.JPG) after you press connect you'll see the following screen which contains already existing databases > ![new connection](mongo_img/compass/c3.JPG) **NOTE** you'll not see your database untill or unless you have created first document inside it. So at present we don't have any document in our DB its name is not visible here You can also create a database by clicking on CREATE DATABSE button. You'll see a below screen (But we'll see eveyrthing using python) > ![new connection](mongo_img/compass/c4.JPG) ``` # lets see what the existing list of DBs - client.list_database_names() client_cloud.list_database_names() # let's verify whether we have our database in the list or not # we'll use the following function:- def checkExistence_DB(DB_NAME, client): """It verifies the existence of DB""" DBlist = client.list_database_names() if DB_NAME in DBlist: print(f"DB: '{DB_NAME}' exists") return True print(f"DB: '{DB_NAME}' not yet present OR no collection is present in the DB") return False _ = checkExistence_DB(DB_NAME=DB_NAME, client=client) ``` ### STEP 2: Create a collection ``` COLLECTION_NAME = "iNeuron_Products" collection = dataBase[COLLECTION_NAME] # let's verify whether we have our database in the list or not # we'll use the following function:- def checkExistence_COL(COLLECTION_NAME, DB_NAME, db): """It verifies the existence of collection name in a database""" collection_list = db.list_collection_names() if COLLECTION_NAME in collection_list: print(f"Collection:'{COLLECTION_NAME}' in Database:'{DB_NAME}' exists") return True print(f"Collection:'{COLLECTION_NAME}' in Database:'{DB_NAME}' does not exists OR \n\ no documents are present in the collection") return False _ = checkExistence_COL(COLLECTION_NAME=COLLECTION_NAME, DB_NAME=DB_NAME, db=dataBase) ``` ### STEP 3: Insert a record in the collection ``` import pymongo #client = pymongo.MongoClient("mongodb://localhost:27017/") client = pymongo.MongoClient("mongodb+srv://test:test@cluster0.he6iz.mongodb.net/myFirstDatabase?retryWrites=true&w=majority") db_1 = client["sudhanshu_kumar"] collection = db_1["test"] record = {'companyName': 'iNeuron', 'product': 'Affordable AI', 'courseOffered': 'Deep Learning for Computer Vision', 'name' : ["sudhan","kumar",5466], "record_dict" :{"name" :"sudhanshu" , "mail_id" : "sudhanshu@fadfsaf.ai","ph_number" :543535}} collection.insert_one(record) record = {'companyName': 'iNeuron', 'product': 'Affordable AI', 'courseOffered': 'Deep Learning for Computer Vision', 'name' : ["sudhan","kumar",5466], "record_dict" :{"name" :"sudhanshu" , "mail_id" : "sudhanshu@fadfsaf.ai","ph_number" :543535}} collection.insert_one(record) 
{"_id":{"$oid":"60a1089e715f9806b78164be"},"companyName":"iNeuron","product":"Affordable AI","courseOffered":"Deep Learning for Computer Vision","name":["sudhan","kumar",5466],"record_dict":{"name":"sudhanshu","mail_id":"sudhanshu@fadfsaf.ai","ph_number":543535}} _ = checkExistence_COL(COLLECTION_NAME=COLLECTION_NAME, DB_NAME=DB_NAME, db=dataBase) ``` Now you can verify in Compass that iNeuron DB exists: - > ![verify Db](mongo_img/compass/c5.JPG) > **NOTE**: You may need to click refresh button if your DB is not visible here. You'll find collection name here- > ![verify Collection](mongo_img/compass/c6.JPG) Verify your inserted record with a unique id which is given by mongoDB by default - > ![check inserted document](mongo_img/compass/c7.JPG) Let's reverify whether our database and collection exists or not by using the function that we have defined before. ``` # Verify DATABASE _ = checkExistence_DB(DB_NAME=DB_NAME, client=client) # Verify COLLECTION _ = checkExistence_COL(COLLECTION_NAME=COLLECTION_NAME, DB_NAME=DB_NAME, db=dataBase) ``` ### STEP 4: Insert multiple records ``` list_of_records = [ {'companyName': 'iNeuron', 'product': 'Affordable AI', 'courseOffered': 'Machine Learning with Deployment'}, {'companyName': 'iNeuron', 'product': 'Affordable AI', 'courseOffered': 'Deep Learning for NLP and Computer vision'}, {'companyName': 'iNeuron', 'product': 'Master Program', 'courseOffered': 'Data Science Masters Program', "test" : "ffsdfsffsf", "complex" : [{"name" : "sudhanshu","list" : [554,545,454,54,5,4]},{"email_id" :"sudhanshu@dffsf"},{"phone_no":345345345353},[4,54,534,5,45,5,45,4]] }] rec = collection.insert_many(list_of_records) collection.find_one() # lets print he unique ID that of the record that we have inserted - inserted_IDs = rec.inserted_ids for idx, unique_ids in enumerate(inserted_IDs): print(f"{idx}. {unique_ids}") ``` We can verify the inserted records by refreshing our compass document- > ![inserted list of records](mongo_img/compass/c8.JPG) You can override the default unique Id by giving a user defined as shown below - ``` COLLECTION_NAME = "iNeuron_Faculties" faculties = db_1[COLLECTION_NAME] list_of_records_user_defined_id = [ {"_id": "6", "companyName": "iNeuron", "Faculty": "Sudhanshu Kumar"}, {"_id": "8", "companyName": "iNeuron", "Faculty": "Virat Sagar"}, ] faculties_record = faculties.insert_many(list_of_records_user_defined_id) ``` refresh the Compass tool and you'll see a fresh collection is created by name iNeuron_Faculties and this time _id is defined by us. 
Refer the highlighted portion of the image below:- > ![unique_id](mongo_img/compass/c9.JPG) > **NOTE**: Make sure the \_id of the records that you insert are unique other wise you'll get a _BulkWriteError_ which comes because of duplicate key ### STEP 5: Find method in MongoDB ``` find_first_record = faculties.find_one() print(f"The first record of collection: \n{COLLECTION_NAME} is=\ \n{find_first_record}") faculties.find_one() for i in faculties.find(): print(i) for i in collection.find(): print(i) # find all the record at once present in thr record with all the fields all_record = faculties.find() for idx, record in enumerate(all_record): print(f"{idx}: {record}") # find all the record at once present in the record with SPECIFIC fields all_record = faculties.find({}, {"Faculty"}) for idx, record in enumerate(all_record): print(f"{idx}: {record}") ``` ### STEP 6: Query or filter out data in MongoDB ``` for i in faculties.find({'Faculty':"Virat Sagar"}): print(i) for i in faculties.find({'_id':{'$gt':'1'}}): print(i) query1 = {"_id": '1'} results = faculties.find(query1) for data in results: print(data) query2 = {"_id": {"$gt": "1"}} results = faculties.find(query2) for data in results: print(data) ``` ### STEP 7: Delete one or many documents in MongoDB ``` # Lets add some random data in faculties random_data = [ {'_id': '3', 'companyName': 'iNeuron', 'Faculty': 'XYZ'}, {'_id': '4', 'companyName': 'iNeuron', 'Faculty': 'ABC'}, {'_id': '5', 'companyName': 'iNeuron', 'Faculty': 'PQR'}, ] faculties.insert_many(random_data) # Lets delete one document in faculties query_to_delete = {"Faculty": "XYZ"} faculties.delete_one(query_to_delete) for i in faculties.find({"_id": {"$gte": "4"}}): print(i) faculties.delete_many({"_id": {"$gte": "4"}}) # lets delete multiple record multi_query_to_delete = {"_id": {"$gte": "4"}} faculties.delete_many(multi_query_to_delete) ``` > **NOTE**: In order to delete all the documents present in the collection you can just pass and empty dictionary as shown below: - ```python faculties.delete_many({}) ``` ### STEP 8: Drop the entire collection ``` faculties.drop() # Lets verify if the collection exists or not after dropping it COLLECTION_NAME = "iNeuron_Faculties" DB_NAME = "iNeuron" _ = checkExistence_COL(COLLECTION_NAME=COLLECTION_NAME, DB_NAME=DB_NAME, db=dataBase) ``` ### STEP 9: Update ``` COLLECTION_NAME = "iNeuron_Products" products = dataBase[COLLECTION_NAME] all_record = products.find() for idx, record in enumerate(all_record): print(f"{record}\n") present_data = {'courseOffered': 'Machine Learning with Deployment'} new_data = {"$set":{'courseOffered': 'ML and DL with Deployment'}} products.update_one(present_data, new_data) all_record = products.find() for idx, record in enumerate(all_record): print(f"{record}\n") present_data = {'companyName': 'iNeuron'} new_data = {"$set": {'companyName': 'iNeuron.ai'}} products.update_many(present_data, new_data) all_record = products.find() for idx, record in enumerate(all_record): print(f"{record}\n") ``` ### STEP 9: Set limit to view N records ``` N_records = 3 N_record = products.find().limit(N_records) for idx, record in enumerate(N_record): print(f"{record}\n") ```
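As a small extension of the steps above (not part of the original walkthrough), two other read operations you will use constantly are counting and sorting. The sketch below assumes the `products` collection created earlier and uses only standard pymongo calls.

```
import pymongo

# Count the documents matching a filter (an empty filter counts everything)
total_docs = products.count_documents({})
print(f"Total documents in '{COLLECTION_NAME}': {total_docs}")

# Sort by a field and cap the number of results
for record in products.find().sort("courseOffered", pymongo.DESCENDING).limit(3):
    print(record)
```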
github_jupyter
0.135618
0.538498
### Amazon Sentiment Data To ease-up the upcoming implementation exercise, examine and comment the following implementation of a log-linear model and its gradient update rule. Start by loading Amazon sentiment corpus used in day 1 ``` %load_ext autoreload %autoreload 2 import lxmls.readers.sentiment_reader as srs from lxmls.deep_learning.utils import AmazonData corpus = srs.SentimentCorpus("books") data = AmazonData(corpus=corpus) data.datasets['train'] ``` ### A Shallow Model: Log-Linear in Numpy Compare the following numpy implementation of a log-linear model with the derivations seen in the previous sections. Introduce comments on the blocks marked with # relating them to the corresponding algorithm steps. ``` from lxmls.deep_learning.utils import Model, glorot_weight_init, index2onehot, logsumexp import numpy as np class NumpyLogLinear(Model): def __init__(self, **config): # Initialize parameters weight_shape = (config['input_size'], config['num_classes']) # after Xavier Glorot et al self.weight = glorot_weight_init(weight_shape, 'softmax') self.bias = np.zeros((1, config['num_classes'])) self.learning_rate = config['learning_rate'] def log_forward(self, input=None): """Forward pass of the computation graph""" # Linear transformation z = np.dot(input, self.weight.T) + self.bias # Softmax implemented in log domain log_tilde_z = z - logsumexp(z, axis=1, keepdims=True) return log_tilde_z def predict(self, input=None): """Prediction: most probable class index""" return np.argmax(np.exp(self.log_forward(input)), axis=1) def update(self, input=None, output=None): """Stochastic Gradient Descent update""" # Probabilities of each class class_probabilities = np.exp(self.log_forward(input)) batch_size, num_classes = class_probabilities.shape # Error derivative at softmax layer I = index2onehot(output, num_classes) error = (class_probabilities - I) / batch_size # Weight gradient gradient_weight = np.zeros(self.weight.shape) for l in range(batch_size): gradient_weight += np.outer(error[l, :], input[l, :]) # Bias gradient gradient_bias = np.sum(error, axis=0, keepdims=True) # SGD update self.weight = self.weight - self.learning_rate * gradient_weight self.bias = self.bias - self.learning_rate * gradient_bias ``` ### Training Bench Instantiate model and data classes. Check the initial accuracy of the model. This should be close to 50% since we are on a binary prediction task and the model is not trained yet. ``` learning_rate = 0.05 num_epochs = 10 batch_size = 30 model = NumpyLogLinear( input_size=corpus.nr_features, num_classes=2, learning_rate=learning_rate ) # Define number of epochs and batch size num_epochs = 10 batch_size = 30 # Get batch iterators for train and test train_batches = data.batches('train', batch_size=batch_size) test_set = data.batches('test', batch_size=None)[0] # Get intial accuracy hat_y = model.predict(input=test_set['input']) accuracy = 100*np.mean(hat_y == test_set['output']) print("Initial accuracy %2.2f %%" % accuracy) ``` Train the model with simple batch stochastic gradient descent. Be sure to understand each of the steps involved, including the code running inside of the model class. We will be wokring on a more complex version of the model in the upcoming exercise. 
``` # Epoch loop for epoch in range(num_epochs): # Batch loop for batch in train_batches: model.update(input=batch['input'], output=batch['output']) # Prediction for this epoch hat_y = model.predict(input=test_set['input']) # Evaluation accuracy = 100*np.mean(hat_y == test_set['output']) # Inform user print("Epoch %d: accuracy %2.2f %%" % (epoch+1, accuracy)) ```
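One implementation detail worth commenting on is why `log_forward` works in the log domain: subtracting `logsumexp(z)` is the numerically stable way of computing a softmax. The tiny stand-alone numpy illustration below (not part of the exercise code) shows the naive version overflowing while the log-domain version stays finite.

```
import numpy as np

z = np.array([[1000.0, 1001.0, 1002.0]])   # large scores: np.exp(z) overflows to inf

naive = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)           # -> [[nan nan nan]]

m = z.max(axis=1, keepdims=True)                                    # shift by the row max
log_softmax = z - (m + np.log(np.exp(z - m).sum(axis=1, keepdims=True)))
stable = np.exp(log_softmax)                                        # ~[[0.09 0.24 0.67]]

print(naive)
print(stable, stable.sum())                                         # probabilities sum to 1
```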
github_jupyter
0.707809
0.954351
```
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statistics

data_issues = []
data_empty = []
data_all = []

csv_columns = ['Hostname', 'Keyspace', 'Table', 'Stats', 'Value', 'Timestamp']
df_columns = ['Keyspace', 'Table', 'Stats', 'Value_Mean', 'Value_Variance', 'Value_Standard_Deviation']

df = pd.read_csv('usprod2.cfstats.csv', names=csv_columns)
del df['Timestamp']

ar_keyspaces = df["Keyspace"].unique()
ar_stats = df["Stats"].unique()
ar_hosts = df["Hostname"].unique()
ar_tables = df["Table"].unique()

ar_stat_cols = ['Hostname', 'Keyspace', 'Table']
ar_stat_cols += df["Stats"].unique().tolist()

ar_stat_skew_cols = ['SpaceUsedLive', 'NumberOfKeys']
ar_stat_wide_cols = ['CompactedPartitionMaximumBytes']
ar_stat_tomb_cols = ['AverageTombstones', 'MaximumTombstones']
ar_stat_prob_cols = ar_stat_skew_cols + ar_stat_wide_cols + ar_stat_tomb_cols

df_pivot = pd.DataFrame(columns=ar_stat_cols)
pivot_index = 0

for keyspace in ar_keyspaces:
    #print(keyspace)
    df_keyspace_data = df[df['Keyspace'] == keyspace]
    ar_tables = df_keyspace_data["Table"].unique()
    for table in ar_tables:
        #print(table)
        df_table_data = df_keyspace_data[(df_keyspace_data['Table'] == table) & (df_keyspace_data['Keyspace'] == keyspace)]
        ar_stats = df_table_data["Stats"].unique()
        for stat in ar_stats:
            df_stat_data = df_table_data[(df_table_data['Table'] == table) & (df_table_data['Stats'] == stat)]
            value_stdev = statistics.stdev(df_stat_data['Value'])
            value_mean = statistics.mean(df_stat_data['Value'])
            value_variance = statistics.variance(df_stat_data['Value'], value_mean)
            value_data = [keyspace, table, stat, value_mean, value_variance, value_stdev]
            # Guard against division by zero for all-zero stats
            value_stdbymean = value_stdev / value_mean if value_mean != 0 else 0.0
            if (value_stdev != 0) & (value_mean != 0) & (value_stdbymean > 1.2):
                data_issues.append(value_data)
                # can also use ar_stat_wide_cols or ar_stat_tomb_cols
                if stat in ar_stat_skew_cols:
                    plt.figure()
                    print("{}.{}.{}".format(keyspace, table, stat))
                    df_stat_data.plot.bar()
                    plt.show()
            elif (value_stdev == 0) & (value_mean == 0):
                data_empty.append(value_data)
            data_all.append(value_data)
        for host in ar_hosts:
            s_host_data = pd.Series({'Hostname': host, 'Keyspace': keyspace, 'Table': table})
            for stat in ar_stats:
                df_host_data = df_table_data[(df_table_data['Hostname'] == host) & (df_table_data['Stats'] == stat)]['Value']
                # Series.set_value was removed in newer pandas; plain item assignment does the same
                s_host_data[stat] = df_host_data.iloc[0]
            if table != 'keyspace':
                df_pivot.loc[pivot_index] = s_host_data
                pivot_index = pivot_index + 1

df_issues = pd.DataFrame(data=data_issues, columns=df_columns)
df_empty = pd.DataFrame(data=data_empty, columns=df_columns)
df_all = pd.DataFrame(data=data_all, columns=df_columns)

print("Problem Tables: {}".format(len(df_issues['Table'].unique())))
```
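As a cross-check on the nested loops above, the same per-(Keyspace, Table, Stats) mean, variance and standard deviation can be computed with a single `groupby` aggregation. This is only a sketch of an equivalent summary on the `df` loaded above (it reuses the pandas/numpy imports and does not reproduce the plots or the per-host pivot):

```
# Sketch: the same summary statistics via one groupby aggregation
summary = (
    df.groupby(["Keyspace", "Table", "Stats"])["Value"]
      .agg(Value_Mean="mean", Value_Variance="var", Value_Standard_Deviation="std")
      .reset_index()
)

# Flag heavily skewed stats using the same std/mean > 1.2 rule as above
ratio = summary["Value_Standard_Deviation"] / summary["Value_Mean"].replace(0, np.nan)
print("Problem Tables: {}".format(summary[ratio > 1.2]["Table"].nunique()))
```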
# Vectorizing tabular fields

This notebook covers simple methods to vectorize tabular data using the same dataset as in the other examples, but this time ignoring the text of the question.

```
import pandas as pd
from pathlib import Path

import sys
sys.path.append("..")

import warnings
warnings.filterwarnings('ignore')

from ml_editor.data_processing import get_normalized_series

data_path = Path("../data/writers.csv")
df = pd.read_csv(data_path)
```

Let's pretend we wanted to predict the **score** from the tags, the number of comments, and the question creation date. Here is what the data looks like:

```
df["is_question"] = df["PostTypeId"] == 1

tabular_df = df[df["is_question"]][["Tags", "CommentCount", "CreationDate", "Score"]]
tabular_df.head()
```

In order to use this data as input to a model, we need to give it a suitable numerical representation. To do so, we will do three things here:

1. Normalize numerical input features to limit the impact of outliers
2. Transform the date feature in a way that makes it easier for a model to understand
3. Get dummy variables from categorical features so a model can ingest them

First, we normalize the data to reduce the effect of outliers on downstream model performance.

```
tabular_df["NormComment"] = get_normalized_series(tabular_df, "CommentCount")
tabular_df["NormScore"] = get_normalized_series(tabular_df, "Score")
tabular_df.head()
```

Now, let's represent dates in a way that makes it easier for a model to extract patterns (see chapter 4 of the attached book for more information on why we chose these particular features).

```
# Convert our date to a pandas datetime
tabular_df["date"] = pd.to_datetime(tabular_df["CreationDate"])

# Extract meaningful features from the datetime object
tabular_df["year"] = tabular_df["date"].dt.year
tabular_df["month"] = tabular_df["date"].dt.month
tabular_df["day"] = tabular_df["date"].dt.day
tabular_df["hour"] = tabular_df["date"].dt.hour
tabular_df.head()
```

And finally, let's transform tags into dummy variables using pandas' [get_dummies](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html) function, with each tag being assigned a column that takes the value 1 only if the tag is present in the given row.

```
# Select our tags, represented as strings, and transform them into arrays of tags
tags = tabular_df["Tags"]
clean_tags = tags.str.split("><").apply(
    lambda x: [a.strip("<").strip(">") for a in x])

# Use pandas' get_dummies to get dummy values
# select only tags that appear over 500 times
# (groupby(level=0).sum() replaces the now-removed .sum(level=0) idiom)
tag_columns = pd.get_dummies(clean_tags.apply(pd.Series).stack()).groupby(level=0).sum()
all_tags = tag_columns.astype(bool).sum(axis=0).sort_values(ascending=False)
top_tags = all_tags[all_tags > 500]
top_tag_columns = tag_columns[top_tags.index]

top_tag_columns.head()

# Add our tags back into our initial DataFrame
final = pd.concat([tabular_df, top_tag_columns], axis=1)

# Keeping only the vectorized features
col_to_keep = ["year", "month", "day", "hour", "NormComment",
               "NormScore"] + list(top_tags.index)
final_features = final[col_to_keep]
final_features.head()
```

Voila! Our tabular data is now ready to be used for a model.
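`get_normalized_series` comes from the companion `ml_editor` package and its body is not shown in this notebook. If that package is not available, a plain z-score normalization like the sketch below is a reasonable stand-in; whether this matches the package's exact implementation is an assumption.

```
import pandas as pd

# Hypothetical stand-in for ml_editor.data_processing.get_normalized_series:
# rescale a column to zero mean and unit standard deviation.
def get_normalized_series_sketch(df: pd.DataFrame, col: str) -> pd.Series:
    return (df[col] - df[col].mean()) / df[col].std()

# Usage mirroring the cell above:
# tabular_df["NormComment"] = get_normalized_series_sketch(tabular_df, "CommentCount")
```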
```
from io import StringIO
import sys, getopt
import pandas as pd
import numpy as np
import re
import os

! pip3 install gensim tensorflow tflearn
! git clone --recursive https://BedirT:bedir5363@github.com/AhmetHamzaEmra/CakmaSair.git

import glob

text = ""
for i in range(3,7):
    with open("CakmaSair/Data/siirler_page" + str(i) + ".txt", 'r') as f:
        text += f.read()
len(text)

alfabe = 'çığöşüâqwertyuioplkjhgfdsazxcvbnmÇIĞÖŞÜÂQWERTYUIOPLKJHGFDSAZXCVBNM'
alfabe = list(alfabe)
alfabe.append("<start>")
alfabe.append("<end>")
alfabe.append("\n")
alfabe.append("<")
alfabe.append(">")
alfabe.append("\\")
alfabe.append(" ")

chars_set = set(text)
for i in chars_set:
    if i not in alfabe:
        text = text.replace(i, "")

text = text.replace("\n", " \n ")
text = text.replace("<start>", " <start> ")
text = text.replace("<end>", " <end> ")

# Collapse runs of spaces left over from the replacements above
# (reconstructed: the original repeatedly replaced multiple spaces with a single space)
for k in range(100):
    text = text.replace("  ", " ")

text = text.replace("<start>", "<siir> <start>")
text = text.replace("<end>", "<end> <zero> <zero> <zero> <zero>")
text = text.split("<siir>")

for i in range(len(text)):
    text[i] = text[i].split(" ")

import gensim.models.word2vec as w2v
import multiprocessing

num_features = 300
min_word_count = 1
num_workers = multiprocessing.cpu_count()
context_size = 7
downsampling = 1e-3
seed = 1

w2vmodel = w2v.Word2Vec(
    sg=1,
    seed=seed,
    workers=num_workers,
    size=num_features,
    min_count=min_word_count,
    window=context_size,
    sample=downsampling
)

w2vmodel.build_vocab(text)
w2vmodel.train(text, total_examples=w2vmodel.corpus_count, epochs=20)

w2vmodel.wv.most_similar('<start>', topn=10)
```

## Operation continues

```
word_to_vec_map = w2vmodel[w2vmodel.wv.vocab]
vocab = w2vmodel.wv.vocab

word_to_index = {}
index_to_word = {}
for i, w in enumerate(vocab):
    word_to_index[w] = i
    index_to_word[i] = w
word_to_index['bu']

import tflearn
from tflearn.data_utils import *
import tensorflow as tf

max_len = 5

g = tflearn.input_data([None, max_len, num_features])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, num_features, activation='linear')
g = tflearn.regression(g, optimizer='adam', loss='mean_square', learning_rate=0.001)

! pip3 install tqdm
from tqdm import tqdm

trainX = []
trainY = []
for i in tqdm(text):
    if len(i) > 6:
        for k in range(len(i)-6):
            a = []
            for j in range(k, k+5):
                a.append(w2vmodel[i[j]])
            trainX.append(a)
            b = w2vmodel[i[k+5]]
            trainY.append(b)

len(trainX)

trainX = np.array(trainX)
trainY = np.array(trainY)
trainX.shape, trainY.shape

! ls

model = tflearn.DNN(g, tensorboard_verbose=0)
model.load("model1.tflearn")

for step in range(20):
    model.fit(trainX, trainY, validation_set=0.1, n_epoch=1, run_id='siir')
    model.save('model1.tflearn')

for step in range(20):
    model.fit(trainX, trainY, validation_set=0.1, n_epoch=1, run_id='siir')
    model.save('model1.tflearn')

for step in range(60):
    model.fit(trainX, trainY, validation_set=0.1, n_epoch=1, run_id='siir')
    model.save('model1.tflearn')

unknown_ = word_to_vec_map.mean(axis=0)

def generate(input_text):
    input_text = input_text.split(" ")
    input_vec = np.zeros([1, 5, 300])
    output = input_text
    for i, w in enumerate(input_text[-5:]):
        if w in vocab:
            input_vec[0, i] = w2vmodel[w]
        else:
            input_vec[0, i] = unknown_
    pred = model.predict(input_vec)[0]
    output.append(w2vmodel.wv.similar_by_vector(pred)[0][0])
    return " ".join(output)

def generate_seq(input_text, seq_len=100):
    for i in range(seq_len):
        input_text = generate(input_text)
    return (input_text)

print(generate_seq("Ne hasta bekler sabahı, \n Ne taze ölüyü mezar."))

w2vmodel.wv.similar_by_vector(unknown_)
```
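The cleanup above collapses repeated spaces with a `replace` loop; since `re` is already imported at the top of the notebook, the same normalization can be expressed directly as a regular expression. A minimal sketch, assuming it is applied to `text` while it is still a single string:

```
import re

def normalize_spaces(text):
    # Collapse any run of two or more spaces into a single space
    return re.sub(r" {2,}", " ", text)

# Usage in place of the range(100) replace loop:
# text = normalize_spaces(text)
```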
# Machine Learning Engineer Nanodegree
## Model Evaluation and Validation
## Project 1: Predicting Boston Housing Prices

Welcome to the first project of the Machine Learning Engineer Nanodegree! In this file, some example code has already been provided for you, but you will also need to implement additional functionality for the project to run successfully. Unless explicitly asked to, you do not need to modify any of the code that has been given. Headings that start with **'Exercise'** indicate that the following block contains functionality you must implement. Each section comes with detailed instructions, and the parts to implement are marked with **'TODO'** in the comments. Please read all of the hints carefully!

In addition to implementing code, you **must** answer some questions related to the project and your implementation. Each question you need to answer is headed with **'Question X'**. Read each question carefully and write a complete answer in the **'Answer'** text box that follows it. Your project will be graded on your answers to the questions and the functionality implemented in your code.

>**Tip:** Code and Markdown cells can be run with the **Shift + Enter** shortcut. In addition, a Markdown cell can be edited by double-clicking it.

## Getting Started

In this project, you will use housing data from the Boston suburbs in Massachusetts to train and test a model, and evaluate its performance and predictive power. A model trained well on this data can then be used to make certain predictions about a house, in particular its value. A model like this would prove valuable in the everyday work of people such as real-estate agents.

The dataset for this project comes from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Housing). The Boston housing data was collected starting in 1978 and consists of 506 data points covering 14 features of homes in different suburbs of Boston, Massachusetts. For this project the original dataset has been preprocessed as follows:

- 16 data points with a `'MEDV'` value of 50.0 have been removed. They most likely contain **missing** or **censored values**.
- 1 data point with an `'RM'` value of 8.78 has been removed, as it is an outlier.
- For this project only the `'RM'`, `'LSTAT'`, `'PTRATIO'` and `'MEDV'` features are needed; the remaining irrelevant features have been removed.
- The `'MEDV'` feature has been scaled to account for 35 years of market inflation.

Run the code cell below to load the Boston housing dataset, along with a few of the Python libraries required for this project. The dataset loaded successfully if its size is reported.

```
# Import libraries necessary for this project
import numpy as np
import pandas as pd
import visuals as vs # Supplementary code
from sklearn.model_selection import ShuffleSplit

# Pretty display for notebooks
%matplotlib inline

# Load the Boston housing dataset
data = pd.read_csv('housing.csv')
prices = data['MEDV']
features = data.drop('MEDV', axis = 1)

# Success
print "Boston housing dataset has {} data points with {} variables each.".format(*data.shape)
```

## Exploring the Data

In the first part of the project you will make a cursory investigation of the Boston housing data and provide your analysis. Familiarizing yourself with the data through exploration will help you better understand and justify your results.

Since the ultimate goal of this project is to build a model that predicts house values, we need to separate the dataset into **features** and the **target variable**. The **features**, `'RM'`, `'LSTAT'` and `'PTRATIO'`, give us quantitative information about each data point. The **target variable**, `'MEDV'`, is the variable we want to predict. They are stored in the variables `features` and `prices`, respectively.

## Exercise: Calculate Statistics

Your first programming exercise is to calculate descriptive statistics about the Boston housing prices. `numpy` has already been imported for you; use it to perform the necessary calculations. These statistics are very important for analyzing the model's predictions.

In the code cell below, you will need to:
- Compute the minimum, maximum, mean, median and standard deviation of `'MEDV'` in `prices`;
- Store each result in its corresponding variable.

```
# TODO: Minimum price of the data
minimum_price = np.min(prices)

# TODO: Maximum price of the data
maximum_price = np.max(prices)

# TODO: Mean price of the data
mean_price = np.mean(prices)

# TODO: Median price of the data
median_price = np.median(prices)

# TODO: Standard deviation of prices of the data
std_price = np.std(prices)

# Show the calculated statistics
print "Statistics for Boston housing dataset:\n"
print "Minimum price: ${:,.2f}".format(minimum_price)
print "Maximum price: ${:,.2f}".format(maximum_price)
print "Mean price: ${:,.2f}".format(mean_price)
print "Median price ${:,.2f}".format(median_price)
print "Standard deviation of prices: ${:,.2f}".format(std_price)
```

### Question 1 - Feature Observation

As mentioned above, we focus on three values for each data point: `'RM'`, `'LSTAT'` and `'PTRATIO'`:
- `'RM'` is the average number of rooms per home in the neighborhood;
- `'LSTAT'` is the percentage of homeowners in the neighborhood considered lower class (working but with a low income);
- `'PTRATIO'` is the ratio of students to teachers in the primary and secondary schools of the neighborhood (`students/teacher`).

_Using your intuition, for each of the three features above, do you think that increasing its value would **increase** or **decrease** the value of `'MEDV'`? Justify each answer._

**Hint:** Would you expect a home with an `'RM'` value of 6 to be worth more or less than a home with an `'RM'` value of 7?

**Answer:**
- The larger `'RM'` is, the larger `'MEDV'` will be. A larger `'RM'` means more people can live in the house, so its price will be higher.
- The larger `'LSTAT'` is, the smaller `'MEDV'` will be. Higher-income owners do not sell their houses easily; they can afford to wait for a high offer and will not sell without one. Only lower-income owners, needing the money, will compete by selling at lower prices, so a larger `'LSTAT'` means lower house prices.
- The larger `'PTRATIO'` is, the smaller `'MEDV'` will be. For the same number of students (and hence roughly the same number of parents), a larger student-teacher ratio means fewer teachers and fewer people overall, so with the same number of houses a home can be bought at a lower price. A larger `'PTRATIO'` therefore means lower house prices.

## Developing a Model

In the second part of the project you will learn the tools and techniques necessary for your model to make predictions. Being able to measure each model's performance accurately with these tools greatly reinforces the confidence in your predictions.

### Exercise: Define a Performance Metric

It is difficult to measure the quality of a model without quantifying its performance on training and testing. This is typically done with some kind of metric, computed from an error or a goodness-of-fit measure. For this project you will quantify performance with the [*coefficient of determination*](http://stattrek.com/statistics/dictionary.aspx?definition=coefficient_of_determination), R<sup>2</sup>. The coefficient of determination is a very common statistic in regression analysis and is often used as a measure of how well a model predicts.

R<sup>2</sup> ranges from 0 to 1 and captures the percentage of the squared correlation between the predicted and actual values of the **target variable**. A model with an R<sup>2</sup> of 0 is no better than always predicting the **mean**, while a model with an R<sup>2</sup> of 1 predicts the target variable perfectly. Values between 0 and 1 indicate what percentage of the target variable can be explained by the **features**. _A model can also have a negative R<sup>2</sup>, meaning its predictions are sometimes far worse than simply predicting the mean of the target variable._

In the `performance_metric` function below you will need to:
- Use `r2_score` from `sklearn.metrics` to compute the R<sup>2</sup> between `y_true` and `y_predict` as the performance score.
- Store the score in the `score` variable.

```
# TODO: Import 'r2_score'
from sklearn.metrics import r2_score

def performance_metric(y_true, y_predict):
    """ Calculates and returns the performance score between
        true and predicted values based on the metric chosen. """

    # TODO: Calculate the performance score between 'y_true' and 'y_predict'
    score = r2_score(y_true, y_predict)

    # Return the score
    return score
```

### Question 2 - Goodness of Fit

Assume a dataset has five data points and a model makes the following predictions of the target variable:

| True Value | Predicted Value |
| :-------------: | :--------: |
| 3.0 | 2.5 |
| -0.5 | 0.0 |
| 2.0 | 2.1 |
| 7.0 | 7.8 |
| 4.2 | 5.3 |

*Do you think this model has successfully captured the variation of the target variable? If so, explain why; if not, explain why not.*

Run the code below to compute the model's coefficient of determination with the `performance_metric` function.

```
# Calculate the performance of this model
score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])
print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score)
```

**Answer:** Yes, it has, because R<sup>2</sup> is very close to 1.

### Exercise: Shuffle and Split Data

Next you need to split the Boston housing dataset into training and testing subsets. Typically the data is also shuffled during this process, to remove any bias introduced by the ordering of the dataset.

In the code below you will need to:
- Use `train_test_split` from `sklearn.model_selection` to split both `features` and `prices` into training and testing subsets.
- Use 80% of the data for training and 20% for testing;
- Set a value for `random_state` in `train_test_split`, which ensures reproducible results;
- Store the resulting subsets in `X_train`, `X_test`, `y_train` and `y_test`.

```
# TODO: Import 'train_test_split'
from sklearn.model_selection import train_test_split

# TODO: Shuffle and split the data into training and testing subsets
X_train, X_test, y_train, y_test = train_test_split(features, prices, test_size=0.2, random_state=0)

# Success
print "Training and testing split was successful"
```

### Question 3 - Training and Testing

*What is the benefit to a learning algorithm of splitting a dataset into training and testing subsets at some ratio?*

**Hint:** What would go wrong if there were no data left to test the model on?

**Answer:** The training subset is used to train the model and the testing subset is used to test it. The trained model predicts from the features of the test data, and those predictions are compared with the target values of the test data, which gives a realistic picture of how good the trained model actually is.

----

## Analyzing Model Performance

In the third part of the project you will look at several models' learning and testing performance on various subsets of data. In addition, you will focus on one particular algorithm, train it on the full training set while increasing its `'max_depth'` parameter, and observe how changing this parameter affects performance. Plotting your model's performance is very helpful for the analysis; visualization lets us see behavior that the raw scores alone do not show.

### Learning Curves

The code cell below produces four figures showing a decision tree model at different maximum depths. Each curve shows how the training and testing scores of the learning curve change as the amount of training data grows. Note that the shaded region around a curve denotes its uncertainty (measured as a standard deviation). Both the training and testing scores use the coefficient of determination, R<sup>2</sup>.

Run the code cell below and use the resulting plots to answer the question that follows.

```
# Produce learning curves for varying training set sizes and maximum depths
vs.ModelLearning(features, prices)
```

### Question 4 - Learning the Data

*Choose one of the plots above and state its maximum depth. As the amount of training data increases, how does the score of the training curve change? What about the testing curve? Would more training data effectively improve the model?*

**Hint:** Do the learning-curve scores eventually converge to particular values?

**Answer:** Maximum depth 3. The training-curve score gradually decreases until it levels off, the testing-curve score gradually increases until it levels off, and more training data would not help.

### Complexity Curves

The code cell below produces a figure showing the performance of a decision tree model that has been trained and validated at different maximum depths. The figure contains two curves, one for training and one for validation. As with the **learning curves**, the shaded region denotes the uncertainty of each curve, and both scores are computed with the `performance_metric` function.

Run the code cell below and use the resulting plot to answer the two questions that follow.

```
vs.ModelComplexity(X_train, y_train)
```

### Question 5 - Bias-Variance Tradeoff

*When the model is trained with a maximum depth of 1, does it suffer from high bias or from high variance? What about when it is trained with a maximum depth of 10? What visual cues in the plot support your conclusions?*

**Hint:** How do you tell whether a model suffers from high bias or from high variance?

**Answer:** With a maximum depth of 1 the model has high bias, which is shown by the very low Training Score; with a maximum depth of 10 it has high variance, since the Validation Score is falling.

### Question 6 - Best-Guess Optimal Model

*Which maximum depth do you think produces a model that best predicts unseen data? What is your reasoning?*

**Answer:** A maximum depth of 4, where the Validation Score is highest.

-----

## Evaluating Model Performance

In this final part of the project you will construct a model yourself and use the optimized `fit_model` function to predict a house's value from a client's house features.

### Question 7 - Grid Search

*What is grid search? How can it be used to optimize a learning algorithm?*

**Answer:** The feasible range of each parameter is divided into a series of small intervals (for example from small to large); the computer then evaluates, in order, the error objective for every combination of parameter values and compares them one by one, finding the minimum objective value in the range and the corresponding best parameter values. Cross-validation based on grid search is then used to select the model parameters.

### Question 8 - Cross-Validation

*What is k-fold cross-validation? What benefit does it bring to grid search when optimizing a model? How does grid search combine with cross-validation to select the best parameter combination? What problems arise if grid search is used without cross-validation? What can the [`'cv_results'`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) attribute of GridSearchCV tell us?*

**Hint:** Adding `print pd.DataFrame(grid.cv_results_)` at the end of the `fit_model` function below can help you see more.

**Answer:**
- The dataset is split into a training set and a testing set; the training set is then partitioned into k folds. Each time, one fold is used as the validation set and the remaining k-1 folds are used for training. Cross-validation repeats this k times, so every fold is used for validation exactly once, and the k results are averaged (or combined in some other way) into a single estimate.
- Using k-fold cross-validation avoids scores that come out too high or too low purely because of how the data happened to be split.
- Once the algorithm and scoring metric are chosen, cross-validation scores every parameter combination in the grid search, and the best combination is selected by comparing those scores.
- Without cross-validation, the scores can come out too high or too low purely because of how the data happened to be split.
- `cv_results_` reports the mean fit time and its spread, the mean scoring time and its spread, the mean and spread of the test and training scores, the parameter values and parameter dictionaries, the test-score rank of each parameter setting, and the training and test scores of each of the k splits.

### Exercise: Fit a Model

In this final exercise you will bring everything together and train a model with the **decision tree algorithm**. To make sure you end up with an optimized model, you will train it using grid search to find the best `'max_depth'` parameter. You can think of `'max_depth'` as the number of questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are a kind of **supervised learning algorithm**.

In addition, you will find that your implementation uses `ShuffleSplit()`, which is another form of cross-validation (see the `'cv_sets'` variable). While it is not the K-Fold cross-validation described in **Question 8**, this validation technique is just as useful! Here `ShuffleSplit()` creates 10 (`'n_splits'`) shuffled sets, and for each one, 20% (`'test_size'`) of the data is used as the **validation set**. While you are working on the implementation, think about how this is similar to, and different from, K-Fold cross-validation.

In the `fit_model` function below you will need to:
- Use `DecisionTreeRegressor` from `sklearn.tree` to create a decision tree regressor;
- Store the regressor in the `'regressor'` variable;
- Create a dictionary for `'max_depth'` with values from 1 to 10 and store it in the `'params'` variable;
- Use `make_scorer` from `sklearn.metrics` to create a scoring function;
- Pass `performance_metric` as the argument to that function;
- Store the scoring function in the `'scoring_fnc'` variable;
- Use `GridSearchCV` from `sklearn.model_selection` to create a grid search object;
- Pass the variables `'regressor'`, `'params'`, `'scoring_fnc'` and `'cv_sets'` to that object;
- Store the `GridSearchCV` object in the `'grid'` variable.

If you are unfamiliar with passing multiple parameters to a Python function, you can refer to this MIT course [video](http://cn-static.udacity.com/mlnd/videos/MIT600XXT114-V004200_DTH.mp4).

```
# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV

def fit_model(X, y):
    """ Performs grid search over the 'max_depth' parameter for a
        decision tree regressor trained on the input data [X, y]. """

    # Create cross-validation sets from the training data
    cv_sets = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0)

    # TODO: Create a decision tree regressor object
    regressor = DecisionTreeRegressor().fit(X, y)

    # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10
    params = {'max_depth':range(1,11)}

    # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer'
    scoring_fnc = make_scorer(performance_metric)

    # TODO: Create the grid search object
    grid = GridSearchCV(regressor,params,scoring_fnc,cv=cv_sets)

    # Fit the grid search object to the data to compute the optimal model
    grid = grid.fit(X, y)

    # Return the optimal model after fitting the data
    return grid.best_estimator_
```

### Making Predictions

Once a model has been trained on data, it can be used to make predictions on new data. For the decision tree regressor, the model has learned what *questions* to ask about new input data and returns a prediction of the **target variable**. You can use these predictions to learn about data whose target value is unknown, as long as that data was not part of the training set.

### Question 9 - Optimal Model

*What is the maximum depth of the optimal model? Is this answer the same as your guess in **Question 6**?*

Run the code cell below to fit the decision tree regressor to the training data and obtain the optimal model.

```
# Fit the training data to the model using grid search
reg = fit_model(X_train, y_train)

# Produce the value for 'max_depth'
print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth'])
```

**Answer:** 4, and yes, it is the same.

### Question 10 - Predicting Selling Prices

Imagine you are a real-estate agent in the Boston area and want to use this model to help your clients price the homes they wish to sell. You have collected the following information from three clients:

| Feature | Client 1 | Client 2 | Client 3 |
| :---: | :---: | :---: | :---: |
| Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms |
| Neighborhood poverty level (% considered lower class) | 17% | 32% | 3% |
| Student-teacher ratio of nearby schools | 15:1 | 22:1 | 12:1 |

*What price would you recommend each client sell their home at? Do these prices seem reasonable given the feature values? Why?*

**Hint:** Use the statistics you computed in the **Exploring the Data** section to justify your answer.

Run the code cell below to have your optimized model make predictions for each client's home.

```
# Produce a matrix for client data
client_data = [[5, 17, 15], # Client 1
               [4, 32, 22], # Client 2
               [8, 3, 12]]  # Client 3

# Show predictions
for i, price in enumerate(reg.predict(client_data)):
    print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price)
```

**Answer:** The predicted selling price for Client 1's home is $391,183.33, fairly close to the median of $438,900.00. The predicted selling price for Client 2's home is $189,123.53, between the minimum of $105,000.00 and the median of $438,900.00. The predicted selling price for Client 3's home is $942,666.67, fairly close to the maximum of $1,024,800.00. The more rooms a house has, the more expensive it is; the higher the neighborhood poverty level, the lower the price people can offer; and the higher the student-teacher ratio of nearby schools, the lower the house price.

### Sensitivity

An optimal model is not necessarily a robust model. Sometimes a model is too complex or too simple to generalize to new data; sometimes the learning algorithm is not suited to the structure of the data; and sometimes the data itself is too noisy or too scarce for the model to predict the target variable accurately. In these cases we say the model is underfitted. Run the code cell below to run the `fit_model` function ten times with different training and testing sets. Pay attention to how the prediction for a particular client changes with the training data.

```
vs.PredictTrials(features, prices, fit_model, client_data)
```

### Question 11 - Applicability

*In a few sentences, discuss whether the model you built can be used in the real world.*

**Hint:** Answer the following questions and give reasons for your conclusions:
- *Is data collected in 1978 still relevant today?*
- *Are the features in the data sufficient to describe a home?*
- *Is the model robust enough to make consistent predictions?*
- *Can data collected in a large city like Boston be applied to other towns?*

**Answer:** No, because the factors that affect prices keep changing; no, because features such as the location of the house are not considered; yes, because the difference between the largest and smallest prediction is $69,044.61, which is 0.17 of the mean prediction of 404,978.717, i.e. a fluctuation of 0.085 of the mean either way, so the model is robust enough; and no, because other towns have their own particular factors.

### Optional Question - Predicting Beijing Housing Prices

(The result of this question does not affect whether the project passes.) After the exercises above, you should have a good grasp of some common machine learning concepts. Still, building a model on Boston housing data from the 1970s is of limited real interest to us. You can now apply what you learned above to the Beijing housing dataset `bj_housing.csv`.

Disclaimer: since Beijing housing prices are directly affected by many factors such as the macro economy and policy changes, the predictions are for reference only.

The features of this dataset are:

- Area: floor area of the home, in square meters
- Room: number of bedrooms
- Living: number of living rooms
- School: whether it is in a school district, 0 or 1
- Year: year the home was built
- Floor: floor on which the home is located

Target variable:

- Value: selling price of the home, in units of 10,000 RMB

Using what you learned above, you can practice on this dataset: shuffle and split the data, define a performance metric, train the model, evaluate its performance, tune the parameters with grid search combined with cross-validation to select the best parameters, compare the two, and finally report the best model's prediction score on the validation set.

```
# Import libraries necessary for this project
import numpy as np
import pandas as pd
import visuals as vs # Supplementary code
from sklearn.model_selection import ShuffleSplit

# Pretty display for notebooks
%matplotlib inline

# Load the Beijing housing dataset
data = pd.read_csv('bj_housing.csv')
prices = data['Value']
features = data.drop('Value', axis = 1)

# Success
print "BeiJing housing dataset has {} data points with {} variables each.".format(*data.shape)

# TODO: Import 'r2_score'
from sklearn.metrics import r2_score

def performance_metric(y_true, y_predict):
    """ Calculates and returns the performance score between
        true and predicted values based on the metric chosen. """

    # TODO: Calculate the performance score between 'y_true' and 'y_predict'
    score = r2_score(y_true, y_predict)

    # Return the score
    return score

# TODO: Import 'train_test_split'
from sklearn.model_selection import train_test_split

# TODO: Shuffle and split the data into training and testing subsets
X_train, X_test, y_train, y_test = train_test_split(features, prices, test_size=0.2, random_state=0)

# Success
print "Training and testing split was successful."

# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV

def fit_model(X, y):
    """ Performs grid search over the 'max_depth' parameter for a
        decision tree regressor trained on the input data [X, y]. """

    # Create cross-validation sets from the training data
    cv_sets = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0)

    # TODO: Create a decision tree regressor object
    regressor = DecisionTreeRegressor().fit(X, y)

    # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10
    params = {'max_depth':range(1,11)}

    # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer'
    scoring_fnc = make_scorer(performance_metric)

    # TODO: Create the grid search object
    grid = GridSearchCV(regressor,params,scoring_fnc,cv=cv_sets)

    # Fit the grid search object to the data to compute the optimal model
    grid = grid.fit(X, y)

    # Return the optimal model after fitting the data
    return grid.best_estimator_

# Fit the training data to the model using grid search
reg = fit_model(X_train, y_train)

# Produce the value for 'max_depth'
print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth'])

vs.ModelComplexity(X_train, y_train)
```

Did you manage to build a model on the new dataset? Can it be validated on the test data? Does its performance match your expectations? Did cross-validation help improve your model's performance?

**Answer:** Yes; yes; yes; and yes, it did.

If you were building the machine learning code from scratch, it might feel hard to know where to start. Do not worry: all you need to do is go back over the code written earlier, understand every line, and then build your model step by step. If you run into problems along the way, you can also look for answers on our forum. You may also find that your model's performance does not meet your expectations; this shows that machine learning is not a simple task, and building a model that performs well takes long periods of research and testing. That is what we will gradually learn in the upcoming lessons.
## Run simulation and optimizations for the folded cascode example

```
import json
import circuits as cir
import optimizers as opt
import ngspice
import pandas as pd
import importlib

importlib.reload(cir)
importlib.reload(opt)
importlib.reload(ngspice)

# load the circuit definitions and targets defined in circuit_setup.json
folded_cascode = cir.Circuit("./circuit_examples/ptm130_folded_cascode/", corners=None)

# load a sample sizing
with open("./circuit_examples/ptm130_folded_cascode/sizing_example.json", 'r') as file:
    sizing = json.load(file)

simulation_result = folded_cascode.simulate(sizing)

print("Simulation Results Typ Only")
df = pd.DataFrame(simulation_result[0]).transpose()
df['CL'] *= 1e12
df.rename(columns = {"CL": "CL[pF]"}, inplace=True)
print(df)

# load the circuit definitions and targets defined in circuit_setup.json with corners from corners.inc
folded_cascode = cir.Circuit("./circuit_examples/ptm130_folded_cascode/")

# load a sample sizing
with open("./circuit_examples/ptm130_folded_cascode/sizing_example.json", 'r') as file:
    sizing = json.load(file)

simulation_result = folded_cascode.simulate(sizing)

print("Simulation Results All Corners")
df = pd.DataFrame(simulation_result[0]).transpose()
# rescale CL to show not 0 in df
df['CL'] *= 1e12
df.rename(columns = {"CL": "CL[pF]"}, inplace=True)
print(df)

obj, cstr, log = folded_cascode.target.evaluate(simulation_result[0])
print("Objectives")
print({folded_cascode.objectives[i][0]: (obj[i]*folded_cascode.objectives[i][1]) for i in range(len(obj))})
```

## Optimizing in Nominal conditions

Outputs are saved to the output_folder. This can take a while.

```
import random
from datetime import datetime
import numpy as np
import pandas as pd
import os

seed = 42
np.random.seed(seed)
random.seed(seed)

nsga2 = opt.NSGA2()

output_folder = "./run/ptm130_folded_cascode/"
if not os.path.exists(output_folder):
    os.makedirs(output_folder)

i = 0
folded_cascode = cir.Circuit("./circuit_examples/ptm130_folded_cascode/", corners=None)

for pop, pop_obj, pop_cstr, pop_data, evals, front_no in nsga2.minimize(
        folded_cascode, pop_size=256, evaluations=256*100, mutation=0.1, crossover=0.6):
    print(i, pop_cstr[pop_cstr.argmax()], datetime.now().time())
    with open("{}history256_{}_{}.json".format(output_folder, seed, i), "w") as file:
        json.dump(
            {
                "pop": json.loads(pd.DataFrame(data=pop, columns=folded_cascode.parameters).to_json(orient="records")),
                "obj": json.loads(pd.DataFrame(data=pop_obj, columns=folded_cascode.objectives).to_json(orient="records")),
                "cstr": pop_cstr.tolist(),
                "data": pop_data,
                "evals": evals,
                "fn": [str(fn) for fn in front_no.tolist()]
            },
            file
        )
    i = i + 1

import plot_utils
importlib.reload(plot_utils)

plot_utils.plt_pof(["{}history256_{}_{}.json".format(output_folder, seed, 100)],
                   scales=[("[uA]", 1e6), ("[MHz]", 1e-6), ("[dB]", 1.0)])

import sys
print(sys.path)
```
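Each generation above is written to a `history256_<seed>_<i>.json` file whose keys (`pop`, `obj`, `cstr`, `data`, `evals`, `fn`) are set in the `json.dump` call. A small sketch for loading one of those checkpoints back into DataFrames for inspection, independent of `plot_utils` (the file path in the usage comment is only an example):

```
import json
import pandas as pd

def load_generation(path):
    """Load one optimizer checkpoint written by the loop above."""
    with open(path, "r") as f:
        hist = json.load(f)
    pop = pd.DataFrame(hist["pop"])    # sizing parameters per individual
    obj = pd.DataFrame(hist["obj"])    # objective values per individual
    obj["cstr"] = hist["cstr"]         # constraint measure per individual
    obj["front"] = hist["fn"]          # non-dominated front number (stored as strings)
    return pop, obj, hist["evals"]

# pop, obj, evals = load_generation("./run/ptm130_folded_cascode/history256_42_100.json")
# obj.sort_values("cstr").head()
```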
[View in Colaboratory](https://colab.research.google.com/github/ZER-0-NE/ML_problems/blob/master/keras_VGGFace_1FC.ipynb)

```
from google.colab import auth
auth.authenticate_user()

!pip install PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate and create the PyDrive client.
# This only needs to be done once in a notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

fileId = drive.CreateFile({'id': '1AoyPQ3HoBbRnLpjEBsEBVd-49CYZaZJD'}) #DRIVE_FILE_ID is file id example: 1iytA1n2z4go3uVCwE_vIKouTKyIDjEq
print(fileId['title'])  # folder_data.zip
fileId.GetContentFile('test_img.zip')  # Save Drive file as a local file
!unzip test_img.zip -d ./

fileId = drive.CreateFile({'id': '1OhPBMbSOG3ejP26-peRmDPYX7WfF2ixN'}) #DRIVE_FILE_ID is file id example: 1iytA1n2z4go3uVCwE_vIKouTKyIDjEq
print(fileId['title'])  # folder_data.zip
fileId.GetContentFile('dataset_cfps.zip')  # Save Drive file as a local file
!unzip dataset_cfps.zip -d ./

!ls
!rm -rf test_cfps.zip

fileId = drive.CreateFile({'id': '1OhPBMbSOG3ejP26-peRmDPYX7WfF2ixN'}) #DRIVE_FILE_ID is file id example: 1iytA1n2z4go3uVCwE_vIKouTKyIDjEq
print(fileId['title'])  # folder_data.zip
fileId.GetContentFile('dataset_cfps.zip')  # Save Drive file as a local file
!unzip dataset_cfps.zip -d ./

from keras import models
from keras import layers
from keras import optimizers
from keras.applications import VGG16
from keras.applications import InceptionResNetV2
import sys
import os
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense, Activation, Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras import callbacks, regularizers
from keras.models import load_model
import matplotlib.pyplot as plt
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras_vggface.vggface import VGGFace
from keras.engine import Model
from keras.models import load_model
from sklearn import metrics
import numpy

!pip install keras_vggface

train_data_path = 'dataset_cfps/train'
validation_data_path = 'dataset_cfps/validation'
test_data_path = 'test'

#Parametres
img_width, img_height = 224, 224

#Load the VGG model
#vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
vggface = VGGFace(model='resnet50', include_top=False, input_shape=(img_width, img_height, 3))
#vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))

last_layer = vggface.get_layer('avg_pool').output
x = Flatten(name='flatten')(last_layer)
xx = Dense(256, activation = 'softmax', kernel_regularizer=regularizers.l2(0.0001))(x)
x1 = BatchNormalization()(xx)
x2 = Dropout(0.7)(x1)
x3 = Dense(12, activation='softmax', name='classifier', kernel_regularizer=regularizers.l2(0.0001))(x2)

custom_vgg_model = Model(vggface.input, x3)

# Create the model
model = models.Sequential()

# Add the convolutional base model
model.add(custom_vgg_model)

# Add new layers
#model.add(layers.Flatten())
# model.add(layers.Dense(1024, activation='relu'))
# model.add(BatchNormalization())
#model.add(layers.Dropout(0.5))
# model.add(layers.Dense(12, activation='sigmoid'))

# Show a summary of the model. Check the number of trainable parameters
model.summary()

#model = load_model('keras_vggface.h5')

def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall))

train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

validation_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

# Change the batchsize according to your system RAM
train_batchsize = 32
val_batchsize = 32

train_generator = train_datagen.flow_from_directory(
    train_data_path,
    target_size=(img_width, img_height),
    batch_size=train_batchsize,
    class_mode='categorical')

validation_generator = validation_datagen.flow_from_directory(
    validation_data_path,
    target_size=(img_width, img_height),
    batch_size=val_batchsize,
    class_mode='categorical')

# Compile the model
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=1e-3),
              metrics=['acc'])

# Train the model
history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples/train_generator.batch_size,
    epochs=100,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples/validation_generator.batch_size,
    verbose=1)

# Save the model
model.save('keras_vggface_1FC.h5')

# loss and accuracy curves.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()

test_generator = ImageDataGenerator()
test_data_generator = test_generator.flow_from_directory(
    test_data_path,
    target_size=(img_width, img_height),
    batch_size=32,
    shuffle=False)

test_steps_per_epoch = numpy.math.ceil(test_data_generator.samples / test_data_generator.batch_size)
predictions = model.predict_generator(test_data_generator, steps=test_steps_per_epoch)

# Get most likely class
predicted_classes = numpy.argmax(predictions, axis=1)
true_classes = test_data_generator.classes
class_labels = list(test_data_generator.class_indices.keys())
report = metrics.classification_report(true_classes, predicted_classes, target_names=class_labels)
print(report)

test_generator = ImageDataGenerator()
test_data_generator = test_generator.flow_from_directory(
    test_data_path,
    target_size=(img_width, img_height),
    batch_size=32,
    shuffle=False)

test_steps_per_epoch = numpy.math.ceil(test_data_generator.samples / test_data_generator.batch_size)
predictions = model.predict_generator(test_data_generator, steps=test_steps_per_epoch)

# Get most likely class
predicted_classes = numpy.argmax(predictions, axis=1)
true_classes = test_data_generator.classes
class_labels = list(test_data_generator.class_indices.keys())
report = metrics.classification_report(true_classes, predicted_classes, target_names=class_labels)
print(report)

from google.colab import files
files.download('facenet_resnet_lr3_SGD_new_FC3_200.h5')

# memory footprint support libraries/code
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil
!pip install psutil
!pip install humanize

import psutil
import humanize
import os
import GPUtil as GPU

GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn’t guaranteed
gpu = GPUs[0]

def printm():
    process = psutil.Process(os.getpid())
    print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " I Proc size: " + humanize.naturalsize( process.memory_info().rss))
    print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))

printm()
```
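Two things in the training bench above are worth noting: the custom `f1` metric is defined but never passed to `model.compile`, and every layer of the pretrained VGGFace base stays trainable. The sketch below shows one possible variant that freezes the base and reports `f1` during training; it is an illustration built on the objects defined above, not the configuration that produced the results in this notebook.

```
# Sketch: freeze the pretrained base and track the custom f1 metric
# (assumes vggface, custom_vgg_model, f1, optimizers and the generators above).
for layer in vggface.layers:
    layer.trainable = False  # keep the pretrained ResNet50 weights fixed

custom_vgg_model.compile(loss='categorical_crossentropy',
                         optimizer=optimizers.SGD(lr=1e-3),
                         metrics=['acc', f1])

# custom_vgg_model.summary()  # far fewer trainable parameters now
# history = custom_vgg_model.fit_generator(
#     train_generator,
#     steps_per_epoch=train_generator.samples // train_batchsize,
#     epochs=10,
#     validation_data=validation_generator,
#     validation_steps=validation_generator.samples // val_batchsize)
```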
github_jupyter
0.531696
0.642657
``` import striplog import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt facies = { 'sand' : striplog.Component({'lithology' : 'S'}), 'shale' : striplog.Component({'lithology' : 'SH'}) } sand_decor = striplog.Decor({ 'component': facies['sand'], 'colour': 'yellow', 'hatch': '.' }) shale_decor = striplog.Decor({ 'component': facies['shale'], 'colour': 'darkgray', 'hatch': '-' }) legend = striplog.Legend([sand_decor, shale_decor]) # sand interval -- fixed width sand1 = striplog.Interval( top = 1000., base = 1005., components = [facies['sand']], data = {'gs' : 2.0} ) # shale interval -- fixed width shale1 = striplog.Interval( top = 1005., base = 1010., components = [facies['shale']], data = {'gs' : 0.5} ) # sand interval -- variable width sand_gs = np.array([ [1010.5, 2.5], [1012., 2.2], [1014., 2.7] ]) sand2 = striplog.Interval( top = 1010., base = 1015., components = [facies['sand']], data = {'gs' : sand_gs} ) # shale interval -- variable width shale_gs = np.array([ [1015., 0.3], [1017., 0.5], [1018., 0.7] ]) shale2 = striplog.Interval( top = 1015., base = 1020., components = [facies['shale']], data = {'gs' : shale_gs} ) slog = striplog.Striplog([sand1, shale1, sand2, shale2]) fig, ax = plt.subplots(figsize=(5,15)) ax = slog.plot(field='gs', ax=ax, legend=legend) shale_gs = np.array([ [1015., 0.3], [1017., 0.5], [1018., 0.7], [1020., 0.8] ]) fig, ax = plt.subplots(figsize=(10,10)) ax.invert_yaxis() ax.plot(shale_gs[:,1], shale_gs[:,0]) from scipy import interpolate shale_gs = np.array([ [1014, 0.3], [1015., 0.3], #[1017., 0.4], [1017.5, 0.75], [1020., 0.8], [1020.1, 0.8] ]) fig, ax = plt.subplots(figsize=(10,10)) #ax.invert_yaxis() pts_x, pts_y = shale_gs[:,0], shale_gs[:,1] ix = np.linspace(1014.5, 1020.5, 1000) for interpolator_name in ['BarycentricInterpolator', 'KroghInterpolator', 'CubicSpline']: interp = getattr(interpolate, interpolator_name)(pts_x, pts_y) new_y = interp(ix) ax.plot(ix, new_y, label=interpolator_name, alpha=0.75) ax.scatter(pts_x, pts_y) ax.set_xlim(1014, 1021) ax.legend() from scipy import interpolate shale_gs = np.array([ [1015., 0.3], [1017., 0.33], [1019., 0.75], [1020., 0.8], ]) fig, ax = plt.subplots(figsize=(10,10)) #ax.invert_yaxis() pts_x, pts_y = shale_gs[:,0], shale_gs[:,1] ix = np.linspace(1000., 1050., 1000) def tanh_fn(x, a, b, c, d): return a*np.tanh(b*(x-c)) + d popt, cov = curve_fit(tanh_fn, pts_x, pts_y, p0=[0.5, 0.5, 1017.5, 0.5]) print(popt) new_y = tanh_fn(ix, *popt) ax.plot(ix, new_y, alpha=0.75) ax.scatter(pts_x, pts_y) ax.set_xlim(1010, 1025) from scipy import interpolate shale_gs = np.array([ [1015., 0.3], [1017., 0.31], [1018., 0.7], [1020., 0.8] ]) fig, ax = plt.subplots(figsize=(10,10)) ax.invert_yaxis() y = np.linspace(1014., 1021., 1000) for interpolator_name in ['BarycentricInterpolator', 'KroghInterpolator', 'CubicSpline']: interp = getattr(interpolate, interpolator_name)(shale_gs[:,0], shale_gs[:,1]) new_x = interp(x) ax.plot(new_x, y, label=interpolator_name, alpha=0.75) ax.scatter(shale_gs[:,0], shale_gs[:,1]) #ax.set_ylim(1021., 1014.) ax.legend() ```
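For reference, a self-contained sketch of the tanh ramp fit used above; note that `curve_fit` lives in `scipy.optimize`, and the depth/grain-size picks here are illustrative values, not data from the striplog:

```python
# Self-contained sketch of the tanh ramp fit (illustrative depth/grain-size picks).
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit  # curve_fit comes from scipy.optimize

def tanh_fn(x, a, b, c, d):
    # smooth ramp: amplitude a, steepness b, midpoint c, vertical offset d
    return a * np.tanh(b * (x - c)) + d

pts = np.array([[1015.0, 0.30], [1017.0, 0.33], [1019.0, 0.75], [1020.0, 0.80]])
depth, gs = pts[:, 0], pts[:, 1]

popt, _ = curve_fit(tanh_fn, depth, gs, p0=[0.5, 0.5, 1017.5, 0.5])
ix = np.linspace(1010.0, 1025.0, 500)

fig, ax = plt.subplots()
ax.plot(ix, tanh_fn(ix, *popt))
ax.scatter(depth, gs)
plt.show()
```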
github_jupyter
0.536556
0.663606
# Tokenizing Sentences

1. Split apart the corpus into sentences.
2. Split apart sentences into words.

```
# Why not just tokenize myself?
import nltk

text = "I made two purchases today! I bought a bag of grapes for $4.99, \
but then... realized John Francis already bought some at the Y.M.C.A!"

# trying to write our own tokenizer
text.split(".")

# Using NLTK sent_tokenize()
sent_text = nltk.sent_tokenize(text) # this gives us a list of sentences
sent_text
```

## Stemming

<img src="images/stemming-examples.png" alt="Different Stemming Techniques" style="width:600px;"/>

Stemming is the process of reducing inflection in words to their root forms, such as mapping a group of words to the same stem, even if the stem itself is not a valid word in the language. [Source](https://www.datacamp.com/community/tutorials/stemming-lemmatization-python)

In Python, we can use **`nltk.stem.porter.PorterStemmer`** to stem our words:

```python
from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer()
print(stemmer.stem("caressed")) # caress
print(stemmer.stem("athlete")) # athlet
print(stemmer.stem("athletics")) # athlet
print(stemmer.stem("media")) # media
print(stemmer.stem("photography")) # photographi
print(stemmer.stem("sexy")) # sexi
print(stemmer.stem("journalling")) # journal
print(stemmer.stem("Slovakia")) # slovakia
print(stemmer.stem("corpora")) # corpora
print(stemmer.stem("thieves")) # thiev
print(stemmer.stem("rocks")) # rock
```

## Lemmatization

```python
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
print(lemmatizer.lemmatize("caressed")) #caressed
print(lemmatizer.lemmatize("athlete")) #athlete
print(lemmatizer.lemmatize("athletics")) #athletics
print(lemmatizer.lemmatize("media"))
print(lemmatizer.lemmatize("photography")) #photography
print(lemmatizer.lemmatize("sexy")) #sexy
print(lemmatizer.lemmatize("journalling")) #journalling
print(lemmatizer.lemmatize("Slovakia")) #Slovakia
print(lemmatizer.lemmatize("corpora")) # corpus
print(lemmatizer.lemmatize("thieves")) # thief
print(lemmatizer.lemmatize("rocks")) #rock
```

Why would you ever care to use stemming?

- smaller and faster
- simplicity is often "good enough"
- can often **provide higher recall (coverage)** if you are using it for text searching: `drives` and `drivers` will likely shorten to `driv`, which may be useful if your search engine wants to make sure to get all relevant documents, even at the cost of surfacing a few irrelevant documents
- could potentially be more useful for predictive models that tend to overfit

## Scoring Metrics

<img src="images/confusion_matrix2.png" alt="Different Stemming Techniques" style="width:600px;"/>

### Precision/Recall

**Recall:** What percent of the positive classes did the model successfully predict?

**Precision:** When the model predicted a positive class, what percentage of the time was it correct?

In terms of NLP / stemming / lemmatization:

**Recall**: After processing (tokenizing, stemming/lemmatizing) the data, what percent of the relevant search results were surfaced? I.e., when a user searches for "blue jeans", did the returned results include all the relevant items (blue-ish colored denim pants)?

**Precision**: After processing (tokenizing, stemming/lemmatizing) the data, what percent of the results returned were relevant? A small worked example with made-up numbers follows.
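Here is a minimal sketch of precision and recall for the search scenario above; the counts are made up purely for illustration:

```python
# Toy search-results example for precision and recall (counts are illustrative).
relevant_returned = 8      # relevant items the search surfaced (true positives)
irrelevant_returned = 4    # irrelevant items the search surfaced (false positives)
relevant_missed = 2        # relevant items the search failed to surface (false negatives)

precision = relevant_returned / (relevant_returned + irrelevant_returned)
recall = relevant_returned / (relevant_returned + relevant_missed)

print(f"Precision: {precision:.2f}")  # 8 / 12 = 0.67
print(f"Recall:    {recall:.2f}")     # 8 / 10 = 0.80
```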
<img src="images/matrix_practice2.png" alt="Different Stemming Techniques" style="width:600px;"/>

**Precision:** $\frac{?}{?}$

**Recall:** $\frac{?}{?}$

### F1 Score

The F1 score of a model represents the harmonic mean between precision and recall, and is defined as

$$
\begin{equation}
F_{1} = 2 \cdot \frac{P \cdot R}{P + R}
\end{equation}
$$

## Exercise:

##### 1. For each of the following statements, label them True or False. If False, briefly explain why:

A. Text typically should be processed via either stemming or lemmatization, but not both.

B. Texts processed using lemmatization will typically have higher recall than stemming.

C. If the **F1 score** of a model is **1.0 (100%)**, then the accuracy of your model must also be **100%**.

##### 2. Calculate precision and recall given the following results from a confusion matrix:

<img src="images/exercise.jpeg" alt="Different Stemming Techniques" style="width:600px;"/>

```
from sklearn.feature_extraction.text import CountVectorizer
# list of text documents
text = ["It's still early, so box-office disappointments are still among the highest-grossing movies of the year.",
        "That movie was terrific",
        "You love cats",
        "Pay for top executives at big US companies is vastly higher than what everyday workers make, and a new report from The Wall Street Journal has found that CEOs have hit an eye-popping milestone in the size of their monthly paychecks."]

# create the transform
vectorizer = CountVectorizer()

# tokenize and build vocab
vectorizer.fit(text)

# vectorize the corpus
vector = vectorizer.transform(text)

# summarize encoded vector
print(vector.shape)

# Notice what type of object this is
print(type(vector))

# see the outputted vectors
print(vector.toarray())
print(vectorizer.get_feature_names())

# load vectorized corpus into Pandas dataframe
import pandas as pd
corpus_df = pd.DataFrame(vector.toarray(), columns=vectorizer.get_feature_names())
corpus_df.describe()
```

## Removing Stopwords

It's your call whether you want to remove stopwords. We already discussed the advantages and disadvantages of both approaches. You will first need to run `nltk.download("stopwords")` to download the set of stopwords for NLTK:

```
from nltk.corpus import stopwords
print(set(stopwords.words('english'))) # see the set of words NLTK considers stopwords

# iterate through the Pandas dataframe, and drop the columns that reflect stopwords:
original_columns = corpus_df.columns # get existing columns
to_drop_columns = set(original_columns).intersection(set(stopwords.words('english'))) # get the list of words to drop

print(f"Dataframe shape was {corpus_df.shape}")
corpus_df.drop(columns=to_drop_columns, inplace=True)
print(f"Dataframe shape is now {corpus_df.shape}")
```

## Co-Occurrence Matrix

The `corr()` call below is only a rough proxy for co-occurrence; a sketch of an explicit co-occurrence count matrix follows after this cell.

```
# run a quick correlation analysis to see if any word pairs show rough co-occurrence
corpus_df.corr()
```
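An explicit term co-occurrence count matrix can be built as $X^T X$ on a binarized document-term matrix. A minimal, self-contained sketch using a tiny made-up corpus (the documents and term list here are illustrative, not the corpus above):

```python
# Sketch: explicit term co-occurrence counts via X^T X on a binarized
# document-term matrix (tiny illustrative corpus).
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

docs = ["the cat sat on the mat",
        "the dog sat on the log",
        "cats and dogs"]

vec = CountVectorizer(binary=True)   # 1 if the term appears in the document
X = vec.fit_transform(docs)          # documents x terms

cooc = (X.T @ X).toarray()           # terms x terms co-occurrence counts
terms = vec.get_feature_names()
cooc_df = pd.DataFrame(cooc, index=terms, columns=terms)
print(cooc_df)                       # diagonal = document frequency of each term
```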
github_jupyter
0.402744
0.906818
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt bonair = pd.read_csv('../Cleaned_data/ABS Alaskan/Bonaire/513790_system_energy_20141206_to_20190522.csv') braun = pd.read_csv('../Cleaned_data/ABS Alaskan/braun/572156_system_energy_20150226_to_20190522.csv') Dashevsky = pd.read_csv('../Cleaned_data/ABS Alaskan/Dashevsky/721120_system_energy_20150828_to_20190522 (1).csv') Davis = pd.read_csv('../Cleaned_data/ABS Alaskan/Davis/273944_system_energy_20131115_to_20190522.csv') Deer = pd.read_csv('../Cleaned_data/ABS Alaskan/Deer/1062205_system_energy_20160927_to_20190522.csv') Dukeminier = pd.read_csv('../Cleaned_data/ABS Alaskan/Dukeminier/716566_system_energy_20150824_to_20190522.csv') FireweedBusCenter = pd.read_csv('../Cleaned_data/ABS Alaskan/FireweedBusCenter/945883_system_energy_20160520_to_20190522.csv') GenaTran = pd.read_csv('../Cleaned_data/ABS Alaskan/GenaTran/28253_system_energy_20110826_to_20190522.csv') Grubis = pd.read_csv('../Cleaned_data/ABS Alaskan/Grubis/660658_system_energy_20150705_to_20190522.csv') Hall = pd.read_csv('../Cleaned_data/ABS Alaskan/Hall/1000407_system_energy_20160804_to_20190522.csv') jorgenson = pd.read_csv('../Cleaned_data/ABS Alaskan/jorgenson/802562_system_energy_20151221_to_20190522.csv') Kogl = pd.read_csv('../Cleaned_data/ABS Alaskan/Kogl/1030153_system_energy_20160929_to_20190522.csv') RonLevy = pd.read_csv('../Cleaned_data/ABS Alaskan/RonLevy/29009_system_energy_20110831_to_20190522.csv') SolarFox = pd.read_csv('../Cleaned_data/ABS Alaskan/SolarFox/394826_system_energy_20140729_to_20190522.csv') Wiessman_Fairbanks = pd.read_csv('../Cleaned_data/ABS Alaskan/Wiessman_Fairbanks/720232_system_energy_20150827_to_20190522.csv') bonair_new = bonair.copy(deep=True) for i in range(len(bonair)): bonair_new['Date/Time'][i] = bonair_new['Date/Time'][i].rsplit(' ',1)[0] bonair_new['DC Capacity'] = "" bonair_new['DC Capacity'][0] = 12 bonair_new['Location'] = "" bonair_new['Location'][0] = 'Fairbanks_Bonaire_Appts1902_Mary_Ann_St' bonair_new.to_csv('bonair_fairbanks.csv',index=False) def newdata(data, capacity, location, filename): data_new = data.copy(deep=True) for i in range(len(data)): data_new['Date/Time'][i] = data_new['Date/Time'][i].rsplit(' ',1)[0] data_new['DC Capacity'] = "" data_new['DC Capacity'][0] = capacity data_new['Location'] = "" data_new['Location'][0] = location data_new.to_csv(filename,index=False) newdata(braun, 3.75, 'Fairbanks_1255_Shypoke_Dr' , 'braun_fairbanks.csv') newdata(Dashevsky, 3.06, 'Fairbanks_1819_MuskOx_Trail' , 'Dashevsky_fairbanks.csv') newdata(Davis, 1.5, 'Wasilla_2122_S_Cotten_Drive' , 'Davis_wasilla.csv') newdata(Deer, 3.5, 'Fairbanks_322' , 'Deer_fairbanks.csv') newdata(Dukeminier, 4.08, 'Fairbanks_932_Reindeer' , 'Dukeminier_fairbanks.csv') newdata(FireweedBusCenter, 11.9, 'Anchorage_725_East_Fireweed_Lane' , 'FireweedBusCenter_anchorage.csv') newdata(GenaTran, 1.9, 'Fairbanks_Gena_Tran_372_Taurus_Drive' , 'GenaTran_fairbanks.csv') newdata(Grubis, 3.12, 'Fairbanks_1601_Hans_Way' , 'Grubis_fairbanks.csv') newdata(Hall, 3.83, 'North_Pole_3591_Jogger_Court' , 'Hall_northpole.csv') newdata(jorgenson, 3.06, 'Fairbanks_2332_Cordes_Drive' , 'jorgenson_fairbanks.csv') newdata(Kogl, 4, 'Denali_Park_228.9_Parks Hwy' , 'Kogl_denali_park.csv') newdata(RonLevy, 7.6, 'Soldotna_30880_Swanson_Drive' , 'RonLevy_soldotna.csv') newdata(SolarFox, 4, 'Fairbanks_785_Gold_Mine_Trail' , 'SolarFox_fairbanks.csv') newdata(Wiessman_Fairbanks, 3.1, 'Fairbanks_3293_Rosie_Creek_Road' , 'Wiessman_fairbanks.csv') bonair = 
pd.read_csv('../Cleaned_data/ABS Alaskan/bonair_fairbanks.csv') braun = pd.read_csv('../Cleaned_data/ABS Alaskan/braun_fairbanks.csv') Dashevsky = pd.read_csv('../Cleaned_data/ABS Alaskan/Dashevsky_fairbanks.csv') Davis = pd.read_csv('../Cleaned_data/ABS Alaskan/Davis_wasilla.csv') Deer = pd.read_csv('../Cleaned_data/ABS Alaskan/Deer_fairbanks.csv') Dukeminier = pd.read_csv('../Cleaned_data/ABS Alaskan/Dukeminier_fairbanks.csv') FireweedBusCenter = pd.read_csv('../Cleaned_data/ABS Alaskan/FireweedBusCenter_anchorage.csv') GenaTran = pd.read_csv('../Cleaned_data/ABS Alaskan/GenaTran_fairbanks.csv') Grubis = pd.read_csv('../Cleaned_data/ABS Alaskan/Grubis_fairbanks.csv') Hall = pd.read_csv('../Cleaned_data/ABS Alaskan/Hall_northpole.csv') jorgenson = pd.read_csv('../Cleaned_data/ABS Alaskan/jorgenson_fairbanks.csv') Kogl = pd.read_csv('../Cleaned_data/ABS Alaskan/Kogl_denali_park.csv') RonLevy = pd.read_csv('../Cleaned_data/ABS Alaskan/RonLevy_soldotna.csv') SolarFox = pd.read_csv('../Cleaned_data/ABS Alaskan/SolarFox_fairbanks.csv') Wiessman_Fairbanks = pd.read_csv('../Cleaned_data/ABS Alaskan/Wiessman_fairbanks.csv') # copy a the dataframe bonair_new = bonair.copy(deep=True) # drop the last 'total energy' bonair_new.drop(bonair_new.tail(1).index,inplace=True) # get the index of the last day lenth_list = list(range(365,len(bonair_new.index))) annual_values = [] date = [] for i in range(len(lenth_list)): single_values = bonair_new['Energy Produced (Wh)'][lenth_list[i]-365:lenth_list[i]].sum()/bonair_new['DC Capacity'][0]/1000 #rolling_average.append(each_period) single_date = bonair_new['Date/Time'][lenth_list[i]] # append the result we want to the list annual_values.append(single_values) date.append(single_date) bonair_with_annual = pd.DataFrame({'Date':date,'Annual_production':annual_values}) plt.subplots(figsize = (15, 8)) plt.plot(bonair_with_annual['Date'],bonair_with_annual['Annual_production']) tick_spacing = np.linspace(0, len(bonair_with_annual.index)-1, 12, dtype = 'int') plt.xticks(tick_spacing) plt.xlabel("Month", size = 14) plt.title("Rolling 12-Month Average of Produced Power, Over Time", size = 18) plt.ylabel("AC Power Produced (kWh) per DC Power Installed (kW)", size = 14) plt.show() def figure(data): # copy a the dataframe data_new = data.copy(deep=True) # drop the last 'total energy' data_new.drop(data_new.tail(1).index,inplace=True) # get the index of the last day lenth_list = list(range(365,len(data_new.index))) annual_values = [] date = [] for i in range(len(lenth_list)): single_values = data_new['Energy Produced (Wh)'][lenth_list[i]-365:lenth_list[i]].sum()/data_new['DC Capacity'][0]/1000 #rolling_average.append(each_period) single_date = data_new['Date/Time'][lenth_list[i]] # append the result we want to the list annual_values.append(single_values) date.append(single_date) data_with_annual = pd.DataFrame({'Date':date,'Annual_production':annual_values}) plt.subplots(figsize = (15, 8)) plt.plot(data_with_annual['Date'],data_with_annual['Annual_production']) tick_spacing = np.linspace(0, len(data_with_annual.index)-1, 12, dtype = 'int') plt.xticks(tick_spacing) plt.xlabel("Month", size = 14) plt.title("Rolling 12-Month Average of Produced Power, Over Time", size = 18) plt.ylabel("AC Power Produced (kWh) per DC Power Installed (kW)", size = 14) plt.show() figure(bonair) figure(braun) figure(Dashevsky) figure(Davis) figure(Deer) figure(Dukeminier) figure(FireweedBusCenter) figure(GenaTran) figure(Grubis) figure(Hall) figure(jorgenson) figure(Kogl) figure(RonLevy) 
figure(SolarFox) figure(Wiessman_Fairbanks) ```
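The explicit Python loop above can also be expressed with pandas' built-in rolling window, which is usually faster and easier to read. A sketch under the same assumptions as the code above (the `bonair` frame and its `Date/Time`, `Energy Produced (Wh)`, and `DC Capacity` columns); the alignment of the window is a close, not exact, match to the loop:

```python
# Sketch: rolling 365-day production total, normalized by installed DC capacity,
# using pandas' rolling window (assumes the bonair frame and columns used above).
import pandas as pd

def rolling_annual(data):
    df = data.copy(deep=True)
    df.drop(df.tail(1).index, inplace=True)   # drop the trailing 'total energy' row
    capacity_kw = df['DC Capacity'][0]
    rolled = df['Energy Produced (Wh)'].rolling(window=365).sum() / capacity_kw / 1000
    return pd.DataFrame({'Date': df['Date/Time'], 'Annual_production': rolled}).dropna()

bonair_annual = rolling_annual(bonair)
print(bonair_annual.head())
```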
github_jupyter
0.113752
0.109444
``` %load_ext autoreload %autoreload 1 %matplotlib inline import numpy as np import pandas as pd import matplotlib, collections, itertools, os, re, textwrap, logging import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import matplotlib.patches as mpatches from functools import reduce from logging.config import dictConfig from logging import getLogger dictConfig(dict( version = 1, formatters = {'f': {'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}}, handlers = { 'h': {'class': 'logging.StreamHandler','formatter': 'f', 'level': logging.DEBUG}}, root = {'handlers': ['h'], 'level': logging.DEBUG,}, )) matplotlib.rc('font',**{'size':16, 'family':'sans-serif','sans-serif':['HelveticaNeue', 'Helvetica']}) logger = getLogger('notebook') import yt_misc_py as yt_misc import rivas_decomposition_py as decomposition import plotly import plotly.plotly as py import plotly.graph_objs as go plotly.offline.init_notebook_mode(connected=True) repo_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(os.getcwd())))) out_dir = os.path.join( repo_dir, 'figs', os.path.basename(os.path.realpath(os.getcwd())), ) # d_PTVs = decomposition.decomposition(os.path.join( # repo_dir, 'private_data', 'npz', 'dev_PTVsNonMHC_z_center_p0001_100PCs_20180129.npz' # )) # d_coding = decomposition.decomposition(os.path.join( # repo_dir, 'private_data', 'npz', 'dev_codingNonMHC_z_center_p0001_100PCs_20180129.npz' # )) d_all = decomposition.decomposition(os.path.join( repo_dir, 'private_data', 'npz', 'dev_allNonMHC_z_center_p0001_100PCs_20180129.npz' )) d_PTVs = decomposition.decomposition(os.path.join( repo_dir, 'private_data', 'npz', 'dev_PTVsNonMHC_z_center_p0001_100PCs_20180129.npz' )) biplot_phes = [ 'Body mass index (BMI)', 'heart attack/myocardial infarction', 'cholelithiasis/gall stones', 'Whole body fat-free mass', 'Whole body fat mass', 'Standing height', 'Sitting height', 'Weight', ] import collections source_data_fig2a = d_all.plot_data_pca_phe(0, 1) pd.DataFrame(collections.OrderedDict([ ('GBE_ID', d_all.d['label_phe_code']), ('Phenotype', d_all.d['label_phe']), ('PC1', source_data_fig2a['x']), ('PC2', source_data_fig2a['y']) ])).to_csv('Fig2a.tsv', sep='\t', index=False) source_data_fig2b = d_all.plot_data_pca_var(0, 1) pd.DataFrame(collections.OrderedDict([ ('Variant', d_all.d['label_var']), ('PC1', source_data_fig2b['x']), ('PC2', source_data_fig2b['y']) ])).to_csv('Fig2b.tsv', sep='\t', index=False) source_data_fig2b_arrow = d_all.get_biplot_arrow_by_phenotypes([0, 1], biplot_phes) pd.DataFrame(collections.OrderedDict([ ('Phenotype', biplot_phes), ('PC1', source_data_fig2b_arrow[:, 0]), ('PC2', source_data_fig2b_arrow[:, 1]) ])).to_csv('Fig2b-arrow.tsv', sep='\t', index=False) source_data_fig4c = d_PTVs.plot_data_pca_var(0, 2) pd.DataFrame(collections.OrderedDict([ ('Variant', d_PTVs.d['label_var']), ('PC1', source_data_fig4c['x']), ('PC3', source_data_fig4c['y']) ])).to_csv('Fig4c.tsv', sep='\t', index=False) biplot_phes_PTVs = [ 'Body mass index (BMI)', 'Whole body fat-free mass', 'Whole body fat mass', 'Standing height', # 'Sitting height', 'Weight', 'Basal metabolic rate', 'Hip circumference', # 'Reticulocyte count', 'high cholesterol', # 'Leg fat-free mass (right)', 'Leg fat-free mass (left)', ] source_data_fig4c_arrow = d_PTVs.get_biplot_arrow_by_phenotypes([0, 2], biplot_phes_PTVs) pd.DataFrame(collections.OrderedDict([ ('Phenotype', biplot_phes_PTVs), ('PC1', source_data_fig4c_arrow[:, 0]), ('PC3', source_data_fig4c_arrow[:, 1]) 
])).to_csv('Fig4c-arrow.tsv', sep='\t', index=False) ```
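As a quick sanity check, the exported source-data files can be re-plotted directly. A sketch for the phenotype projection written to `Fig2a.tsv`, assuming the column names used in the export above:

```python
# Sketch: re-plot the exported phenotype PCA projection from the source-data TSV
# (assumes the Fig2a.tsv file and column names written out above).
import pandas as pd
import matplotlib.pyplot as plt

fig2a = pd.read_csv('Fig2a.tsv', sep='\t')

fig, ax = plt.subplots(figsize=(6, 6))
ax.scatter(fig2a['PC1'], fig2a['PC2'], s=5)
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
ax.set_title('Phenotype projection (Fig. 2a source data)')
plt.show()
```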
github_jupyter
0.38168
0.239216
# Comparing Encoder-Decoders Analysis ### Model Architecture ``` report_files = ["/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing6_200_512_04drb/encdec_noing6_200_512_04drb.json", "/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing6_bow_200_512_04drb/encdec_noing6_bow_200_512_04drb.json"] log_files = ["/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing6_200_512_04drb/encdec_noing6_200_512_04drb_logs.json", "/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing6_bow_200_512_04drb/encdec_noing6_bow_200_512_04drb_logs.json"] reports = [] logs = [] import json import matplotlib.pyplot as plt import numpy as np for report_file in report_files: with open(report_file) as f: reports.append((report_file.split('/')[-1].split('.json')[0], json.loads(f.read()))) for log_file in log_files: with open(log_file) as f: logs.append((log_file.split('/')[-1].split('.json')[0], json.loads(f.read()))) for report_name, report in reports: print '\n', report_name, '\n' print 'Encoder: \n', report['architecture']['encoder'] print 'Decoder: \n', report['architecture']['decoder'] ``` ### Perplexity on Each Dataset ``` %matplotlib inline from IPython.display import HTML, display def display_table(data): display(HTML( u'<table><tr>{}</tr></table>'.format( u'</tr><tr>'.join( u'<td>{}</td>'.format('</td><td>'.join(unicode(_) for _ in row)) for row in data) ) )) def bar_chart(data): n_groups = len(data) train_perps = [d[1] for d in data] valid_perps = [d[2] for d in data] test_perps = [d[3] for d in data] fig, ax = plt.subplots(figsize=(10,8)) index = np.arange(n_groups) bar_width = 0.3 opacity = 0.4 error_config = {'ecolor': '0.3'} train_bars = plt.bar(index, train_perps, bar_width, alpha=opacity, color='b', error_kw=error_config, label='Training Perplexity') valid_bars = plt.bar(index + bar_width, valid_perps, bar_width, alpha=opacity, color='r', error_kw=error_config, label='Valid Perplexity') test_bars = plt.bar(index + 2*bar_width, test_perps, bar_width, alpha=opacity, color='g', error_kw=error_config, label='Test Perplexity') plt.xlabel('Model') plt.ylabel('Scores') plt.title('Perplexity by Model and Dataset') plt.xticks(index + bar_width / 3, [d[0] for d in data]) plt.legend() plt.tight_layout() plt.show() data = [['<b>Model</b>', '<b>Train Perplexity</b>', '<b>Valid Perplexity</b>', '<b>Test Perplexity</b>']] for rname, report in reports: data.append([rname, report['train_perplexity'], report['valid_perplexity'], report['test_perplexity']]) display_table(data) bar_chart(data[1:]) ``` ### Loss vs. Epoch ``` %matplotlib inline plt.figure(figsize=(10, 8)) for rname, l in logs: for k in l.keys(): plt.plot(l[k][0], l[k][1], label=str(k) + ' ' + rname + ' (train)') plt.plot(l[k][0], l[k][2], label=str(k) + ' ' + rname + ' (valid)') plt.title('Loss v. Epoch') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() plt.show() ``` ### Perplexity vs. Epoch ``` %matplotlib inline plt.figure(figsize=(10, 8)) for rname, l in logs: for k in l.keys(): plt.plot(l[k][0], l[k][3], label=str(k) + ' ' + rname + ' (train)') plt.plot(l[k][0], l[k][4], label=str(k) + ' ' + rname + ' (valid)') plt.title('Perplexity v. 
Epoch') plt.xlabel('Epoch') plt.ylabel('Perplexity') plt.legend() plt.show() ``` ### Generations ``` def print_sample(sample, best_bleu=None): enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>']) gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>']) print('Input: '+ enc_input + '\n') print('Gend: ' + sample['generated'] + '\n') print('True: ' + gold + '\n') if best_bleu is not None: cbm = ' '.join([w for w in best_bleu['best_match'].split(' ') if w != '<mask>']) print('Closest BLEU Match: ' + cbm + '\n') print('Closest BLEU Score: ' + str(best_bleu['best_score']) + '\n') print('\n') def display_sample(samples, best_bleu=False): for enc_input in samples: data = [] for rname, sample in samples[enc_input]: gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>']) data.append([rname, '<b>Generated: </b>' + sample['generated']]) if best_bleu: cbm = ' '.join([w for w in sample['best_match'].split(' ') if w != '<mask>']) data.append([rname, '<b>Closest BLEU Match: </b>' + cbm + ' (Score: ' + str(sample['best_score']) + ')']) data.insert(0, ['<u><b>' + enc_input + '</b></u>', '<b>True: ' + gold+ '</b>']) display_table(data) def process_samples(samples): # consolidate samples with identical inputs result = {} for rname, t_samples, t_cbms in samples: for i, sample in enumerate(t_samples): enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>']) if t_cbms is not None: sample.update(t_cbms[i]) if enc_input in result: result[enc_input].append((rname, sample)) else: result[enc_input] = [(rname, sample)] return result samples = process_samples([(rname, r['train_samples'], r['best_bleu_matches_train'] if 'best_bleu_matches_train' in r else None) for (rname, r) in reports]) display_sample(samples, best_bleu='best_bleu_matches_train' in reports[1][1]) samples = process_samples([(rname, r['valid_samples'], r['best_bleu_matches_valid'] if 'best_bleu_matches_valid' in r else None) for (rname, r) in reports]) display_sample(samples, best_bleu='best_bleu_matches_valid' in reports[1][1]) samples = process_samples([(rname, r['test_samples'], r['best_bleu_matches_test'] if 'best_bleu_matches_test' in r else None) for (rname, r) in reports]) display_sample(samples, best_bleu='best_bleu_matches_test' in reports[1][1]) ``` ### BLEU Analysis ``` def print_bleu(blue_structs): data= [['<b>Model</b>', '<b>Overall Score</b>','<b>1-gram Score</b>','<b>2-gram Score</b>','<b>3-gram Score</b>','<b>4-gram Score</b>']] for rname, blue_struct in blue_structs: data.append([rname, blue_struct['score'], blue_struct['components']['1'], blue_struct['components']['2'], blue_struct['components']['3'], blue_struct['components']['4']]) display_table(data) # Training Set BLEU Scores print_bleu([(rname, report['train_bleu']) for (rname, report) in reports]) # Validation Set BLEU Scores print_bleu([(rname, report['valid_bleu']) for (rname, report) in reports]) # Test Set BLEU Scores print_bleu([(rname, report['test_bleu']) for (rname, report) in reports]) # All Data BLEU Scores print_bleu([(rname, report['combined_bleu']) for (rname, report) in reports]) ``` ### N-pairs BLEU Analysis This analysis randomly samples 1000 pairs of generations/ground truths and treats them as translations, giving their BLEU score. 
We expect very low scores for the ground truth, while high scores can expose hyper-common generations.

```
# Training Set BLEU n-pairs Scores
print_bleu([(rname, report['n_pairs_bleu_train']) for (rname, report) in reports])

# Validation Set n-pairs BLEU Scores
print_bleu([(rname, report['n_pairs_bleu_valid']) for (rname, report) in reports])

# Test Set n-pairs BLEU Scores
print_bleu([(rname, report['n_pairs_bleu_test']) for (rname, report) in reports])

# Combined n-pairs BLEU Scores
print_bleu([(rname, report['n_pairs_bleu_all']) for (rname, report) in reports])

# Ground Truth n-pairs BLEU Scores
print_bleu([(rname, report['n_pairs_bleu_gold']) for (rname, report) in reports])
```

### Alignment Analysis

This analysis computes the average Smith-Waterman alignment score for generations, with the same intuition as n-pairs BLEU: we expect low scores for the ground truth, and hyper-common generations will raise the scores. A minimal sketch of the underlying local-alignment scoring follows after this cell.

```
def print_align(reports):
    data = [['<b>Model</b>', '<b>Average (Train) Generated Score</b>','<b>Average (Valid) Generated Score</b>','<b>Average (Test) Generated Score</b>','<b>Average (All) Generated Score</b>', '<b>Average (Gold) Score</b>']]
    for rname, report in reports:
        data.append([rname, report['average_alignment_train'], report['average_alignment_valid'], report['average_alignment_test'], report['average_alignment_all'], report['average_alignment_gold']])
    display_table(data)

print_align(reports)
```
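A minimal sketch of token-level Smith-Waterman local alignment. The match/mismatch/gap scores here are illustrative; the exact scoring scheme behind the report's averages is not shown in these notes:

```python
# Minimal token-level Smith-Waterman local alignment score (illustrative scoring;
# the exact scheme used for the report's averages is not shown here).
def smith_waterman(a, b, match=2, mismatch=-1, gap=-1):
    a, b = a.split(), b.split()
    # DP table of local alignment scores, clamped at zero
    H = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    best = 0
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            sub = match if a[i - 1] == b[j - 1] else mismatch
            H[i][j] = max(0,
                          H[i - 1][j - 1] + sub,   # align a[i-1] with b[j-1]
                          H[i - 1][j] + gap,       # gap in b
                          H[i][j - 1] + gap)       # gap in a
            best = max(best, H[i][j])
    return best

print(smith_waterman("the cat sat on the mat", "a cat sat on a mat"))
```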
github_jupyter
# Best Linear Predictor (BLP)
> Notes.

- toc: true
- badges: true
- comments: true
- categories: [stats]
- image: images/logo.JPG
- layout: post
- author: "<a href='https://pradeeptadas.github.io/'>Pradeepta Das</a>"
- permalink: /ghtop

So the CEF can be non-linear. Here we explore what to do to get a linear predictor.

### Linear CEF

$m(x) = E(y|\textbf{x})$ is linear in x.

$$ m(\textbf{x}) = \textbf{x}'\beta \implies y = \textbf{x}'\beta + e $$

Here the error does not have the conditional-mean-zero property in general, because we are splitting y into a linear part and everything else.

Generic notation of a linear model:

$$ m(x) = \textbf{x}'\beta, \quad x = (x_1, x_2, \dots, x_k)' $$

But typically x includes a constant, so it is convenient to write $y = \alpha + \textbf{x}'\beta + e$. If you take expectations on both sides, solve for $\alpha$ and substitute back:

$y - \mu_y = (x - \mu_x)' \beta + e \implies \overline{y} = \overline{x}' \beta + e$

So you can de-mean any series and write it in this format!

We know that the conditional mean is the best predictor of y. But what is the best predictor $m(x)$ in the linear family of regressors? The assumption is that $Q_{xx} = E(xx')$ is positive definite. This is a second moment matrix.

**Best Linear Predictor**

- The MSE is $S(\beta) = E(y - \textbf{x}'\beta)^2$.
- The best linear predictor of y given $\textbf{x}$ is $P(y|x) = \textbf{x}'\beta$.
- The BLP coefficient is given by $\beta = \arg\min_{b \in \mathbb{R}^k} S(b)$. The BLP coefficients are also called the Linear Projection Coefficients.
- $\beta$ is unique and given by $\beta = E(xx')^{-1} E(xy)$.
- The BLP is $P(y|x) = \textbf{x}'\beta = \textbf{x}' E(xx')^{-1} E(xy)$.
- The error $e = y - \textbf{x}'\beta$ exists and satisfies $\boxed{E(\textbf{x}e) = 0}$. This means that for each i, $E[X_i e] = 0$, i.e. $X_i$ and e are orthogonal.
- If x includes a constant then $E(e) = 0$; for the CEF error this was always true!
- For the BLP, it must be true that the errors are uncorrelated with the $x_i$'s: $\beta$ is the BLP $\iff E(\textbf{x}e) = 0$.
- This is the moment condition (as in the method of moments estimator), so the BLP can be written as a moments estimator!
- So all the properties of moments estimators carry over to this case.
- The moment that pins down the BLP is that the errors are orthogonal to the $x_i$.

**Linear Prediction with Constant Term**

If y has a constant term, i.e. $y = \alpha + \textbf{x}'\beta + e$, then

$\alpha = E(y) - E(\textbf{x}'\beta) = \mu_y - \mu_x'\beta$

$\beta = Var(x)^{-1} Cov(x,y)$

**Linear Predictor Error Variance**

$\sigma^2 = E(y - \textbf{x}'\beta)^2 = Q_{yy} - Q_{yx} Q^{-1}_{xx} Q_{xy}$

This is the variance of the errors from the BLP of y on x.

## Joint Normality

- (y, x) are jointly normal.
- This means (e, x) are jointly normal.
- $E(e) = 0$ and $E(xe) = 0 \implies Cov(e, x) = 0$.
- e and x are jointly normal and uncorrelated, and thus independent! This holds only under normality.
- Independence gives $E(e|x) = E(e) = 0$.
- So this is the CEF!
- Under joint normality, the linear projection is the CEF.
- Therefore the BLP is the best predictor among all (including nonlinear) predictors!

# Example

Let $z_1, z_2$ be two independent N(0,1) RVs and

$x = \mu_x + \sigma_x z_1$

$y = \mu_y + \sigma_y (\rho z_1 + \sqrt{1 - \rho^2} z_2)$

For this construction, $Var(x) = \sigma^2_x$, $Var(y) = \sigma^2_y$ and $Corr(x,y) = \rho$, so $Cov(x,y) = \rho\,\sigma_x \sigma_y$, and

$\beta = \rho \frac{\sigma_y}{\sigma_x}$

$Var(e) = (1 - \rho^2)\sigma_y^2$
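
To make the example concrete, here is a small simulation sketch (an addition to these notes, assuming only numpy) that checks the population formulas numerically: the fitted slope should approach $\rho \sigma_y / \sigma_x$, the sample analogue of $E(\textbf{x}e)$ should vanish, and the error variance should approach $(1-\rho^2)\sigma_y^2$.

```
import numpy as np

rng = np.random.default_rng(0)
n = 200_000
mu_x, mu_y, sigma_x, sigma_y, rho = 1.0, 2.0, 1.5, 0.8, 0.6

# Construct (x, y) exactly as in the example above
z1, z2 = rng.standard_normal(n), rng.standard_normal(n)
x = mu_x + sigma_x * z1
y = mu_y + sigma_y * (rho * z1 + np.sqrt(1 - rho**2) * z2)

# BLP with a constant: regressor vector is (1, x)
X = np.column_stack([np.ones(n), x])
beta = np.linalg.solve(X.T @ X / n, X.T @ y / n)  # sample analogue of E(xx')^{-1} E(xy)
e = y - X @ beta

print("slope:", beta[1], " theory:", rho * sigma_y / sigma_x)
print("E(xe):", (X * e[:, None]).mean(axis=0))      # should be close to zero
print("Var(e):", e.var(), " theory:", (1 - rho**2) * sigma_y**2)
```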
# LAB 4a: Creating a Sampled Dataset. **Learning Objectives** 1. Setup up the environment 1. Sample the natality dataset to create train/eval/test sets 1. Preprocess the data in Pandas dataframe ## Introduction In this notebook, we'll read data from BigQuery into our notebook to preprocess the data within a Pandas dataframe for a small, repeatable sample. We will set up the environment, sample the natality dataset to create train/eval/test splits, and preprocess the data in a Pandas dataframe. Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/4a_sample_babyweight.ipynb). ## Set up environment variables and load necessary libraries Import necessary libraries. ``` from google.cloud import bigquery import pandas as pd ``` ## Lab Task #1: Set environment variables. Set environment variables so that we can use them throughout the entire lab. We will be using our project ID for our bucket, so you only need to change your project and region. ``` %%bash export PROJECT=$(gcloud config list project --format "value(core.project)") echo "Your current GCP Project ID is: "$PROJECT # TODO: Change environment variables PROJECT = "asl-ml-immersion" # Replace with your PROJECT ``` ## Create ML datasets by sampling using BigQuery We'll begin by sampling the BigQuery data to create smaller datasets. Let's create a BigQuery client that we'll use throughout the lab. ``` bq = bigquery.Client(project = PROJECT) ``` We need to figure out the right way to divide our hash values to get our desired splits. To do that we need to define some values to hash with in the modulo. Feel free to play around with these values to get the perfect combination. ``` modulo_divisor = 100 train_percent = 80.0 eval_percent = 10.0 train_buckets = int(modulo_divisor * train_percent / 100.0) eval_buckets = int(modulo_divisor * eval_percent / 100.0) ``` We can make a series of queries to check if our bucketing values result in the correct sizes of each of our dataset splits and then adjust accordingly. Therefore, to make our code more compact and reusable, let's define a function to return the head of a dataframe produced from our queries up to a certain number of rows. ``` def display_dataframe_head_from_query(query, count=10): """Displays count rows from dataframe head from query. Args: query: str, query to be run on BigQuery, results stored in dataframe. count: int, number of results from head of dataframe to display. Returns: Dataframe head with count number of results. """ df = bq.query( query + " LIMIT {limit}".format( limit=count)).to_dataframe() return df.head(count) ``` For our first query, we're going to use the original query above to get our label, features, and columns to combine into our hash which we will use to perform our repeatable splitting. There are only a limited number of years, months, days, and states in the dataset. Let's see what the hash values are. We will need to include all of these extra columns to hash on to get a fairly uniform spread of the data. Feel free to try less or more in the hash and see how it changes your results. 
``` # Get label, features, and columns to hash and split into buckets hash_cols_fixed_query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, year, month, CASE WHEN day IS NULL THEN CASE WHEN wday IS NULL THEN 0 ELSE wday END ELSE day END AS date, IFNULL(state, "Unknown") AS state, IFNULL(mother_birth_state, "Unknown") AS mother_birth_state FROM publicdata.samples.natality WHERE year > 2000 AND weight_pounds > 0 AND mother_age > 0 AND plurality > 0 AND gestation_weeks > 0 """ display_dataframe_head_from_query(hash_cols_fixed_query) ``` Using `COALESCE` would provide the same result as the nested `CASE WHEN`. This is preferable when all we want is the first non-null instance. To be precise the `CASE WHEN` would become `COALESCE(wday, day, 0) AS date`. You can read more about it [here](https://cloud.google.com/bigquery/docs/reference/standard-sql/conditional_expressions). Next query will combine our hash columns and will leave us just with our label, features, and our hash values. ``` data_query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, FARM_FINGERPRINT( CONCAT( CAST(year AS STRING), CAST(month AS STRING), CAST(date AS STRING), CAST(state AS STRING), CAST(mother_birth_state AS STRING) ) ) AS hash_values FROM ({CTE_hash_cols_fixed}) """.format(CTE_hash_cols_fixed=hash_cols_fixed_query) display_dataframe_head_from_query(data_query) ``` The next query is going to find the counts of each of the unique 657484 `hash_values`. This will be our first step at making actual hash buckets for our split via the `GROUP BY`. ``` # Get the counts of each of the unique hashs of our splitting column first_bucketing_query = """ SELECT hash_values, COUNT(*) AS num_records FROM ({CTE_data}) GROUP BY hash_values """.format(CTE_data=data_query) display_dataframe_head_from_query(first_bucketing_query) ``` The query below performs a second layer of bucketing where now for each of these bucket indices we count the number of records. ``` # Get the number of records in each of the hash buckets second_bucketing_query = """ SELECT ABS(MOD(hash_values, {modulo_divisor})) AS bucket_index, SUM(num_records) AS num_records FROM ({CTE_first_bucketing}) GROUP BY ABS(MOD(hash_values, {modulo_divisor})) """.format( CTE_first_bucketing=first_bucketing_query, modulo_divisor=modulo_divisor) display_dataframe_head_from_query(second_bucketing_query) ``` The number of records is hard for us to easily understand the split, so we will normalize the count into percentage of the data in each of the hash buckets in the next query. ``` # Calculate the overall percentages percentages_query = """ SELECT bucket_index, num_records, CAST(num_records AS FLOAT64) / ( SELECT SUM(num_records) FROM ({CTE_second_bucketing})) AS percent_records FROM ({CTE_second_bucketing}) """.format(CTE_second_bucketing=second_bucketing_query) display_dataframe_head_from_query(percentages_query) ``` We'll now select the range of buckets to be used in training. ``` # Choose hash buckets for training and pull in their statistics train_query = """ SELECT *, "train" AS dataset_name FROM ({CTE_percentages}) WHERE bucket_index >= 0 AND bucket_index < {train_buckets} """.format( CTE_percentages=percentages_query, train_buckets=train_buckets) display_dataframe_head_from_query(train_query) ``` We'll do the same by selecting the range of buckets to be used evaluation. 
``` # Choose hash buckets for validation and pull in their statistics eval_query = """ SELECT *, "eval" AS dataset_name FROM ({CTE_percentages}) WHERE bucket_index >= {train_buckets} AND bucket_index < {cum_eval_buckets} """.format( CTE_percentages=percentages_query, train_buckets=train_buckets, cum_eval_buckets=train_buckets + eval_buckets) display_dataframe_head_from_query(eval_query) ``` Lastly, we'll select the hash buckets to be used for the test split. ``` # Choose hash buckets for testing and pull in their statistics test_query = """ SELECT *, "test" AS dataset_name FROM ({CTE_percentages}) WHERE bucket_index >= {cum_eval_buckets} AND bucket_index < {modulo_divisor} """.format( CTE_percentages=percentages_query, cum_eval_buckets=train_buckets + eval_buckets, modulo_divisor=modulo_divisor) display_dataframe_head_from_query(test_query) ``` In the below query, we'll `UNION ALL` all of the datasets together so that all three sets of hash buckets will be within one table. We added `dataset_id` so that we can sort on it in the query after. ``` # Union the training, validation, and testing dataset statistics union_query = """ SELECT 0 AS dataset_id, * FROM ({CTE_train}) UNION ALL SELECT 1 AS dataset_id, * FROM ({CTE_eval}) UNION ALL SELECT 2 AS dataset_id, * FROM ({CTE_test}) """.format(CTE_train=train_query, CTE_eval=eval_query, CTE_test=test_query) display_dataframe_head_from_query(union_query) ``` Lastly, we'll show the final split between train, eval, and test sets. We can see both the number of records and percent of the total data. It is really close to the 80/10/10 that we were hoping to get. ``` # Show final splitting and associated statistics split_query = """ SELECT dataset_id, dataset_name, SUM(num_records) AS num_records, SUM(percent_records) AS percent_records FROM ({CTE_union}) GROUP BY dataset_id, dataset_name ORDER BY dataset_id """.format(CTE_union=union_query) display_dataframe_head_from_query(split_query) ``` ## Lab Task #1: Sample BigQuery dataset. Sample the BigQuery result set (above) so that you have approximately 8,000 training examples and 1000 evaluation examples. The training and evaluation datasets have to be well-distributed (not all the babies are born in Jan 2005, for example) and should not overlap (no baby is part of both training and evaluation datasets). Now that we know that our splitting values produce a good global splitting on our data, here's a way to get a well-distributed portion of the data in such a way that the train/eval/test sets do not overlap and takes a subsample of our global splits. ``` # every_n allows us to subsample from each of the hash values # This helps us get approximately the record counts we want every_n = # TODO: Experiment with values to get close to target counts # TODO: Replace FUNC with correct function to split with # TODO: Replace COLUMN with correct column to split on splitting_string = "ABS(FUNC(COLUMN, {0} * {1}))".format(every_n, modulo_divisor) def create_data_split_sample_df(query_string, splitting_string, lo, up): """Creates a dataframe with a sample of a data split. Args: query_string: str, query to run to generate splits. splitting_string: str, modulo string to split by. lo: float, lower bound for bucket filtering for split. up: float, upper bound for bucket filtering for split. Returns: Dataframe containing data split sample. 
""" query = "SELECT * FROM ({0}) WHERE {1} >= {2} and {1} < {3}".format( query_string, splitting_string, int(lo), int(up)) df = bq.query(query).to_dataframe() return df train_df = create_data_split_sample_df( data_query, splitting_string, lo=0, up=train_percent) eval_df = create_data_split_sample_df( data_query, splitting_string, lo=train_percent, up=train_percent + eval_percent) test_df = create_data_split_sample_df( data_query, splitting_string, lo=train_percent + eval_percent, up=modulo_divisor) print("There are {} examples in the train dataset.".format(len(train_df))) print("There are {} examples in the validation dataset.".format(len(eval_df))) print("There are {} examples in the test dataset.".format(len(test_df))) ``` ## Preprocess data using Pandas We'll perform a few preprocessing steps to the data in our dataset. Let's add extra rows to simulate the lack of ultrasound. That is we'll duplicate some rows and make the `is_male` field be `Unknown`. Also, if there is more than child we'll change the `plurality` to `Multiple(2+)`. While we're at it, we'll also change the plurality column to be a string. We'll perform these operations below. Let's start by examining the training dataset as is. ``` train_df.head() ``` Also, notice that there are some very important numeric fields that are missing in some rows (the count in Pandas doesn't count missing data) ``` train_df.describe() ``` It is always crucial to clean raw data before using in machine learning, so we have a preprocessing step. We'll define a `preprocess` function below. Note that the mother's age is an input to our model so users will have to provide the mother's age; otherwise, our service won't work. The features we use for our model were chosen because they are such good predictors and because they are easy enough to collect. ## Lab Task #2: Pandas preprocessing. Use Pandas to: * Clean up the data to remove rows that are missing any of the fields. * Simulate the lack of ultrasound. * Change the plurality column to be a string. Hint (highlight to see): <p> Filtering: <pre style="color:white"> df = df[df.weight_pounds > 0] </pre> Modify plurality to be a string: <pre style="color:white"> twins_etc = dict(zip([1,2,3,4,5], ["Single(1)", "Twins(2)", "Triplets(3)", "Quadruplets(4)", "Quintuplets(5)"])) df["plurality"].replace(twins_etc, inplace=True) </pre> Lack of ultrasound: <pre style="color:white"> no_ultrasound = df.copy(deep=True) no_ultrasound["is_male"] = "Unknown" </pre> </p> ``` def preprocess(df): """ Preprocess pandas dataframe for augmented babyweight data. Args: df: Dataframe containing raw babyweight data. Returns: Pandas dataframe containing preprocessed raw babyweight data as well as simulated no ultrasound data masking some of the original data. """ # Clean up raw data # TODO: Filter out what we don"t want to use for training # TODO: Modify plurality field to be a string # TODO: Clone data and mask certain columns to simulate lack of ultrasound # TODO: Modify is_male # TODO: Modify plurality # Concatenate both datasets together and shuffle return pd.concat( [df, no_ultrasound]).sample(frac=1).reset_index(drop=True) ``` Let's process the train/eval/test set and see a small sample of the training data after our preprocessing: ``` train_df = preprocess(train_df) eval_df = preprocess(eval_df) test_df = preprocess(test_df) train_df.head() train_df.tail() ``` Let's look again at a summary of the dataset. Note that we only see numeric columns, so `plurality` does not show up. 
``` train_df.describe() ``` ## Write to .csv files In the final versions, we want to read from files, not Pandas dataframes. So, we write the Pandas dataframes out as csv files. Using csv files gives us the advantage of shuffling during read. This is important for distributed training because some workers might be slower than others, and shuffling the data helps prevent the same data from being assigned to the slow workers. ``` # Define columns columns = ["weight_pounds", "is_male", "mother_age", "plurality", "gestation_weeks"] # Write out CSV files train_df.to_csv( path_or_buf="train.csv", columns=columns, header=False, index=False) eval_df.to_csv( path_or_buf="eval.csv", columns=columns, header=False, index=False) test_df.to_csv( path_or_buf="test.csv", columns=columns, header=False, index=False) %%bash wc -l *.csv %%bash head *.csv %%bash tail *.csv ``` ## Lab Summary: In this lab, we set up the environment, sampled the natality dataset to create train/eval/test splits, and preprocessed the data in a Pandas dataframe. Copyright 2021 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
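
For reference, one way the TODOs in the sampling and preprocessing tasks above could be filled in is sketched below. It is assembled from the hints given in the lab (the filtering, the `twins_etc` mapping, and the `no_ultrasound` clone); the choice of `MOD` on `hash_values` for the splitting string and the `every_n` value are assumptions to experiment with, not the official solution.

```
# One possible way to fill in the TODOs above -- a sketch, not the official solution.
# The splitting function/column are assumed to be MOD/hash_values, and every_n is a guess.
every_n = 1000
splitting_string = "ABS(MOD(hash_values, {0} * {1}))".format(every_n, modulo_divisor)

def preprocess(df):
    """Preprocess pandas dataframe for augmented babyweight data."""
    # Clean up raw data: drop rows missing any field and filter out bad values
    df = df.dropna()
    df = df[df.weight_pounds > 0]
    df = df[df.mother_age > 0]
    df = df[df.gestation_weeks > 0]
    df = df[df.plurality > 0]

    # Modify plurality field to be a string
    twins_etc = dict(zip([1, 2, 3, 4, 5],
                         ["Single(1)", "Twins(2)", "Triplets(3)",
                          "Quadruplets(4)", "Quintuplets(5)"]))
    df["plurality"].replace(twins_etc, inplace=True)

    # Clone data and mask certain columns to simulate lack of ultrasound
    no_ultrasound = df.copy(deep=True)
    no_ultrasound["is_male"] = "Unknown"
    no_ultrasound.loc[
        no_ultrasound["plurality"] != "Single(1)", "plurality"] = "Multiple(2+)"

    # Concatenate both datasets together and shuffle
    return pd.concat([df, no_ultrasound]).sample(frac=1).reset_index(drop=True)
```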
# Friend-based Ranking: Figures This notebook is to produce figures for *Friend-based Ranking*, available at [arXiv:1807.05093](https://arxiv.org/pdf/1807.05093.pdf). # Setup ## Packages ``` # Import packages import numpy as np import scipy as sp import networkx as nx import decimal import math import pandas as pd import statsmodels.api as sm import random # To import Matlab matrices import scipy.io # Plotting import matplotlib.pyplot as plt from matplotlib.patches import Patch import seaborn as sns # Suppress the warnings from matplotlib about networkx import warnings warnings.filterwarnings("ignore") # Pandas display settings pd.set_option("display.max_rows",999) pd.options.display.float_format = '{:,.4f}'.format #Seaborn display settings sns.set(style="ticks", palette="Greys", font_scale=1.4) #Display plots inside notebook %matplotlib inline ``` Generate the random seed from [random.org](https://www.random.org/integers/?num=1&min=1&max=100000&col=1&base=10&format=html&rnd=new) ``` # Seed for random numbers seed = 40588 random.seed(seed) ``` ## Colors I use the [ColorBrewer tool](http://colorbrewer2.org/) to choose color palettes. ``` sns.color_palette("Greys", n_colors=2) colors = {'Indonesia':'#66c2a5','India':'#fc8d62'} grey_light = '#bdbdbd' grey_dark = '#636363' ``` ## Read data Save the panda dataframes to pickle files so that I don't need to extract the data again. ``` df = pd.read_pickle('pd_df/netdata.pickle') ``` # Tables ``` df.columns df.groupby(['country','calculated_on'])[['num_nodes','ave_deg','density', 'ave_clust','ave_dist', 'info_total','links_supported']].agg(['mean','min','max','count']).transpose() ``` # Figures ## Distributions ``` df_plot= df[(df.calculated_on=="giant")][['info_total_friend_only','links_supported', 'density','ave_dist','country','num_nodes']] df_plot.groupby('country').num_nodes.describe() df_plot[df_plot.info_total_friend_only==1].country.value_counts() df_plot[df_plot.links_supported==1].country.value_counts() sns.distplot(df_plot[df_plot.country=="Indonesia"]['info_total_friend_only'],kde=False,bins=np.arange(0,1.06,0.05), color=colors['Indonesia'],hist_kws={'alpha':1}) plt.ylabel('Networks') plt.xlabel('Density comparison network') sns.despine() plt.savefig('figures/hist_density_comp_indonesia.pdf', bbox_inches='tight') sns.distplot(df_plot[df_plot.country=="India"]['info_total_friend_only'],kde=False,bins=np.arange(0,1.06,0.05), color=colors['India'],hist_kws={'alpha':1}) plt.ylabel('Networks') plt.xlabel('Density comparison network') sns.despine() plt.savefig('figures/hist_density_comp_india.pdf', bbox_inches='tight') sns.distplot(df_plot[df_plot.country=="Indonesia"]['links_supported'],kde=False,bins=np.arange(0,1.06,0.05), color=colors['Indonesia'],hist_kws={'alpha':1}) plt.ylabel('Networks') plt.xlabel('Support') sns.despine() plt.savefig('figures/hist_support_indonesia.pdf', bbox_inches='tight') sns.distplot(df_plot[df_plot.country=="India"]['links_supported'],kde=False,bins=np.arange(0,1.06,0.05), color=colors['India'],hist_kws={'alpha':1}) plt.ylabel('Networks') plt.xlabel('Support') sns.despine() plt.savefig('figures/hist_support_india.pdf', bbox_inches='tight') ``` ## Density comparsion network vs social network ``` sns.relplot(x="density", y="info_total_friend_only",height=6, hue="country", data=df_plot,palette=colors,aspect=1, s=60,legend=False) plt.xticks([0,0.2,0.4,0.6,0.8,1]) plt.yticks([0,0.2,0.4,0.6,0.8,1]) plt.ylim(bottom=-0.05,top=1.05) plt.xlim(left=-0.05,right=1.05) plt.ylabel('Density Comparison Network') 
plt.xlabel('Density Social Network') plt.savefig('figures/density.pdf', bbox_inches='tight'); ``` ## Pairplot ``` df_pairplot = df[df.calculated_on=="giant"][['info_total_friend_only','links_supported','ave_clust','density','country']] df_pairplot.rename(columns={ 'density': 'Density \n social network', 'ave_clust': 'Average clustering', 'info_total_friend_only': 'Density \n comparison network', 'links_supported':'Support', 'country': 'Country'}, inplace=True) ax = sns.pairplot(data=df_pairplot[['Density \n social network','Average clustering', 'Density \n comparison network','Support','Country']], hue='Country', palette=colors) ax._legend.remove() ax.savefig('figures/pairplot.pdf'); ``` ## Comparison of mechanisms ``` df_comp = df[(df.info_SP.notnull())& (df.country=='Indonesia')& (df.calculated_on=='giant')& (df.num_nodes<=20)& (df.info_expostIC<1)& (df.info_total_friend_only>df.info_expostIC)][['key', 'num_nodes', 'info_total_friend_only', 'info_expostIC', 'comp_supp', 'info_SP']] df_comp['share_partition'] = df_comp.info_SP/df_comp.info_total_friend_only df_comp['share_supp'] = df_comp.info_expostIC/df_comp.info_total_friend_only df_comp.describe() df_comp[df_comp.info_expostIC>df_comp.info_SP].key.count() df_comp[df_comp.info_expostIC==df_comp.info_SP].key.count() sns.relplot(x="share_supp", y="share_partition",height=5, data=df_comp, color=colors["Indonesia"],aspect=1, s=60,legend=False) plt.plot([0, 1], [0, 1], color = grey_dark, linewidth = 2, alpha=0.5) plt.xticks([0,0.2,0.4,0.6,0.8,1]) plt.yticks([0,0.2,0.4,0.6,0.8,1]) plt.ylim(bottom=0,top=1.05) plt.xlim(left=0,right=1.05) plt.ylabel('Partition mechanism') plt.xlabel('Support mechanism') plt.savefig('figures/mechanisms.pdf', bbox_inches='tight'); df_comp[['info_expostIC','info_SP']].describe() ```
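
The dataframe loaded above already ships with per-network statistics such as `density`, `ave_clust` and `ave_dist`. As a rough illustration of how such columns could be computed from raw graphs with the `networkx` package imported earlier, a sketch follows; the actual construction of `netdata.pickle` is not shown in this notebook and may differ.

```
# Sketch: per-network summary statistics like the columns in `df`, computed with networkx.
# Illustration only -- the pickle above was built elsewhere.
def network_stats(G):
    giant = G.subgraph(max(nx.connected_components(G), key=len))
    return {
        'num_nodes': G.number_of_nodes(),
        'ave_deg': 2 * G.number_of_edges() / G.number_of_nodes(),
        'density': nx.density(G),
        'ave_clust': nx.average_clustering(G),
        'ave_dist': nx.average_shortest_path_length(giant),  # defined on the giant component
    }

# Example on a small random graph
G_example = nx.erdos_renyi_graph(n=20, p=0.2, seed=seed)
pd.Series(network_stats(G_example))
```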
<img src="../logo.png" align='center' width=80%> # Overview As data scientists working in a cyber-security company, we wanted to show that Natural Language Processing (NLP) algorithms can be applied to security related events. For this task we used 2 algorithm developed by Google: **Word2vec** ([link](https://arxiv.org/abs/1301.3781)) and **Doc2vec** ([link](https://arxiv.org/abs/1405.4053)). These algorithms use the context of words to extract a vectorized representation (aka embedding) for each word/document in a given vocabulary. If you want to learn about how **Word2vec** works, you can [start here](https://skymind.ai/wiki/word2vec). Using these algorithms, we managed to model the behavior of common vulnerability scanners (and other client applications) based on their unique 'syntax' of malicious web requests. We named our implementation **Mal2vec**. ### About this notebook This notebook contains easy to use widgets to execute each step on your own data. We also include 3 datasets as examples of how to use this project. ### Table of contents - [Load csv data file](#Load-CSV-data-file) - [Map columns](#Map-columns) - [Select additional grouping columns](#Select-additional-grouping-columns) - [Create sentences](#Create-sentences) - [Prepare dataset](#Prepare-dataset) - [Train classification model](#Train-classifictaion-model) - [Evaluate the model](#Evaluate-the-model) # Imports ``` import random from IPython.display import display, Markdown, clear_output, HTML def hide_toggle(): # @author: harshil # @Source: https://stackoverflow.com/a/28073228/6306692 this_cell = """$('div.cell.code_cell.rendered.selected')""" next_cell = this_cell + '.next()' toggle_text = 'Show/hide code' # text shown on toggle link target_cell = this_cell # target cell to control with toggle js_hide_current = '' # bit of JS to permanently hide code in current cell (only when toggling next cell) js_f_name = 'code_toggle_{}'.format(str(random.randint(1,2**64))) html = """ <script> function {f_name}() {{ {cell_selector}.find('div.input').toggle(); }} {js_hide_current} </script> <a href="javascript:{f_name}()">{toggle_text}</a> """.format( f_name=js_f_name, cell_selector=target_cell, js_hide_current=js_hide_current, toggle_text=toggle_text ) return HTML(html) display(hide_toggle()) display(HTML('''<style>.text_cell {background: #E0E5EE;} .widget-inline-hbox .widget-label{width:120px;}</style>''')) %load_ext autoreload %autoreload 2 import os import pandas as pd import ipywidgets as widgets import sys sys.path.append("..") from classify import prepare_dataset, train_classifier from vizualize import draw_model, plot_model_results from sentensize import create_sentences, dump_sentences ``` # Load CSV data file ### Ready to use dataset - Malware system calls - Extracted by executing malware using Cuckoo sandbox - **Events**: system calls (mapped to an integer) - **Label**: the malware type - **Groupping by**: 'filename' ``` display(hide_toggle()) df = None def load_csv(btn): global df clear_output() display(hide_toggle()) display(widgets.VBox([filename_input, nrows_input])) display(HTML('<img src="../loading.gif" alt="Drawing" style="width: 50px;"/>')) nrows = int(nrows_input.value) df = pd.read_csv(filename_input.value, nrows=nrows if nrows > 0 else None) clear_output() display(hide_toggle()) display(widgets.VBox([filename_input, nrows_input, load_button])) print('Loaded {} rows'.format(df.shape[0])) display(df.sample(n=5)) filename_input = widgets.Text(description='CSV file:', value='data/malware.gz') nrows_input = 
widgets.Text(description='Rows limit:', value='0') load_button = widgets.Button(description='Load CSV') load_button.on_click(load_csv) widgets.VBox([filename_input, nrows_input, load_button]) ``` # Map columns The data should have at least 3 columns: - **Timestamp** (int) - if you don't have timestamps, it can also be a simple increasing index - **Event** (string) - rule name, event description, etc. Must be a single word containing only alpha-numeric characters - **Label** (string) - type of event. This will be later used to create the classification model ``` time_column_input, event_column_input, label_column_input = None, None, None def show_dropdown(obj): global time_column_input, event_column_input, label_column_input time_column_input = widgets.Dropdown(options=df.columns, description='Time column:') event_column_input = widgets.Dropdown(options=df.columns, value='system_call', description='Event column:') label_column_input = widgets.Dropdown(options=df.columns, value='label', description='Label column:') clear_output() display(hide_toggle()) display(widgets.VBox([show_dropdown_button, time_column_input, event_column_input, label_column_input])) show_dropdown_button = widgets.Button(description='Refresh') show_dropdown_button.on_click(show_dropdown) show_dropdown(None) ``` # Select additional grouping columns Select those columns which represents unique sequences ``` checkboxes = None def show_checkboxes(obj): global checkboxes checkboxes = {k:widgets.Checkbox(description=k) for k in df.columns if k not in [time_column_input.value, event_column_input.value, label_column_input.value ]} checkboxes['filename'].value = True clear_output() display(hide_toggle()) display(widgets.VBox([show_checkboxes_button] + [checkboxes[x] for x in checkboxes])) show_checkboxes_button = widgets.Button(description='Refresh') show_checkboxes_button.on_click(show_checkboxes) show_checkboxes(None) ``` # Create sentences This cell will group events into sentences (using the grouping columns selected). It will then split sentences if to consecutive events are separated by more than the given timeout (default: 300 seconds) ``` display(hide_toggle()) dataset_name = os.path.splitext(os.path.basename(filename_input.value))[0] sentences_df, sentences_filepath = None, None def sentences(obj): global sentences_df, sentences_filepath clear_output() display(hide_toggle()) display(HTML('<img src="../loading.gif" alt="Drawing" style="width: 50px;"/>')) groupping_columns = [x for x in checkboxes if checkboxes[x].value] sentences_df = create_sentences(df, time_column_input.value, event_column_input.value, label_column_input.value, groupping_columns, timeout=300 ) sentences_filepath = dump_sentences(sentences_df, dataset_name) clear_output() display(hide_toggle()) display(sentence_button) print('Created {} sentences. Showing 5 examples:'.format(sentences_df.shape[0])) display(sentences_df.sample(n=5)) sentence_button = widgets.Button(description='Start') display(sentence_button) sentence_button.on_click(sentences) ``` # Prepare dataset 1) Train a doc2vec model to extract the embedding vector from each sentence. **Parameters**: *vector_size*: the size of embedding vector. Increasing this parameters might improve accuracy, but will take longer to train (int, default=30) *epochs*: how many epochs should be applied during training. 
Increasing this parameters might improve accuracy, but will take longer to train (int, default=50) *min_sentence_count*: don't classify labels with small amount of sentences (int, default=200) 2) Prepare dataset - Infer the embedding vector for each sample in the data set - Perform [stratified sampling](https://en.wikipedia.org/wiki/Stratified_sampling) for each label - Split to train/test sets 80%-20% ``` display(hide_toggle()) X_train, X_test, y_train, y_test, classes = None, None, None, None, None def dataset(obj): global sentences_df, sentences_filepath, dataset_name, X_train, X_test, y_train, y_test, classes clear_output() display(hide_toggle()) display(HTML('<img src="../loading.gif" alt="Drawing" style="width: 50px;"/>')) X_train, X_test, y_train, y_test, classes = prepare_dataset(sentences_df, sentences_filepath, dataset_name, vector_size=30, epochs=50, min_sentence_count=200 ) dataset_button.description = 'Run Again' clear_output() display(hide_toggle()) print('Dataset ready!') display(dataset_button) dataset_button = widgets.Button(description='Start') display(dataset_button) dataset_button.on_click(dataset) ``` # Train classification model Train a deep neural network to classify each sentence to its correct label for 500 epochs (automatically stop when training no longer improves results) For the purpose of this demo, the network architecture and hyper-parameters are constant. Feel free the modify to code and improve the model ``` display(hide_toggle()) history, report, df_cm = None, None, None def train(obj): global dataset_name, X_train, X_test, y_train, y_test, classes, history, report, df_cm train_button.description = 'Train Again' clear_output() display(hide_toggle()) display(train_button) history, report, df_cm = train_classifier(X_train, X_test, y_train, y_test, classes, dataset_name) train_button = widgets.Button(description='Start') display(train_button) train_button.on_click(train) ``` # Evaluate the model Plot the results of the model: - **Loss** - how did the model progress during training (lower values mean better performance) - **Accuracy** - how did the model perform on the validation set (higher values are better) - **Confusion Matrix** - mapping each of the model's predictions (x-axis) to its true label (y-axis). Correct predictions are placed on the main diagonal (brighter is better) - **Detailed report** - for each label, show the following metrics: precision, recall, f1-score ([read more here](https://towardsdatascience.com/accuracy-precision-recall-or-f1-331fb37c5cb9)). The 'support' metric is the number of instances in that class ``` display(hide_toggle()) def evaluate(btn): global history, report, df_cm clear_output() evaluate_button.description = 'Refresh' display(hide_toggle()) display(evaluate_button) plot_model_results(history, report, df_cm, classes) evaluate_button = widgets.Button(description='Evaluate Model') display(evaluate_button) evaluate_button.on_click(evaluate) ```
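
The embedding step described above happens inside `prepare_dataset` from the project's `classify` module, which this notebook treats as a black box. As a rough standalone illustration of what training Doc2vec on event sentences can look like with gensim, consider the sketch below; the helper function and the column name used in the usage comment are assumptions, and the project's real implementation may differ.

```
# Illustrative sketch of the doc2vec embedding step described above, using gensim.
# Parameter values mirror the defaults mentioned in this notebook (vector_size=30, epochs=50).
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

def embed_sentences(sentences, vector_size=30, epochs=50):
    """sentences: iterable of event strings, e.g. 'evt1 evt2 evt3'."""
    tagged = [TaggedDocument(words=s.split(), tags=[i]) for i, s in enumerate(sentences)]
    model = Doc2Vec(vector_size=vector_size, min_count=1, workers=4)
    model.build_vocab(tagged)
    model.train(tagged, total_examples=model.corpus_count, epochs=epochs)
    # Infer one embedding vector per sentence
    return [model.infer_vector(s.split()) for s in sentences]

# Example usage (column name 'sentence' is assumed):
# vectors = embed_sentences(sentences_df['sentence'])
```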
<a href="https://www.skills.network/"><img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DL0120ENedX/labs/Template%20for%20Instructional%20Hands-on%20Labs/images/IDSNlogo.png" width="400px" align="center"></a> <h1 align="center"><font size="5">LINEAR REGRESSION WITH TENSORFLOW</font></h1> <h2>LINEAR REGRESSION WITH TENSORFLOW</h2> <h3>Objective for this Notebook<h3> <h5> 1. What is Linear Regression</h5> <h5> 2. Linear Regression with TensorFlow. </h5> <div class="alert alert-block alert-info" style="margin-top: 20px"> <font size="3"><strong>In this notebook we will overview the implementation of Linear Regression with TensorFlow</strong></font> <br> <br> <h2>Table of Contents</h2> <ol> <li><a href="#ref1">Linear Regression</a></li> <li><a href="#ref2">Linear Regression with TensorFlow</a></li> </ol> </div> <br> <br> <p></p> <hr> <a id="ref1"></a> <h1>Linear Regression</h1> Defining a linear regression in simple terms, is the approximation of a linear model used to describe the relationship between two or more variables. In a simple linear regression there are two variables, the dependent variable, which can be seen as the "state" or "final goal" that we study and try to predict, and the independent variables, also known as explanatory variables, which can be seen as the "causes" of the "states". When more than one independent variable is present the process is called multiple linear regression. <br> When multiple dependent variables are predicted the process is known as multivariate linear regression. The equation of a simple linear model is $$Y = a X + b $$ Where Y is the dependent variable and X is the independent variable, and <b>a</b> and <b>b</b> being the parameters we adjust. <b>a</b> is known as "slope" or "gradient" and <b>b</b> is the "intercept". You can interpret this equation as Y being a function of X, or Y being dependent on X. If you plot the model, you will see it is a line, and by adjusting the "slope" parameter you will change the angle between the line and the independent variable axis, and the "intercept parameter" will affect where it crosses the dependent variable's axis. We begin by installing TensorFlow version 2.2.0 and its required prerequistes. ``` !pip install grpcio==1.24.3 !pip install tensorflow==2.2.0 ``` **Restart kernel for latest version of TensorFlow to be activated** Next, let's first import the required packages: ``` import matplotlib.pyplot as plt import pandas as pd import pylab as pl import numpy as np import tensorflow as tf import matplotlib.patches as mpatches import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10, 6) if not tf.__version__ == '2.2.0': print(tf.__version__) raise ValueError('please upgrade to TensorFlow 2.2.0, or restart your Kernel (Kernel->Restart & Clear Output)') ``` IMPORTANT! => Please restart the kernel by clicking on "Kernel"->"Restart and Clear Outout" and wait until all output disapears. Then your changes are beeing picked up Let's define the independent variable: ``` X = np.arange(0.0, 5.0, 0.1) X ##You can adjust the slope and intercept to verify the changes in the graph a = 1 b = 0 Y= a * X + b plt.plot(X, Y) plt.ylabel('Dependent Variable') plt.xlabel('Indepdendent Variable') plt.show() ``` OK... but how can we see this concept of linear relations with a more meaningful point of view? 
Simple linear relations were used to try to describe and quantify many observable physical phenomena; the easiest to understand are speed and distance traveled:

$$\text{Distance Traveled} = \text{Speed} \times \text{Time} + \text{Initial Distance}$$

$$\text{Speed} = \text{Acceleration} \times \text{Time} + \text{Initial Speed}$$

They are also used to describe properties of different materials:

$$\text{Force} = \text{Deformation} \times \text{Stiffness}$$

$$\text{Heat Transferred} = \text{Temperature Difference} \times \text{Thermal Conductivity}$$

$$\text{Electrical Tension (Voltage)} = \text{Electrical Current} \times \text{Resistance}$$

$$\text{Mass} = \text{Volume} \times \text{Density}$$

When we perform an experiment and gather the data, or if we already have a dataset and we want to perform a linear regression, what we do is adjust a simple linear model to the dataset: we adjust the "slope" and "intercept" parameters to fit the data as well as possible, because the closer the model comes to describing each occurrence, the better it will be at representing them.

So how is this "regression" performed?

<hr>

<a id="ref2"></a>
<h1>Linear Regression with TensorFlow</h1>
A simple example of a linear function can help us understand the basic mechanism behind TensorFlow.

For the first part we will use a sample dataset, and then we'll use TensorFlow to adjust and get the right parameters. We download a dataset that is related to fuel consumption and carbon dioxide emission of cars.

```
!wget -O FuelConsumption.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv
```

<h2>Understanding the Data</h2>

<h3><code>FuelConsumption.csv</code>:</h3>
We have downloaded a fuel consumption dataset, <b><code>FuelConsumption.csv</code></b>, which contains model-specific fuel consumption ratings and estimated carbon dioxide emissions for new light-duty vehicles for retail sale in Canada. <a href="http://open.canada.ca/data/en/dataset/98f1a129-f628-4ce4-b24d-6f16bf24dd64">Dataset source</a>

- **MODELYEAR** e.g. 2014
- **MAKE** e.g. Acura
- **MODEL** e.g. ILX
- **VEHICLE CLASS** e.g. SUV
- **ENGINE SIZE** e.g. 4.7
- **CYLINDERS** e.g. 6
- **TRANSMISSION** e.g. A6
- **FUEL CONSUMPTION in CITY(L/100 km)** e.g. 9.9
- **FUEL CONSUMPTION in HWY (L/100 km)** e.g. 8.9
- **FUEL CONSUMPTION COMB (L/100 km)** e.g. 9.2
- **CO2 EMISSIONS (g/km)** e.g. 182 --> low --> 0

```
df = pd.read_csv("FuelConsumption.csv")

# take a look at the dataset
df.head()
```

Let's say we want to use linear regression to predict the CO2 emission of cars based on their engine size. So, let's define the X and Y values for the linear regression, that is, train_x and train_y:

```
train_x = np.asanyarray(df[['ENGINESIZE']])
train_y = np.asanyarray(df[['CO2EMISSIONS']])
```

First, we initialize the variables <b>a</b> and <b>b</b>, with any random guess, and then we define the linear function:

```
a = tf.Variable(20.0)
b = tf.Variable(30.2)

def h(x):
   y = a*x + b
   return y
```

Now, we are going to define a loss function for our regression, so we can train our model to better fit our data. In a linear regression, we minimize the squared error of the difference between the predicted values (obtained from the equation) and the target values (the data that we have). In other words, we want to minimize the square of the predicted values minus the target values. So we define the equation to be minimized as loss.

To find the value of our loss, we use <b>tf.reduce_mean()</b>. This function finds the mean of a multidimensional tensor, and the result can have a different dimension.
```
def loss_object(y, train_y):
    return tf.reduce_mean(tf.square(y - train_y))

# Below is a predefined method offered by TensorFlow to calculate loss function
#loss_object = tf.keras.losses.MeanSquaredLogarithmicError()
```

Now we are ready to start training and run the graph. We use GradientTape to calculate gradients:

```
learning_rate = 0.01
train_data = []
loss_values =[]
# steps of looping through all your data to update the parameters
training_epochs = 200

# train model
for epoch in range(training_epochs):
    with tf.GradientTape() as tape:
        y_predicted = h(train_x)
        loss_value = loss_object(train_y, y_predicted)
        loss_values.append(loss_value)

        # get gradients
        gradients = tape.gradient(loss_value, [b, a])

        # compute and adjust weights
        b.assign_sub(gradients[0]*learning_rate)
        a.assign_sub(gradients[1]*learning_rate)
        if epoch % 5 == 0:
            train_data.append([a.numpy(), b.numpy()])
```

Let's plot the loss values to see how they have changed during the training:

```
plt.plot(loss_values, 'ro')
```

Let's visualize how the coefficient and intercept of the line have changed to fit the data:

```
cr, cg, cb = (1.0, 1.0, 0.0)
for f in train_data:
    cb += 1.0 / len(train_data)
    cg -= 1.0 / len(train_data)
    if cb > 1.0: cb = 1.0
    if cg < 0.0: cg = 0.0
    [a, b] = f
    f_y = np.vectorize(lambda x: a*x + b)(train_x)
    line = plt.plot(train_x, f_y)
    plt.setp(line, color=(cr,cg,cb))

plt.plot(train_x, train_y, 'ro')

green_line = mpatches.Patch(color='red', label='Data Points')
plt.legend(handles=[green_line])

plt.show()
```

* * *

## Want to learn more?

Running deep learning programs usually needs a high performance platform. **PowerAI** speeds up deep learning and AI. Built on IBM’s Power Systems, **PowerAI** is a scalable software platform that accelerates deep learning and AI with blazing performance for individual users or enterprises. The **PowerAI** platform supports popular machine learning libraries and dependencies including TensorFlow, Caffe, Torch, and Theano. You can use [PowerAI on IBM Cloud](https://cocl.us/ML0120EN_PAI). Also, you can use **Watson Studio** to run these notebooks faster with bigger datasets. **Watson Studio** is IBM’s leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, **Watson Studio** enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of **Watson Studio** users today with a free account at [Watson Studio](https://cocl.us/ML0120EN_DSX). This is the end of this lesson. Thank you for reading this notebook, and good luck on your studies.

### Thanks for completing this lesson!

If you are familiar with some of these methods and concepts, this tutorial might have been boring for you, but it is important to get used to the TensorFlow mechanics, and feel familiar and comfortable using it, so you can build more complex algorithms in it.
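As an optional aside that is not part of the original lab: the manual parameter updates above (the `assign_sub` calls inside the GradientTape loop) do by hand what a built-in optimizer does for you. A minimal sketch of the same fit using `tf.keras.optimizers.SGD`, assuming the `train_x`, `train_y`, `h` and `loss_object` defined earlier in this notebook:

```
import tensorflow as tf

a = tf.Variable(20.0)
b = tf.Variable(30.2)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

for epoch in range(200):
    with tf.GradientTape() as tape:
        loss_value = loss_object(train_y, h(train_x))
    # apply_gradients performs variable -= learning_rate * gradient,
    # mirroring the explicit assign_sub calls used above
    gradients = tape.gradient(loss_value, [a, b])
    optimizer.apply_gradients(zip(gradients, [a, b]))
```

Both versions should converge to essentially the same slope and intercept for this dataset.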
Created by <a href="https://linkedin.com/in/romeo-kienzler-089b4557"> Romeo Kienzler </a>, <a href="https://linkedin.com/in/saeedaghabozorgi"> Saeed Aghabozorgi </a> , <a href="https://ca.linkedin.com/in/rafaelblsilva"> Rafael Belo Da Silva</a><br> Updated to TF 2.X by <a href="https://www.linkedin.com/in/samaya-madhavan"> Samaya Madhavan </a> ## Change Log | Date (YYYY-MM-DD) | Version | Changed By | Change Description | | ----------------- | ------- | ---------- | ----------------------------------------------------------- | | 2020-09-21 | 2.0 | Srishti | Migrated Lab to Markdown and added to course repo in GitLab | <hr> ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/> <hr> Copyright © 2018 [Cognitive Class](https://cocl.us/DX0108EN_CC). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ).
github_jupyter
!pip install grpcio==1.24.3 !pip install tensorflow==2.2.0 import matplotlib.pyplot as plt import pandas as pd import pylab as pl import numpy as np import tensorflow as tf import matplotlib.patches as mpatches import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10, 6) if not tf.__version__ == '2.2.0': print(tf.__version__) raise ValueError('please upgrade to TensorFlow 2.2.0, or restart your Kernel (Kernel->Restart & Clear Output)') X = np.arange(0.0, 5.0, 0.1) X ##You can adjust the slope and intercept to verify the changes in the graph a = 1 b = 0 Y= a * X + b plt.plot(X, Y) plt.ylabel('Dependent Variable') plt.xlabel('Indepdendent Variable') plt.show() !wget -O FuelConsumption.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv df = pd.read_csv("FuelConsumption.csv") # take a look at the dataset df.head() train_x = np.asanyarray(df[['ENGINESIZE']]) train_y = np.asanyarray(df[['CO2EMISSIONS']]) a = tf.Variable(20.0) b = tf.Variable(30.2) def h(x): y = a*x + b return y def loss_object(y,train_y) : return tf.reduce_mean(tf.square(y - train_y)) # Below is a predefined method offered by TensorFlow to calculate loss function #loss_object = tf.keras.losses.MeanSquaredLogarithmicError() learning_rate = 0.01 train_data = [] loss_values =[] # steps of looping through all your data to update the parameters training_epochs = 200 # train model for epoch in range(training_epochs): with tf.GradientTape() as tape: y_predicted = h(train_x) loss_value = loss_object(train_y,y_predicted) loss_values.append(loss_value) # get gradients gradients = tape.gradient(loss_value, [b,a]) # compute and adjust weights b.assign_sub(gradients[0]*learning_rate) a.assign_sub(gradients[1]*learning_rate) if epoch % 5 == 0: train_data.append([a.numpy(), b.numpy()]) plt.plot(loss_values, 'ro') cr, cg, cb = (1.0, 1.0, 0.0) for f in train_data: cb += 1.0 / len(train_data) cg -= 1.0 / len(train_data) if cb > 1.0: cb = 1.0 if cg < 0.0: cg = 0.0 [a, b] = f f_y = np.vectorize(lambda x: a*x + b)(train_x) line = plt.plot(train_x, f_y) plt.setp(line, color=(cr,cg,cb)) plt.plot(train_x, train_y, 'ro') green_line = mpatches.Patch(color='red', label='Data Points') plt.legend(handles=[green_line]) plt.show()
0.624294
0.985271
### Computational Journalism and Communication: Course Introduction

***
***

# Programming Tools for Data Science: Big Data

***
***

Wang Chengjun (王成军)

wangchengjun@nju.edu.cn

Computational Communication (计算传播网) http://computational-communication.com

# Pictures about big data
![](./img/bigdata.png)

# Numbers
![](./img/bignumber.png)

# Networks
![](./img/bignetwork.png)

# Text
![](./img/bigword.png)

> # Big data is like teenage sex:
> # Everyone talks about it, nobody really knows how to do it,
> # everyone thinks everyone else is doing it, so everyone claims they are doing it.
> --Dan Ariely of Duke University

> # Big data is a broad term for data sets so large or complex that traditional data processing applications are inadequate. Challenges include analysis, capture, data curation, search, sharing, storage, transfer, visualization, and information privacy. (WIKIPEDIA)

# Cloud computing

2006: AWS EC2 (cloud-based computing clusters)

# Tools in the Ecosystem: "Hadoop" and Map/Reduce

# Alibaba Cloud (阿里云)

# Baidu Cloud (百度云)

# Map/Reduce

Google article on MapReduce by Dean and Ghemawat, 2004

![](./img/mapreduce.png)

# The nightmare: finding a way to split the data
- word count
- network?

(A toy word-count sketch in plain Python is included after the references at the end of this section.)

# An alternative to Hadoop, Spark with Python
- [wordcount with spark and python](https://github.com/apache/spark/blob/master/examples/src/main/python/wordcount.py)

By combining systems with algorithms, we can design large-scale distributed machine learning algorithms and systems, so that machine learning algorithms can run on clusters of many processors and machines and process data at a much larger scale.

Well-known systems in this area include:

- Spark from UC Berkeley
- TensorFlow from Google
- Dato GraphLab from the University of Washington
- Petuum from Carnegie Mellon University
- DMTK from Microsoft

# Giant Data Sets Are Around

- amazon https://aws.amazon.com/public-data-sets/
- Yahoo open data http://webscope.sandbox.yahoo.com/
- Stanford Large Network Dataset Collection https://snap.stanford.edu/data/
- bigdata/twitter7/tweets2009-07.txt.gz
- Gdelt news data http://gdeltproject.org/

![](./img/gdelt.png)

# Big Query Demo of GDELT

https://bigquery.cloud.google.com/table/gdelt-bq:gdeltv2.events?_ga=1.69322323.2127553167.1443183592

![](./img/bigquery.png)

```
from IPython.display import display_html, HTML
HTML('<iframe src=http://ccc.nju.edu.cn/newsmap/ width=1000 height=500></iframe>')
# the webpage we would like to crawl
```

<p><a href="https://vimeo.com/68736161">imMens: Real-time Visual Querying of Big Data</a> from <a href="https://vimeo.com/stanfordvis">Stanford Visualization Group</a> on <a href="https://vimeo.com">Vimeo</a>.</p>

[Bin-Summarize-Smooth: A Framework for Visualizing Large Data](http://vita.had.co.nz/papers/bigvis.pdf) (Hadley Wickham)

["Why Exploring Big Data is Hard and What We Can Do About It"](https://www.youtube.com/watch?t=2&v=UP5412nU2lI), Danyel Fisher's talk at OpenVisConf 2015

![](./img/dilbert.png)

### Big Data and whole data are not the same. Without taking into account the sample of a data set, the size of the data set is meaningless. For example, a researcher may seek to understand the topical frequency of tweets, yet if Twitter removes all tweets that contain problematic words or content – such as references to pornography or spam – from the stream, the topical frequency would be inaccurate. Regardless of the number of tweets, it is not a representative sample as the data is skewed from the beginning.

> d. boyd and K. Crawford, "Critical Questions for Big Data" Information, Communication & Society Volume 15, Issue 5, 2012 http://www.tandfonline.com/doi/abs/10.1080/1369118X.2012.678878

### ... four quantitatively adept social scientists reported that Google’s flu-tracking service not only wildly overestimated the number of flu cases in the United States in the 2012-13 flu season — a well-known miss — but has also consistently overshot in the last few years.
Google Flu Trends’ estimate for the 2011-12 flu season was more than 50 percent higher than the cases reported by the Centers for Disease Control and Prevention.

...Their technical criticism of Google Flu Trends is that it is not using a broader array of data analysis tools. Indeed, their analysis shows that combining Google Flu Trends with C.D.C. data, and applying a few tweaking techniques, works best.

[Google Flu Trends: The Limits of Big Data (NYT)](http://bits.blogs.nytimes.com/2014/03/28/google-flu-trends-the-limits-of-big-data/?_r=0)

> Lazer, David, Ryan Kennedy, Gary King, and Alessandro Vespignani. 2014. “The Parable of Google Flu: Traps in Big Data Analysis.” Science 343 (14 March): 1203-1205.

### The first lesson of Web-scale learning is to use available large-scale data rather than hoping for annotated data that isn’t available. For instance, we find that useful semantic relationships can be automatically learned from the statistics of search queries and the corresponding results -- or from the accumulated evidence of Web-based text patterns and formatted tables -- in both cases without needing any manually annotated data.

> Halevy, Norvig, Pereira

![](./img/datascience.png)

# Type A: Analysis
- making sense of data
- very similar to a statistician

# Type B: Builders
- mainly interested in using data in production.
- strong coders and may be trained software engineers.

# Everyone should learn Python
## Everyone should learn some SQL.

# References
- Viktor Mayer-Schönberger, *Big Data: A Revolution That Will Transform How We Live, Work, and Think*. Chinese edition: 大数据时代:生活、工作与思维的大变革, Zhejiang People's Publishing House, translated by Zhou Tao (周涛), December 2012, 261 pages.
- http://ghostweather.slides.com/lynncherny/what-is-big-data-anyway
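To make the Map/Reduce idea above concrete, here is the toy word count in plain Python mentioned earlier (an illustration of the programming model only, not a distributed implementation): the "map" step emits a (word, 1) pair for every word in a chunk of text, and the "reduce" step sums the counts per word.

```
from collections import defaultdict
from itertools import chain

documents = ["big data is like teenage sex",
             "everyone talks about big data"]

def map_step(doc):
    # emit a (word, 1) pair for every word in one chunk of the data
    return [(word, 1) for word in doc.split()]

def reduce_step(pairs):
    # sum the counts for each word across all chunks
    counts = defaultdict(int)
    for word, n in pairs:
        counts[word] += n
    return dict(counts)

word_counts = reduce_step(chain.from_iterable(map_step(d) for d in documents))
print(word_counts)
```

In a real Hadoop or Spark job the map calls run in parallel on different machines, and the framework groups the emitted pairs by key before the reduce step.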
github_jupyter
from IPython.display import display_html, HTML HTML('<iframe src=http://ccc.nju.edu.cn/newsmap/ width=1000 height=500></iframe>') # the webpage we would like to crawl
0.304248
0.725187
# Example 1
## Step 0 - prepare your data
Prepare cellphoneDB inputs starting from an anndata object

```
import numpy as np
import pandas as pd
import scanpy as sc
import anndata
import os
import sys
from scipy import sparse

sc.settings.verbosity = 1  # verbosity: errors (0), warnings (1), info (2), hints (3)

sys.executable
```

### 1. Load anndata

The anndata object contains counts that have been normalized (per cell) and log-transformed (if you are starting from raw counts, see the optional preprocessing sketch at the end of this example).

```
adata = sc.read('endometrium_example_counts.h5ad')
```

### 2. Generate your meta

In this example, our input is an anndata containing the cluster/celltype information in anndata.obs['cell_type'].

The object also has anndata.obs['lineage'] information which will be used below for a hierarchical DEGs approach.

```
adata.obs['cell_type'].values.describe()

df_meta = pd.DataFrame(data={'Cell':list(adata.obs.index),
                             'cell_type':[ i for i in adata.obs['cell_type']]
                            })
df_meta.set_index('Cell', inplace=True)
df_meta.to_csv('endometrium_example_meta.tsv', sep = '\t')
```

### 3. Compute DEGs (optional)

We will import our gene expression into Seurat using rpy2 so that we can estimate the differentially expressed genes using Seurat `FindAllMarkers`

```
# Convert to dense matrix for Seurat
adata.X = adata.X.toarray()

import rpy2.rinterface_lib.callbacks
import logging

# Ignore R warning messages
#Note: this can be commented out to get more verbose R output
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR)

import anndata2ri
anndata2ri.activate()

%load_ext rpy2.ipython

%%R -i adata
adata
```

Use Seurat `FindAllMarkers` to compute differentially expressed genes and extract the corresponding data frame `DEGs`. Here there are two options you may be interested in:
1. Identify DEGs for each cell type (compare cell type vs rest, most likely option)
2. Identify DEGs for each cell type using a per-lineage hierarchical approach (compare cell type vs rest within the lineage, such as in the endometrium paper, Garcia-Alonso et al 2021)

In the endometrium paper (Garcia-Alonso et al 2021) we're interested in the differences within the stromal and epithelial lineages, rather than the commonalities (for example, what is specific to epithelial cells in the glands compared to epithelial cells in the lumen). The reason is that epithelial and stromal subtypes vary in space and type, and thus we want to extract the subtle differences within the lineage to better understand their differential location/biological role.

```
%%R -o DEGs

library(Seurat)

so = as.Seurat(adata, counts = "X", data = "X")
Idents(so) = so$cell_type

## OPTION 1 - compute DEGs for all cell types
## Extract DEGs for each cell_type
# DEGs <- FindAllMarkers(so,
#                        test.use = 'LR',
#                        verbose = F,
#                        only.pos = T,
#                        random.seed = 1,
#                        logfc.threshold = 0.2,
#                        min.pct = 0.1,
#                        return.thresh = 0.05)

# OPTION 2 - optional - Re-compute hierarchical (per lineage) DEGs for Epithelial and Stromal lineages
DEGs = c()
for( lin in c('Epithelial', 'Stromal') ){
    message('Computing DEGs within lineage ', lin)
    so_in_lineage = subset(so, cells = Cells(so)[ so$lineage == lin ] )
    celltye_in_lineage = unique(so$cell_type[ so$lineage == lin ])
    DEGs_lin = FindAllMarkers(so_in_lineage,
                              test.use = 'LR',
                              verbose = F,
                              only.pos = T,
                              random.seed = 1,
                              logfc.threshold = 0.2,
                              min.pct = 0.1,
                              return.thresh = 0.05)
    DEGs = rbind(DEGs_lin, DEGs)
}
```

Filter significant genes.
Here we select genes with adjusted p-value `< 0.05` and average log FoldChange `>0.1` ``` DEGs.head() cond1 = DEGs['p_val_adj'] < 0.05 cond2 = DEGs['avg_log2FC'] > 0.1 mask = [all(tup) for tup in zip(cond1, cond2)] fDEGs = DEGs[mask] ``` Save significant DEGs into a file. Important, the DEGs output file must contain - 1st column = cluster - 2nd column = gene - 3rd-Z columns = ignored ``` # 1st column = cluster; 2nd column = gene fDEGs = fDEGs[['cluster', 'gene', 'p_val_adj', 'p_val', 'avg_log2FC', 'pct.1', 'pct.2']] fDEGs.to_csv('endometrium_example_DEGs.tsv', index=False, sep='\t') ``` ### 4. Run cellphoneDB ``` cellphonedb method degs_analysis \ endometrium_example_meta.tsv \ endometrium_example_counts.h5ad \ endometrium_example_DEGs.tsv \ --microenvs endometrium_example_microenviroments.tsv \ --counts-data hgnc_symbol \ --database database/database/cellphonedb_user_2021-06-29-11_41.db \ --threshold 0.1 ```
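Step 1 above assumes the counts in the AnnData object are already normalized per cell and log-transformed. If you are starting from raw counts, a minimal scanpy preprocessing sketch might look like the following (the raw-counts filename is hypothetical, and this is an assumed workflow rather than part of the original example):

```
import scanpy as sc

# hypothetical file with raw counts
adata = sc.read('endometrium_example_counts_raw.h5ad')

# normalize each cell to the same total count, then log-transform (log1p)
sc.pp.normalize_total(adata, target_sum=1e4)
sc.pp.log1p(adata)

adata.write('endometrium_example_counts.h5ad')
```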
github_jupyter
import numpy as np import pandas as pd import scanpy as sc import anndata import os import sys from scipy import sparse sc.settings.verbosity = 1 # verbosity: errors (0), warnings (1), info (2), hints (3) sys.executable adata = sc.read('endometrium_example_counts.h5ad') adata.obs['cell_type'].values.describe() df_meta = pd.DataFrame(data={'Cell':list(adata.obs.index), 'cell_type':[ i for i in adata.obs['cell_type']] }) df_meta.set_index('Cell', inplace=True) df_meta.to_csv('endometrium_example_meta.tsv', sep = '\t') # Conver to dense matrix for Seurat adata.X = adata.X.toarray() import rpy2.rinterface_lib.callbacks import logging # Ignore R warning messages #Note: this can be commented out to get more verbose R output rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) import anndata2ri anndata2ri.activate() %load_ext rpy2.ipython %%R -i adata adata %%R -o DEGs library(Seurat) so = as.Seurat(adata, counts = "X", data = "X") Idents(so) = so$cell_type ## OPTION 1 - compute DEGs for all cell types ## Extract DEGs for each cell_type # DEGs <- FindAllMarkers(so, # test.use = 'LR', # verbose = F, # only.pos = T, # random.seed = 1, # logfc.threshold = 0.2, # min.pct = 0.1, # return.thresh = 0.05) # OPTION 2 - optional - Re-compute hierarchical (per lineage) DEGs for Epithelial and Stromal lineages DEGs = c() for( lin in c('Epithelial', 'Stromal') ){ message('Computing DEGs within linage ', lin) so_in_lineage = subset(so, cells = Cells(so)[ so$lineage == lin ] ) celltye_in_lineage = unique(so$cell_type[ so$lineage == lin ]) DEGs_lin = FindAllMarkers(so_in_lineage, test.use = 'LR', verbose = F, only.pos = T, random.seed = 1, logfc.threshold = 0.2, min.pct = 0.1, return.thresh = 0.05) DEGs = rbind(DEGs_lin, DEGs) } DEGs.head() cond1 = DEGs['p_val_adj'] < 0.05 cond2 = DEGs['avg_log2FC'] > 0.1 mask = [all(tup) for tup in zip(cond1, cond2)] fDEGs = DEGs[mask] # 1st column = cluster; 2nd column = gene fDEGs = fDEGs[['cluster', 'gene', 'p_val_adj', 'p_val', 'avg_log2FC', 'pct.1', 'pct.2']] fDEGs.to_csv('endometrium_example_DEGs.tsv', index=False, sep='\t') cellphonedb method degs_analysis \ endometrium_example_meta.tsv \ endometrium_example_counts.h5ad \ endometrium_example_DEGs.tsv \ --microenvs endometrium_example_microenviroments.tsv \ --counts-data hgnc_symbol \ --database database/database/cellphonedb_user_2021-06-29-11_41.db \ --threshold 0.1
0.241937
0.934694
# Distinguish Your Own Digits (DYOD)
> In this notebook we are going to write a classifier that distinguishes between the digits 3 and 8. We will also compare our neural network to a simple logistic regression model

- badges: true
- author: Prasasti Choudhury

```
#importing the necessary libraries
%load_ext autoreload
%autoreload 2
%matplotlib inline

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
```

From the command line run `pip install mnist`. This is a library that will help you bring down the MNIST dataset. If you run this from a notebook, you need to put `!pip install mnist` in a cell by itself.

```
#this line can be commented out if already installed
!pip install mnist
```

## Preparing the Data

```
import mnist

train_images = mnist.train_images()
train_labels = mnist.train_labels()
train_images.shape, train_labels.shape

test_images = mnist.test_images()
test_labels = mnist.test_labels()
test_images.shape, test_labels.shape

image_index = 7776 # You may select anything up to 60,000
print(train_labels[image_index])
plt.imshow(train_images[image_index], cmap='Greys')
```

## Filter data to get 3 and 8 out

```
train_filter = np.where((train_labels == 3 ) | (train_labels == 8))
test_filter = np.where((test_labels == 3) | (test_labels == 8))

X_train, y_train = train_images[train_filter], train_labels[train_filter]
X_test, y_test = test_images[test_filter], test_labels[test_filter]
```

We normalize the pixel values to the 0 to 1 range

```
X_train = X_train/255.
X_test = X_test/255.
```

And set up the labels as 1 (when the digit is 3) and 0 (when the digit is 8)

```
y_train = 1*(y_train==3)
y_test = 1*(y_test==3)

X_train.shape, X_test.shape
```

We reshape the data to flatten the image pixels into a set of features or co-variates:

```
X_train = X_train.reshape(X_train.shape[0], -1)
X_test = X_test.reshape(X_test.shape[0], -1)
X_train.shape, X_test.shape

#Importing the Kudzu libraries that were provided
from kudzu.data import Data, Dataloader, Sampler
from kudzu.callbacks import AccCallback, ClfCallback
from kudzu.model import Model
from kudzu.loss import MSE
from kudzu.optim import GD
from kudzu.layer import Affine, Sigmoid, Relu
from kudzu.train import Learner

#Using the following configuration to set up the model for training
class Config:
    pass

config = Config()
config.lr = 0.001
config.num_epochs = 200
config.bs = 50

data = Data(X_train, y_train.reshape(-1,1))
loss = MSE()
opt = GD(config.lr)
sampler = Sampler(data, config.bs, shuffle=True)
dl = Dataloader(data, sampler)

#Initialising the model
layers = [Affine("first", 784, 100), Relu("first"), Affine("second", 100, 100), Relu("second"), Affine("third", 100, 2), Affine("final", 2, 1), Sigmoid("final")]
model = Model(layers)
learner = Learner(loss, model, opt, config.num_epochs)

#calling our modified ClfCallback function in the callbacks.py file
acc = ClfCallback(learner, config.bs, X_train, X_test, y_train.reshape(-1,1), y_test.reshape(-1,1))
learner.set_callbacks([acc])
```

#### Getting the Epochs and losses as well as the train and test accuracies

```
learner.train_loop(dl)
```

#### Plot for the train and test accuracies for the model

```
plt.figure(figsize= (10,6))
plt.plot(acc.accuracies, 'b-', label = 'Train Accuracies')
plt.plot(acc.val_accuracies, 'r-', label = 'Test Accuracies')
plt.legend(frameon=False, loc='lower right')
plt.show()
```

##### The above plot shows overfitting, as the test accuracy falls below the train accuracy.
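To put a number on that gap, the per-epoch accuracies recorded by the callback can be compared directly. A small sketch, assuming `ClfCallback` stores them in the `accuracies` and `val_accuracies` lists used in the plot above:

```
import numpy as np

train_acc = np.array(acc.accuracies)
test_acc = np.array(acc.val_accuracies)

# positive values mean the model does better on training data than on test data
gap = train_acc - test_acc
print("Final train accuracy: {:.3f}".format(train_acc[-1]))
print("Final test accuracy:  {:.3f}".format(test_acc[-1]))
print("Largest train-test gap during training: {:.3f}".format(gap.max()))
```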
### Plotting of the two-dimensional output before the last Affine with Probability Contours (hints taken from the TA session videos and Office hour videos)

```
# taking the model up to the two-dimensional output (everything before the final Affine and Sigmoid)
model_plot = Model(layers[:-2])
plot_data = model_plot(X_test)

# function to plot the data
def plotdata(x, y):
    plt.plot(x[y[:,0] == 1, 0], x[y[:,0] == 1, 1], 'ro', label = 'image 3', alpha= 0.1)
    plt.plot(x[y[:,0] == 0, 0], x[y[:,0] == 0, 1], 'bx', label = 'image 8', alpha= 0.15)
    plt.legend(frameon=False, loc='upper right')
```

##### Calculating the Probability contours

```
# In order to get the probability, the last two layers (the final Affine that produces the logit, followed by the Sigmoid) are needed to create a model
prob_model = Model(layers[-2: ])

# creating the xx and yy as arrays with 100 rows and 100 columns
xgrid = np.linspace(-8, 8, 100)
ygrid = np.linspace(-5, 3, 100)
xx, yy = np.meshgrid(xgrid, ygrid)

# making both xx and yy single dimensional arrays and then stacking them vertically and transposing to get the array with 10000 rows and 2 columns
X = np.vstack((np.ravel(xx), np.ravel(yy))).T

# The above data with 10000 rows and 2 columns needs to be fed to the probability model that we created to get the probability contours
prob_contours = prob_model(X).reshape(100,100)
prob_contours.shape

# Plotting the points with the probability contours
plt.figure(figsize = (10,8))
plotdata(plot_data, y_test.reshape(-1,1))
contours = plt.contour(xx, yy, prob_contours)
plt.clabel(contours, inline=True, fontsize= 8)
```

### Comparing our Neural Network model with the simple Logistic Regression model

```
# creating the model for the logistic regression consisting of an Affine and a Sigmoid layer
layers_logistic = [Affine("logits", 784, 1), Sigmoid("sigmoid")]
logistic_model = Model(layers_logistic)
learner_l = Learner(loss, logistic_model, opt, config.num_epochs)

# calling our modified ClfCallback function in the callbacks.py file
acc_l = ClfCallback(learner_l, config.bs, X_train, X_test, y_train.reshape(-1,1), y_test.reshape(-1,1))
learner_l.set_callbacks([acc_l])

learner_l.train_loop(dl)
```

##### Plotting our Neural Network Model and Logistic Regression model

```
plt.figure(figsize = (20,5))

plt.subplot(1,2,1)
plt.plot(acc.val_accuracies, 'b-', label = "Val Accuracies")
plt.plot(acc.accuracies, 'r-', label = "Accuracies")
plt.title("Plot for Neural Network Model")
plt.ylim(0.6,1)
plt.legend(frameon=False, loc='lower right')

plt.subplot(1,2,2)
plt.plot(acc_l.val_accuracies, 'y-', label = "Val Accuracies")
plt.plot(acc_l.accuracies, 'g-', label = "Accuracies")
plt.title("Plot for Logistic Regression Model")
plt.ylim(0.6,1)
plt.legend(frameon=False, loc='lower right')
```

##### Comparing the two plots above, our neural network model reaches higher accuracy than the logistic regression model, but overfitting remains an issue for the neural network.
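As an external sanity check that is not part of the kudzu assignment, the same 3-vs-8 task can be fit with scikit-learn's `LogisticRegression` on the flattened pixels. A minimal sketch, assuming the `X_train`, `X_test`, `y_train`, `y_test` arrays prepared above:

```
from sklearn.linear_model import LogisticRegression

# raise the iteration cap since the input has 784 features
clf = LogisticRegression(max_iter=1000)
clf.fit(X_train, y_train)

print("sklearn train accuracy:", clf.score(X_train, y_train))
print("sklearn test accuracy: ", clf.score(X_test, y_test))
```

If these accuracies land close to the kudzu logistic model's, that is a good sign the custom training loop is behaving as expected.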
github_jupyter
#importing the necessary libraries %load_ext autoreload %autoreload 2 %matplotlib inline import numpy as np import matplotlib.pyplot as plt import pandas as pd #this line can be commented out if already installed !pip install mnist import mnist train_images = mnist.train_images() train_labels = mnist.train_labels() train_images.shape, train_labels.shape test_images = mnist.test_images() test_labels = mnist.test_labels() test_images.shape, test_labels.shape image_index = 7776 # You may select anything up to 60,000 print(train_labels[image_index]) plt.imshow(train_images[image_index], cmap='Greys') train_filter = np.where((train_labels == 3 ) | (train_labels == 8)) test_filter = np.where((test_labels == 3) | (test_labels == 8)) X_train, y_train = train_images[train_filter], train_labels[train_filter] X_test, y_test = test_images[test_filter], test_labels[test_filter] X_train = X_train/255. X_test = X_test/255. y_train = 1*(y_train==3) y_test = 1*(y_test==3) X_train.shape, X_test.shape X_train = X_train.reshape(X_train.shape[0], -1) X_test = X_test.reshape(X_test.shape[0], -1) X_train.shape, X_test.shape #Importing the Kudzu libraries that was provided from kudzu.data import Data, Dataloader, Sampler from kudzu.callbacks import AccCallback, ClfCallback from kudzu.model import Model from kudzu.loss import MSE from kudzu.optim import GD from kudzu.layer import Affine, Sigmoid, Relu from kudzu.train import Learner #Using the following configuration to set up the model for training class Config: pass config = Config() config.lr = 0.001 config.num_epochs = 200 config.bs = 50 data = Data(X_train, y_train.reshape(-1,1)) loss = MSE() opt = GD(config.lr) sampler = Sampler(data, config.bs, shuffle=True) dl = Dataloader(data, sampler) #Initialising the model layers = [Affine("first", 784, 100), Relu("first"), Affine("second", 100, 100), Relu("second"), Affine("third", 100, 2), Affine("final", 2, 1), Sigmoid("final")] model = Model(layers) learner = Learner(loss, model, opt, config.num_epochs) #calling our modified ClfCallback function in the callbacks.py file acc = ClfCallback(learner, config.bs, X_train, X_test, y_train.reshape(-1,1), y_test.reshape(-1,1)) learner.set_callbacks([acc]) learner.train_loop(dl) plt.figure(figsize= (10,6)) plt.plot(acc.accuracies, 'b-', label = 'Train Accuracies') plt.plot(acc.val_accuracies, 'r-', label = 'Test Accuracies') plt.legend(frameon=False, loc='lower right') plt.show() #taking the model before the last affine where we get the two-dimensional output model_plot = Model(layers[:-2]) plot_data = model_plot(X_test) #function to plot the data def plotdata(x, y): plt.plot(x[y[:,0] == 1, 0], x[y[:,0] == 1, 1], 'ro', label = 'image 3', alpha= 0.1) plt.plot(x[y[:,0] == 0, 0], x[y[:,0] == 0, 1], 'bx', label = 'image 8', alpha= 0.15) plt.legend(frameon=False, loc='upper right') #Inorder to get the probability the last two layers(Logits followed ) are needed to create a model prob_model = Model(layers[-2: ]) #creating the xx and yy as arrays with 100 rows and 100 columns xgrid = np.linspace(-8, 8, 100) ygrid = np.linspace(-5, 3, 100) xx, yy = np.meshgrid(xgrid, ygrid) #making both xx and yy as single dimensional arrays and then stacking them vertically and transposing to get the array with 10000 rows and 2 columns X = np.vstack((np.ravel(xx), np.ravel(yy))).T #The above data with 10000 rows and 2 columns needs to be fed to the probability model that we created to get the probability contours prob_contours = prob_model(X).reshape(100,100) prob_contours.shape # Plotting the 
points with the probability contours plt.figure(figsize = (10,8)) plotdata(plot_data, y_test.reshape(-1,1)) contours = plt.contour(xx, yy, prob_contours) plt.clabel(contours, inline=True, fontsize= 8) #creating the model for the logistic regression consisting of an Affine and a Sigmoid layer layers_logistic = [Affine("logits", 784, 1), Sigmoid("sigmoid")] logistic_model = Model(layers_logistic) learner_l = Learner(loss, logistic_model, opt, config.num_epochs) #calling our modified ClfCallback function in the callbacks.py file acc_l = ClfCallback(learner_l, config.bs, X_train, X_test, y_train.reshape(-1,1), y_test.reshape(-1,1)) learner_l.set_callbacks([acc_l]) learner_l.train_loop(dl) plt.figure(figsize = (20,5)) plt.subplot(1,2,1) plt.plot(acc.val_accuracies, 'b-', label = "Val Accuracies") plt.plot(acc.accuracies, 'r-', label = "Accuracies") plt.title("Plot for Neural Network Model") plt.ylim(0.6,1) plt.legend(frameon=False, loc='lower right') plt.subplot(1,2,2) plt.plot(acc_l.val_accuracies, 'y-', label = "Val Accuracies") plt.plot(acc_l.accuracies, 'g-', label = "Accuracies") plt.title("Plot for Logistic Regression Model") plt.ylim(0.6,1) plt.legend(frameon=False, loc='lower right')
0.651022
0.963609
```
# Make necessary import
import os
import warnings

from xyzspaces.datasets import get_countries_data
import xyzspaces
```

<div class="alert alert-block alert-warning">
<b>Warning:</b> Before running below cells please make sure you have XYZ Token to interact with xyzspaces. Please see README.md in notebooks folder for more info on XYZ_TOKEN
</div>

```
# Make a XYZ object
try:
    xyz_token = os.environ["XYZ_TOKEN"]
except KeyError:
    xyz_token = "MY-FANCY-XYZ-TOKEN"

if xyz_token == "MY-FANCY-XYZ-TOKEN":
    warnings.warn(
        "Please either set your actual token to env variable XYZ_TOKEN or "
        "just assign value of your actual token to variable xyz_token above."
    )

xyz = xyzspaces.XYZ(credentials=xyz_token)

# List the spaces available
xyz.spaces.list()

# Create a new space
title = "Testing xyzspaces"
description = "Temporary space containing countries data."
space = xyz.spaces.new(title=title, description=description)
space.info

space.isshared()

# Read from an existing space id
space = xyz.spaces.from_id(space.info["id"])

# Countries Polygons/Multipolygons
gj_countries = get_countries_data()
len(gj_countries["features"])

_ = space.add_features(features=gj_countries)

# Get Statistics of a Space.
space.get_statistics()

# Read all features from space:
for feature in space.iter_feature():
    print(feature)

# Get some features from the space
test = space.get_features(feature_ids=["IND","DEU"])

# Create some new test data
test['features'][0]['id'] = 'test1'
test['features'][1]['id'] = 'test2'

# Add a new feature in space
_ = space.add_feature(feature_id='test1', data=test['features'][0])

# Get a feature from space
space.get_feature(feature_id='test1')

# Update a feature in space
_ = space.update_feature(feature_id='test1', data=test['features'][1])

# Delete a feature in space
space.delete_feature(feature_id='test1')

# Add new features in space
_ = space.add_features(features=test)

# Get newly added features in space
space.get_features(feature_ids=["test1","test2"])

# Update features in space
space.update_features(features=test)

# Delete the new features added in the space.
space.delete_features(feature_ids=["test1","test2"])

# Search features in space
for feature in space.search(params={"p.name": "India"}):
    print(feature)

# Get features in a bounding box from space
for feature in space.features_in_bbox(bbox=[0, 0, 20, 20]):
    print(feature)

# Get features in tile from space.
for feature in space.features_in_tile(tile_type="here", tile_id="12"):
    print(feature)

# Get features in radius using spatial search in space.
for feature in space.spatial_search(lat=37.377228699000057, lon=74.512691691000043, radius=100000):
    print(feature)

# Get features which intersect the provided geometry.
geom = {"type": "Point", "coordinates": [72.8557, 19.1526]}
for feature in space.spatial_search_geometry(data=geom):
    print(feature)

# Add features from csv file
space.add_features_csv('data/test.csv', lat_col='latitude', lon_col='longitude', id_col='policyID')
space.get_feature(feature_id='333743')

# Add features from geojson file
space.add_features_geojson('data/test.geojson')
space.get_feature(feature_id='test_geojson_1')

# Delete space
space.delete()
space.info
```
github_jupyter
# Make necessary import import os import warnings from xyzspaces.datasets import get_countries_data import xyzspaces # Make a XYZ object try: xyz_token = os.environ["XYZ_TOKEN"] except KeyError: xyz_token = "MY-FANCY-XYZ-TOKEN" if xyz_token == "MY-FANCY-XYZ-TOKEN": warnings.warn( "Please either set your actual token to env variable XYZ_TOKEN or " "just assign value of your actual token to variable xyz_token above." ) xyz = xyzspaces.XYZ(credentials=xyz_token) # List the space available xyz.spaces.list() # Create a new space title = "Testing xyzspaces" description = "Temporary space containing countries data." space = xyz.spaces.new(title=title, description=description) space.info space.isshared() # Read from a existing space id space = xyz.spaces.from_id(space.info["id"]) # Countries Polygons/Multipolygons gj_countries = get_countries_data() len(gj_countries["features"]) _ = space.add_features(features=gj_countries) # Get Statistics of a Space. space.get_statistics() # Read all features from space: for feature in space.iter_feature(): print(feature) # Get some feature from space test = space.get_features(feature_ids=["IND","DEU"]) # Create some new test data test['features'][0]['id'] = 'test1' test['features'][1]['id'] = 'test2' # Add a new feature in space _ = space.add_feature(feature_id='test1', data=test['features'][0]) # Get a feature from space space.get_feature(feature_id='test1') # Update a feature in space _ = space.update_feature(feature_id='test1', data=test['features'][1]) # Delete a feature in space space.delete_feature(feature_id='test1') # Add new features in space _ = space.add_features(features=test) # Get newly added features in space space.get_features(feature_ids=["test1","test2"]) # Update features in space space.update_features(features=test) # Delete the new fatures added in space. space.delete_features(feature_ids=["test1","test2"]) # Search features in space for feature in space.search(params={"p.name": "India"}): print(feature) # Get features in a bounding box from space for feature in space.features_in_bbox(bbox=[0, 0, 20, 20]): print(feature) # Get features in tile from space. for feature in space.features_in_tile(tile_type="here", tile_id="12"): print(feature) # Get features in radius using spatial search in space. for feature in space.spatial_search(lat=37.377228699000057, lon=74.512691691000043, radius=100000): print(feature) # Get features which interesect the provided geometry. geom = {"type": "Point", "coordinates": [72.8557, 19.1526]} for feature in space.spatial_search_geometry(data=geom): print(feature) # Add features from csv file space.add_features_csv('data/test.csv', lat_col='latitude', lon_col='longitude', id_col='policyID') space.get_feature(feature_id='333743') # Add features from geojson file space.add_features_geojson('data/test.geojson') space.get_feature(feature_id='test_geojson_1') # Delete space space.delete() space.info
0.415136
0.755005
![Callysto.ca Banner](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-top.jpg?raw=true)

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/PythagoreanTheorem/pythagorean-theorem.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>

```
%%html
<script>
  function code_toggle() {
    if (code_shown){
      $('div.input').hide('500');
      $('#toggleButton').val('Show Code')
    } else {
      $('div.input').show('500');
      $('#toggleButton').val('Hide Code')
    }
    code_shown = !code_shown
  }

  $( document ).ready(function(){
    code_shown=false;
    $('div.input').hide()
  });
</script>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>

from IPython.display import Image
from IPython.display import IFrame
import ipywidgets as widgets
import IPython
```

# Pythagorean Theorem

This notebook will cover the Pythagorean theorem, including its applications and a proof of the theorem.

**Note:** You should have a solid understanding of square roots and squaring numbers before moving on to this notebook. This notebook assumes you know these concepts, though it also gives more practice of these concepts.

## Introduction

Say you have 2 sides of a right angle triangle and are trying to figure out the third. How can we do this? Thankfully that's where the Pythagorean theorem comes in!

<img style="float: right;" src="images/PythagoreanTriangle.png" width="50%" height="700">

### Terminology

**Hypotenuse:** the longest side of a triangle

**Legs:** the other two sides of a triangle that are not the hypotenuse

### What is this theorem?

When you draw a right angle triangle with a square on each side like this diagram, there's a relationship between the areas of the squares. You should notice that the areas of the squares on the two legs added together are equal to the area of the largest square on the hypotenuse.

In this example, the area of the red square is $9 \text{ cm}^2$, the area of the blue square is $16 \text{ cm}^2$, and the area of the yellow square is $25 \text{ cm}^2$.

$$\text{Notice that } \color{red}{9 \text{ cm}^2} + \color{blue}{16 \text{ cm}^2} = \color{yellow}{25 \text{ cm}^2}$$
$$\text{But } \color{red}{3 \text{ cm}} + \color{blue}{4 \text{ cm}} ≠ \color{yellow}{5 \text{ cm}}$$

This relationship actually works for all right angle triangles!

**The Pythagorean theorem is $a^2 + b^2 = c^2$ where $a$ and $b$ are the legs and $c$ is the hypotenuse. It does not matter which leg is $a$ or $b$**.

**Fact:** The Pythagorean Theorem is named for the Greek mathematician, Pythagoras.

*Pythagorean Triples are sets of three numbers that create a right angle triangle like this one, so 3, 4, 5 is a Pythagorean triple*

## Example 1

<img style="float: left;" src="images/PythagoreanTriangle2.png" width="45%" height="auto">

##### Question 1: What are the lengths of the legs of the triangle on the left?

The side length of a square is the square root of its area. <br>
The side length of the red square is $\sqrt{4 \text{ m}^2} = 2 \text{ m}$. <br>
The side length of the blue square is $\sqrt{9 \text{ m}^2} = 3 \text{ m}$. <br>
Therefore the lengths of the legs are $2 \text{ m}$ and $3 \text{ m}$.

##### Question 2: What is the area of the yellow square in the diagram to the left?
Let's use the Pythagorean theorem. The area of the two smaller squares added together is equal to the area of the larger square. <br>
The area of the red square is $ 4 \text{ m}^2$ and the area of the blue square is $ 9 \text{ m}^2$. <br>
Now we add them together: $ 4 \text{ m}^2 + 9 \text{ m}^2 = 13 \text{ m}^2$. <br>
The area of the yellow square is $ 13 \text{ m}^2$.

##### Question 3: What is the length of the hypotenuse of the triangle to the left?

Now we know the area of the large yellow square is $ 13 \text{ m}^2$, so the side length of the square is $\sqrt{13} \text{ m}$. <br>
The hypotenuse of the triangle has the same length as the length of the side of the yellow square. <br>
Therefore the length of the hypotenuse is $\sqrt{13} \text{ m}$.

## Proof

Not convinced that this relationship works for all right angle triangles? Look at the visual proof from mathisfun.com.

```
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/_87RbSoELW8" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
```

### Algebraic proof

We will work through the proof of a² + b² = c² together. Let's look at the diagram below.

<img style="float: center;" src="images/PythagoreanProof.png" width="30%" height="auto">

You can see the 4 identical right angle triangles within a square, and the sides of each triangle are labelled just like our first example. a = 3, b = 4, and c = 5.

#### Area of the large square

The area of the large square is its side length squared, which is $(3 + 4)^2 = 7^2 = 49.$

#### Area of the pieces

The area of the smaller yellow square in the middle is $ 5^2 = 25.$ <br>
The area of one blue triangle is $\frac{3 \times 4}{2}$ and since there's 4 of them, the area of all 4 triangles is
$$\frac{4 \times (3 \times 4)}{2} = \frac{4 \times 12}{2} = \frac{48}{2} = 24.$$ <br>
Now we add those together to get $ 25 + 24 = 49.$

#### Areas are equal

You can see that $ 49 = 49. $ This is because the area of the large square takes up the exact same space as the areas of all 4 blue triangles and the yellow square combined.

This doesn't just work for these numbers though, it works for any numbers that create right angle triangles! If you want to see the full proof without numbers, you can check it out at [mathisfun.com](https://www.mathsisfun.com/geometry/pythagorean-theorem-proof.html).

## Example 2

Let's go through an example of a question without the squares.

What is the length of the hypotenuse of the triangle below?

<img style="float: center;" src="images/PythagoreanTriangle4.png" width="40%" height="auto">

Recall the Pythagorean theorem: $a^2 + b^2 = c^2$. <br>
Now let's put the values we know into the theorem. The length of the hypotenuse is the value of c.

$$\begin{align*}
(2 \text{ cm})^2 + (5 \text{ cm})^2 & = c^2 \\
4 \text{ cm}^2 + 25 \text{ cm}^2 & = c^2 \\
29 \text{ cm}^2 & = c^2 \\
\sqrt{29 \text{ cm}^2} & = \sqrt{c^2} \\
\sqrt{29} \text{ cm} & = c \\
\end{align*}$$

Let's approximate the answer to one decimal place using a calculator. $\sqrt{29} \text{ cm} \approx 5.4 \text{ cm}$. <br>
The length of the hypotenuse is $\sqrt{29} \text{ cm}$ or approximately $5.4 \text{ cm}$.
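Since this notebook already uses Python for its interactive questions, here is a quick, optional check of Example 2 in code (the values 2 and 5 are the leg lengths from the example above):

```
import math

a = 2  # cm
b = 5  # cm

# c² = a² + b², so c is the square root of a² + b²
c = math.sqrt(a**2 + b**2)
print(round(c, 1), "cm")  # prints 5.4 cm, matching the answer above
```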
********** ## Practice #### Question 1 <img style="float: left;" src="images/PythagoreanTriangle5.png" width="300"> ``` answer1 = widgets.RadioButtons(options=['9 m', '6 m','6.4 m','5.4 m'], value=None, description= 'Hypotenuse') def display1(): IPython.display.clear_output() print("What is the length of the hypotenuse of the triangle above?") print("Round to one decimal place when necessary.") IPython.display.display(answer1) def check1(a): display1() if answer1.value == '6.4 m': print("Correct! Great job! The theorem properly filled out looks like this: 16 m² + 25 m² = 41 m²") else: print("Sorry, that's not right, try again. Pythagorean Theorem is a² + b² = c².") display1() answer1.observe(check1, 'value') ``` #### Question 2 Let's have a more practical problem for the Pythagorean theorem. Say you have a table that's shortest side is 3.10 m long. If the table is held on an angle, can it fit through this door frame below? Round to 2 decimal places. <img style="float: left;" src="images/PythagoreanTriangleDoor.png" width="200"> ``` answer3 = widgets.RadioButtons(options=['2.00 m', '2.83 m','3.16 m','4.03 m'], value=None, description= 'Diagonal') def display3(): IPython.display.clear_output() print("What is the diagonal of the door?") print("Round to two decimal places when necessary.") IPython.display.display(answer3) def check3(a): display3() if answer3.value == '3.16 m': print("Correct! Great job!") else: print("Sorry, that's not right, try again. Pythagorean Theorem is a² + b² = c².") display3() answer3.observe(check3, 'value') answer2 = widgets.RadioButtons(options=['Yes, the table will fit.', 'No, the table will not fit'], value=None) def display2(): IPython.display.clear_output() print("Is the length of the table smaller than the diagonal of the door?") print("Round to two decimal places when necessary.") IPython.display.display(answer2) def check2(a): display2() if answer2.value == 'Yes, the table will fit.': print("That's right! The table will fit through the door on an angle.") else: print("Sorry, that's not right, the table will be able to fit in the door because 3.1 m is less than 3.16 m.") display2() answer2.observe(check2, 'value') ``` What else would knowing how to find the hypotenuse be helpful for? ## Extend Your Knowledge We can use the Pythagorean theorem for more than just finding the length of the hypotenuse given the two legs. We can find the length of one leg given the other leg and the hypotenuse. ### Example Given this right angled triangle below, what is the missing side length? <img style="float: left;" src="images/PythagoreanTriangle6.png" width=200> Let's start by filling in the information we know into the pythagorean theorem. $$\begin{align*} a^2 + b^2 & = c^2 \\ a^2 + (\sqrt{20 \text{ units}})^2 & = (6 \text{ units})^2 \\ \end{align*}$$ Now let's solve this equation for the missing variable. In this example, we will solve for $a$. $$\begin{align*} a^2 + (\sqrt{20 \text{ units}})^2 & = (6 \text{ units})^2 \\ a^2 + 20 \text{ units}^2 & = 36 \text{ units}^2 \tag{apply the power of 2 to the bases} \\ a^2 + 20 \text{ units}^2 - 20 \text{ units}^2 & = 36 \text{ units}^2 - 20 \text{ units}^2 \tag{subtract 20 units² from both sides} \\ \sqrt{a^2} & = \sqrt{16 \text{ units}^2} \tag{square root both sides} \\ a & = 4 \text{ units} \end{align*}$$ ## Practice Now you try to calculate the length of the missing leg. 
<img style="float: left;" src="images/PythagoreanTriangle8.png" width="200"> ``` answer4 = widgets.RadioButtons(options=['8 m', '9 m','8.3 m','7.8 m'], value=None, description= 'Side Length') def display4(): IPython.display.clear_output() print("What is the length of the leg labelled a above?") print("Round to one decimal place when necessary.") IPython.display.display(answer4) def check4(a): display4() if answer4.value == '8 m': print("Correct! If we divide each side length by 2, you might notice that this triangle is the same one \n as the very first triangle we looked at in this notebook!") else: print("Sorry, that's not right, try again. Pythagorean Theorem is a² + b² = c². We are looking for a.") display4() answer4.observe(check4, 'value') ``` ## Checking Right angles We can check if a triangle is a right angle triangle by knowing if its sides fit the Pythagorean theorem. If they don't then it isn't a right angle triangle. Lets look at an acute and an obtuse triangle and compare their sides in the Pythagorean theorem. You know, just to make sure. Look at the three triangles below. One is a right angle triangle, one is an acute triangle, and one is an obtuse triangle. Fill in the table below by clicking on the box you want to fill (where it's written 'nan') and typing in your answer. The longest side is side c. <img style="float: left;" src="images/ThreeTriangles.png" width="600"> ``` import pandas as pd import qgrid table = pd.DataFrame(index=pd.Series(['Right', 'Acute', 'Obtuse']), columns=pd.Series(['a²', 'b²','a² + b²', 'c²'])) table_widget = qgrid.QgridWidget(df =table, show_toolbar=False) table_widget answer5 = widgets.RadioButtons(options=['Yes','No'], value=None) def check5(a): IPython.display.clear_output() print("Does a² + b² = c² for all triangles?") IPython.display.display(answer5) if answer5.value == 'No': print("That's right! The Pythagorean theorem only works for right angle triangles.") else: print("Actually, the Pythagorean theorem only works for right angle triangles.") print("Now let's use this knowledge to check if triangles have a right angle or not!") print("Does a² + b² = c² for all triangles?") IPython.display.display(answer5) answer5.observe(check5, 'value') ``` ## Example Let's go through an example together. Here is a triangle with all three sides labelled. Is this a right angle triangle? <img style="float: left;" src="images/angle2.png" width="300"> Remember, the longest side is side c. Let's fill in the Pythagorean theorem and see if the left side equals the right. <br> Since c is the largest side, a and b will be the legs. $$\begin{align*} \text{Let's start with the left side:} \\ a & = 7 \text{ m} \\ a^2 & = 49 \text{ m}^2 \\ b & = 10 \text{ m} \\ b^2& = 100 \text{ m}^2 \\ \text{Now let's add them together:} \\ a^2 + b^2 & = 49 \text{ m}^2 + 100 \text{ m}^2 \\ a^2 + b^2 & = 149 \text{ m}^2 \\ \end{align*}$$ **The left side equals 149 m²** <br> $$\begin{align*} \text{And now the right side:} \\ c & = 13 \text{ m} \\ c^2 & = 169 \text{ m}^2 \\ \end{align*}$$ **The right side equals 169 m²** <br> 149 m² does not equal 169 m² therefore this triangle is not a right angle triangle. ### Practice Now it's your turn to check if this triangle below is a right angle triangle. 
<img style="float: left;" src="images/angle1.png" width="200"> ``` submit1 = widgets.Button(description='Submit', button_style='success') answer6 = widgets.Text(value=None, placeholder='Your answer here', description='Left side') def display6(): IPython.display.clear_output() print("What is a² + b²?") print("Type your answer below, and don't forget units! Eg: write 50 cm^2 or 50 units^2") IPython.display.display(answer6, submit1) submit1.on_click(check6) def check6(a): display6() if answer6.value == '169 units^2': print("That's right! Now let's move on to the right side.") else: if answer6.value == '169' or answer6.value == '169 units': print("Don't forget your units!") else: print("Sorry, that's not right, try again before moving on to the right side.") display6() submit2 = widgets.Button(description='Submit', button_style='success') answer7 = widgets.Text(value=None, placeholder='Your answer here', description='Right side') def display7(): IPython.display.clear_output() print("What is c²?") print("Type your answer below, and don't forget units! Eg: write 50 cm^2 or 50 units^2") IPython.display.display(answer7, submit2) submit2.on_click(check7) def check7(a): display7() if answer7.value == '169 units^2': print("That's correct! Great job!") elif answer7.value == '169' or answer7.value == '169 units': print("Don't forget your units!") else: print("Sorry, try again.") display7() answer8 = widgets.RadioButtons(options=['Yes','No'], value=None) def check8(a): IPython.display.clear_output() print("Is this triangle a right angle triangle?") IPython.display.display(answer8) if answer8.value == 'Yes': print("That's right! This is a right angle triangle") else: print("Actually, this triangle is a right angle triangle.") print("Is this triangle a right angle triangle?") IPython.display.display(answer8) answer8.observe(check8, 'value') ``` ### Word question ![frame](https://images.freeimages.com/images/premium/large-thumbs/5963/59633908-comic-cartoon-picture-frame.jpg) Bailey has four pieces of wood. Two of them are 3 inches long. The other two are 5 inches long. <br> Bailey makes a rectangular picture frame using these pieces. Then the diagonal is measured to be 7 inches long. <br> ``` answer9 = widgets.RadioButtons(options=['Yes','No'], value=None) def check9(a): IPython.display.clear_output() print("Does the picture frame have a right angle corner?") IPython.display.display(answer9) if answer9.value == 'No': print("That's right! The frame does not have a right angle corner.") else: print("Actually, the frame does not have a right angle corner.") print("Does the picture frame have a right angle corner?") IPython.display.display(answer9) answer9.observe(check9, 'value') ``` ## What did we learn? Lets summarize what we have learned in this notebook: * The Pythagorean theorem states: a² + b² = c² * This theorem has been proven multiple ways * This theorem can be used for multiple purposes * Find the length of the hypotenuse * Find the length of a side * Confirm if there's a right angle * Lots of situations in life need the Pythagorean theorem This math concept will be used for many more years in school. Make sure to do lots of practice, even beyond this notebook so that you understand the Pythagorean theorem well. [![Callysto.ca License](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-bottom.jpg?raw=true)](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import umap
%matplotlib inline
sns.set(style='white', rc={'figure.figsize':(12,8)})
import requests
import zipfile
import imageio
```

scikit-learn $\ge$ 0.19 is necessary for the efficient implementation of t-SNE.

```
import sklearn.manifold
sklearn.__version__
```

### Pull the data from the internet and write it to a file

Don't bother running this if you've already downloaded the dataset.

```
#%%time
#getData = True # Change this to false the first time you run the notebook
#if(getData):
#    results = requests.get('https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing')
#    model = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
#    model.save_word2vec_format('GoogleNews-vectors-negative300.txt', binary=False)
```

### Read in from file and transform into a vector space

```
%%time
from gensim.models.keyedvectors import KeyedVectors
model = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
data = model.syn0
print(data.shape)

model = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
model.save_word2vec_format('GoogleNews-vectors-negative300.txt', binary=False)
```

### Now we have our data in a list of vectors. Let's extract the object id's from the files and cast to data frame (in case we want to explore things further)

### Now let's use UMAP to embed these points into a two dimensional space.

```
num_points = 100000
selected_index = np.random.choice(data.shape[0], size=num_points, replace=False)
data = data[selected_index, :]
words = np.array(model.index2word)[selected_index]

fit = umap.UMAP(n_neighbors=15, random_state=42, metric='cosine')
%time u = fit.fit_transform(data)

embedding = pd.DataFrame({'word': words, 'x': u[:,0], 'y': u[:,1]})
embedding.to_csv("~/python/UMAP/umap_paper_notebooks/embedding_word_100_umap.csv")

plt.scatter(u[:,0], u[:,1], s=0.1);
```

Subsampling 100,000 data points from our file gives a run time of 2 min 47 sec and results in lovely topological structure.

### T-SNE

It is largely accepted that cosine distance is a good measure to use on text. Unfortunately, t-SNE cannot be efficiently run with cosine distance. Fortunately, cosine distance can be approximated via L2-normalized Euclidean distance (a small numerical check of this appears at the end of this notebook).

```
import sklearn.preprocessing  # needed for the normalize call below
sklearn.preprocessing.normalize(data, norm='l2', copy=False);

fit_tsne = sklearn.manifold.TSNE(perplexity=60)
%time u_tsne = fit_tsne.fit_transform(data)

embedding = pd.DataFrame({'word': words, 'x': u_tsne[:,0], 'y': u_tsne[:,1]})
embedding.to_csv("~/python/UMAP/umap_paper_notebooks/embedding_word_100_tsne.csv")

plt.scatter(u_tsne[:,0], u_tsne[:,1], s=0.1);
```

t-SNE reveals the very small clumps contained within the data but fails to highlight the relationship between these small clumps, which the UMAP algorithm revealed earlier. Additionally, t-SNE took 2 hours and 30 minutes to complete, while UMAP finished the task in only 2 minutes and 47 seconds.

### PCA

The old standby of PCA, which is blindingly fast to compute and often used as an initialization for many of the more complex algorithms. This isn't really a competitor but instead should be thought of as a strawman. Given that other algorithms initialize with these values, one would hope that they can do better.

```
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
%time u_pca = pca.fit_transform(data)
plt.scatter(u_pca[:,0], u_pca[:,1], s=1);
```

## MDS

For old times' sake we attempt to embed with multi-dimensional scaling. Probably not worth running for the 100,000 use case. Might try an overnight run though. Nope, it killed my kernel on an 8 GB laptop on an overnight run.

```
fit_mds = sklearn.manifold.MDS()
%time u_mds = fit_mds.fit_transform(data)
plt.scatter(u_mds[:,0], u_mds[:,1], s=10);
```

MDS and PCA (above) should do a better job preserving global structure at the expense of sacrificing local structure. This can be seen in the loss of loop structure in our data and the dispersal of points across our space.
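As a footnote to the t-SNE section above, the claim that cosine distance can be approximated by Euclidean distance on L2-normalized vectors is easy to check numerically. The snippet below is our own small illustration (not part of the original timing experiments): for unit-length vectors, squared Euclidean distance equals 2·(1 − cosine similarity), so neighbour orderings under the two measures agree.

```
import numpy as np

# For L2-normalised vectors x and y:
#   ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x·y = 2 * (1 - cos(x, y))
rng = np.random.RandomState(42)
x, y = rng.randn(300), rng.randn(300)
x /= np.linalg.norm(x)
y /= np.linalg.norm(y)

cosine_similarity = np.dot(x, y)
squared_euclidean = np.sum((x - y) ** 2)

print(np.allclose(squared_euclidean, 2 * (1 - cosine_similarity)))  # True
```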
# K-Means Objective function: ![](https://wikimedia.org/api/rest_v1/media/math/render/svg/8dc15ec63e0676fc07e790f61efd89484a6b7922) that is, find k sets that minimize the within-cluster sum of squares (i.e. _inertia_). Problems with K-Means: - Inertia makes the assumption that clusters are convex and isotropic, which is not always the case. It responds poorly to elongated clusters, or manifolds with irregular shapes. - Inertia is not a normalized metric: we just know that lower values are better and zero is optimal. But in very high-dimensional spaces, Euclidean distances tend to become inflated (this is an instance of the so-called “curse of dimensionality”). Running a dimensionality reduction algorithm such as PCA prior to k-means clustering can alleviate this problem and speed up the computations. [Demonstration of K-Means assumptions](http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_assumptions.html#sphx-glr-auto-examples-cluster-plot-kmeans-assumptions-py) Index: [Naive implementation](#naive-implementation) ## Naive implementation ``` import numpy as np import matplotlib.pyplot as plt from sklearn.datasets.samples_generator import make_blobs (X, y) = make_blobs(n_samples=200, n_features=2, centers=4, cluster_std=2, random_state=10) plt.scatter(X[:, 0], X[:, 1], marker='o', c=y) plt.show() from collections import defaultdict def predict(X, k): centroids_idx = np.random.choice(np.arange(X.shape[0]), k, replace=False) centroids = X[centroids_idx] old_assignment = None new_assignment = {} max_iterations = 50 it = 0 while old_assignment != new_assignment and it < max_iterations: old_assignment = new_assignment new_assignment = defaultdict(lambda: []) # expectation step for i, x in enumerate(X): distances = np.linalg.norm(centroids - x, axis=1) nearest_centroid = np.argmin(distances) new_assignment[nearest_centroid].append(i) # maximization step for c in range(k): cluster_indices = new_assignment[c] # print(cluster_indices) # print(X[cluster_indices]) mean = np.mean(X[cluster_indices], axis=0) centroids[c] = mean it += 1 print("Done! #iterations: ", it) y_pred = [(index, centroid_id) for centroid_id, cluster_indices in new_assignment.items() for index in cluster_indices] y_pred = list(map(lambda e: e[1], sorted(y_pred, key=lambda e: e[0]))) return y_pred, centroids K = 4 y_pred, centroids = predict(X, K) plt.scatter(X[:, 0], X[:, 1], marker='o', c=y_pred) plt.scatter(centroids[:, 0], centroids[:, 1], marker='x', c='g') # plot misclassified points - makes sense only if k_true = k_fit from_pred_class_to_dataset_class = {} for c in range(K): centroid = centroids[c] nearest_point_idx = np.argmin(np.linalg.norm(X - centroid, axis=1)) nearest_class = y[nearest_point_idx] from_pred_class_to_dataset_class[c] = nearest_class y_pred = np.array([from_pred_class_to_dataset_class[c] for c in y_pred]) misclassified_points = X[y != y_pred] plt.scatter(misclassified_points[:, 0], misclassified_points[:, 1], c='r') # Draw Voronoi diagrams points = np.array(centroids) from scipy.spatial import Voronoi, voronoi_plot_2d vor = Voronoi(points) import matplotlib.pyplot as plt voronoi_plot_2d(vor) plt.scatter(X[:, 0], X[:, 1], marker='o', c=y) # plt.show() ``` ### evaluation of performance (e.g. purity, completeness, mutual information) ## Using scikit-learn
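The notebook ends with an empty "Using scikit-learn" heading. As a hedged sketch of what it might contain (our own addition; the parameter choices are illustrative, not taken from the original), the same blobs can be clustered with scikit-learn's `KMeans`, assuming the cells above have been run so that `X` and `plt` exist:

```
from sklearn.cluster import KMeans

# Cluster the blob data generated above with scikit-learn's KMeans.
# n_clusters=4 matches K in the naive implementation; the other settings
# are ordinary defaults chosen for illustration.
kmeans = KMeans(n_clusters=4, n_init=10, random_state=10)
y_sklearn = kmeans.fit_predict(X)

plt.scatter(X[:, 0], X[:, 1], marker='o', c=y_sklearn)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], marker='x', c='g')
plt.show()

# Inertia is the within-cluster sum of squares minimized by the objective above.
print("Inertia:", kmeans.inertia_)
```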
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
```

## Neural network

Input layer -> hidden layer -> output layer

784 (inputs, number of features) -> 256 (neurons in the first hidden layer) -> 256 (neurons in the second hidden layer) -> 10 (outputs, the digit classes 0-9)

```
# Dropout to prevent overfitting
keep_prob = tf.placeholder(tf.float32)

W1 = tf.Variable(tf.random_normal([784, 256], stddev=0.01))
L1 = tf.nn.relu(tf.matmul(X, W1))
L1 = tf.nn.dropout(L1, keep_prob)
# Batch Normalization: helps prevent overfitting and speeds up training
# L1 = tf.layers.batch_normalization(L1, training=True)

W2 = tf.Variable(tf.random_normal([256, 256], stddev=0.01))
L2 = tf.nn.relu(tf.matmul(L1, W2))
L2 = tf.nn.dropout(L2, keep_prob)
# L2 = tf.layers.batch_normalization(L2, training=True)

W3 = tf.Variable(tf.random_normal([256, 10], stddev=0.01))
model = tf.matmul(L2, W3, name='model')
# Weights are initialized with random values drawn from a normal distribution with standard deviation 0.01

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

batch_size = 100
total_batch = int(mnist.train.num_examples / batch_size)

for epoch in range(30):
    total_cost = 0

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        _, cost_val = sess.run([optimizer, cost], feed_dict={X: batch_xs, Y: batch_ys, keep_prob: 0.8})
        total_cost += cost_val

    print('Epoch:', '%04d' % (epoch + 1), 'Avg. cost =', '{:.3f}'.format(total_cost / total_batch))

print('Optimization complete!')

is_correct = tf.equal(tf.argmax(model, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))

labels = sess.run(model, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1})

fig = plt.figure()
for i in range(10):
    # Make a 2x5 grid of subplots and draw the (i + 1)-th digit image.
    subplot = fig.add_subplot(2, 5, i + 1)
    # Hide the x and y ticks so the image displays cleanly.
    subplot.set_xticks([])
    subplot.set_yticks([])
    # Show the predicted digit above each image.
    # np.argmax does the same job as tf.argmax.
    subplot.set_title('%d' % np.argmax(labels[i]))
    subplot.imshow(mnist.test.images[i].reshape((28, 28)), cmap=plt.cm.gray_r)

plt.show()
```
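As an optional follow-up (our own addition, not part of the original notebook), a confusion matrix shows which digits the network mixes up. The sketch below assumes the cells above have been run, so `labels` and `mnist` are available, and uses scikit-learn's `confusion_matrix`:

```
import numpy as np
from sklearn.metrics import confusion_matrix

# Compare predicted digits (argmax over the logits) against the true labels.
predicted = np.argmax(labels, axis=1)
actual = np.argmax(mnist.test.labels, axis=1)

cm = confusion_matrix(actual, predicted)
print(cm)  # rows: true digit, columns: predicted digit
```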
# Objective: This project entails building a Book Recommender System for users based on user-based and item-based collaborative filtering approaches

# About Book Crossing Dataset: This dataset has been compiled by Cai-Nicolas Ziegler in 2004, and it comprises three tables for users, books and ratings. Explicit ratings are expressed on a scale from 1-10 (higher values denoting higher appreciation) and implicit ratings are expressed by 0

# Dataset: http://www2.informatik.uni-freiburg.de/~cziegler/BX/

# Load Libraries and Data

```
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

#Loading libraries
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.metrics as metrics
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import pairwise_distances
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import re
import seaborn as sns

#Loading data
books = pd.read_csv("BX-Books.csv", sep=";", error_bad_lines=False, encoding="latin-1")
books.columns = ['ISBN', 'bookTitle', 'bookAuthor', 'yearOfPublication', 'publisher', 'imageUrlS', 'imageUrlM', 'imageUrlL']
users = pd.read_csv('BX-Users.csv', sep=';', error_bad_lines=False, encoding="latin-1")
users.columns = ['userID', 'Location', 'Age']
ratings = pd.read_csv('BX-Book-Ratings.csv', sep=';', error_bad_lines=False, encoding="latin-1")
ratings.columns = ['userID', 'ISBN', 'bookRating']
```

# 2. Check no. of records and features given in each dataset, clean the data

```
print(books.shape)
print(users.shape)
print(ratings.shape)

books.head()

books.drop(['imageUrlS', 'imageUrlM', 'imageUrlL'], axis=1, inplace=True)
books.head()
```

# 3. Check unique values of yearOfPublication

```
books.yearOfPublication.unique()
```

As can be seen above, there are some incorrect entries in this field. It looks like the publisher names 'DK Publishing Inc' and 'Gallimard' have been incorrectly loaded as yearOfPublication due to some errors in the csv file. Also, some of the entries are strings, and the same years have been entered as numbers in some places.

# 4. Check the rows having 'DK Publishing Inc' as yearOfPublication

```
books.loc[books.yearOfPublication == 'DK Publishing Inc', :]

books = books[(books.yearOfPublication != 'DK Publishing Inc') & (books.yearOfPublication != 'Gallimard')]
```

# 5. Change the datatype of yearOfPublication to 'int' and Drop NaNs in 'publisher' column

```
books.yearOfPublication = books.yearOfPublication.astype('int32')
books.dtypes

#Publisher
#drop NaNs in publisher column
books = books.dropna(subset=['publisher'])
books.publisher.isnull().sum()
```

# 6. Explore Users dataset

a. Get all unique values in ascending order for column Age

b. Values below 5 and above 90 do not make much sense for our book rating case... hence replace these by NaNs

c. Replace null values in column Age with mean

d. Change the datatype of Age to int

```
#Users
users.shape
users.head()

#Age
print(sorted(users.Age.unique()))
```

Age column has some invalid entries like nan, 0 and very high values like 100 and above

```
# Values below 5 and above 90 do not make much sense for our book rating case... hence replacing these by NaNs
users.loc[(users.Age > 90) | (users.Age < 5), 'Age'] = np.nan

#Replace all null values with mean
#replacing NaNs with mean
users.Age = users.Age.fillna(users.Age.mean())

#Change the datatype into int
#setting the data type as int
users.Age = users.Age.astype(np.int32)
print(sorted(users.Age.unique()))
```

# 7. Explore Ratings dataset

a. Check the shape

b. Ratings dataset should have books only which exist in our books dataset. Drop the remaining rows

c. Ratings dataset should have ratings from users which exist in users dataset. Drop the remaining rows

d. Consider only ratings from 1-10 and leave 0s in column bookRating

e. Find out which rating has been given the highest number of times

```
#check the shape
ratings.shape

# Ratings dataset would have n_users*n_books entries if every user rated every book
n_users = users.shape[0]
n_books = books.shape[0]
print(n_users * n_books)

ratings.head(5)
```

Ratings dataset should have books only which exist in our books dataset

```
ratings_new = ratings[ratings.ISBN.isin(books.ISBN)]
ratings.shape
ratings_new.shape
```

It can be seen that many rows having a book ISBN not part of the books dataset got dropped off

Ratings dataset should have ratings from users which exist in users dataset. Consider only ratings from 1-10 and leave 0s.

```
ratings.bookRating.unique()

#Hence segregating implicit and explicit ratings datasets
ratings_explicit = ratings_new[ratings_new.bookRating != 0]
ratings_implicit = ratings_new[ratings_new.bookRating == 0]

#checking shapes
print(ratings_new.shape)
print(ratings_explicit.shape)
print(ratings_implicit.shape)

#plotting count of bookRating
sns.countplot(data=ratings_explicit, x='bookRating')
plt.show()
#It can be seen that higher ratings are more common amongst users and rating 8 has been given the highest number of times
```

# 8. Collaborative Filtering Based Recommendation Systems

# a. For more accurate results only consider users who have rated at least 100 books

```
counts1 = ratings_explicit['userID'].value_counts()
# print(counts1)
ratings_explicit = ratings_explicit[ratings_explicit['userID'].isin(counts1[counts1 >= 100].index)]
ratings_explicit.head()
ratings_explicit.shape
```

# b. Generate matrix table from explicit ratings table

```
ratings_matrix = ratings_explicit.pivot(index='userID', columns='ISBN', values='bookRating').fillna(0)
userID = ratings_matrix.index
ISBN = ratings_matrix.columns
print(ratings_matrix.shape)
ratings_matrix.head()
```

Since NaNs cannot be handled by the training algorithms, we replace them by 0, which indicates the absence of a rating

# c. Generate the predicted ratings using SVD with no. of singular values set to 50

```
from scipy.sparse.linalg import svds
U, sigma, Vt = svds(ratings_matrix, k=50)
sigma = np.diag(sigma)

all_user_predicted_ratings = np.dot(np.dot(U, sigma), Vt)
preds_df = pd.DataFrame(all_user_predicted_ratings, columns=ratings_matrix.columns)
preds_df.head()
```

# 9. Take a particular user_id

a. Let's find the recommendations for user with id 2110

b. Get the predicted ratings for userID 2110 and sort them in descending order

c. Create a dataframe with name user_data containing userID 2110 explicitly interacted books

d. Combine the user_data and corresponding book data (book_data) in a single dataframe with name user_full_info

```
user_id = 2
userID = ratings_matrix.iloc[user_id-1, :].name
userID

preds_df.shape

sorted_user_predictions = preds_df.iloc[user_id].sort_values(ascending=False)
len(sorted_user_predictions)

# Get all user interacted books
user_data = ratings_explicit[ratings_explicit.userID == (userID)]
user_data.head()
user_data.shape

book_data = books[books.ISBN.isin(user_data.ISBN)]
book_data.shape
book_data.head()

user_full_info = user_data.merge(book_data)
user_full_info.head()

print('User {0} has already rated {1} books.'.format(userID, user_full_info.shape[0]))

recommendations = (books[~books['ISBN'].isin(user_full_info['ISBN'])].
                   merge(pd.DataFrame(sorted_user_predictions).reset_index(), how='left',
                         left_on='ISBN', right_on='ISBN')).rename(columns={user_id: 'Predictions'})
recommendations.shape
recommendations.head()
```

# 10. Get top 10 recommendations for the given userID

```
recommendations.sort_values('Predictions', ascending=False).iloc[:10, :]
```
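One step the notebook leaves out is checking how well the SVD reconstruction fits the ratings it was built from. As a hedged sketch (our own addition; it assumes the cells above have been run so `ratings_matrix` and `preds_df` exist), the RMSE over the explicitly rated entries can be computed like this:

```
import numpy as np

# RMSE of the SVD reconstruction, measured only on cells of the user-item
# matrix that hold an explicit (non-zero) rating.
actual = ratings_matrix.values
predicted = preds_df.values
rated = actual != 0  # zeros mean "no rating", so exclude them

rmse = np.sqrt(np.mean((actual[rated] - predicted[rated]) ** 2))
print("RMSE on rated entries:", rmse)
```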
``` # Whereas the other notebook is to create a template from the JHU data and start making # API calls from scratch, if that notebook is interrupted, this one will pick up where # that one left off. import pandas as pd import numpy as np import requests import json from datetime import datetime as dt # reading CSV files to create dataframes # df_confirmed = pd.read_csv('./csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv') df_tMax = pd.read_csv('./csv/tMax_US.csv') df_tMin = pd.read_csv('./csv/tMin_US.csv') df_humidity = pd.read_csv('./csv/humidity_US.csv') df_uvIndex = pd.read_csv('./csv/uv_US.csv') df_cloud = pd.read_csv('./csv/cloud_US.csv') df_precipprob = pd.read_csv('./csv/precip_US.csv') df_dewpoint = pd.read_csv('./csv/dew_US.csv') df_pressure = pd.read_csv('./csv/pressure_US.csv') df_windspeed = pd.read_csv('./csv/wind_US.csv') df_ozone = pd.read_csv('./csv/ozone_US.csv') df_sunrise = pd.read_csv('./csv/sunrise_US.csv') df_sunset = pd.read_csv('./csv/sunset_US.csv') df_tMax.head() # Resuming the API calls # Code to increase number of retries on connection errors, # and also to give it some time. # Found on https://stackoverflow.com/questions/15431044/can-i-set-max-retries-for-requests-request # And https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/ from urllib3.util.retry import Retry from requests.adapters import HTTPAdapter s = requests.Session() retries = Retry(total=30, backoff_factor=0.1, status_forcelist=[ 429, 500, 502, 503, 504 ], method_whitelist=["HEAD", "GET", "OPTIONS"]) adapter = HTTPAdapter(max_retries=retries) http = requests.Session() http.mount("https://", adapter) http.mount("http://", adapter) # pull data from darksky weather API # Columns to be skipped when iterating through the DataFrame do_not_include = ['UID', 'iso2', 'iso3', 'code3', 'FIPS', 'Admin2', 'Province_State', 'Country_Region', 'Lat', 'Long_', 'Combined_Key'] #Darksky API key API_KEY = '723a6f9dbda64ae1e0b9fdde14ba752e' # counter counter = 0 # Dummy value in case of errors dummy = -1000 # variable for determining how many API calls between writing data to CSV write_var = 1000 # Start iterating through the date columns for x in df_tMax.columns.values: # Skip the columns that are not dates if (x not in do_not_include): # Create Unix time stamp out of the date column t = pd.to_datetime(df_tMax[x].name) t = int(t.value / 10**9) t = str(t) # Start iterating through the rows (locations) for y in range(df_tMax['1/22/20'].values.size): # Only do API call if the cell value is 0 if str(df_tMax.iloc[y][x]) == '0': print('Cell is 0') # latitude and longitude coordinates of the row to be passed to the API latitude = str(df_tMax.iloc[y][8]) longitude = str(df_tMax.iloc[y][9]) # Building the URL for the API get url = 'https://api.darksky.net/forecast/' + API_KEY + '/' + latitude + "," + longitude + ',' + t url = url + '?exclude=currently,flags&units=si' # Getting the API call # using the retry error handling established above response = http.get(url) # Putting the API response into the JSON thing info = json.loads(response.content) # adding error handling in case something is wrong with the JSON response try: # Making a variable to more easily acccess JSON response data easy_info = info['daily']['data'][0] # Reading the JSON data tMax = easy_info['temperatureHigh'] tMin = easy_info['temperatureLow'] hum = easy_info['humidity'] * 100 uvee = easy_info['uvIndex'] clouds = easy_info['cloudCover'] * 100 precip = easy_info['precipProbability'] * 100 dew = 
easy_info['dewPoint'] pressure = easy_info['pressure'] wind = easy_info['windSpeed'] ozone = easy_info['ozone'] sunrise = easy_info['sunriseTime'] sunset = easy_info['sunsetTime'] except: # Creating dummy values in case of error print('Error encountered') tMax = dummy tMin = dummy hum = dummy uvee = dummy clouds = dummy precip = dummy dew = dummy pressure = dummy wind = dummy ozone = dummy sunrise = dummy sunset = dummy # Recording the data into the respective dataframes df_tMax.at[y, x] = tMax df_tMin.at[y, x] = tMin df_humidity.at[y, x] = hum df_uvIndex.at[y, x] = uvee df_cloud.at[y, x] = clouds df_precipprob.at[y, x] = precip df_dewpoint.at[y, x] = dew df_pressure.at[y, x] = pressure df_windspeed.at[y, x] = wind df_ozone.at[y, x] = ozone df_sunrise.at[y,x] = sunrise df_sunset.at[y,x] = sunset counter = counter + 1 print(counter) # writing CSVs of what I've got so far, for every write_var API calls if counter % write_var == 0: print('1000 API calls') df_tMax.to_csv('./csv/tMax_US.csv', index=False) df_tMin.to_csv('./csv/tMin_US.csv', index=False) df_humidity.to_csv('./csv/humidity_US.csv', index=False) df_uvIndex.to_csv('./csv/uv_US.csv', index=False) df_cloud.to_csv('./csv/cloud_US.csv', index=False) df_precipprob.to_csv('./csv/precip_US.csv', index=False) df_dewpoint.to_csv('./csv/dew_US.csv', index=False) df_pressure.to_csv('./csv/pressure_US.csv', index=False) df_windspeed.to_csv('./csv/wind_US.csv', index=False) df_ozone.to_csv('./csv/ozone_US.csv', index=False) df_sunrise.to_csv('./csv/sunrise_US.csv', index=False) df_sunset.to_csv('./csv/sunset_US.csv', index=False) # Writing final data to csv print('Final data write') df_tMax.to_csv('./csv/tMax_US.csv', index=False) df_tMin.to_csv('./csv/tMin_US.csv', index=False) df_humidity.to_csv('./csv/humidity_US.csv', index=False) df_uvIndex.to_csv('./csv/uv_US.csv', index=False) df_cloud.to_csv('./csv/cloud_US.csv', index=False) df_precipprob.to_csv('./csv/precip_US.csv', index=False) df_dewpoint.to_csv('./csv/dew_US.csv', index=False) df_pressure.to_csv('./csv/pressure_US.csv', index=False) df_windspeed.to_csv('./csv/wind_US.csv', index=False) df_ozone.to_csv('./csv/ozone_US.csv', index=False) df_sunrise.to_csv('./csv/sunrise_US.csv', index=False) df_sunset.to_csv('./csv/sunset_US.csv', index=False) # Writing data to csv df_tMax.to_csv('./csv/tMax_US.csv', index=False) df_tMin.to_csv('./csv/tMin_US.csv', index=False) df_humidity.to_csv('./csv/humidity_US.csv', index=False) df_uvIndex.to_csv('./csv/uv_US.csv', index=False) df_cloud.to_csv('./csv/cloud_US.csv', index=False) df_precipprob.to_csv('./csv/precip_US.csv', index=False) df_dewpoint.to_csv('./csv/dew_US.csv', index=False) df_pressure.to_csv('./csv/pressure_US.csv', index=False) df_windspeed.to_csv('./csv/wind_US.csv', index=False) df_ozone.to_csv('./csv/ozone_US.csv', index=False) df_sunrise.to_csv('./csv/sunrise_US.csv', index=False) df_sunset.to_csv('./csv/sunset_US.csv', index=False) # test cell # Resuming the API calls # Code to increase number of retries on connection errors, # and also to give it some time. 
# Found on https://stackoverflow.com/questions/15431044/can-i-set-max-retries-for-requests-request # And https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/ from urllib3.util.retry import Retry from requests.adapters import HTTPAdapter s = requests.Session() retries = Retry(total=30, backoff_factor=0.1, status_forcelist=[ 429, 500, 502, 503, 504 ], method_whitelist=["HEAD", "GET", "OPTIONS"]) adapter = HTTPAdapter(max_retries=retries) http = requests.Session() http.mount("https://", adapter) http.mount("http://", adapter) # pull data from darksky weather API # Columns to be skipped when iterating through the DataFrame do_not_include = ['UID', 'iso2', 'iso3', 'code3', 'FIPS', 'Admin2', 'Province_State', 'Country_Region', 'Lat', 'Long_', 'Combined_Key'] #Darksky API key API_KEY = '723a6f9dbda64ae1e0b9fdde14ba752e' # counter counter = 0 # Dummy value in case of errors dummy = -1000 # variable for determining how many API calls between writing data to CSV write_var = 1000 # Start iterating through the date columns for x in df_tMax.columns.values: # Skip the columns that are not dates if (x not in do_not_include): # Create Unix time stamp out of the date column t = pd.to_datetime(df_tMax[x].name) t = int(t.value / 10**9) t = str(t) # Start iterating through the rows (locations) for y in range(df_tMax['1/22/20'].values.size): # Only do API call if the cell value is 0 if str(df_tMax.iloc[y][x]) == '0': print('Cell is 0') else: print(str(df_tMax.iloc[y][x])) counter = counter + 1 print(counter) # writing CSVs of what I've got so far, for every write_var API calls if counter % write_var == 0: print('1000 API calls') ```
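For reference, the loop above turns each date column name (for example '1/22/20') into a Unix timestamp before building the Dark Sky URL. The snippet below is our own standalone illustration of that conversion, not part of the pipeline:

```
import pandas as pd

# Reproduce the column-name -> Unix timestamp conversion used in the loop above.
column_name = '1/22/20'                 # a date column in the JHU-style layout
t = pd.to_datetime(column_name)         # Timestamp('2020-01-22 00:00:00')
unix_seconds = int(t.value / 10**9)     # Timestamp.value is in nanoseconds

print(unix_seconds)                            # 1579651200
print(pd.to_datetime(unix_seconds, unit='s'))  # back to 2020-01-22 00:00:00
```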
# Lab 01 : Seq2Seq Transformers - demo Annotated Transformers : https://nlp.seas.harvard.edu/2018/04/03/attention.html Author : Alexander Rush Modified by Xavier Bresson to run with Pytorch 1.1.0 Task : Memorize/copy-paste a sequence of arbitrary numbers ``` # For Google Colaboratory import sys, os if 'google.colab' in sys.modules: from google.colab import drive drive.mount('/content/gdrive') file_name = 'seq2seq_transformers_demo.ipynb' import subprocess path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8") print(path_to_file) path_to_file = path_to_file.replace(file_name,"").replace('\n',"") os.chdir(path_to_file) !pwd import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import math, copy, time from torch.autograd import Variable import matplotlib.pyplot as plt import seaborn seaborn.set_context(context="talk") %matplotlib inline ``` # Classes Definition ``` class EncoderDecoder(nn.Module): """ A standard Encoder-Decoder architecture. Base for this and many other models. """ def __init__(self, encoder, decoder, src_embed, tgt_embed, generator): super(EncoderDecoder, self).__init__() self.encoder = encoder self.decoder = decoder self.src_embed = src_embed self.tgt_embed = tgt_embed self.generator = generator def forward(self, src, tgt, src_mask, tgt_mask): "Take in and process masked src and target sequences." return self.decode(self.encode(src, src_mask), src_mask, tgt, tgt_mask) def encode(self, src, src_mask): return self.encoder(self.src_embed(src), src_mask) def decode(self, memory, src_mask, tgt, tgt_mask): return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask) class Generator(nn.Module): "Define standard linear + softmax generation step." def __init__(self, d_model, vocab): super(Generator, self).__init__() self.proj = nn.Linear(d_model, vocab) def forward(self, x): return F.log_softmax(self.proj(x), dim=-1) def clones(module, N): "Produce N identical layers." return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) class Encoder(nn.Module): "Core encoder is a stack of N layers" def __init__(self, layer, N): super(Encoder, self).__init__() self.layers = clones(layer, N) self.norm = LayerNorm(layer.size) def forward(self, x, mask): "Pass the input (and mask) through each layer in turn." for layer in self.layers: x = layer(x, mask) return self.norm(x) class LayerNorm(nn.Module): "Construct a layernorm module (See citation for details)." def __init__(self, features, eps=1e-6): super(LayerNorm, self).__init__() self.a_2 = nn.Parameter(torch.ones(features)) self.b_2 = nn.Parameter(torch.zeros(features)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.a_2 * (x - mean) / (std + self.eps) + self.b_2 class SublayerConnection(nn.Module): """ A residual connection followed by a layer norm. Note for code simplicity the norm is first as opposed to last. """ def __init__(self, size, dropout): super(SublayerConnection, self).__init__() self.norm = LayerNorm(size) self.dropout = nn.Dropout(dropout) def forward(self, x, sublayer): "Apply residual connection to any sublayer with the same size." 
return x + self.dropout(sublayer(self.norm(x))) class EncoderLayer(nn.Module): "Encoder is made up of self-attn and feed forward (defined below)" def __init__(self, size, self_attn, feed_forward, dropout): super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.sublayer = clones(SublayerConnection(size, dropout), 2) self.size = size def forward(self, x, mask): "Follow Figure 1 (left) for connections." x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask)) return self.sublayer[1](x, self.feed_forward) class Decoder(nn.Module): "Generic N layer decoder with masking." def __init__(self, layer, N): super(Decoder, self).__init__() self.layers = clones(layer, N) self.norm = LayerNorm(layer.size) def forward(self, x, memory, src_mask, tgt_mask): for layer in self.layers: x = layer(x, memory, src_mask, tgt_mask) return self.norm(x) class DecoderLayer(nn.Module): "Decoder is made of self-attn, src-attn, and feed forward (defined below)" def __init__(self, size, self_attn, src_attn, feed_forward, dropout): super(DecoderLayer, self).__init__() self.size = size self.self_attn = self_attn self.src_attn = src_attn self.feed_forward = feed_forward self.sublayer = clones(SublayerConnection(size, dropout), 3) def forward(self, x, memory, src_mask, tgt_mask): "Follow Figure 1 (right) for connections." m = memory x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask)) x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask)) return self.sublayer[2](x, self.feed_forward) def subsequent_mask(size): "Mask out subsequent positions." attn_shape = (1, size, size) subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8') return torch.from_numpy(subsequent_mask) == 0 plt.figure(figsize=(5,5)) plt.imshow(subsequent_mask(20)[0]) None def attention(query, key, value, mask=None, dropout=None): "Compute 'Scaled Dot Product Attention'" d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) \ / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1e9) p_attn = F.softmax(scores, dim = -1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn class MultiHeadedAttention(nn.Module): def __init__(self, h, d_model, dropout=0.1): "Take in model size and number of heads." super(MultiHeadedAttention, self).__init__() assert d_model % h == 0 # We assume d_v always equals d_k self.d_k = d_model // h self.h = h self.linears = clones(nn.Linear(d_model, d_model), 4) self.attn = None self.dropout = nn.Dropout(p=dropout) def forward(self, query, key, value, mask=None): "Implements Figure 2" if mask is not None: # Same mask applied to all h heads. mask = mask.unsqueeze(1) nbatches = query.size(0) # 1) Do all the linear projections in batch from d_model => h x d_k query, key, value = \ [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in zip(self.linears, (query, key, value))] # 2) Apply attention on all the projected vectors in batch. x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout) # 3) "Concat" using a view and apply a final linear. x = x.transpose(1, 2).contiguous() \ .view(nbatches, -1, self.h * self.d_k) return self.linears[-1](x) class PositionwiseFeedForward(nn.Module): "Implements FFN equation." 
def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) class Embeddings(nn.Module): def __init__(self, d_model, vocab): super(Embeddings, self).__init__() self.lut = nn.Embedding(vocab, d_model) self.d_model = d_model def forward(self, x): return self.lut(x) * math.sqrt(self.d_model) class PositionalEncoding(nn.Module): "Implement the PE function." def __init__(self, d_model, dropout, max_len=5000): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) # Compute the positional encodings once in log space. pe = torch.zeros(max_len, d_model) position = torch.arange(0, max_len).unsqueeze(1).float() div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)) #div_term = 1 / (10000 ** (torch.arange(0., d_model, 2) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False) return self.dropout(x) plt.figure(figsize=(15, 5)) pe = PositionalEncoding(20, 0) y = pe.forward(Variable(torch.zeros(1, 100, 20))) plt.plot(np.arange(100), y[0, :, 4:8].data.numpy()) plt.legend(["dim %d"%p for p in [4,5,6,7]]) None ``` # Instantiate sequence-to-sequence Transformers ``` def make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1): "Helper: Construct a model from hyperparameters." c = copy.deepcopy attn = MultiHeadedAttention(h, d_model) ff = PositionwiseFeedForward(d_model, d_ff, dropout) position = PositionalEncoding(d_model, dropout) model = EncoderDecoder( Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N), Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N), nn.Sequential(Embeddings(d_model, src_vocab), c(position)), nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)), Generator(d_model, tgt_vocab)) # This was important from their code. # Initialize parameters with Glorot / fan_avg. for p in model.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) return model # Small example model. tmp_model = make_model(10, 10, 2) print(tmp_model) None ``` # Create a mini-batch ``` class Batch: "Object for holding a batch of data with mask during training." def __init__(self, src, trg=None, pad=0): self.src = src self.src_mask = (src != pad).unsqueeze(-2) if trg is not None: self.trg = trg[:, :-1] self.trg_y = trg[:, 1:] self.trg_mask = \ self.make_std_mask(self.trg, pad) self.ntokens = (self.trg_y != pad).data.sum() @staticmethod def make_std_mask(tgt, pad): "Create a mask to hide padding and future words." 
tgt_mask = (tgt != pad).unsqueeze(-2) tgt_mask = tgt_mask & Variable( subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data)) return tgt_mask ``` # Train ``` def run_epoch(data_iter, model, loss_compute): "Standard Training and Logging Function" start = time.time() total_tokens = 0 total_loss = 0 tokens = 0 for i, batch in enumerate(data_iter): out = model.forward(batch.src, batch.trg, batch.src_mask, batch.trg_mask) loss = loss_compute(out, batch.trg_y, batch.ntokens) total_loss += loss.detach().numpy() total_tokens += batch.ntokens.numpy() tokens += batch.ntokens.numpy() if i % 50 == 1: elapsed = time.time() - start print('Epoch Step: {} Loss: {:.4f} Tokens per Sec: {:.4f}'.format( i, loss.detach().float().numpy() / batch.ntokens.float().numpy(), float(tokens) / float(elapsed) ) ) print(' t1: {:.4f} t2: {:.4f}'.format( loss.detach().float().numpy() , batch.ntokens.float().numpy() ) ) start = time.time() tokens = 0 return total_loss / total_tokens global max_src_in_batch, max_tgt_in_batch def batch_size_fn(new, count, sofar): "Keep augmenting batch and calculate total number of tokens + padding." global max_src_in_batch, max_tgt_in_batch if count == 1: max_src_in_batch = 0 max_tgt_in_batch = 0 max_src_in_batch = max(max_src_in_batch, len(new.src)) max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2) src_elements = count * max_src_in_batch tgt_elements = count * max_tgt_in_batch return max(src_elements, tgt_elements) ``` # Optimization ``` class NoamOpt: "Optim wrapper that implements rate." def __init__(self, model_size, factor, warmup, optimizer): self.optimizer = optimizer self._step = 0 self.warmup = warmup self.factor = factor self.model_size = model_size self._rate = 0 def step(self): "Update parameters and rate" self._step += 1 rate = self.rate() for p in self.optimizer.param_groups: p['lr'] = rate self._rate = rate self.optimizer.step() def rate(self, step = None): "Implement `lrate` above" if step is None: step = self._step return self.factor * \ (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5))) def get_std_opt(model): return NoamOpt(model.src_embed[0].d_model, 2, 4000, torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9)) # Three settings of the lrate hyperparameters. opts = [NoamOpt(512, 1, 4000, None), NoamOpt(512, 1, 8000, None), NoamOpt(256, 1, 4000, None)] plt.plot(np.arange(1, 20000), [[opt.rate(i) for opt in opts] for i in range(1, 20000)]) plt.legend(["512:4000", "512:8000", "256:4000"]) None ``` # Label smoothing ``` class LabelSmoothing(nn.Module): "Implement label smoothing." def __init__(self, size, padding_idx, smoothing=0.0): super(LabelSmoothing, self).__init__() #self.criterion = nn.KLDivLoss(size_average=False) self.criterion = nn.KLDivLoss(reduction='sum') self.padding_idx = padding_idx self.confidence = 1.0 - smoothing self.smoothing = smoothing self.size = size self.true_dist = None def forward(self, x, target): assert x.size(1) == self.size true_dist = x.data.clone() true_dist.fill_(self.smoothing / (self.size - 2)) true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) true_dist[:, self.padding_idx] = 0 mask = torch.nonzero(target.data == self.padding_idx) if mask.dim() > 0: true_dist.index_fill_(0, mask.squeeze(), 0.0) self.true_dist = true_dist return self.criterion(x, Variable(true_dist, requires_grad=False)) #Example of label smoothing. 
crit = LabelSmoothing(5, 0, 0.4) predict = torch.FloatTensor([[0, 0.2, 0.7, 0.1, 0], [0, 0.2, 0.7, 0.1, 0], [0, 0.2, 0.7, 0.1, 0]]) v = crit(Variable(predict.log()), Variable(torch.LongTensor([2, 1, 0]))) # Show the target distributions expected by the system. plt.imshow(crit.true_dist) None crit = LabelSmoothing(5, 0, 0.1) def loss(x): d = x + 3 * 1 predict = torch.FloatTensor([[0, x / d, 1 / d, 1 / d, 1 / d], ]) #print(predict) return crit(Variable(predict.log()), #Variable(torch.LongTensor([1]))).data[0] Variable(torch.LongTensor([1]))).item() plt.plot(np.arange(1, 100), [loss(x) for x in range(1, 100)]) None ``` # Task : Memorize/copy-paste a sequence of arbitrary numbers ``` def data_gen(V, batch, nbatches): "Generate random data for a src-tgt copy task." for i in range(nbatches): data = torch.from_numpy(np.random.randint(1, V, size=(batch, 10))) data[:, 0] = 1 src = Variable(data, requires_grad=False) tgt = Variable(data, requires_grad=False) yield Batch(src, tgt, 0) class SimpleLossCompute: "A simple loss compute and train function." def __init__(self, generator, criterion, opt=None): self.generator = generator self.criterion = criterion self.opt = opt def __call__(self, x, y, norm): x = self.generator(x) loss = self.criterion(x.contiguous().view(-1, x.size(-1)), y.contiguous().view(-1)) / norm loss.backward() if self.opt is not None: self.opt.step() self.opt.optimizer.zero_grad() return loss.item() * norm # Train the simple copy task # Training time : 100sec V = 11 criterion = LabelSmoothing(size=V, padding_idx=0, smoothing=0.0) model = make_model(V, V, N=2) model_opt = NoamOpt(model.src_embed[0].d_model, 1, 400, torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9)) for epoch in range(10): model.train() run_epoch(data_gen(V, 30, 20), model, SimpleLossCompute(model.generator, criterion, model_opt)) model.eval() print(run_epoch(data_gen(V, 30, 5), model, SimpleLossCompute(model.generator, criterion, None))) ``` # Test network ``` def greedy_decode(model, src, src_mask, max_len, start_symbol): memory = model.encode(src, src_mask) ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data) for i in range(max_len-1): out = model.decode(memory, src_mask, Variable(ys), Variable(subsequent_mask(ys.size(1)) .type_as(src.data))) prob = model.generator(out[:, -1]) _, next_word = torch.max(prob, dim = 1) next_word = next_word.data[0] ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1) return ys model.eval() src = Variable(torch.LongTensor([[1,2,3,4,5,6,7,8,9,10]]) ) n = src.size(1) src_mask = Variable(torch.ones(1, 1, n) ) print(greedy_decode(model, src, src_mask, max_len=n, start_symbol=1)) src = Variable(torch.LongTensor([[1,3,5,7,9]]) ) n = src.size(1) src_mask = Variable(torch.ones(1, 1, n) ) print(greedy_decode(model, src, src_mask, max_len=n, start_symbol=1)) src = Variable(torch.LongTensor([[1,2,4,5,7,8]]) ) n = src.size(1) src_mask = Variable(torch.ones(1, 1, n) ) print(greedy_decode(model, src, src_mask, max_len=n, start_symbol=1)) ```
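The three decoding calls above only print raw tensors; since the training target is an exact copy of the source, a quick programmatic sanity check is to compare the decoded tokens with the input token by token. A minimal sketch follows; the helper `check_copy` is our addition (not part of the original lab) and simply wraps the same `greedy_decode` call used above.

```
def check_copy(model, seq):
    "Hypothetical helper: decode one sequence and report whether it was copied back exactly."
    src = Variable(torch.LongTensor([seq]))
    src_mask = Variable(torch.ones(1, 1, src.size(1)))
    out = greedy_decode(model, src, src_mask, max_len=src.size(1), start_symbol=1)
    copied = bool((out == src).all().item())   # element-wise comparison of decoded vs. source tokens
    print('input  :', seq)
    print('output :', out[0].tolist())
    print('copied exactly:', copied)

model.eval()
check_copy(model, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
check_copy(model, [1, 4, 3, 2, 8, 7, 6, 5])
```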
# Assignment: Ionosphere Data Problem

### Dataset Description: 

This radar data was collected by a system in Goose Bay, Labrador. This system consists of a phased array of 16 high-frequency antennas with a total transmitted power on the order of 6.4 kilowatts. See the paper for more details. The targets were free electrons in the ionosphere. "Good" radar returns are those showing evidence of some type of structure in the ionosphere. "Bad" returns are those that do not; their signals pass through the ionosphere.

Received signals were processed using an autocorrelation function whose arguments are the time of a pulse and the pulse number. There were 17 pulse numbers for the Goose Bay system. Instances in this database are described by 2 attributes per pulse number, corresponding to the complex values returned by the function resulting from the complex electromagnetic signal.

### Attribute Information:

- All 34 predictor attributes are continuous.
- The 35th attribute is either "good" or "bad" according to the definition summarized above. This is a binary classification task.

<br><br>

<table border="1" cellpadding="6">
  <tbody>
    <tr>
      <td bgcolor="#DDEEFF"><p class="normal"><b>Data Set Characteristics:&nbsp;&nbsp;</b></p></td>
      <td><p class="normal">Multivariate</p></td>
      <td bgcolor="#DDEEFF"><p class="normal"><b>Number of Instances:</b></p></td>
      <td><p class="normal">351</p></td>
      <td bgcolor="#DDEEFF"><p class="normal"><b>Area:</b></p></td>
      <td><p class="normal">Physical</p></td>
    </tr>
  </tbody>
</table>

<table border="1" cellpadding="6">
  <tbody>
    <tr>
      <td bgcolor="#DDEEFF"><p class="normal"><b>Attribute Characteristics:</b></p></td>
      <td><p class="normal">Integer, Real</p></td>
      <td bgcolor="#DDEEFF"><p class="normal"><b>Number of Attributes:</b></p></td>
      <td><p class="normal">34</p></td>
      <td bgcolor="#DDEEFF"><p class="normal"><b>Date Donated</b></p></td>
      <td><p class="normal">N/A</p></td>
    </tr>
  </tbody>
</table>

<table border="1" cellpadding="6">
  <tbody>
    <tr>
      <td bgcolor="#DDEEFF"><p class="normal"><b>Associated Tasks:</b></p></td>
      <td><p class="normal">Classification</p></td>
      <td bgcolor="#DDEEFF"><p class="normal"><b>Missing Values?</b></p></td>
      <td><p class="normal">N/A</p></td>
      <td bgcolor="#DDEEFF"><p class="normal"><b>Number of Web Hits:</b></p></td>
      <td><p class="normal">N/A</p></td>
    </tr>
  </tbody>
</table>

### WORKFLOW:

- Load the data.
- Check for missing values (if any exist, fill each record with the mean of its feature) and drop any useless column.
- Shuffle the data if needed.
- Standardize the input variables. **Hint**: center the data.
- Split the data into a 60/40 train/test ratio.
- Encode the labels.
- Model: 1 hidden layer with 16 units.
- Compilation step (note: it is a binary problem, so choose the loss and metrics accordingly).
- Train the model for 100 epochs.
- If the model overfits, tune it by changing the number of units, layers, or epochs, or by adding a dropout layer or a regularizer as needed.
- Prediction accuracy should be > **92%**.
- Evaluation step.
- Prediction.

# Load Data:

[Click Here to Download DataSet](https://github.com/ramsha275/ML_Datasets/blob/main/ionosphere_data.csv)

```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf

df = pd.read_csv('../datasets/ionosphere_data.csv')
df.head()
```

# Analyzing Data

```
df.info()
```

### Importing modules for encoding purposes

```
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
df.label = le.fit_transform(df.label)  # Encode the string labels as integers (alphabetically: bad -> 0, good -> 1)
# df.label = to_categorical(df.label)

df.head()

df.describe()
```

# Splitting Data

```
X = df.iloc[:, :-1].values   # Features
y = df.iloc[:, -1:].values   # Target

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, train_size=0.6)
```

# Model

```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten

X_train.shape

model = Sequential([Dense(16, input_shape=(34,), activation='relu'),
                    Dense(1, activation='sigmoid')])
```

# Compile

```
model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])

class myCallback(tf.keras.callbacks.Callback):
    '''Callback that stops training once the training accuracy exceeds 92%.'''
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('accuracy') > 0.92:
            print("\nReached 92% accuracy or more, so cancelling training!")
            self.model.stop_training = True

callback = myCallback()  # Instantiate the callback

# Pass the callback to fit() so the early-stopping check is actually applied.
history = model.fit(X_train, y_train, epochs=100, callbacks=[callback])
```

# Evaluation

```
evaluation = model.evaluate(X_test, y_test)
evaluation

loss, accuracy = evaluation
pd.DataFrame([{'Loss': loss*100, 'Accuracy': accuracy*100}])

plt.style.use('seaborn')
pd.DataFrame(history.history)[['loss', 'accuracy']].plot()
```

# Prediction

```
y_pred = model.predict(X_test)
y_pred = np.round(y_pred).astype(int)

print("0 represents = {}\n1 represents = {}".format(le.inverse_transform([0]), le.inverse_transform([1])))

from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)

from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_pred)

plt.style.use('seaborn')
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)
# Rows of the confusion matrix are the actual classes, columns are the predicted ones.
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.suptitle("0 tick represents = {}\n1 tick represents = {}".format(''.join(le.inverse_transform([0])), ''.join(le.inverse_transform([1]))))
plt.show()

print("Total no. of Good in test set = {}\nTotal no. of Bad in test set = {}".format((y_test == 1).sum(), (y_test == 0).sum()))
print("Percentage of Good in test set = {}%\nPercentage of Bad in test set = {}%".format(((y_test == 1).sum()/len(y_test))*100, ((y_test == 0).sum()/len(y_test))*100))

print("Total no. of Good in prediction = {}\nTotal no. of Bad in prediction = {}".format((y_pred == 1).sum(), (y_pred == 0).sum()))
print("Percentage of Good in prediction = {}%\nPercentage of Bad in prediction = {}%".format(((y_pred == 1).sum()/len(y_pred))*100, ((y_pred == 0).sum()/len(y_pred))*100))

plt.figure(figsize=(16, 8), dpi=500)
sns.barplot(data=pd.DataFrame([{'Good Actual': (y_test == 1).sum(), 'Good Predicted': (y_pred == 1).sum(),
                                'Bad Actual': (y_test == 0).sum(), 'Bad Predicted': (y_pred == 0).sum()}]))
plt.ylabel('Frequency')
plt.show()

plt.figure(figsize=(16, 8), dpi=500)
sns.barplot(data=pd.DataFrame([{'Good Actual %': ((y_test == 1).sum()/len(y_test))*100,
                                'Good Predict %': ((y_pred == 1).sum()/len(y_pred))*100,
                                'Bad Actual %': ((y_test == 0).sum()/len(y_test))*100,
                                'Bad Predict %': ((y_pred == 0).sum()/len(y_pred))*100}]))
plt.ylabel('Percentage')
plt.show()
```
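The workflow above calls for standardizing the input variables, but the solution trains on the raw features. The sketch below shows that missing step under the assumption that scikit-learn's `StandardScaler` is an acceptable choice for it; the scaler is fit on the training split only, and a fresh copy of the network is trained so the scaled and unscaled runs stay comparable. This block is our addition, not part of the original assignment solution.

```
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import clone_model

# Standardize using statistics estimated on the training split only.
scaler = StandardScaler()
X_train_std = scaler.fit_transform(X_train)
X_test_std = scaler.transform(X_test)

# Re-create the same architecture with fresh weights, then train it on the scaled data.
model_std = clone_model(model)
model_std.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])
history_std = model_std.fit(X_train_std, y_train, epochs=100, callbacks=[callback])
model_std.evaluate(X_test_std, y_test)
```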