Dataset preview. Column schema:

| column | dtype | values |
| --- | --- | --- |
| code | string | lengths 2.5k to 6.36M |
| kind | string | 2 classes |
| parsed_code | string | lengths 0 to 404k |
| quality_prob | float64 | 0 to 0.98 |
| learning_prob | float64 | 0.03 to 1 |
# AMMI notes - Ordinary least squares solution
> Solution to the optimization of the squared error with accompanying code.

- toc: true
- badges: true
- comments: true
- author: Gbetondji Dovonon
- categories: [notes,ammi]

## Linear Regression Exercise (Closed-Form Solution)

In statistics, linear regression is a linear approach to modelling the relationship between a scalar response and one or more explanatory variables (also known as dependent and independent variables) [Wikipedia]. The closed-form solution for the parameter $\theta$ of a linear regression model is given by

$$\theta = (X^TX)^{-1}X^TY$$

where $X$ holds your features and $Y$ is your target. Let $d$ be the number of features and $n$ the number of examples. The dimensions are as follows:

- $\theta$ is $(d,1)$
- $X$ is $(n,d)$
- $Y$ is $(n,1)$

Prediction is done using:

- $Y = X \theta$

We are trying to find the value of $\theta$ that minimizes the squared error, which means solving

$$\underset{\theta}{\operatorname{argmin}} \; \|X \theta - Y\|_2^2$$

Since the squared error is convex in $\theta$, we can take its derivative and find the value of $\theta$ that makes it zero. First let's expand $\|X \theta - Y\|_2^2$:

$$
\begin{aligned}
\|X \theta - Y\|_2^2 &= (X \theta - Y)^T(X \theta - Y) \\
&= (\theta^T X^T - Y^T)(X \theta - Y) \\
&= \theta^T X^T X \theta - Y^T X \theta - \theta^T X^T Y + Y^T Y \\
&= \theta^T X^T X \theta - (\theta^T X^T Y)^T - \theta^T X^T Y + Y^T Y \\
&= \theta^T X^T X \theta - 2 \theta^T X^T Y + Y^T Y \quad \text{because } \theta^T X^T Y \text{ is a scalar, equal to its transpose } Y^T X \theta \\
\frac{\partial \|X \theta - Y\|_2^2}{\partial \theta} &= 2 X^T X \theta - 2 X^T Y
\end{aligned}
$$

By equating the derivative to 0 we get:

$$
\begin{aligned}
2 X^T X \theta - 2 X^T Y &= 0 \\
X^T X \theta - X^T Y &= 0 \\
X^T X \theta &= X^T Y \\
\theta &= (X^T X)^{-1} X^T Y \\
\end{aligned}
$$

Here is an implementation using numpy and the wine quality dataset from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets.php).

```
import pandas as pd
import numpy as np

!wget https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv -P data
!ls data

data = pd.read_csv("data/winequality-red.csv", sep=';')
data.head(3)

cols = ["fixed acidity", "volatile acidity", "citric acid", "residual sugar", "chlorides",
        "free sulfur dioxide", "total sulfur dioxide", "density", "pH", "sulphates", "alcohol"]
target = "quality"

data = data.sample(frac=1)
X = data[cols].values
Y = data[[target]].values
X.shape, Y.shape
```

We also implement the bias parameter by adding a feature with fixed value one to every data point. With $d$ original features plus the constant feature we get:

$$ \sum_{i=1}^{d}(\theta_i \cdot x_i) + \theta_{d+1} \cdot 1 $$

$\theta_{d+1}$ will be the bias parameter.

```
## function to add a column of ones to every data point
def add_ones(X):
    return np.hstack([X, np.ones((X.shape[0], 1))])


class LinearReg:
    """ Basic linear regression implementation using numpy """

    def __init__(self, bias=False):
        """ Initialization of theta and a boolean to determine whether to use a bias or not """
        self.theta = None
        self.bias = bias

    def fit(self, X, Y):
        """ Fit function. Uses the normal equation to compute theta """
        if self.bias:
            X = add_ones(X)
        A = X.T @ X
        B = X.T @ Y
        self.theta = np.linalg.solve(A, B)
        # self.theta = np.linalg.inv(A) @ B

    def predict(self, X):
        """ Prediction function """
        if self.bias:
            X = add_ones(X)
        return X @ self.theta

    @staticmethod
    def mse(y_hat, y):
        """ Static method implementing the mean squared error """
        return np.mean((y - y_hat)**2)


model1 = LinearReg()
model1.fit(X, Y)
LinearReg.mse(model1.predict(X), Y)

model2 = LinearReg(bias=True)
model2.fit(X, Y)
LinearReg.mse(model2.predict(X), Y)
```
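As a sanity check on the normal-equation implementation, the closed-form $\theta$ can be compared against NumPy's least-squares solver, which minimizes the same objective without forming $X^TX$ explicitly. A minimal sketch, assuming `X`, `Y`, `add_ones`, and `LinearReg` from the cells above are in scope:

```
# Sanity check: compare the normal-equation theta against np.linalg.lstsq.
# Assumes X, Y, add_ones and LinearReg from the cells above are already defined.
import numpy as np

model = LinearReg(bias=True)
model.fit(X, Y)

# lstsq minimizes ||A @ theta - Y||_2 directly, without forming X^T X
A = add_ones(X)
theta_lstsq, *_ = np.linalg.lstsq(A, Y, rcond=None)

# The two estimates should agree up to numerical precision
print(np.allclose(model.theta, theta_lstsq, atol=1e-6))
```

Solving the normal equations with `np.linalg.solve` is fine here, but an `lstsq`-style (QR/SVD-based) solver is the more robust choice when $X^TX$ is close to singular.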
kind: github_jupyter, quality_prob: 0.768038, learning_prob: 0.994989
``` import wandb import nltk from nltk.stem.porter import * from torch.nn import * from torch.optim import * import numpy as np import pandas as pd import torch,torchvision import random from tqdm import * from torch.utils.data import Dataset,DataLoader stemmer = PorterStemmer() PROJECT_NAME = 'E-Mail-classification-NLP' device = 'cuda' torch.__version__ def tokenize(sentence): return nltk.word_tokenize(sentence) tokenize('$10') def stem(word): return stemmer.stem(word.lower()) stem('organic') def bag_of_words(tokenized_words,words): tokenized_words = [stem(w) for w in tokenized_words] bag = np.zeros(len(words)) for idx,w in enumerate(words): if w in tokenized_words: bag[idx] = 1.0 return bag bag_of_words(['hi'],['hi','how','hi']) data = pd.read_csv('./data.csv',encoding= 'unicode_escape') data X = data['Message_body'] y = data['Label'] words = [] data = [] labels = {} labels_r = {} idx = 0 for label in y: if label not in list(labels.keys()): idx += 1 labels[label] = 1 for X_batch,y_batch in tqdm(zip(X,y)): X_batch = tokenize(X_batch) new_X = [] for Xb in X_batch: new_X.append(stem(Xb)) words.extend(new_X) data.append([ new_X, np.eye(labels[y_batch],len(labels))[labels[y_batch]-1] ]) np.eye(labels[y_batch],len(labels))[labels[y_batch]-1] labels[y_batch]-1 len(labels) labels[y_batch] words = sorted(set(words)) np.random.shuffle(words) np.random.shuffle(data) X = [] y = [] for sentence,tag in tqdm(data): X.append(bag_of_words(sentence,words)) y.append(tag) from sklearn.model_selection import * X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.125,shuffle=False) X_train = torch.from_numpy(np.array(X_train)).to(device).float() y_train = torch.from_numpy(np.array(y_train)).to(device).float() X_test = torch.from_numpy(np.array(X_test)).to(device).float() y_test = torch.from_numpy(np.array(y_test)).to(device).float() def get_loss(model,X,y,criterion): preds = model(X) loss = criterion(preds,y) return loss.item() def get_accuracy(model,X,y): preds = model(X) correct = 0 total = 0 for pred,yb in zip(preds,y): pred = int(torch.argmax(pred)) yb = int(torch.argmax(yb)) if pred == yb: correct += 1 total += 1 acc = round(correct/total,3)*100 return acc class Model(Module): def __init__(self): super().__init__() self.iters = 10 self.activation = ReLU() self.linear1 = Linear(len(words),1024) self.linear2 = Linear(1024,1024) self.output = Linear(1024,len(labels)) def forward(self,X): preds = self.linear1(X) for _ in range(self.iters): preds = self.activation(self.linear2(preds)) preds = self.output(preds) return preds model = Model().to(device) criterion = MSELoss() optimizer = Adam(model.parameters(),lr=0.001) epochs = 100 batch_size = 32 wandb.init(project=PROJECT_NAME,name='baseline') for _ in tqdm(range(epochs)): for i in range(0,len(X_train),batch_size): X_batch = X_train[i:i+batch_size] y_batch = y_train[i:i+batch_size] model.to(device) preds = model(X_batch) loss = criterion(preds,y_batch) optimizer.zero_grad() loss.backward() optimizer.step() model.eval() torch.cuda.empty_cache() wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion)/2)}) torch.cuda.empty_cache() wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)}) torch.cuda.empty_cache() wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2}) torch.cuda.empty_cache() wandb.log({'Val Acc':get_accuracy(model,X_test,y_test)}) torch.cuda.empty_cache() model.train() wandb.finish() torch.cuda.empty_cache() torch.save(model,'model.pt') 
torch.save(model,'model.pth') torch.save(model.state_dict(),'model-sd.pt') torch.save(model.state_dict(),'model-sd.pth') torch.save(words,'words.pt') torch.save(words,'words.pth') torch.save(data,'data.pt') torch.save(data,'data.pth') torch.save(labels,'labels.pt') torch.save(labels,'labels.pth') torch.save(idx,'idx.pt') torch.save(idx,'idx.pth') torch.save(y_train,'y_train.pt') torch.save(y_test,'y_test.pth') class Model(Module): def __init__(self): super().__init__() self.iters = 25 self.activation = ReLU() self.linear1 = Linear(len(words),1024) self.linear2 = Linear(1024,1024) self.output = Linear(1024,len(labels)) def forward(self,X): preds = self.linear1(X) for _ in range(self.iters): preds = self.activation(self.linear2(preds)) preds = self.output(preds) return preds model = Model().to(device) criterion = MSELoss() optimizer = Adam(model.parameters(),lr=0.001) epochs = 100 batch_size = 32 wandb.init(project=PROJECT_NAME,name='baseline') for _ in tqdm(range(epochs)): for i in range(0,len(X_train),batch_size): X_batch = X_train[i:i+batch_size] y_batch = y_train[i:i+batch_size] model.to(device) preds = model(X_batch) loss = criterion(preds,y_batch) optimizer.zero_grad() loss.backward() optimizer.step() model.eval() torch.cuda.empty_cache() wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion)/2)}) torch.cuda.empty_cache() wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)}) torch.cuda.empty_cache() wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2}) torch.cuda.empty_cache() wandb.log({'Val Acc':get_accuracy(model,X_test,y_test)}) torch.cuda.empty_cache() model.train() wandb.finish() torch.cuda.empty_cache() ```
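One detail worth flagging in the label-encoding loop above: every class is stored as `labels[label] = 1`, so `np.eye(labels[y_batch], len(labels))[labels[y_batch]-1]` yields the same one-hot row for every example regardless of class. Below is a minimal sketch of an index-based mapping that would make the targets differ per class; it is an assumption about the intended behaviour, not the notebook's original code:

```
# Hedged sketch: map each distinct label to its own 1-based index so the
# one-hot targets actually differ between classes.
# Assumes `y` (the Label column) is in scope, as in the notebook above.
import numpy as np

labels = {}
for label in y:
    if label not in labels:
        labels[label] = len(labels) + 1   # 1-based index, as the np.eye slicing expects

def one_hot(label):
    # np.eye(k, n)[k-1] is the standard basis row of length n with a 1 at position k-1
    return np.eye(labels[label], len(labels))[labels[label] - 1]
```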
kind: github_jupyter, quality_prob: 0.760828, learning_prob: 0.288889
``` flex_subtitle = "built using jupyter-flex" flex_external_link = "https://github.com/danielfrg/jupyter-flex/blob/master/examples/illusionist/widget-gallery.ipynb" flex_title = "Illusionist Widget Gallery" flex_orientation = "rows" flex_show_source = True ``` # Numeric and Boolean ### IntSlider ``` import ipywidgets as widgets slider = widgets.IntSlider(min=1, max=5, value=3) slider slider_value = widgets.Label() slider_value slider2 = widgets.IntSlider(min=1, max=5, disabled=True) slider2 def slider_update(args): slider2.value = slider.value slider_value.value = str(slider.value) slider_update(None) slider.observe(slider_update, "value") ``` ### IntRangeSlider ``` int_range_slider = widgets.IntRangeSlider(value=[1, 2], min=0, max=5) int_range_slider int_range_slider_value = widgets.Label() int_range_slider_value int_range_slider2 = widgets.IntRangeSlider(value=[1, 2], min=0, max=5, disabled=True) int_range_slider2 def int_range_slider_update(args): int_range_slider2.value = int_range_slider.value int_range_slider_value.value = str(int_range_slider.value) int_range_slider_update(None) int_range_slider.observe(int_range_slider_update, "value") ``` ### BoundedIntText ``` bounded_int_text = widgets.BoundedIntText(value=5, min=0, max=7) bounded_int_text bounded_int_text_value = widgets.Label() bounded_int_text_value bounded_int_text2 = widgets.BoundedIntText(value=1, min=0, max=7, disabled=True) bounded_int_text2 bounded_int_text_pb = widgets.IntProgress(value=bounded_int_text.value, min=0, max=7) bounded_int_text_pb def bounded_int_text_update(args): bounded_int_text2.value = bounded_int_text.value bounded_int_text_value.value = str(bounded_int_text.value) bounded_int_text_pb.value = bounded_int_text.value bounded_int_text_update(None) bounded_int_text.observe(bounded_int_text_update, "value") bounded_int_text.value = 2 ``` ## Boolean Widgets ### ToggleButton ``` toggle_button = widgets.ToggleButton(description="Click me") toggle_button toggle_button_value = widgets.Label() toggle_button_value toogle_button_valid = widgets.Valid(description="Pressed") toogle_button_valid toggle_button2 = widgets.ToggleButton(description="Clicked", disabled=True) toggle_button2 def toggle_button_update(args): toggle_button2.value = toggle_button.value toggle_button_value.value = str(toggle_button.value) toogle_button_valid.value = bool(toggle_button.value) toggle_button_update(None) toggle_button.observe(toggle_button_update, "value") ``` ### Checkbox ``` checkbox_button = widgets.Checkbox(description="Check me") checkbox_button checkbox_button_value = widgets.Label() checkbox_button_value checkbox_button2 = widgets.Checkbox(description="Check me", disabled=True) checkbox_button2 def checkbox_button_update(args): checkbox_button2.value = checkbox_button.value checkbox_button_value.value = str(checkbox_button.value) checkbox_button_update(None) checkbox_button.observe(checkbox_button_update, "value") ``` # Selection widgets ### Dropdown ``` options = [("One", 1), ("Two", 2), ("Three", 3)] dropdown = widgets.Dropdown(options=options, value=2, description="Number:") dropdown dropdown_value = widgets.Label() dropdown_value dropdown2 = widgets.Dropdown(options=options, value=2, description="Number:", disabled=True) dropdown2 def dropdown_update(args): dropdown2.value = dropdown.value dropdown_value.value = dropdown.options[dropdown.value - 1][0] dropdown_update(None) dropdown.observe(dropdown_update, "value") ``` ### RadioButton ``` radio_button = widgets.RadioButtons(options=['pepperoni', 'pineapple', 
'anchovies']) radio_button radio_button_value = widgets.Label() radio_button_value radio_button2 = widgets.RadioButtons(options=radio_button.options, disabled=True) radio_button2 def radio_button_update(args): radio_button2.value = radio_button.value radio_button_value.value = str(radio_button.value) radio_button_update(None) radio_button.observe(radio_button_update, "value") ``` ### Select ``` select = widgets.Select(options=['Linux', 'Windows', 'OSX'], value='OSX') select select_value = widgets.Label() select_value select2 = widgets.Select(options=select.options, disabled=True) select2 select3 = widgets.RadioButtons(options=select.options, disabled=True) select3 def select_update(args): select2.value = select.value select3.value = select.value select_value.value = str(select.value) select_update(None) select.observe(select_update, "value") ``` ### SelectionSlider ``` selection_slider = widgets.SelectionSlider(options=["scrambled", "sunny side up", "poached", "over easy"], value="sunny side up") selection_slider selection_slider_value = widgets.Label() selection_slider_value selection_slider2 = widgets.SelectionSlider(options=selection_slider.options, disabled=True) selection_slider2 def selection_slider_update(args): selection_slider2.value = selection_slider.value selection_slider_value.value = str(selection_slider.value) selection_slider_update(None) selection_slider.observe(selection_slider_update, "value") ``` ## Row 2 ### SelectionRangeSlider ``` import datetime dates = [datetime.date(2015, i, 1) for i in range(1, 5)] options = [(i.strftime("%b"), i) for i in dates] selection_range_slider = widgets.SelectionRangeSlider( options=options, index=(1, 2), description="Months", ) selection_range_slider selection_range_slider_value = widgets.Label() selection_range_slider_value selection_range_slider2 = widgets.SelectionRangeSlider( options=options, index=(1, 2), description="Months", disabled=True ) selection_range_slider2 def selection_range_slider_update(args): selection_range_slider2.value = selection_range_slider.value vals = [i.strftime("%b") for i in selection_range_slider.value] selection_range_slider_value.value = str(vals) selection_range_slider_update(None) selection_range_slider.observe(selection_range_slider_update, "value") ``` ### ToggleButtons ``` toggle_buttons = widgets.ToggleButtons(options=['Slow', 'Regular', 'Fast']) toggle_buttons toggle_buttons_value = widgets.Label() toggle_buttons_value toggle_buttons2 = widgets.ToggleButtons(options=toggle_buttons.options, disabled=True) toggle_buttons2 def toggle_buttons_update(args): toggle_buttons2.value = toggle_buttons.value toggle_buttons_value.value = str(toggle_buttons.value) toggle_buttons_update(None) toggle_buttons.observe(toggle_buttons_update, "value") ``` ### SelectMultiple ``` select_multiple = widgets.SelectMultiple(options=["Apples", "Oranges", "Pears"], value=["Apples", "Pears"]) select_multiple select_multiple_value = widgets.Label() select_multiple_value select_multiple2 = widgets.SelectMultiple(options=select_multiple.options, disabled=True) select_multiple2 def select_multiple_update(args): select_multiple2.value = select_multiple.value select_multiple_value.value = str(select_multiple.value) select_multiple_update(None) select_multiple.observe(select_multiple_update, "value") ```
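Each widget section above repeats the same wiring: an interactive source widget, a disabled copy, and a `Label`, all synchronised through `observe`. A small helper could factor out that repetition; `mirror` below is a hypothetical convenience function, not part of the notebook or of ipywidgets:

```
# Hypothetical helper that wires a source widget to any number of mirrors.
# Mirrors receive the source's value; Labels receive it as a string.
import ipywidgets as widgets

def mirror(source, *targets):
    def update(change=None):
        for target in targets:
            if isinstance(target, widgets.Label):
                target.value = str(source.value)
            else:
                target.value = source.value
    update()                          # initialise once
    source.observe(update, "value")   # keep mirrors in sync afterwards
    return targets

# Usage, equivalent to the IntSlider block above:
slider = widgets.IntSlider(min=1, max=5, value=3)
slider_value = widgets.Label()
slider2 = widgets.IntSlider(min=1, max=5, disabled=True)
mirror(slider, slider_value, slider2)
```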
kind: github_jupyter, quality_prob: 0.670608, learning_prob: 0.581392
# Init ## Init ``` import sys, traceback import cv2 import os import re import numpy as np import argparse import string from plantcv import plantcv as pcv import glob import os import matplotlib.pyplot as plt import matplotlib.image as mpimg %matplotlib inline pcv.params.debug = 'plot' ``` ## Image Selection ``` images = glob.glob('../resources/Plant_leave_diseases_dataset_without_augmentation/Cherry*/*') img=images[10] img, path, filename = pcv.readimage(img) ### Summary # Analyze Color analysis_image = pcv.visualize.colorspaces(rgb_img=img) color_histogram = pcv.analyze_color(rgb_img=img, mask=None, colorspaces='all', label="default") top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img, obj=None, mask=None, label='default') ``` ## Image modification ``` device = 0 s = pcv.rgb2gray_hsv(rgb_img=img, channel='s') b = pcv.rgb2gray_lab(rgb_img=img, channel='b') s_thresh = pcv.threshold.binary(gray_img=s, threshold=85, max_value=255, object_type='light') s_thresh = pcv.fill_holes(s_thresh) s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5) s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5) # Convert RGB to LAB and extract the Blue channel b = pcv.rgb2gray_lab(rgb_img=img, channel='b') # Threshold the blue image b_thresh = pcv.threshold.binary(gray_img=b, threshold=140, max_value=255, object_type='light') b_cnt = pcv.threshold.binary(gray_img=b, threshold=140, max_value=255, object_type='light') # Fill small objects (optional) # b_fill = pcv.fill(b_thresh, 100) b_thresh = pcv.fill_holes(b_thresh) # Join the thresholded saturation and blue-yellow images bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt) masked = pcv.apply_mask(img=img, mask=b_fill, mask_color='white') ``` ## Other tests ``` # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a') masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b') # Threshold the green-magenta and blue images maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark') maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255, object_type='light') maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light') # Join the thresholded saturation and blue-yellow images (OR) ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh) ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1) # Fill small objects # Inputs: # bin_img - Binary image data # size - Minimum object area size in pixels (must be an integer), and smaller objects will be filled ab_fill = pcv.fill(bin_img=ab, size=200) # Apply mask (for VIS images, mask_color=white) masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white') color_histogram = pcv.analyze_color(rgb_img=img, mask=None, colorspaces='all', label="default") color_histogram = pcv.analyze_color(rgb_img=img, mask=bs, colorspaces='all', label="default") color_histogram = pcv.analyze_color(rgb_img=masked, mask=None, colorspaces='all', label="default") id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill) roi1, roi_hierarchy= pcv.roi.rectangle(img=masked2, x=0, y=0, h=img.shape[0], w=img.shape[1]) roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy, object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial') obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3) # Find shape 
properties, output shape image (optional) # Inputs: # img - RGB or grayscale image data # obj- Single or grouped contour object # mask - Binary image mask to use as mask for moments analysis # label - Optional label parameter, modifies the variable name of observations recorded shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask, label="default") # Shape properties relative to user boundary line (optional) # Inputs: # img - RGB or grayscale image data # obj - Single or grouped contour object # mask - Binary mask of selected contours # line_position - Position of boundary line (a value of 0 would draw a line # through the bottom of the image) # label - Optional label parameter, modifies the variable name of observations recorded boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680, label="default") # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional) # Inputs: # rgb_img - RGB image data # mask - Binary mask of selected contours # hist_plot_type - None (default), 'all', 'rgb', 'lab', or 'hsv' # This is the data to be printed to the SVG histogram file # label - Optional label parameter, modifies the variable name of observations recorded color_histogram = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='all', label="default") # Pseudocolor the grayscale image # Inputs: # gray_img - Grayscale image data # obj - Single or grouped contour object (optional), if provided the pseudocolored image gets # cropped down to the region of interest. # mask - Binary mask (optional) # background - Background color/type. Options are "image" (gray_img, default), "white", or "black". A mask # must be supplied. # cmap - Colormap # min_value - Minimum value for range of interest # max_value - Maximum value for range of interest # dpi - Dots per inch for image if printed out (optional, if dpi=None then the default is set to 100 dpi). # axes - If False then the title, x-axis, and y-axis won't be displayed (default axes=True). # colorbar - If False then the colorbar won't be displayed (default colorbar=True) pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s, mask=mask, cmap='jet') # Write shape and color data to results file # pcv.print_results(filename=args.result) top_x, bottom_x, center_v_x = pcv.y_axis_pseudolandmarks(img=img, obj=obj, mask=mask, label="default") ```
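Note that the masking cell above calls `pcv.apply_mask(img=img, mask=b_fill, mask_color='white')` while `b_fill` is only defined in the commented-out `pcv.fill` line, so it raises a `NameError` as written. A minimal sketch of one way to make that step run, assuming the commented-out small-object fill (with its `size=100` threshold) was the intent:

```
# Hedged sketch: re-enable the small-object fill so b_fill exists before it is used.
# Assumes b_thresh and img from the cells above are in scope.
b_fill = pcv.fill(bin_img=b_thresh, size=100)   # fill objects smaller than 100 px
masked = pcv.apply_mask(img=img, mask=b_fill, mask_color='white')
```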
kind: github_jupyter, quality_prob: 0.513912, learning_prob: 0.580114
# Module 1: Basics of statistics ## Statistics of nerve conduction velocities An inquisitive Duke BME student decides to measure the nerve conduction velocities of fellow studies on campus. After ten grueling hours of recording, the student accumulates velocity readings for a random sample of 50 students, stored to a .csv file. ``` # Import relevant packages import scipy.stats as stats # Comprehensive stats package import numpy as np # Mathematical operations import plotly.express as px # Plotting import pandas as pd # Data reading and processing # Import data as pandas dataframe df = pd.read_csv("../data/ncv_data.csv") # Make sure this is the correct path to the .csv file! # It is good practice to look at your data frame before doing any work df.info() ``` ## Visualizing the data Make a histogram of the raw data. What information does a histogram tell you? ``` fig = px.histogram(df,x="NCV", # Call on the NCV tag in your data frame title='Histogram of NCVs', # Give your plot a title labels={'NCV':'NCV (m/s)'}) # Change the x-axis label to include units fig.show() ``` ## Calculating basic measures Calculate the sample mean and standard deviation. ``` sample_mean = sample_std = # Get in the habit of printing your results print('Sample mean: %.2f' % sample_mean) print('Sample standard deviation: %.2f' % sample_std) ``` ## The sampling distribution Estimate the standard deviation of the sampling distribution of NCVs for Duke students. Be able to explain what the sampling distribution represents. Why is it acceptable to use the t-distribution to model the sampling distribution of the NCVs of Duke students? How many degrees of freedom are there when using the sample data to estimate the t-distribution? ``` n = df['NCV'].count() # This is just one of several useful pandas operations sampling_distribution_std = df_ncv = # Print your results print('Sampling distribution standard deviation: %.2f' % sampling_distribution_std) print('Degrees of freedom: %d' % df_ncv) ``` ## Probabilities Assume that the true population (Duke students) mean for NCV is known to be 51 m/s. Perform the calculations necessary to indicate which region of the t-distribution (i.e. the cut-off t-value) corresponds to probability of collecting a sample with a mean less than or equal to that found using the data provided. Calculate the probability with Python and compare it to value given in the t-table provided. ``` pop_mean = 51 t = print('The region less than t-statistic = %.2f' % t) # Look up how to use this function - what inputs do you need? p = stats.t.cdf() print('p = %.3f' % p) ``` What is the probability that your next random sample of 50 Duke students will have a mean greater than 51.5 m/s? ``` new_sample_mean = 51.5 t = # It's the same function as before. How will you change your inputs? p = stats.t.cdf() print('p = %.2f' % p) ``` # Working backwards Let's think about this problem in the reverse. Instead of determining the probability of finding a sample mean, let's find the mean that yields a desired probability, e.g. $P(\bar{x} \leq ?) = 0.05$. We will basically complete the following statement: "There is a 5% chance of collecting a sample mean greater than _______." First, find the unknown t-statistic in the following statement: $P(t \leq ?) = 0.95$. This value is called the critical t-value, or t-critical. ``` # Another functions from stats.t. Always look up documentation if you don't recognize a function! 
t_crit = stats.t.ppf() print('t-critical = %.2f' % t_crit) ``` Using this t-critical value, find the sample mean that completes the following statement: "There is a 5% chance of collecting a sample mean greater than _______." ``` new_sample_mean = print('There is a 5%% chance of collecting a sample mean greater than %.2f' % new_sample_mean) ```
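The cells above intentionally leave the computations blank for students to fill in. For reference, a minimal sketch of one possible completion, using the quantities already defined in the notebook (`df`, `n`, `pop_mean`); where the exercise is ambiguous about the reference mean, that choice is noted as an assumption in the comments:

```
# Hedged completion sketch for the blank cells above.
# Assumes df, n and pop_mean from the notebook are in scope.
sample_mean = df['NCV'].mean()
sample_std = df['NCV'].std()                            # ddof=1 by default: sample standard deviation

sampling_distribution_std = sample_std / np.sqrt(n)     # standard error of the mean
df_ncv = n - 1                                          # degrees of freedom for the t-distribution

# P(sample mean <= observed mean), assuming the true mean is pop_mean
t = (sample_mean - pop_mean) / sampling_distribution_std
p = stats.t.cdf(t, df_ncv)

# P(next sample mean > 51.5), again centred at pop_mean
# (an assumption; the exercise may intend the sample mean as the centre instead)
new_sample_mean = 51.5
t = (new_sample_mean - pop_mean) / sampling_distribution_std
p = 1 - stats.t.cdf(t, df_ncv)

# Working backwards: the value exceeded with only 5% probability
t_crit = stats.t.ppf(0.95, df_ncv)
new_sample_mean = pop_mean + t_crit * sampling_distribution_std
```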
kind: github_jupyter, quality_prob: 0.625209, learning_prob: 0.986363
``` import pandas as pd import numpy as np import nltk from collections import Counter from sklearn.metrics import log_loss from scipy.optimize import minimize import multiprocessing import difflib import time import gc import xgboost as xgb from sklearn.cross_validation import train_test_split from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer import lightgbm as lgb import matplotlib.pyplot as plt %matplotlib inline def get_train(): keras_q1 = np.load('../../data/transformed/keras_tokenizer/train_q1_transformed.npy') keras_q2 = np.load('../../data/transformed/keras_tokenizer/train_q2_transformed.npy') xgb_feats = pd.read_csv('../../data/features/the_1owl/owl_train.csv') abhishek_feats = pd.read_csv('../../data/features/abhishek/train_features.csv', encoding = 'ISO-8859-1').iloc[:, 2:] text_feats = pd.read_csv('../../data/features/other_features/text_features_train.csv', encoding = 'ISO-8859-1') img_feats = pd.read_csv('../../data/features/other_features/img_features_train.csv') srk_feats = pd.read_csv('../../data/features/srk/SRK_grams_features_train.csv') xgb_feats.drop(['z_len1', 'z_len2', 'z_word_len1', 'z_word_len2'], axis = 1, inplace = True) y_train = xgb_feats['is_duplicate'] xgb_feats = xgb_feats.iloc[:, 8:] X_train2 = np.concatenate([keras_q1, keras_q2, xgb_feats, abhishek_feats, text_feats, img_feats], axis = 1) #X_train2 = np.concatenate([xgb_feats, abhishek_feats, text_feats, img_feats], axis = 1) #X_train2 = np.concatenate([xgb_feats], axis = 1) for i in range(X_train2.shape[1]): if np.sum(X_train2[:, i] == y_train.values) == X_train2.shape[0]: print('LEAK FOUND') X_train2 = X_train2.astype('float32') X_train2 = pd.DataFrame(X_train2) X_train2['is_duplicate'] = y_train print('Training data shape:', X_train2.shape) return X_train2, y_train def get_test(): keras_q1 = np.load('../../data/transformed/keras_tokenizer/test_q1_transformed.npy') keras_q2 = np.load('../../data/transformed/keras_tokenizer/test_q2_transformed.npy') xgb_feats = pd.read_csv('../../data/features/the_1owl/owl_test.csv') abhishek_feats = pd.read_csv('../../data/features/abhishek/test_features.csv', encoding = 'ISO-8859-1').iloc[:, 2:] text_feats = pd.read_csv('../../data/features/other_features/text_features_test.csv', encoding = 'ISO-8859-1') img_feats = pd.read_csv('../../data/features/other_features/img_features_test.csv') srk_feats = pd.read_csv('../../data/features/srk/SRK_grams_features_test.csv') xgb_feats.drop(['z_len1', 'z_len2', 'z_word_len1', 'z_word_len2'], axis = 1, inplace = True) xgb_feats = xgb_feats.iloc[:, 5:] X_test2 = np.concatenate([keras_q1, keras_q2, xgb_feats, abhishek_feats, text_feats, img_feats], axis = 1) #X_test2 = np.concatenate([keras_q1, keras_q2, xgb_feats, abhishek_feats, text_feats], axis = 1) X_test2 = X_test2.astype('float32') X_test2 = pd.DataFrame(X_test2) print('Test data shape:', X_test2.shape) return X_test2 def predict_test(model_name): X_test = get_test() gbm = lgb.Booster(model_file='saved_models/LGBM/{}.txt'.format(model_name)) test_preds = gbm.predict(lgb.Dataset(X_test)) sub_src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/submissions/' sample_sub = pd.read_csv(sub_src + 'sample_submission.csv') sample_sub['is_duplicate'] = test_preds sample_sub.to_csv(sub_src + '{}.csv'.format(model_name), index = False) return def oversample(X_train, y_train): print('Oversampling negative y according to anokas method') pos_train = X_train[X_train['is_duplicate'] == 1] neg_train = X_train[X_train['is_duplicate'] == 0] p = 
0.165 scale = ((len(pos_train) / (len(pos_train) + len(neg_train))) / p) - 1 while scale > 1: neg_train = pd.concat([neg_train, neg_train]) scale -=1 neg_train = pd.concat([neg_train, neg_train[:int(scale * len(neg_train))]]) X_train = pd.concat([pos_train, neg_train]) y_train = (np.zeros(len(pos_train)) + 1).tolist() + np.zeros(len(neg_train)).tolist() X_train = X_train.astype('float32') X_train.drop(['is_duplicate'], axis = 1, inplace = True) return X_train, y_train def oversample2(X_train): print('Oversampling negative y according to SRK method') y_train = np.array(X_train["is_duplicate"]) X_train.drop(['is_duplicate'], axis = 1, inplace = True) X_train_dup = X_train[y_train==1] X_train_non_dup = X_train[y_train==0] X_train = np.vstack([X_train_non_dup, X_train_dup, X_train_non_dup, X_train_non_dup]) y_train = np.array([0]*X_train_non_dup.shape[0] + [1]*X_train_dup.shape[0] + [0]*X_train_non_dup.shape[0] + [0]*X_train_non_dup.shape[0]) del X_train_dup del X_train_non_dup print("Mean target rate : ",y_train.mean()) X_train = X_train.astype('float32') return X_train, y_train def kappa(preds, y): score = [] a = 0.165 / 0.37 b = (1 - 0.165) / (1 - 0.37) for pp,yy in zip(preds, y.get_label()): score.append(a * yy * np.log (pp) + b * (1 - yy) * np.log(1-pp)) score = -np.sum(score) / len(score) return 'kappa', score def get_temporal_pattern(df2): df = df2.copy() df["qmax"] = df.apply( lambda row: max(row["qid1"], row["qid2"]), axis=1 ) df = df.sort_values(by=["qmax"], ascending=True) df["dupe_rate"] = df.is_duplicate.rolling(window=500, min_periods=500).mean() df["timeline"] = np.arange(df.shape[0]) / float(df.shape[0]) return df def train_lgb(cv = False): t = time.time() params = { 'task' : 'train', 'boosting_type' : 'gbdt', 'objective' : 'binary', 'metric' : {'binary_logloss'}, 'learning_rate' : 0.05, 'feature_fraction' : 0.9, 'bagging_fraction': 0.8, 'bagging_freq': 100, 'num_leaves' : 200, 'max_depth': 4, 'min_data_in_leaf': 1, 'subsample': 0.7, 'colsample_bytree': 0.7, 'silent': 1, 'random_state': 1337, 'verbose': 1, 'nthread': 6, } X_train, _ = get_train() X_train, y_train = oversample2(X_train) if cv: lgb_train = lgb.Dataset(X_train, y_train) hist = lgb.cv(params, lgb_train, num_boost_round = 100000, nfold = 5, stratified = True, early_stopping_rounds = 350, verbose_eval = 250, seed = 1337) del X_train, y_train gc.collect() print('Time it took to train in CV manner:', time.time() - t) return hist else: X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train, stratify = y_train, test_size = 0.2, random_state = 111) del X_train, y_train gc.collect() lgb_train = lgb.Dataset(X_tr, y_tr) lgb_val = lgb.Dataset(X_val, y_val) print('Start training...') gbm = lgb.train(params, lgb_train, num_boost_round = 100000, valid_sets = lgb_val, early_stopping_rounds = 350, verbose_eval = 500) print('Start predicting...') val_pred = gbm.predict(lgb.Dataset(X_val), num_iteration=gbm.best_iteration) score = log_loss(y_val, val_pred) print('Final score:', score, '\n', 'Time it took to train and predict:', time.time() - t) del X_tr, X_val, y_tr, y_val gc.collect() return gbm def run_lgbm(model_name, train = True, test = False, cv = False): if cv: gbm_hist = train_lgb(True) return gbm_hist if train: gbm = train_lgb() gbm.save_model('saved_models/LGBM/{}.txt'.format(model_name)) if test: predict_test('{}'.format(model_name)) return gbm gbm = run_lgbm(train = True) input_folder = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/' df_train = pd.read_csv(input_folder + 'train.csv') 
X_train, y_train = get_train() X_train['qid1'] = df_train['qid1'] X_train['qid2'] = df_train['qid2'] X_traintemp = get_temporal_pattern(X_train) X_tr = X_traintemp.iloc[:360000, :] X_val = X_traintemp.iloc[:360000, :] X_tr.drop(['qid1', 'qid2', 'qmax', 'dupe_rate'], axis = 1, inplace = True) X_val.drop(['qid1', 'qid2', 'qmax', 'dupe_rate'], axis = 1, inplace = True) X_tr, y_tr = oversample2(X_tr) y_val = X_val['is_duplicate'] X_val.drop(['is_duplicate'], axis = 1, inplace = True) params = { 'task' : 'train', 'boosting_type' : 'gbdt', 'objective' : 'binary', 'metric' : {'binary_logloss'}, 'learning_rate' : 0.05, 'feature_fraction' : 0.9, 'bagging_fraction': 0.8, 'bagging_freq': 100, 'num_leaves' : 200, 'max_depth': 4, 'min_data_in_leaf': 1, 'subsample': 0.7, 'colsample_bytree': 0.7, 'silent': 1, 'random_state': 1337, 'verbose': 1, 'nthread': 6, } t = time.time() lgb_train = lgb.Dataset(X_tr, y_tr) lgb_val = lgb.Dataset(X_val, y_val) print('Start training...') gbm = lgb.train(params, lgb_train, num_boost_round = 100000, valid_sets = lgb_val, early_stopping_rounds = 350, verbose_eval = 500) print('Start predicting...') val_pred = gbm.predict(lgb.Dataset(X_val), num_iteration=gbm.best_iteration) score = log_loss(y_val, val_pred) print('Final score:', score, '\n', 'Time it took to train and predict:', time.time() - t) ```
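The `oversample` function above duplicates negative rows so the training positive rate moves from the roughly 0.37 seen in the Quora training data toward the 0.165 assumed for the test set. A short trace of its arithmetic with made-up counts shows what it actually produces:

```
# Illustrative trace of the `oversample` arithmetic with made-up counts
# (370 positives, 630 negatives, i.e. a ~0.37 positive rate as in the training data).
n_pos, n_neg = 370, 630
p = 0.165                                   # assumed test-set positive rate

scale = (n_pos / (n_pos + n_neg)) / p - 1   # ~1.242
while scale > 1:
    n_neg *= 2                              # pd.concat([neg_train, neg_train])
    scale -= 1
n_neg += int(scale * n_neg)                 # append a fractional slice of the negatives

print(n_neg)                                # 1565
print(round(n_pos / (n_pos + n_neg), 3))    # ~0.191: close to, but above, the 0.165 target
```

The heuristic lands near but slightly above the target rate; `oversample2`, which `train_lgb` actually uses, instead stacks the negatives three times, bringing the mean target rate to roughly 0.16.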
github_jupyter
``` from numpy import savez, save, load, array import os from keras.layers import Dense, Flatten, Dropout from keras.models import Sequential from keras.applications.vgg16 import VGG16 from keras.applications.vgg16 import preprocess_input from keras.preprocessing import image from keras.utils.np_utils import to_categorical from keras.preprocessing.image import ImageDataGenerator from numpy.random import seed, shuffle import h5py from numpy import asarray def make_dataset(path): dataset = [] num_categories = 38 labels = [] for i in range(38): cat_path = path+'/c_'+str(i) for img in os.listdir(cat_path): temp = image.load_img(cat_path +'/'+img, target_size=(224,224,3)) temp = image.img_to_array(temp) dataset.append(temp) labels.append(i) print('Category ' + str(i)+' out of 37 ', end='\r') print('Processing the input and saving. This may take some time.') dataset = array(dataset) dataset = preprocess_input(dataset) labels = array(labels).reshape(-1,1) labels = to_categorical(labels, 38) with h5py.File('crop_disease_dataset.h5', 'w') as h5: h5.create_dataset('dataset', data=dataset) h5.create_dataset('labels', data=labels) print('Saved Dataset as .npz') return dataset, labels train_path = 'crowdai' train_dataset, train_labels = make_dataset(train_path) with h5py.File('crop_disease_dataset.h5', 'r') as h5: train_dataset = h5.get('dataset')[:] train_labels = h5.get('labels')[:] #print(train_dataset, train_labels) train_dataset = array(train_dataset) train_labels = array(train_labels) #avoid this. this step is forward prop base_model = VGG16(weights = 'imagenet', include_top = False) train_features = base_model.predict(train_dataset, verbose=1) save('train_features', train_features) train_features = train_features.reshape((21917,-1)) #avoid forward prop by doing this train_features = load('train_features.npy') seed(28) from tensorflow import set_random_seed set_random_seed(2) c = list(zip(train_features, train_labels)) shuffle(c) train_features, train_labels = zip(*c) del c train_features = asarray(train_features) train_labels = asarray(train_labels) model = Sequential() #model.add(Flatten) model.add(Dense(1024, input_dim=7*7*512, activation='relu', kernel_initializer='glorot_normal')) model.add(Dropout(0.5)) model.add(Dense(1024, activation='sigmoid', kernel_initializer='glorot_normal')) model.add(Dropout(0.5)) model.add(Dense(38, activation='softmax', kernel_initializer='glorot_normal')) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) model.fit(train_features, train_labels, batch_size = 128, epochs = 15, verbose=1, validation_split=0.1) model.save_weights('saved-weights_crop-disease.h5') model.save('saved_model_crop_diseases.h5') base_model = VGG16(weights = 'imagenet',include_top=False, input_shape=(224,224,3)) dense_model = load_model('saved_model_crop_diseases.h5') base_model.outputs = [base_model.layers[-1].output] base_model.layers[-1].outbound_nodes = [] bridge = base_model.layers[-1].output x = Flatten()(bridge) x = dense_model(x) final_model = Model(inputs = base_model.input, outputs=x) from keras.models import load_model from keras import Model final_model.save('complete_model_crop_diseases.h5') index_map = { 0:'Apple Scab', 1:'Black Rot, Apple' , 2:'Cedar Rust, Apple', 3:'Healthy Apple', 4:'Healthy Blueberry', 5: 'Powdery Mildew, Cherry', 6:'Healthy Cherry', 7:'Grey Leaf Spot, Corn', 8: 'Common Rust of Maize', 9:'Northern Leaf Blight, Corn', 10:'Healthy Corn', 11:'Black Rot, Grape' , 12: 'Black Measles, Grape', 13:'Leaf Spot, Grape', 14: 'Healthy 
Grape', 15:'Citrus Huanglongbing', 16: 'Bacterial Spot, Peach', 17:'Healthy Peach', 18:'Bacterial Spot, Bell Pepper', 19:'Healthy Bell Pepper', 20:'Early Blight, Potato', 21:'Late Blight, Potato', 22:'Healthy Potato', 23:'Healthy Raspberry', 24:'Healthy Soybean', 25:'Powdery Mildew, Squash', 26:'Leaf Scorch, Strawberry', 27:'Healthy Strawberry', 28:'Bacterial Leaf Spot, Tomato', 29:'Early Blight, Tomato', 30:'Late Blight, Tomato', 31:'Leaf Mold, Tomato', 32:'Leaf Spot, Tomato', 33:'Two Spot Spider Mite, Tomato', 34:'Target Leaf Spot, Tomato', 35:'Yellow Leaf Curl, Tomato', 36:'Mosaic, Tomato', 37:'Healthy Tomato' } temp = image.load_img('feabce4c-9bb1-4fca-bcbf-368cacd40a68___PSU_CG 2115.JPG', target_size=(224,224,3)) temp = image.img_to_array(temp) temp = preprocess_input(temp) temp = temp.reshape(1,224,224,3) temp = final_model.predict(temp) print(index_map[np.argmax(temp)]) ```
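The cells above train a dense head on pre-extracted VGG16 bottleneck features and then graft that head back onto the convolutional base, importing `Model` and `load_model` only after their first use. Below is a minimal sketch of the same base-plus-head stitching done in one pass with the functional API. It assumes `tensorflow.keras` rather than the standalone `keras` used above; the 224×224×3 input, the two 1024-unit layers with 0.5 dropout and the 38-class softmax are read from the code, while variable names are illustrative and the trained dense-layer weights would still need to be copied in.

```
from tensorflow.keras import Model
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Dense, Dropout, Flatten, Input

# frozen VGG16 feature extractor plus the dense head, stitched into one model
inputs = Input(shape=(224, 224, 3))
base = VGG16(weights="imagenet", include_top=False, input_tensor=inputs)
base.trainable = False                        # keep the convolutional weights fixed

x = Flatten()(base.output)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation="sigmoid")(x)
x = Dropout(0.5)(x)
outputs = Dense(38, activation="softmax")(x)  # 38 crop-disease classes, as above

full_model = Model(inputs, outputs)
full_model.compile(loss="categorical_crossentropy", optimizer="sgd", metrics=["accuracy"])
# the dense-layer weights would still have to be copied over from the trained head
full_model.summary()
```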
# Sample Crash/etc Data Set - Received Dec. 7th 2015 All columns names and allowed values can be referenced in the __CrashDataDictionary.pdf__ file. # Crash Data ### Load Data ``` #Load Packages import pandas as pd import numpy as np from IPython.display import display, HTML low_memory = False local_path = "/Users/michaeldowd/" #Load the csv's clion_lookup = pd.read_csv(local_path + "Google Drive/Dowd_Local/Data/1st_Sample/c_lion_node_lookup_sample.csv") crashes = pd.read_csv(local_path + "Google Drive/Dowd_Local/Data/1st_Sample/crashes_sample.csv") vehicles = pd.read_csv(local_path + "Google Drive/Dowd_Local/Data/1st_Sample/vehicle_sample.csv") factors = pd.read_csv(local_path + "Google Drive/Dowd_Local/Data/1st_Sample/factor_sample.csv") print "done" ``` ### Crashes Column List ``` display(HTML(pd.DataFrame(list(crashes.columns)).to_html())) ``` ### Null/NaN Totals by Column See total & percent of null values by each column (where null values are present) ``` #How many nulls in each column (only show columns with more than zero nulls) crash_nulls = crashes.isnull().sum() crash_nulls = crash_nulls[crash_nulls > 0] def getPercNull(df, master_df): #Function for calculation percent of records that are null percents = [] for b in df.iterrows(): try: percents.append(b[1].Count/float(len(master_df[b[0]]))) except: percents.append("NA") df['perc_null'] = percents crash_nulls = pd.DataFrame(crash_nulls, columns = ["Count"]) getPercNull(crash_nulls, crashes) display(HTML(crash_nulls.to_html())) print "Total Records : " , len(crashes) ``` ### Determine Priority Columns #### Priority columns are those columns that may be used as an explanatory variable ``` priority_cols_crashes = ['case_yr', "road_sys", "reportable", "accd_typ", "num_of_veh", "traf_cntl", "light_cond", \ "weather", "road_char","road_surf_", "collision_","ped_loc", \ "ped_actn", "ext_of_inj","regn_cnty_", "dmv_accd_c", "err_cde", \ "highway_in", "intersect1" ] def summarize(column_list, master_df, include_sums = True): """ Cycles through the columns in the priority cols list and does a simple aggregate, total number of cases for each column, the sum of fatalities, and the sum of injuries. """ print 5*">", "START", 5 * "<" for col in column_list: print ">"*20 print col.upper() if include_sums: out = master_df[[col, 'crashid', "num_of_fat", "num_of_inj"]].groupby(col) aggout = out.agg({'crashid':{'count' : 'count' }, 'num_of_fat' : {'sum' : 'sum'}, 'num_of_inj' : {'sum' : 'sum'} }) elif include_sums == "Vehicles": out = master_df[[col, 'crashid', "num_of_fat", "num_of_inj"]].groupby(col) aggout = out.agg({'crashid':{'count' : 'count' }, 'num_of_fat' : {'sum' : lambda x: np.sum(x)} }) else: out = master_df[[col, 'crashid']].groupby(col) aggout = out.agg({'crashid':{'count' : 'count' } }) display(HTML(aggout.to_html())) print 5*">", "END", 5 * "<" return aggout ``` ### Crash Data Summary of Priority Columns Total number of indcidents, fatalities and injuries by each priority column. 
``` print 5*">", "Crash Data", 5 * "<" summarize(priority_cols_crashes, crashes) ``` ### Data Cleaning - INCOMPLETE - may be needed for other columns if we use Pandas for loading ``` #without using column value type enforcement - some mixed type colums emerged, code below corrected those in roadsys crashes.road_sys.loc[crashes[crashes.road_sys == 12].index] = '12' crashes.road_sys.loc[crashes[crashes.road_sys == 9].index] = '9' pd.unique(crashes['road_sys'].ravel()) ``` ## Vehicles Data Vehicles Data Column List ``` pd.DataFrame(vehicles.columns, columns = ["Cols"]) ``` ### Null/NaN Totals by Column ``` #How many nulls in each column (only show columns with more than zero nulls) number_of_nulls = vehicles.isnull().sum() number_of_nulls = number_of_nulls[number_of_nulls > 0] nulls_df = pd.DataFrame(number_of_nulls, columns = ["Count"]) getPercNull(nulls_df, vehicles) display(HTML(nulls_df.to_html())) print "Total Records : " , len(vehicles) ``` ### Determine Priority Columns ``` priority_cols_vehicles = [ 'case_yr', 'rgst_typ', 'body_typ', 'veh_typ','pre_accd_actn', 'age', 'sex', 'rgst_wgt' ] #Join Crashes(just injuries and Fatalities) & Vehicles crash_short = crashes[['case_num','crashid', 'num_of_fat', 'num_of_inj']] merge_Vehicles = pd.merge(vehicles, crash_short, on='crashid') grouped_by_Case_merge_vehicles = merge_Vehicles.groupby('crashid') test = merge_Vehicles[['crashid','num_of_fat','num_of_inj']].groupby('crashid').sum() # test.head() merge_Vehicles.head() ``` ### Vehicles Dataset Summary Note: There is double counting present in the sum results in tables below, as each row in the vehicles table represents a party in the accident (vehicles, ped or bike). Data is just to provide an idea of the distribution of value for different fields. Injuries/Fatalities are not mapped to specific people/vehicles. I.E. If a crash occured and we know there was a bike and a car and we know the age/sex/etc of the two people involved (driver / rider) we **do not** know which person was injured/killed. ``` summarize(priority_cols_vehicles, merge_Vehicles, include_sums=True) ``` ## Factor Dataset ``` print factors.head() len(factors) vehicles['uid'] = vehicles.crashid.map(str) + "_" + vehicles.veh_seq_num.map(str) factors['uid'] = factors.crashid.map(str) + "_" + factors.veh_seq_num.map(str) vehicles.crashid factor_out = summarize(["aprnt_fctr"], factors, include_sums = False) factor_out.sort('') print factor_out.columns.get_level_values(1) test = factor_out.iloc[:, factor_out.columns.get_level_values(1)=='count'] pd.DataFrame(test['crashid']['count']).sort_values('count') ```
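The null bookkeeping above builds a count of missing values per column plus a percent-null figure via a helper loop. The same summary can be expressed directly with `isnull().sum()` and `isnull().mean()`; a small self-contained Python 3 sketch on a made-up frame (the column names here are only stand-ins for the crash table):

```
import numpy as np
import pandas as pd

# made-up stand-in for the crashes table; only the idiom is the point here
crashes = pd.DataFrame({
    "case_yr": [2013, 2014, np.nan, 2015],
    "weather": ["rain", None, None, "clear"],
    "num_of_inj": [0, 2, 1, np.nan],
})

counts = crashes.isnull().sum()
null_summary = pd.DataFrame({
    "Count": counts,
    "perc_null": crashes.isnull().mean(),   # fraction of rows that are null, per column
})
print(null_summary[null_summary.Count > 0])
print("Total Records :", len(crashes))
```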
[Sascha Spors](https://orcid.org/0000-0001-7225-9992), Professorship Signal Theory and Digital Signal Processing, [Institute of Communications Engineering (INT)](https://www.int.uni-rostock.de/), Faculty of Computer Science and Electrical Engineering (IEF), [University of Rostock, Germany](https://www.uni-rostock.de/en/) # Tutorial Signals and Systems (Signal- und Systemtheorie) Summer Semester 2021 (Bachelor Course #24015) - lecture: https://github.com/spatialaudio/signals-and-systems-lecture - tutorial: https://github.com/spatialaudio/signals-and-systems-exercises WIP... The project is currently under heavy development while adding new material for the summer semester 2021 Feel free to contact lecturer [frank.schultz@uni-rostock.de](https://orcid.org/0000-0002-3010-0294) ## Übung / Exercise 7 Ideal Dirac Comb Sampling and Ideal Lowpass Reconstruction for Frequency Domain Signals ``` import numpy as np import matplotlib.pyplot as plt def my_sinc(x): # we rather use definition sinc(x) = sin(x)/x, thus: return np.sinc(x/np.pi) A = 2 Ts = 1 ws = 2*np.pi/Ts def sinc_sampling_sinc_interpolation(): print('Ts=%3.2f s, ws=%3.2f rad/s, Th=%3.2f s, Th/2=%3.2f s' % (Ts, ws, Th, Th/2)) print('ws*Th/2=%5.4f' % (ws*Th/2)) plt.figure(figsize=(6, 4.5)) M = 15 nu = np.arange(-M, M+1) w = np.arange(-M*ws, (M+1)*ws, ws / 2**6) # fourier transform XFT = A*Th * my_sinc(w * Th/2) # fourier series XFS = A*Th/Ts * my_sinc((ws*nu) * Th/2) # fourier transform as sinc-interpolation from fourier series Xr = np.zeros_like(w) for nui in nu: XFSnui = A*Th/Ts * my_sinc((ws*nui) * Th/2) Xrnui = (Ts*XFSnui) * my_sinc(np.pi*(w-nui*ws)/ws) plt.plot(w, Xrnui, 'C7', lw=1) Xr += Xrnui # plot last Xrnui to get label plt.plot(w, Xrnui, 'C7', lw=1, label=r'$X_\mathrm{r}(\mathrm{j}\omega)[\nu]$') # plot fourier transform plt.plot(w, XFT, 'C0', lw=3, label=r'$X(\mathrm{j}\omega)$') # plot reconstructed fourier transform, note that we only use finite sum plt.plot(w, Xr, 'C3--', lw=2, label=r'$X_\mathrm{r}(\mathrm{j}\omega)$') # plot fourier series, here notmalized to match amplitude with fourier transform plt.stem(nu*ws, XFS*Ts, use_line_collection=True, linefmt='C1:', markerfmt='C1o', basefmt='C1:', label=r'$X[\nu \omega_s] \cdot T_s$') plt.xticks(np.arange(-7*2*np.pi, 8*2*np.pi, 2*np.pi), [r'$-14\pi$', '', r'$-10\pi$', '', r'$-6\pi$', '', r'$-2\pi$', '', r'$2\pi$', '', r'$6\pi$', '', r'$10\pi$', '', r'$14\pi$']) plt.xlim(-7*2*np.pi, +7*2*np.pi) plt.xlabel(r'$\omega$ / rad/s') plt.title('Ts=%4.3f s, ws=%4.3f rad/s, Th=%4.3f s, Th/2=%4.3f s' % (Ts, ws, Th, Th/2)) plt.legend() plt.grid(True) # chosen parameters for task 45C76AFB33 Th = Ts/2 # Th<Ts, Th/2<Ts/2 for perfect reconstruction sinc_sampling_sinc_interpolation() plt.savefig('SpectrumSampling_Th_Ts1_2_45C76AFB33.pdf') # suitable for perfect reconstruction Th = Ts*3/4 # Th<Ts, Th/2<Ts/2 for perfect reconstruction sinc_sampling_sinc_interpolation() plt.savefig('SpectrumSampling_Th_Ts3_4_45C76AFB33.pdf') # critical sampling and reconstruction # this leads to Dirac Impulse at w=0 with weight 2, i.e. 
a DC of 2, as expected Th = Ts # Th<Ts, Th/2<Ts/2 for perfect reconstruction sinc_sampling_sinc_interpolation() plt.savefig('SpectrumSampling_Th_Ts1_1_45C76AFB33.pdf') # undersampling case and thus reconstruction fail Th = 4/3*Ts # Th<Ts, Th/2<Ts/2 for perfect reconstruction sinc_sampling_sinc_interpolation() plt.savefig('SpectrumSampling_Th_Ts4_3_45C76AFB33.pdf') ``` ## Copyright This tutorial is provided as Open Educational Resource (OER), to be found at https://github.com/spatialaudio/signals-and-systems-exercises accompanying the OER lecture https://github.com/spatialaudio/signals-and-systems-lecture. Both are licensed under a) the Creative Commons Attribution 4.0 International License for text and graphics and b) the MIT License for source code. Please attribute material from the tutorial as *Frank Schultz, Continuous- and Discrete-Time Signals and Systems - A Tutorial Featuring Computational Examples, University of Rostock* with ``main file, github URL, commit number and/or version tag, year``.
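The reconstruction loop in `sinc_sampling_sinc_interpolation` above implements the interpolation formula directly: each thin curve ($X_\mathrm{r}(\mathrm{j}\omega)[\nu]$ in the legend) is one term, and their sum rebuilds the spectrum from its samples,

$$
X_\mathrm{r}(\mathrm{j}\omega) = \sum_{\nu=-M}^{M} T_s\, X[\nu\,\omega_s]\,
\mathrm{sinc}\!\left(\pi\,\frac{\omega-\nu\,\omega_s}{\omega_s}\right),
\qquad \mathrm{sinc}(x)=\frac{\sin x}{x},
$$

with $M=15$ in the code. The four parameter choices then probe $T_h = T_s/2$ and $T_h = 3T_s/4$ (reconstruction succeeds), $T_h = T_s$ (critical sampling) and $T_h = 4T_s/3$ (undersampling, reconstruction fails), matching the comments in the cells above.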
<a href="https://colab.research.google.com/github/douglascdev/fake_stock_price/blob/main/fake_stock_price.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !pip install quantumrandom import random from datetime import datetime, timedelta import numpy as np import quantumrandom import matplotlib.pyplot as plt import matplotlib.dates as mdates import concurrent.futures import pandas as pd def get_true_random_data(array_length: int = 1024): """ Returns infinite ints obtained from quantumrandom calling their API every array_length number of ints :param array_length: :return: """ with concurrent.futures.ThreadPoolExecutor() as executor: futures = [executor.submit(quantumrandom.get_data, array_length=array_length) for _ in range(10)] while True: res = futures.pop(0).result() if not futures: futures += [executor.submit(quantumrandom.get_data, array_length=array_length) for _ in range(5)] for i in res: yield i random_data_iterator = iter(get_true_random_data()) def get_true_random_normalized(n: int): """ get_truly_random_data returning values between 0 to 1 :param n: :return: """ for _, seed in zip(range(n), random_data_iterator): random.seed(seed) yield random.random() def true_random_choices(population, weights): while True: random.seed(next(random_data_iterator)) yield random.choices(population, weights, k=1).pop() def generate_weighted_random_variation(): # weights = [10000, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, # 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, # 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] weights = [10000] weights += [0.01] * 5 weights += [0.05] * 5 weights += [0.001] * 50 random_population = sorted(get_true_random_normalized(len(weights))) return next(iter(true_random_choices(random_population, weights))) def gen_positive() -> bool: """ If the random variation in price will be positive or negative. Positive has a slightly higher chance to emulate the general tendendy of going up over time """ return next(iter(true_random_choices([True, False], [1.001, 1]))) def generate_random_data(data_size: int, initial_value: int = 40): values = [initial_value] last_value = initial_value for _ in range(data_size): percent_variation = generate_weighted_random_variation() variation = percent_variation if gen_positive() else -percent_variation last_value = round(last_value + (last_value * variation), 2) if last_value < 0: last_value = 0 values.append(last_value) return values data = generate_random_data(data_size=10000, initial_value=10) dates = [datetime(1990, 9, 28, 0, 0) + timedelta(days=i) for i in range(len(data))] df = pd.DataFrame({"Date": dates, "Close": data}) df.head() df.to_csv("data.csv") y = np.array(data) x = np.array(dates) plt.rcParams["figure.figsize"] = (20, 10) plt.plot(x, y) xformatter = mdates.DateFormatter('%m/%Y') plt.gcf().axes[0].xaxis.set_major_formatter(xformatter) plt.show() ```
# MicroGrad A tiny Autograd engine ![awww](puppy.jpg) ``` import random import numpy as np import matplotlib.pyplot as plt %matplotlib inline # The tiniest Autograd engine. It's so cute! class Value: """ stores a single scalar value and its gradient """ def __init__(self, data): self.data = data self.grad = 0 self.backward = lambda: None def __add__(self, other): other = other if isinstance(other, Value) else Value(other) # attempt to wrap if given an int/float/etc out = Value(self.data + other.data) def backward(): self.grad += out.grad other.grad += out.grad self.backward() other.backward() out.backward = backward return out def __radd__(self, other): return self.__add__(other) def __mul__(self, other): other = other if isinstance(other, Value) else Value(other) # attempt to wrap if given an int/float/etc out = Value(self.data * other.data) def backward(): self.grad += other.data * out.grad other.grad += self.data * out.grad self.backward() other.backward() out.backward = backward return out def __rmul__(self, other): return self.__mul__(other) def relu(self): out = Value(0 if self.data < 0 else self.data) def backward(): self.grad += (out.data > 0) * out.grad self.backward() out.backward = backward return out def __repr__(self): return f"Value(data={self.data}, grad={self.grad})" # A neural networks "library" :D on top of it! I'm dying class Module: def zero_grad(self): for p in self.parameters(): p.grad = 0 class Neuron(Module): def __init__(self, nin, nonlin=True): self.w = [Value(random.uniform(-1,1)) for _ in range(nin)] self.b = Value(0) self.nonlin = nonlin def __call__(self, x): act = sum([wi*xi for wi,xi in zip(self.w, x)], self.b) return act.relu() if self.nonlin else act def parameters(self): return self.w + [self.b] def __repr__(self): return f"{'ReLU' if self.nonlin else 'Linear'}Neuron({len(self.w)})" class Layer(Module): def __init__(self, nin, nout, **kwargs): self.neurons = [Neuron(nin, **kwargs) for _ in range(nout)] def __call__(self, x): out = [n(x) for n in self.neurons] return out[0] if len(out) == 1 else out def parameters(self): return [p for n in self.neurons for p in n.parameters()] def __repr__(self): return f"Layer of [{', '.join(str(n) for n in self.neurons)}]" class MLP(Module): def __init__(self, nin, nouts): sz = [nin] + nouts self.layers = [Layer(sz[i], sz[i+1], nonlin=i!=len(nouts)-1) for i in range(len(nouts))] def __call__(self, x): for layer in self.layers: x = layer(x) return x def parameters(self): return [p for layer in self.layers for p in layer.parameters()] def __repr__(self): return f"MLP of [{', '.join(str(layer) for layer in self.layers)}]" np.random.seed(1337) random.seed(1337) # make up a dataset from sklearn.datasets import make_moons, make_blobs X, y = make_moons(n_samples=100, noise=0.1) y = y*2 - 1 # make y be -1 or 1 # visualize in 2D plt.figure(figsize=(5,5)) plt.scatter(X[:,0], X[:,1], c=y, s=20, cmap='jet') # initialize a model #model = MLP(2, [12, 10, 1]) # 2-layer neural network model = MLP(2, [16, 16, 1]) # 2-layer neural network print(model) print("number of parameters", len(model.parameters())) # loss function def loss(batch_size=None): # inline DataLoader :) if batch_size is None: Xb, yb = X, y else: ri = np.random.permutation(X.shape[0])[:batch_size] Xb, yb = X[ri], y[ri] inputs = [list(map(Value, xrow)) for xrow in Xb] # forward the model to get scores scores = list(map(model, inputs)) # svm "max-margin" loss losses = [(Value(yi) * scorei + 1).relu() for yi, scorei in zip(yb, scores)] data_loss = sum(losses) * (1.0 / 
len(losses)) # L2 regularization alpha = 1e-4 reg_loss = alpha * sum((p*p for p in model.parameters())) total_loss = data_loss + reg_loss # also get accuracy accuracy = [yi == (int(scorei.data < 0)*2-1) for yi, scorei in zip(yb, scores)] return total_loss, sum(accuracy) / len(accuracy) total_loss, acc = loss() print(total_loss, acc) # optimization learning_rate = 0.001 for k in range(200): # forward total_loss, acc = loss() # backward model.zero_grad() total_loss.grad = 1 total_loss.backward() # update (sgd) for p in model.parameters(): p.data -= learning_rate * p.grad if k % 1 == 0: print(f"step {k} loss {total_loss.data}, accuracy {acc*100}%") # visualize decision boundary h = 0.25 x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Xmesh = np.c_[xx.ravel(), yy.ravel()] inputs = [list(map(Value, xrow)) for xrow in Xmesh] scores = list(map(model, inputs)) Z = np.array([s.data < 0 for s in scores]) Z = Z.reshape(xx.shape) fig = plt.figure() plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8) plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral) plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) ```
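A quick sanity check of the engine defined above, relying only on the `Value` class from this notebook: for `out = a*b + b` the expected gradients are `d(out)/da = b` and `d(out)/db = a + 1`, and the eager recursive `backward` reproduces them once `out.grad` is seeded to 1, just as the training loop does for `total_loss`.

```
# relies only on the Value class defined above
a = Value(2.0)
b = Value(-3.0)
out = a * b + b

out.grad = 1      # seed the output gradient, as the training loop does for total_loss
out.backward()

print(a.grad)     # -3.0, i.e. d(ab + b)/da = b
print(b.grad)     #  3.0, i.e. d(ab + b)/db = a + 1
```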
# Slice and Dice Biomarkers Brian is wanting the top 10 named biomarkers for each cluster to start doing a literature review. There are several ways that this slice and dice can be done, so it will probably be easier to present him with a few tables. ``` import sys import os from pathlib import Path import numpy as np import pandas as pd sys.path.insert(0, '../lib') from larval_gonad.x_to_a import CHROMS_CHR # Constants REF = os.environ['REFERENCES_DIR'] OUTPUT = '../output/testis_scRNAseq_pilot' Path(OUTPUT).mkdir(exist_ok=True) NAME = '2018-02-01_slice_and_dice_biomarkers' # Create fbgn2symbol and symbol2fbgn map annot = pd.read_csv(Path(REF, 'dmel/r6-16/fb_annotation/dmel_r6-16.fb_annotation'), sep='\t', index_col=1) fbgn2symbol = annot['gene_symbol'].to_dict() symbol2fbgn = {v: k for k, v in fbgn2symbol.items()} # Create fbgn2chrom genes = [] with Path(REF, 'dmel/r6-16/gtf/dmel_r6-16.gtf').open() as fh: for row in fh: rows = row.strip().split() if len(rows) == 0: continue if rows[2] == 'gene': genes.append((rows[0], rows[9].replace('"', '').replace(';', ''))) fbgn2chrom = pd.DataFrame(genes, columns=['chrom', 'FBgn']) fbgn2chrom.set_index('FBgn', inplace=True) fbgn2chrom = fbgn2chrom[fbgn2chrom['chrom'].isin(CHROMS_CHR)] # Get biomarker datas and cleanup df = pd.read_csv(f'{OUTPUT}/biomarkers.tsv', sep='\t', index_col='gene') df.index.name = 'FBgn' df['gene'] = df.index.map(lambda x: fbgn2symbol[x]) df.set_index('gene', append=True, inplace=True) # Remove CG and CRs cg = ~df.index.get_level_values('gene').str.startswith('CG') cr = ~df.index.get_level_values('gene').str.startswith('CR') pv = df.p_val_adj < .01 df = df[cg & cr & pv] df.to_csv(f'{OUTPUT}/{NAME}_named_cluster_markers.tsv', sep='\t') # Sort by adj p-val clean = df.sort_values(by='p_val_adj').groupby('cluster').head(10).sort_values('cluster').drop(['p_val', 'pct.1', 'pct.2'], axis=1) clean['link'] = clean.index.get_level_values('FBgn').map(lambda fbgn: '=HYPERLINK("http://flybase.org/reports/{}", "FlyBase")'.format(fbgn)) clean.to_csv(f'{OUTPUT}/{NAME}_top10_adj-pval_cluster_markers.tsv', sep='\t') # Sort by logFC df['abs_avg_logFC'] = np.abs(df.avg_logFC) clean = df.sort_values(by='abs_avg_logFC', ascending=False).groupby('cluster').head(10).sort_values('cluster').drop(['p_val', 'pct.1', 'pct.2'], axis=1) clean['link'] = clean.index.get_level_values('FBgn').map(lambda fbgn: '=HYPERLINK("http://flybase.org/reports/{}", "FlyBase")'.format(fbgn)) clean.to_csv(f'{OUTPUT}/{NAME}_top10_avg-logFC_cluster_markers.tsv', sep='\t') # sort by difference pct cells expressed df['pct_diff'] = np.abs(df['pct.1'] - df['pct.2']) clean = df.sort_values(by='pct_diff', ascending=False).groupby('cluster').head(10).sort_values('cluster').drop(['p_val'], axis=1) clean['link'] = clean.index.get_level_values('FBgn').map(lambda fbgn: '=HYPERLINK("http://flybase.org/reports/{}", "FlyBase")'.format(fbgn)) clean.to_csv(f'{OUTPUT}/{NAME}_top10_pct-cells-diff_cluster_markers.tsv', sep='\t') ```
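The three ranking tables above all use the same pandas idiom: sort the whole frame by the ranking column, then take `groupby('cluster').head(n)`. A tiny self-contained illustration on made-up marker data (cluster labels, gene names and p-values below are invented for the example):

```
import pandas as pd

# made-up marker table; the idiom is: sort by the ranking column, then head(n) per group
toy = pd.DataFrame({
    "cluster": [0, 0, 0, 1, 1, 1],
    "gene": ["a", "b", "c", "d", "e", "f"],
    "p_val_adj": [0.02, 0.001, 0.009, 0.03, 0.0005, 0.02],
})

top2 = (toy.sort_values("p_val_adj")   # rank genes globally by adjusted p-value
           .groupby("cluster")
           .head(2)                    # keep the best 2 rows of each cluster
           .sort_values("cluster"))
print(top2)
```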
# Statistical Rethinking Chapter 8 > Code rewitten in Python for this chapter's practice - toc: true - badges: true - comments: true - categories: [statistical_rethinking] ``` import numpy as np import pandas as pd import pymc3 as pm import matplotlib.pyplot as plt import seaborn as sns import scipy ``` ## 8H1 8H2 ``` d = pd.read_csv( 'https://raw.githubusercontent.com/rmcelreath/rethinking/master/data/tulips.csv', sep=';') d.head() d['blooms_std'] = d['blooms'] / d['blooms'].max() d['water_cent'] = d['water'] - d['water'].mean() d['shade_cent'] = d['shade'] - d['shade'].mean() with pm.Model() as model_8_7: a = pm.Normal('a', mu=0.5, sd=0.25) bW = pm.Normal('bW', mu=0, sd=0.25) bS = pm.Normal('bS', mu=0, sd=0.25) bWS = pm.Normal('bWS', mu=0, sd=0.25) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic( 'mu', a + bW * d['water_cent'] + bS * d['shade_cent'] + bWS * d['water_cent'] * d['shade_cent']) blooms = pm.Normal('blooms', mu, sigma, observed=d.blooms_std) trace_8_7 = pm.sample(1000, tune=1000) # start = {'a':np.mean(d.blooms), 'bW':0, 'bS':0, 'bWS':0, 'sigma':np.std(d.blooms)} varnames = ['a', 'bW', 'bS', 'bWS', 'sigma'] pm.summary(trace_8_7, varnames, kind='stats').round(3) with pm.Model() as model_8H1: a = pm.Normal('a', mu=0.5, sd=0.25) bB = pm.Normal('bB', 0, 0.1, shape=d['bed'].nunique()) bW = pm.Normal('bW', mu=0, sd=0.25) bS = pm.Normal('bS', mu=0, sd=0.25) bWS = pm.Normal('bWS', mu=0, sd=0.25) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic( 'mu', a + bB[d['bed'].astype('category').cat.codes.values] + bW * d['water_cent'] + bS * d['shade_cent'] + bWS * d['water_cent'] * d['shade_cent']) blooms = pm.Normal('blooms', mu, sigma, observed=d.blooms_std) trace_8H1 = pm.sample(1000, tune=1000) varnames = ['a', 'bB', 'bW', 'bS', 'bWS', 'sigma'] pm.summary(trace_8H1, varnames, kind='stats').round(3) ``` ### Compare WAIC ``` comp_df = pm.compare({'without_bed': trace_8_7, 'with_bed': trace_8H1}) comp_df ``` - value of bB indicates weak relationship as the credible interval includes zero - dse is 4.41 and d_waic is 3.64, which means the difference between waic between these two models is not significant ## 8H3 ``` d = pd.read_csv('https://raw.githubusercontent.com/rmcelreath/rethinking/master/data/rugged.csv', sep=';') d = d.dropna(subset=['rgdppc_2000']) d['log_gdp_std'] = np.log(d['rgdppc_2000']) / np.log(d['rgdppc_2000']).mean() d['rugged_std'] = d['rugged'] / d['rugged'].max() dd = d[d['country'] != 'Seychelles'] ``` ### With Seychelles ``` with pm.Model() as model_8_5: a = pm.Normal('a', mu=1, sd=0.1, shape=d['cont_africa'].nunique()) b = pm.Normal('b', mu=0, sd=0.3, shape=d['cont_africa'].nunique()) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic( 'mu', a[d['cont_africa'].values] + b[d['cont_africa'].values] * (d.rugged_std - 0.215)) log_gdp = pm.Normal('log_gdp', mu, sigma, observed=d.log_gdp_std) trace_8_5 = pm.sample(1000, tune=1000) mean_q = pm.find_MAP() means = np.concatenate([mean_q[k].reshape(-1) for k in ['a', 'b', 'sigma']]) cov_q = np.linalg.inv(pm.find_hessian(mean_q, vars=[a, b, sigma])) stds = np.sqrt(np.diagonal(cov_q)) print('means: ', means.round(3)) print('stds: ', stds.round(3)) varnames = ['a', 'b', 'sigma'] pm.summary(trace_8_5, varnames, kind='stats').round(3) d_a = d[d['cont_africa']==1] d_na = d[d['cont_africa']==0] dd_a = dd[dd['cont_africa']==1] dd_na = dd[dd['cont_africa']==0] rugged_seq = np.linspace(-0.1, 1.1, 30) mu_a = np.apply_along_axis( lambda x: trace_8_5['a'][:, 1] + trace_8_5['b'][:, 1] * x, axis=1, arr=rugged_seq[:, 
np.newaxis]) mu_mean_a = mu_a.mean(axis=1) mu_PI_a = np.quantile(mu_a, [0.055, 0.945], axis=1) mu_na = np.apply_along_axis( lambda x: trace_8_5['a'][:, 0] + trace_8_5['b'][:, 0] * x, axis=1, arr=rugged_seq[:, np.newaxis]) mu_mean_na = mu_na.mean(axis=1) mu_PI_na = np.quantile(mu_na, [0.055, 0.945], axis=1) f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(8,3)) ax1.plot(d_a['rugged_std'], d_a['log_gdp_std'], 'C0o') ax1.plot(rugged_seq, mu_mean_a, 'C0') ax1.fill_between(rugged_seq, mu_PI_a[0], mu_PI_a[1], color='C0', alpha=0.5) ax1.set_title('African Nations') ax1.set_ylabel('log GDP year 2000', fontsize=14); ax1.set_xlabel('Terrain Ruggedness Index', fontsize=14) ax2.plot(d_na['rugged_std'], d_na['log_gdp_std'], 'ko') ax2.plot(rugged_seq, mu_mean_na, 'k') ax2.fill_between(rugged_seq, mu_PI_na[0], mu_PI_na[1], color='k', alpha=0.5) ax2.set_title('Non-African Nations') ax2.set_ylabel('log GDP year 2000', fontsize=14) ax2.set_xlabel('Terrain Ruggedness Index', fontsize=14); ``` ### Without Seychelles ``` with pm.Model() as model_8H3: a = pm.Normal('a', mu=1, sd=0.1, shape=dd['cont_africa'].nunique()) b = pm.Normal('b', mu=0, sd=0.3, shape=dd['cont_africa'].nunique()) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic( 'mu', a[dd['cont_africa'].values] + b[dd['cont_africa'].values] * (dd.rugged_std - 0.215)) log_gdp = pm.Normal('log_gdp', mu, sigma, observed=dd.log_gdp_std) trace_8H3 = pm.sample(1000, tune=1000) mean_q = pm.find_MAP() means = np.concatenate([mean_q[k].reshape(-1) for k in ['a', 'b', 'sigma']]) cov_q = np.linalg.inv(pm.find_hessian(mean_q, vars=[a, b, sigma])) stds = np.sqrt(np.diagonal(cov_q)) print('means: ', means.round(3)) print('stds: ', stds.round(3)) varnames = ['a', 'b', 'sigma'] pm.summary(trace_8H3, varnames, kind='stats').round(3) rugged_seq = np.linspace(-0.1, 1.1, 30) mu_a = np.apply_along_axis( lambda x: trace_8H3['a'][:, 1] + trace_8H3['b'][:, 1] * x, axis=1, arr=rugged_seq[:, np.newaxis]) mu_mean_a = mu_a.mean(axis=1) mu_PI_a = np.quantile(mu_a, [0.055, 0.945], axis=1) mu_na = np.apply_along_axis( lambda x: trace_8H3['a'][:, 0] + trace_8H3['b'][:, 0] * x, axis=1, arr=rugged_seq[:, np.newaxis]) mu_mean_na = mu_na.mean(axis=1) mu_PI_na = np.quantile(mu_na, [0.055, 0.945], axis=1) f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(8,3)) ax1.plot(dd_a['rugged_std'], dd_a['log_gdp_std'], 'C0o') ax1.plot(rugged_seq, mu_mean_a, 'C0') ax1.fill_between(rugged_seq, mu_PI_a[0], mu_PI_a[1], color='C0', alpha=0.5) ax1.set_title('African Nations') ax1.set_ylabel('log GDP year 2000', fontsize=14); ax1.set_xlabel('Terrain Ruggedness Index', fontsize=14) ax2.plot(dd_na['rugged_std'], dd_na['log_gdp_std'], 'ko') ax2.plot(rugged_seq, mu_mean_na, 'k') ax2.fill_between(rugged_seq, mu_PI_na[0], mu_PI_na[1], color='k', alpha=0.5) ax2.set_title('Non-African Nations') ax2.set_ylabel('log GDP year 2000', fontsize=14) ax2.set_xlabel('Terrain Ruggedness Index', fontsize=14); ``` #### Compare WAIC ``` with pm.Model() as model_1: a = pm.Normal('a', mu=1, sd=0.1) b = pm.Normal('b', mu=0, sd=0.3) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic('mu', a + b * (dd.rugged_std - 0.215)) log_gdp = pm.Normal('log_gdp', mu, sigma, observed=dd.log_gdp_std) trace_1 = pm.sample(1000, tune=1000) with pm.Model() as model_2: a = pm.Normal('a', mu=1, sd=0.1, shape=dd['cont_africa'].nunique()) b = pm.Normal('b', mu=0, sd=0.3) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic( 'mu', a[dd['cont_africa'].values] + b * (dd.rugged_std - 0.215)) log_gdp = pm.Normal('log_gdp', mu, 
sigma, observed=dd.log_gdp_std) trace_2 = pm.sample(1000, tune=1000) comp_df = pm.compare({'model1': trace_1, 'model2': trace_2, 'model3': trace_8H3}) comp_df ``` #### Weighted prediction ``` rugged_seq = np.linspace(-0.1, 1.1, 30) mu_a = np.apply_along_axis( lambda x: comp_df.weight[0] * (trace_8H3['a'][:, 1] + trace_8H3['b'][:, 1] * x) + comp_df.weight[1] * (trace_2['a'][:, 1] + trace_2['b'] * x) + comp_df.weight[2] * (trace_1['a'] + trace_1['b'] * x), axis=1, arr=rugged_seq[:, np.newaxis]) mu_mean_a = mu_a.mean(axis=1) mu_PI_a = np.quantile(mu_a, [0.055, 0.945], axis=1) mu_na = np.apply_along_axis( lambda x: comp_df.weight[0] * (trace_8H3['a'][:, 0] + trace_8H3['b'][:, 0] * x) + comp_df.weight[1] * (trace_2['a'][:, 0] + trace_2['b'] * x) + comp_df.weight[2] * (trace_1['a'] + trace_1['b'] * x), axis=1, arr=rugged_seq[:, np.newaxis]) mu_mean_na = mu_na.mean(axis=1) mu_PI_na = np.quantile(mu_na, [0.055, 0.945], axis=1) f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(8,3)) ax1.plot(dd_a['rugged_std'], dd_a['log_gdp_std'], 'C0o') ax1.plot(rugged_seq, mu_mean_a, 'C0') ax1.fill_between(rugged_seq, mu_PI_a[0], mu_PI_a[1], color='C0', alpha=0.5) ax1.set_title('African Nations') ax1.set_ylabel('log GDP year 2000', fontsize=14); ax1.set_xlabel('Terrain Ruggedness Index', fontsize=14) ax2.plot(dd_na['rugged_std'], dd_na['log_gdp_std'], 'ko') ax2.plot(rugged_seq, mu_mean_na, 'k') ax2.fill_between(rugged_seq, mu_PI_na[0], mu_PI_na[1], color='k', alpha=0.5) ax2.set_title('Non-African Nations') ax2.set_ylabel('log GDP year 2000', fontsize=14) ax2.set_xlabel('Terrain Ruggedness Index', fontsize=14); ``` ## 8H4 ``` d = pd.read_csv('https://raw.githubusercontent.com/rmcelreath/rethinking/master/data/nettle.csv', sep=';') d['lang.per.cap.log'] = np.log(d['num.lang'] / d['k.pop']) d['lang.per.cap.log.cent'] = d['lang.per.cap.log'] / d['lang.per.cap.log'].mean() d['area.log'] = np.log(d['area']) d['area.log.cent'] = (d['area.log'] - d['area.log'].min()) / ( d['area.log'].max() - d['area.log'].min()) d['area.log.cent'] = d['area.log.cent'] - d['area.log.cent'].mean() d['mean.growing.season.cent'] = ( d['mean.growing.season'] - d['mean.growing.season'].min()) / ( d['mean.growing.season'].max() - d['mean.growing.season'].min()) d['mean.growing.season.cent'] = d['mean.growing.season.cent'] - d['mean.growing.season.cent'].mean() d['sd.growing.season.cent'] = ( d['sd.growing.season'] - d['sd.growing.season'].min()) / ( d['sd.growing.season'].max() - d['sd.growing.season'].min()) d['sd.growing.season.cent'] = d['sd.growing.season.cent'] - d['sd.growing.season.cent'].mean() with pm.Model() as model_1: a = pm.Normal('a', mu=1, sd=0.1) bA = pm.Normal('bA', mu=0, sd=0.3) bM = pm.Normal('bM', mu=0, sd=0.3) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic( 'mu', a + bA * d['area.log.cent'] + bM * d['mean.growing.season.cent']) y = pm.Normal('y', mu, sigma, observed=d['lang.per.cap.log.cent']) trace_1 = pm.sample(1000, tune=1000) pm.summary(trace_1, ['a', 'bA', 'bM'], kind='stats').round(3) with pm.Model() as model_2: a = pm.Normal('a', mu=1, sd=0.1) bA = pm.Normal('bA', mu=0, sd=0.3) bS = pm.Normal('bS', mu=0, sd=0.3) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic( 'mu', a + bA * d['area.log.cent'] + bS * d['sd.growing.season.cent']) y = pm.Normal('y', mu, sigma, observed=d['lang.per.cap.log.cent']) trace_2 = pm.sample(1000, tune=1000) pm.summary(trace_2, ['a', 'bA', 'bS'], kind='stats').round(3) with pm.Model() as model_3: a = pm.Normal('a', mu=1, sd=0.1) bA = pm.Normal('bA', mu=0, sd=0.3) bM 
= pm.Normal('bM', mu=0, sd=0.3) bS = pm.Normal('bS', mu=0, sd=0.3) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic( 'mu', a + bA * d['area.log.cent'] + bM * d['mean.growing.season.cent'] + bS * d['sd.growing.season.cent']) y = pm.Normal('y', mu, sigma, observed=d['lang.per.cap.log.cent']) trace_3 = pm.sample(1000, tune=1000) pm.summary(trace_3, ['a', 'bA', 'bM', 'bS'], kind='stats').round(3) with pm.Model() as model_4: a = pm.Normal('a', mu=1, sd=0.1) bA = pm.Normal('bA', mu=0, sd=0.3) bM = pm.Normal('bM', mu=0, sd=0.3) bS = pm.Normal('bS', mu=0, sd=0.3) bMS = pm.Normal('bMS', mu=0, sd=0.3) sigma = pm.Exponential('sigma', 1) mu = pm.Deterministic( 'mu', a + bA * d['area.log.cent'] + bM * d['mean.growing.season.cent'] + bS * d['sd.growing.season.cent'] + bMS * d['mean.growing.season.cent'] * d['sd.growing.season.cent']) y = pm.Normal('y', mu, sigma, observed=d['lang.per.cap.log.cent']) trace_4 = pm.sample(1000, tune=1000) pm.summary(trace_4, ['a', 'bA', 'bM', 'bS', 'bMS'], kind='stats').round(3) ``` ### Compare WAIC ``` comp_df = pm.compare({'mean': trace_1, 'sd': trace_2, 'mean + st': trace_3, 'mean * st': trace_4}) comp_df ``` ### Plot posterior with interaction ``` d['mean.growing.season.cent'].hist() d['sd.growing.season.cent'].hist() seq_s = np.linspace(-0.3, 0.7, 25) f, axs = plt.subplots(1, 3, sharey=True, figsize=(12, 3)) for ax, m in zip(axs.flat, [-0.4, 0, 0.4]): mu = np.apply_along_axis(lambda x: trace_4['a'] + trace_4['bM'] * m + trace_4['bS'] * x + trace_4['bMS'] * m * x, axis=1, arr=seq_s[:, np.newaxis]) mu_mean = mu.mean(1) mu_PI = np.quantile(mu, [0.055, 0.945], axis=1) ax.plot(seq_s, mu_mean, 'k') ax.plot(seq_s, mu_PI[0], 'k--') ax.plot(seq_s, mu_PI[1], 'k--') ax.set_ylabel('area.log') ax.set_xlabel('sd.growing.season') ax.set_title(f'mean.growing.season = {m}') ``` The idea is that, in nations with longer average growing seasons, high variance makes storage and redistribution even more important than it would be otherwise.
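One way to read the interaction directly (a minimal sketch, reusing the `trace_4` samples from the interaction model above): under mu = a + bA·A + bM·M + bS·S + bMS·M·S, the implied slope of language diversity with respect to sd.growing.season at a fixed mean.growing.season value m is bS + bMS·m. The sketch below summarizes that slope at the same three mean values used in the panels above.

```
import numpy as np

# Effective slope of sd.growing.season at a fixed mean.growing.season value m,
# using the posterior samples from model_4 (trace_4) above.
for m in [-0.4, 0.0, 0.4]:
    slope = trace_4['bS'] + trace_4['bMS'] * m
    lo, hi = np.quantile(slope, [0.055, 0.945])   # 89% interval, matching the plots
    print(f"mean.growing.season = {m:+.1f}: sd slope mean {slope.mean():.3f}, 89% PI [{lo:.3f}, {hi:.3f}]")
```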
# HistGradientBoostingClassifier with RobustScaler

This code template is for classification analysis using a HistGradientBoostingClassifier together with the feature-rescaling technique called RobustScaler.

### Required Packages

```
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.metrics import classification_report,plot_confusion_matrix
from sklearn.preprocessing import RobustScaler
warnings.filterwarnings('ignore')
```

### Initialization

Filepath of the CSV file.

```
#filepath
file_path=" "
```

List of features required for model training.

```
#x_values
features = []
```

Target feature for prediction.

```
#y_value
target=' '
```

### Data Fetching

Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We use pandas to read the CSV file from its storage path, and the head function to display the first rows.

```
df=pd.read_csv(file_path)
df.head()
```

### Feature Selection

Feature selection is the process of reducing the number of input variables when developing a predictive model. It is used both to reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We assign all the required input features to X and the target/outcome to Y.

```
X = df[features]
Y = df[target]
```

### Data Preprocessing

Since the majority of the machine learning models in the sklearn library do not handle string categories or null values, we have to explicitly remove or replace them. The snippet below defines functions that fill null values where they exist and encode string classes as integer classes.

```
def NullClearner(df):
    if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
        df.fillna(df.mean(),inplace=True)
        return df
    elif(isinstance(df, pd.Series)):
        df.fillna(df.mode()[0],inplace=True)
        return df
    else:
        return df

def EncodeX(df):
    return pd.get_dummies(df)

def EncodeY(df):
    if len(df.unique())<=2:
        return df
    else:
        un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
        df=LabelEncoder().fit_transform(df)
        EncodedT=[xi for xi in range(len(un_EncodedT))]
        print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
        return df

x=X.columns.to_list()
for i in x:
    X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
```

#### Correlation Map

To check the correlation between the features, we plot a correlation matrix. It is effective for summarizing a large amount of data when the goal is to see patterns.

```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```

#### Distribution Of Target Variable

```
plt.figure(figsize = (10,6))
se.countplot(Y)
```

### Data Rescaling

For rescaling the data, the RobustScaler function of sklearn is used. RobustScaler scales features using statistics that are robust to outliers: it removes the median and scales the data according to the quantile range (by default the IQR, the interquartile range between the 1st quartile (25th percentile) and the 3rd quartile (75th percentile)).
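As a small illustration of what this scaler does (a toy sketch, separate from the template's dataframe), the transform is simply (x − median) / IQR per column, and a single extreme outlier barely changes the median or the IQR:

```
import numpy as np
from sklearn.preprocessing import RobustScaler

col = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])  # one feature containing an outlier
q1, med, q3 = np.percentile(col, [25, 50, 75])
manual = (col - med) / (q3 - q1)                       # (x - median) / IQR
scaled = RobustScaler().fit_transform(col)
print(np.allclose(manual, scaled))                     # expected: True
```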
#### RobustScaler function

Reference URL to the RobustScaler API: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html

```
X_Scaled=RobustScaler().fit_transform(X)
X=pd.DataFrame(X_Scaled,columns=X.columns)
X.head(3)
```

### Data Splitting

The train-test split is a procedure for evaluating the performance of an algorithm. It involves taking a dataset and dividing it into two subsets: the first subset is used to fit/train the model, and the second subset is used for prediction. The main motive is to estimate the performance of the model on new data.

```
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
```

### Model

Histogram-based Gradient Boosting Classification Tree. This estimator is much faster than GradientBoostingClassifier for big datasets (n_samples >= 10 000). This estimator has native support for missing values (NaNs). [Reference](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingClassifier.html#sklearn.ensemble.HistGradientBoostingClassifier)

> **loss**: The loss function to use in the boosting process. ‘binary_crossentropy’ (also known as logistic loss) is used for binary classification and generalizes to ‘categorical_crossentropy’ for multiclass classification. ‘auto’ will automatically choose either loss depending on the nature of the problem.

> **learning_rate**: The learning rate, also known as shrinkage. This is used as a multiplicative factor for the leaves values. Use 1 for no shrinkage.

> **max_iter**: The maximum number of iterations of the boosting process, i.e. the maximum number of trees.

> **max_depth**: The maximum depth of each tree. The depth of a tree is the number of edges to go from the root to the deepest leaf. Depth isn’t constrained by default.

> **l2_regularization**: The L2 regularization parameter. Use 0 for no regularization (default).

> **early_stopping**: If ‘auto’, early stopping is enabled if the sample size is larger than 10000. If True, early stopping is enabled, otherwise early stopping is disabled.

> **n_iter_no_change**: Used to determine when to “early stop”. The fitting process is stopped when none of the last n_iter_no_change scores are better than the n_iter_no_change - 1 -th-to-last one, up to some tolerance. Only used if early stopping is performed.

> **tol**: The absolute tolerance to use when comparing scores during early stopping. The higher the tolerance, the more likely we are to early stop: higher tolerance means that it will be harder for subsequent iterations to be considered an improvement upon the reference score.

> **scoring**: Scoring parameter to use for early stopping.

```
model = HistGradientBoostingClassifier(random_state = 123)
model.fit(x_train, y_train)
```

#### Model Accuracy

The score() method returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since it requires each label set of each sample to be correctly predicted.

```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```

#### Confusion Matrix

A confusion matrix is used to understand the performance of a classification model or algorithm on a test set for which the true results are known.

```
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
```
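The plot above shows the counts graphically; as an optional check (a sketch that assumes a binary target, while the template itself is dataset-agnostic), the same counts can be pulled out numerically and precision/recall computed by hand before reading the classification report below.

```
from sklearn.metrics import confusion_matrix

y_pred = model.predict(x_test)
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()  # unpacking like this only works for binary targets
precision = tp / (tp + fp)   # accuracy of positive predictions
recall = tp / (tp + fn)      # fraction of actual positives that were identified
print(f"TP={tp} FP={fp} FN={fn} TN={tn}  precision={precision:.3f}  recall={recall:.3f}")
```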
#### Classification Report

A classification report is used to measure the quality of predictions from a classification algorithm: how many predictions were correct and how many were not. For each class it reports:

- Precision: the accuracy of positive predictions.
- Recall: the fraction of actual positives that were correctly identified.
- F1-score: the harmonic mean of precision and recall.
- Support: the number of actual occurrences of the class in the specified dataset.

```
print(classification_report(y_test,model.predict(x_test)))
```

#### Creator: Surya Kiran, Github: [Profile](https://github.com/surya2365)
Tensors are higher order extensions of matrices that can encode multi-dimensional data ![tensor_illustration](../img/tensor_cartoon.jpg) In this tutorial we will show how to manipulate tensors as NDArrays, and write from scratch functions to manipulate these as defined in [TensorLy](http://tensorly.github.io). ``` import mxnet.ndarray as nd ``` # 1. Creating a Tensor A tensor can be represented in multiple ways. The simplest is the slice representation through multiple matrices. Let's take for this example the tensor $\tilde X$ defined by its frontal slices: $$ X_1 = \left[ \begin{matrix} 0 & 2 & 4 & 6\\ 8 & 10 & 12 & 14\\ 16 & 18 & 20 & 22 \end{matrix} \right] $$ and $$ X_2 = \left[ \begin{matrix} 1 & 3 & 5 & 7\\ 9 & 11 & 13 & 15\\ 17 & 19 & 21 & 23 \end{matrix} \right] $$ In Python, this array can be expressed as a numpy array:: ``` X = nd.arange(24).reshape((3, 4, 2)) X ``` You can view the frontal slices by fixing the last axis: ``` X[:, :, 0] X[:, :, 1] ``` # 3. Basic Tensor Operations ## 3.1 Unfolding Also called **matrization**, **unfolding** a tensor is done by reading the element in a given way as to obtain a matrix instead of a tensor. It is done by stacking the **fibers** of the tensor into a matrix. ![tensor_illustration](../img/tensor_fibers.png) Illustration: *Nonnegative Matrix and Tensor Factorizations*, Andrzej Cichocki, Rafal Zdunek, Anh Huy Phan, and Shun-ichi Amari, John Wiley & Sons, 2009. ### Definition For a tensor of size $(I_1, I_2, \cdots, I_N)$, the n-mode unfolding of this tensor will be of size $(I_n, I_1 \times \cdots \times I_{n-1} \times I_{n+1} \cdots \times I_N)$ and is obtained by reading the tensor as a matrix with the $n$-th dimension first. Specifically, given a tensor $\tilde X \in \mathbb{R}^{I_1, I_2, \cdots, I_N}$, the mode-n unfolding of $\tilde X$ is a matrix $\mathbf{X}_{[n]} \in \mathbb{R}^{I_n, I_M}$, with $M = \prod\limits_{\substack{k=1,\\k \neq n}}^N I_k$ and is defined by the mapping from element $(i_1, i_2, \cdots, i_N)$ to $(i_n, j)$, with $$ j = \sum\limits_{\substack{k=1,\\k \neq n}}^N i_k \times \prod\limits_{\substack{m=k+1,\\m \neq n}}^N I_m. $$ ### Convention Traditionally, mode-1 unfolding denotes the unfolding along the first dimension. However, to be consistent with the Python indexing that always starts at zero, as done in tensorly, we will start indexing modes at zero! Therefore ``unfold(tensor, 0)`` will unfold said tensor along its first dimension! ### Example For instance, using the $\tilde X$ previously defined: $$ X_1 = \left[ \begin{matrix} 0 & 2 & 4 & 6\\ 8 & 10 & 12 & 14\\ 16 & 18 & 20 & 22 \end{matrix} \right] $$ and $$ X_2 = \left[ \begin{matrix} 1 & 3 & 5 & 7\\ 9 & 11 & 13 & 15\\ 17 & 19 & 21 & 23 \end{matrix} \right] $$ The 0-mode unfolding of $\tilde X$: $$ \tilde X_{[0]} = \left[ \begin{matrix} 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7\\ 8 & 9 & 10 & 11 & 12 & 13 & 14 & 15\\ 16 & 17 & 18 & 19 & 20 & 21 & 22 & 23\\ \end{matrix} \right] $$ The 1-mode unfolding is given by: $$ \tilde X_{[1]} = \left[ \begin{matrix} 0 & 1 & 8 & 9 & 16 & 17\\ 2 & 3 & 10 & 11 & 18 & 19\\ 4 & 5 & 12 & 13 & 20 & 21\\ 6 & 7 & 14 & 15 & 22 & 23\\ \end{matrix} \right] $$ Finally, the 2-mode unfolding is the unfolding along the last axis: $$ \tilde X_{[2]} = \left[ \begin{matrix} 0 & 2 & 4 & 6 & 8 & 10 & 12 & 14 & 16 & 18 & 20 & 22\\ 1 & 3 & 5 & 7 & 9 & 11 & 13 & 15 & 17 & 19 & 21 & 23\\ \end{matrix} \right] $$ ### In MXNet Let's define the unfolding function in MXNet. 
Given the mode $n$ along which to unfold, it will take a tensor, put the $n$-th dimension first, and matricize the result. Note that our definition of unfolding corresponds to a C-ordering of the elements. MXNet also has a C ordering of the elements, making that matricization a simple reshaping. ``` def unfold(tensor, mode): """Returns the mode-`mode` unfolding of `tensor` with modes starting at `0`. Parameters ---------- tensor : ndarray mode : int, default is 0 indexing starts at 0, therefore mode is in ``range(0, tensor.ndim)`` Returns ------- ndarray unfolded_tensor of shape ``(tensor.shape[mode], -1)`` """ return nd.reshape(nd.moveaxis(tensor, mode, 0), (tensor.shape[mode], -1)) unfold(X, mode=0) unfold(X, mode=1) unfold(X, mode=2) ``` ## 3.2 Folding Folding is the inverse operation: we reshape the matrix into a tensor and move back the first dimension back to its original place. ``` def fold(unfolded_tensor, mode, shape): """Refolds the mode-`mode` unfolding into a tensor of shape `shape` Parameters ---------- unfolded_tensor : ndarray unfolded tensor of shape ``(shape[mode], -1)`` mode : int the mode of the unfolding shape : tuple shape of the original tensor before unfolding Returns ------- ndarray folded_tensor of shape `shape` """ full_shape = list(shape) mode_dim = full_shape.pop(mode) full_shape.insert(0, mode_dim) return nd.moveaxis(nd.reshape(unfolded_tensor, full_shape), 0, mode) unfolding = unfold(X, 1) original_shape = X.shape fold(unfolding, mode=1, shape=original_shape) ``` ## 3.3 n-mode product Also known as **tensor contraction**. This is a natural generalization of matrix-vector and matrix-matrix product. When multiplying a tensor by a matrix or a vector, we now have to specify the **mode** $n$ along which to take the product. ### Tensor times matrix In that case we are doing an operation analogous to a matrix multiplication on the $n$-th mode. Given a tensor $\tilde X$ of size $(I_1, I_2, \cdots, I_N)$, and a matrix $M$ of size $(D, I_n)$, the $n$-mode product of $\tilde X$ by $M$ is written $\tilde X \times_n M$ and is of size $(D, I_1 \times \cdots \times I_{n-1} \times I_{n+1} \cdots \times I_N)$. One simple way to mathematically define the n-mode product is using the unfolding: if we write $\tilde R = \tilde X \times_n M$, then we have: $$ \tilde R_{[n]} = M \times \tilde X_{[n]} $$ As a consequence, to get the n-mode product of $\tilde X$ by $M$, we can simply take a matrix product between $M$ and the unfolding of $\tilde X$ along the $n^{th}$ dimension, and refold the result into a tensor of shape $(I_1, \cdots, I_{n-1}, D, I_{n+1}, \cdots, I_N)$. ### Tensor times vector In that case we are contracting over the $n$-th mode by multiplying it with a vector. Given a tensor $\tilde X$ of size $(I_1, I_2, \cdots, I_N)$, and a vector $v$ of size $(I_n)$, the $n$-mode product of $\tilde X$ by $v$ is written $\tilde X \times_n v$ and is of size $(I_1 \times \cdots \times I_{n-1} \times I_{n+1} \cdots \times I_N)$ --we have essentially summed over (or contracted over) the $n$-th dimension--. ![tensor_illustration](../img/tensor_contraction.png) ### Example We will write a function `mode_dot` that works transparently for multiplying a tensor by a matrix or a vector, along a given mode. ``` def mode_dot(tensor, matrix_or_vector, mode): """n-mode product of a tensor by a matrix at the specified mode. 
Parameters ---------- tensor : ndarray tensor of shape ``(i_1, ..., i_k, ..., i_N)`` matrix_or_vector : ndarray 1D or 2D array of shape ``(J, i_k)`` or ``(i_k, )`` matrix or vectors to which to n-mode multiply the tensor mode : int Returns ------- ndarray `mode`-mode product of `tensor` by `matrix_or_vector` * of shape :math:`(i_1, ..., i_{k-1}, J, i_{k+1}, ..., i_N)` if matrix_or_vector is a matrix * of shape :math:`(i_1, ..., i_{k-1}, i_{k+1}, ..., i_N)` if matrix_or_vector is a vector """ # the mode along which to fold might decrease if we take product with a vector fold_mode = mode new_shape = list(tensor.shape) # tensor times vector case: make sure the sizes are correct # (we are contracting over one dimension which then disappearas) if matrix_or_vector.ndim == 1: if len(new_shape) > 1: new_shape.pop(mode) fold_mode -= 1 else: new_shape = [1] # This is the actual operation: we use the equivalent formulation of the n-mode-product using the unfolding res = nd.dot(matrix_or_vector, unfold(tensor, mode)) # refold the result into a tensor and return it return fold(res, fold_mode, new_shape) ``` #### Tensor times matrix With the tensor $\tilde X$ of size (3, 4, 2) we defined previously, let's define a matrix M of size (5, 4) to multiply along the second mode: ``` M = nd.arange(4*5).reshape((5, 4)) print(M.shape) ``` Keep in mind indexing starts at zero, so the second mode is represented by `mode=1`: ``` res = mode_dot(X, M, mode=1) ``` As expected the result is of shape (3, 5, 2) ``` res.shape ``` #### Tensor times vector Similarly, we can contract along mode 1 with a vector of size 4 (our tensor is of size (3, 4, 2). ``` v = nd.arange(4) print(v.shape) res = mode_dot(X, v, mode=1) ``` Since we have multiplied by a vector, we have effectively contracted out one mode of the tensor so the result is a matrix: ``` res.shape ```
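To close the loop, here is a quick numerical sanity check of the identities used above: fold is the inverse of unfold, and the n-mode product satisfies $\tilde R_{[n]} = M \tilde X_{[n]}$. It is written with plain numpy for brevity (an assumption of this sketch; the MXNet functions above use the same C-ordering convention, so they should behave identically).

```
import numpy as np

X = np.arange(24).reshape(3, 4, 2)

def unfold_np(tensor, mode):
    # same recipe as the MXNet version: move `mode` to the front, then matricize
    return np.moveaxis(tensor, mode, 0).reshape(tensor.shape[mode], -1)

def fold_np(unfolded, mode, shape):
    full_shape = list(shape)
    full_shape.insert(0, full_shape.pop(mode))
    return np.moveaxis(unfolded.reshape(full_shape), 0, mode)

# fold undoes unfold
print(np.array_equal(fold_np(unfold_np(X, 1), 1, X.shape), X))                 # True

# tensor times matrix: unfold, multiply, refold -> shape (3, 5, 2)
M = np.arange(20).reshape(5, 4)
res = fold_np(M @ unfold_np(X, 1), 1, (3, 5, 2))
print(res.shape)                                                               # (3, 5, 2)

# tensor times vector: contracting over mode 1 matches an explicit einsum
v = np.arange(4)
print(np.array_equal(fold_np(v @ unfold_np(X, 1), 0, (3, 2)),
                     np.einsum('ijk,j->ik', X, v)))                            # True
```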
``` import pandas as pd import numpy as np import os from collections import defaultdict from datetime import * from fn import * from o_rpa_def import * from o_rpa_defonly import * import o_time as oT def colchk(df): mcols = ['EQUIPMENTKEY','SITECODE','SUMMARY','ALERTKEY','LASTOCCURRENCE','CLEARTIMESTAMP'] ocols = ['RESOURCE','CUSTOMATTR15','SUMMARY','ALERTKEY','LASTOCCURRENCE','CLEARTIMESTAMP'] df = df.rename (columns=str.upper) cols = df.columns.to_list() if cols.count('SITECODE') != 0: df = df.rename(columns={'SITECODE':'CUSTOMATTR15'}) if cols.count('EQUIPMENTKEY') != 0: df = df.rename(columns={'EQUIPMENTKEY':'RESOURCE'}) for i in ocols: if ocols.count(i) == 0: print('must have column needs in table: but missing !',chr(10),mcols,chr(10),'exiting .....') exit(0) else: sx = chrstream() omnm(sx) print(chr(10)) return df def inner_list_to_dic(dic): for key, value in dic.items(): dic[key] = set(value) return dic def joinls(l1,l2): lss = [] for i in l1: for j in l2: lss.append(str(i) + '$' + str(j)) return lss class o_dic: def __init__(self,dff): self.dc = defaultdict(dict) self.df0 = colchk(dff) self.df1 = catmap(self.df0, os.getcwd() + "\\OMDB.csv") self.df = dff self.testdc = {} self.ls = [] self.lsky = [] self.lsvl = [] def add1(self, k, v, setsame = False): if len(self.dc) != 0: for i in self.dc: if type(self.dc[i]) is dict: self.dc[i][k] = v if not list(self.dc[i]) else self.dc[i].get(k, []) + v else: if setsame: self.dc[i] = self.dc.get(k, []) + v else: if type(self.dc[i]) is list: if v not in self.dc[i]: self.dc[i].append(v) else: self.dc[i].append([v]) else: self.dc[0] = {k:v} self.testdc = dict(zip(list(k),v)) self.ls.append(v) def pnt(self): print(self.dc) print(self.ls) def dc2np(self): self.nar = np.array([self.dc[0] for item in self.dc[0]], dtype=object) return self.nar def dc2df(self): #self.df = pd.DataFrame(self.dc[0]) self.df = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in self.dc[0].items() ])) return self.df def add2(self, k, v): self.df = self.df[self.df[k].isin(v)] self.lsky.append([k]) self.lsvl.append(v) try: self.df = self.df[self.df[k].isin(v)] except: print('except trigger') def genx(self): ln = len(self.lsky) ls = [] cnt = 0 if ln>=2: for i in range(ln): cnt = cnt + 1 if len(ls) == 0: ls = joinls(self.lsval[i],self.lsval[cnt]) elif len(ls) != 0 and cnt<ln: lss = [] lss = joinls(ls,self.lsval[cnt]) ls = lss else: print(ls) msg = defaultdict(list) b1 = [['2G','3G','4G'],['MF','DL'],['P1','P2']] xy = [dict(zip(b1[0], values)) for values in b1] data = {'abc': ['aaa', 'bbb', 'ccc'], 'def': ['ddd', 'eee', 'fff']} b = {} a1 = {"A1": ["T100",'T200',['N200','N600','N700']]} a2 = {'0': {'three': {'five': ['2', '3', '5', '6', '8', '9'],'six': ['2', '3', '5', '6', '8', '9']}}, '1': {'one': ['1', '11', '12','11', '12']}} a3 = {'B1':[['p1','p2','p3'],['q1','q3']]} b1 = [['2G','3G','4G'],['MF','DL'],['P1','P2']] b2 = [['2G','3G','4G',['MF','DL']],['P1','P2']] d1 = {'CAT': ['2g', '3g', '4g'], 'ZONE': ['NOA', 'COM']} df = pd.read_csv (os.getcwd() + "\\sclick.csv") dc = df.to_dict() ar = df.to_numpy() x = o_dic(df) x.add2('zone', ['COM','NOA']) x.add2('CAT', ['2G', '3G', '4G']) x.add2('Priority', ['P1','P2']) x.pnt() data = {'abc': ['aaa', 'bbb', 'ccc'], 'def': ['ddd', 'eee', 'fff']} dict2 = {} i = 0 for key, value in data.items(): dict2[key] = {} for element in value: dict2[key][i] = element i += 1 i = 0 print(dict2) def dict2numpy(dc): res = np.array([list(dc.values()) for item in dc.values()],dtype=object) return res def inner_list_to_dic(dic): for key, value in dic.items(): 
dic[key] = set(value) return dic def inif(*args): for i in range(len(args)): if type(args[i]) is dict: dc = args[i] for key, value in dc.items(): print(key, value) elif type(args[i]) is list: ls = args[i] for n in range(len(ls)): print(ls[n]) elif type(args[i]) is str: st = args[i] print(st) inif(['a3'],['p1','p2'],["a2",["omi"]]) groups = [['Group1', 'A', 'B'], ['Group2', 'C', 'D']] result = {} for group in groups: for item in group: result[item] = group[0] print(result) import itertools b = ['a','b','c'] a = [['a','b'], ['c','d']] print(list(itertools.chain.from_iterable(b))) list1 = ['2G','3G','4G'] list2 = ['MF','DL'] lss = [] test = [] for key, values in zip(list1, list2): if values: values = [key + "_" + str(v) for v in values] test.append([values]) else: test.append([key]) print(test) ```
### Note * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. ``` # Dependencies and Setup import pandas as pd # File to Load (Remember to Change These) school_data_to_load = "Resources/schools_complete.csv" student_data_to_load = "Resources/students_complete.csv" # Read School and Student Data File and store into Pandas DataFrames school_data = pd.read_csv(school_data_to_load) student_data = pd.read_csv(student_data_to_load) # Combine the data into a single dataset. school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"]) school_data_complete ``` ## District Summary * Calculate the total number of schools * Calculate the total number of students * Calculate the total budget * Calculate the average math score * Calculate the average reading score * Calculate the percentage of students with a passing math score (70 or greater) * Calculate the percentage of students with a passing reading score (70 or greater) * Calculate the percentage of students who passed math **and** reading (% Overall Passing) * Create a dataframe to hold the above results * Optional: give the displayed data cleaner formatting ``` # Calculate the total number of schools # Find school names (not a necessary step, but useful) schools = school_data_complete['school_name'].unique() schools # Count the number in the list (could also have used nunique function) school_count = len(schools) school_count # Calculate the total number of students students = len(school_data_complete['student_name']) students # Calculate the total budget # Find the budget for each school school_budgets = sum(school_data_complete['budget'].unique()) school_budgets # Calculate the average math score average_math_score = school_data_complete['math_score'].mean() average_math_score # Calculate the average reading score average_reading_score = school_data_complete['reading_score'].mean() average_reading_score # Calculate the percentage of students with a passing math score (70 or greater) math_score_passing = (school_data_complete['math_score'] >=70).values.sum()/students*100 math_score_passing # Calculate the percentage of students with a passing reading score (70 or greater) reading_score_passing = (school_data_complete['reading_score'] >=70).values.sum()/students*100 reading_score_passing # Calculate the percentage of students who passed math and reading (% Overall Passing) math_and_reading_scores_passing = school_data_complete.loc[(school_data_complete['math_score'] >= 70) & (school_data_complete['reading_score'] >=70)] math_and_reading_scores_passing_percentage = len(math_and_reading_scores_passing)/students*100 math_and_reading_scores_passing_percentage # Create a dataframe to hold the results (total number of schools, total number of students, total budget, average math score,average reading score, percentage of students passing math, percentage of students passing reading, percentage of students passing math and reading district_summary_df = pd.DataFrame({"Schools": [school_count], "Students": [students], "School Budget": [school_budgets], "Average Math Score": [average_math_score], "Average Reading Score": [average_reading_score], "Percent Passing Math": [math_score_passing], "Percent Passing Reading": [reading_score_passing], "Percent Passing Math and Reading": [math_and_reading_scores_passing_percentage]}) district_summary_df # Optional: give the displayed data cleaner formatting ``` ## School 
Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * % Overall Passing (The percentage of students that passed math **and** reading.) * Create a dataframe to hold the above results ``` # School Names schools = school_data_complete['school_name'].unique() schools school_data_complete.columns school_summary_df = school_data_complete.set_index("school_name") school_summary_df school_type_df = school_data.set_index("school_name") school_type_df school_type = school_type_df["type"] school_type student_counts = school_summary_df.value_counts("school_name") student_counts school_budget = school_type_df["budget"] school_budget budget_per_student = school_budget/student_counts budget_per_student # average math score by school school_math_average = school_data_complete.groupby(["school_name"]).mean()["math_score"] school_math_average # average reading score by school school_reading_average = school_data_complete.groupby(["school_name"]).mean()["reading_score"] school_reading_average # percent passing math school_math_percent_passing = school_data_complete[(school_data_complete['math_score'] >=70)] school_math_percent_passing math_passing_by_school = school_math_percent_passing.groupby(["school_name"]).count()["math_score"]/student_counts*100 math_passing_by_school school_reading_percent_passing = school_data_complete[(school_data_complete['reading_score'] >=70)] school_reading_percent_passing # percent passing reading reading_passing_by_school = school_reading_percent_passing.groupby(["school_name"]).count()["reading_score"]/student_counts*100 reading_passing_by_school overall_passing_by_school = math_and_reading_scores_passing.groupby(['school_name']).count()["student_name"]/student_counts*100 overall_passing_by_school ``` ## Top Performing Schools (By % Overall Passing) ``` school_performance_df = pd.DataFrame(overall_passing_by_school) school_performance_df ``` * Sort and display the top five performing schools by % overall passing. ``` top_performers_df = school_performance_df.sort_values([0], ascending=False) top_performers_df ``` ## Bottom Performing Schools (By % Overall Passing) * Sort and display the five worst-performing schools by % overall passing. ``` bottom_performers_df = school_performance_df.sort_values([0], ascending=True) bottom_performers_df ``` ## Math Scores by Grade * Create a table that lists the average Reading Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. 
* Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting ``` ninth_grade_math = school_summary_df.loc[school_summary_df["grade"] == "9th", :] ninth_grade_math ninth_grade_math_averages = pd.DataFrame(ninth_grade_math.groupby(["school_name"]).mean()["math_score"]) ninth_grade_math_averages tenth_grade_math = school_summary_df.loc[school_summary_df["grade"] == "10th", :] tenth_grade_math tenth_grade_math_averages = pd.DataFrame(tenth_grade_math.groupby(["school_name"]).mean()["math_score"]) tenth_grade_math_averages eleventh_grade_math = school_summary_df.loc[school_summary_df["grade"] == "11th", :] eleventh_grade_math eleventh_grade_math_averages = pd.DataFrame(eleventh_grade_math.groupby(["school_name"]).mean()["math_score"]) eleventh_grade_math_averages twelfth_grade_math = school_data_complete.loc[school_data_complete["grade"] == "12th", :] twelfth_grade_math twelfth_grade_math_averages = pd.DataFrame(twelfth_grade_math.groupby(["school_name"]).mean()["math_score"]) twelfth_grade_math_averages ninth_and_tenth_df = pd.merge(ninth_grade_math_averages,tenth_grade_math_averages, on="school_name") ninth_and_tenth_math_df = ninth_and_tenth_df.rename(columns={"math_score_x":"9th Grade Math Average", "math_score_y":"10th Grade Math Average"}) ninth_and_tenth_math_df eleventh_and_twelfth_df = pd.merge(eleventh_grade_math_averages,twelfth_grade_math_averages, on="school_name") eleventh_and_twelfth_math_df = eleventh_and_twelfth_df.rename(columns={"math_score_x":"11th Grade Math Average", "math_score_y":"12th Grade Math Average"}) eleventh_and_twelfth_math_df all_grades_math_df = pd.merge(ninth_and_tenth_math_df, eleventh_and_twelfth_math_df, on="school_name") all_grades_math_df ``` ## Reading Score by Grade * Perform the same operations as above for reading scores ``` ninth_grade_reading = school_summary_df.loc[school_summary_df["grade"] == "9th", :] ninth_grade_reading ninth_grade_reading_averages = pd.DataFrame(ninth_grade_reading.groupby(["school_name"]).mean()["reading_score"]) ninth_grade_reading_averages tenth_grade_reading = school_summary_df.loc[school_summary_df["grade"] == "10th", :] tenth_grade_reading tenth_grade_reading_averages = pd.DataFrame(tenth_grade_reading.groupby(["school_name"]).mean()["reading_score"]) tenth_grade_reading_averages eleventh_grade_reading = school_summary_df.loc[school_summary_df["grade"] == "11th", :] eleventh_grade_reading eleventh_grade_reading_averages = pd.DataFrame(eleventh_grade_reading.groupby(["school_name"]).mean()["reading_score"]) eleventh_grade_reading_averages twelfth_grade_reading = school_summary_df.loc[school_summary_df["grade"] == "12th", :] twelfth_grade_reading twelfth_grade_reading_averages = pd.DataFrame(twelfth_grade_reading.groupby(["school_name"]).mean()["reading_score"]) twelfth_grade_reading_averages ninth_and_tenth_rdf = pd.merge(ninth_grade_reading_averages,tenth_grade_reading_averages, on="school_name") ninth_and_tenth_reading_df = ninth_and_tenth_rdf.rename(columns={"reading_score_x":"9th Grade Reading Average", "reading_score_y":"10th Grade Reading Average"}) ninth_and_tenth_reading_df eleventh_and_twelfth_rdf = pd.merge(eleventh_grade_reading_averages,twelfth_grade_reading_averages, on="school_name") eleventh_and_twelfth_reading_df = eleventh_and_twelfth_rdf.rename(columns={"reading_score_x":"11th Grade Reading Average", "reading_score_y":"12th Grade Reading Average"}) eleventh_and_twelfth_reading_df all_grades_reading_df = pd.merge(ninth_and_tenth_reading_df, 
eleventh_and_twelfth_reading_df, on="school_name")
all_grades_reading_df
```

## Scores by School Spending

* Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
  * Average Math Score
  * Average Reading Score
  * % Passing Math
  * % Passing Reading
  * Overall Passing Rate (Average of the above two)

```
budget_per_student_df = pd.DataFrame(budget_per_student)
budget_per_student_df = budget_per_student_df.sort_values([0])
budget_per_student_df

budget_per_student_new_df = budget_per_student_df.rename(columns = {0:"budget_per_student"})
budget_per_student_new_df

budget_per_student_bins = [0, 595, 630, 645, 656]
budget_per_student_bin_names = ["cheapest", "cheap", "about right", "big spenders"]
budget_per_student_new_df["budget_rankings"] = pd.cut(budget_per_student_new_df["budget_per_student"], budget_per_student_bins, labels = budget_per_student_bin_names)
budget_per_student_new_df

school_summary_by_budgetm = pd.merge(budget_per_student_new_df, school_math_average, on="school_name")
school_summary_by_budgetm

school_summary_by_budgetmr = pd.merge(school_summary_by_budgetm, school_reading_average, on="school_name")
school_summary_by_budgetmr

budget_passing_df = pd.DataFrame({"math_passing":math_passing_by_school,
                                  "reading_passing":reading_passing_by_school,
                                  "overall_passing":overall_passing_by_school})
budget_passing_df

budget_school_summary_df = pd.merge(school_summary_by_budgetmr, budget_passing_df, on="school_name")
budget_school_summary_df

budget_ranked_df = budget_school_summary_df.set_index("budget_rankings")
budget_ranked_df

budget_group_summary = budget_ranked_df.groupby("budget_rankings")
budget_group_summary[["math_score", "reading_score", "math_passing", "reading_passing", "overall_passing"]].mean()
```

## Scores by School Size

```
school_size_df = pd.DataFrame(student_counts)
school_size_df = school_size_df.sort_values([0])
school_size_df

school_size_new_df = school_size_df.rename(columns = {0:"school_size"})
school_size_new_df

school_size_bins = [0, 1700, 2000, 3000, 5000]
school_size_bin_names = ["tiny", "average", "big", "enormous"]
school_size_new_df["school_size"] = pd.cut(school_size_new_df["school_size"], school_size_bins, labels = school_size_bin_names)
school_size_new_df

school_sizem = pd.merge(school_size_new_df, school_math_average, on="school_name")
school_sizem

school_sizemr = pd.merge(school_sizem, school_reading_average, on="school_name")
school_sizemr

school_size_summary = pd.merge(school_sizemr, budget_passing_df, on="school_name")
school_size_summary

school_sizes_df = school_size_summary.set_index("school_size")
school_sizes_df

school_size_group_summary = school_sizes_df.groupby("school_size")
school_size_group_summary[["math_score", "reading_score", "math_passing", "reading_passing", "overall_passing"]].mean()
```

## Scores by School Type

* Perform the same operations as above, based on school type

```
school_type_df = pd.DataFrame(school_type)
school_type_df

school_typem = pd.merge(school_type_df, school_math_average, on="school_name")
school_typem

school_typemr = pd.merge(school_typem, school_reading_average, on="school_name")
school_typemr

school_type_summary_df = pd.merge(school_typemr, budget_passing_df, on="school_name")
school_type_summary_df

school_type_summary_df.set_index("type")

school_type_summary_condensed = school_type_summary_df.groupby("type")
school_type_summary_condensed[["math_score", "reading_score", "math_passing", "reading_passing", "overall_passing"]].mean()
```
# Westeros Tutorial - Adding representation of renewables (part 3/3): Introducing `renewable_resource_constraints`

This tutorial, which demonstrates how to apply various model features to provide a more realistic representation of renewable energy integration in the energy system, comprises three parts. Previously, we introduced constraints on [`firm capacity`](https://docs.messageix.org/en/stable/model/MESSAGE/model_core.html?highlight=FIRM_CAPACITY_PROVISION#equation-firm-capacity-provision) and [`flexible generation`](https://docs.messageix.org/en/stable/model/MESSAGE/model_core.html?highlight=flexibility#equation-system-flexibility-constraint). In this third part, we will show you how to introduce renewable resource potentials.

Up until now, `wind_ppl` activity was unrestricted. In order to reflect the fact that wind potentials within a given region are limited and differ in quality, we will introduce [`renewable_potentials` and `renewable_capacity_factors`](https://docs.messageix.org/en/stable/model/MESSAGE/model_core.html?highlight=renewable#constraints-representing-renewable-integration) for wind.

<img src='_static/renewable_resource_res.png' width='900'>

Further information can be found in https://doi.org/10.1016/j.esr.2013.01.001 (*Sullivan et al., 2013*)

**Pre-requisites**
- You have the *MESSAGEix* framework installed and working
- You have run the Westeros scenario which adds emission taxes (``westeros_emissions_taxes.ipynb``) and solved it successfully

## Online documentation

The full framework documentation is available at [https://docs.messageix.org](https://docs.messageix.org)

```
import pandas as pd
import ixmp
import message_ix

from message_ix.utils import make_df

%matplotlib inline

mp = ixmp.Platform()
```

## Load existing and clone to new scenario

We load the existing scenario '*carbon_tax*' and clone it to a new scenario '*renewable_potential*', to which we will apply the `renewable_resource_constraints` constraint.

```
model = 'Westeros Electrified'
base = message_ix.Scenario(mp, model=model, scenario='carbon_tax')
scen = base.clone(model, 'renewable_potential', 'illustration of renewable_resource_constraint formulation', keep_solution=False)
scen.check_out()
```

## Retrieve parameters

We will retrieve those parameters necessary to perform subsequent additions of parameters.

```
year_df = scen.vintage_and_active_years()
vintage_years, act_years = year_df['year_vtg'], year_df['year_act']
model_horizon = scen.set('year')
country = 'Westeros'
```

## `renewable_resource_constraints` - Describing the renewable resource potentials

From the previous tutorials, we know based on the results that in 720 wind capacity reaches over 150 GWa. We will therefore define 4 wind potential categories which in total provide 200 GWa, yet the quality of these potentials will vary substantially from the current assumptions, where the capacity factor of `wind_ppl` has been assumed to be 1, meaning that the installed `wind_ppl` capacity can operate 8760 hours per year, i.e., 100% of the year. Depending on the region, high-quality on-shore wind potentials result in capacity factors around 35%, yet the majority of the potentials will lie below this value.
Therefore, 4 resource categories will be introduced:

| Resource Category | Potential \[GWa\] | Capacity Factor \[%\] |
| ----------------- | ----------------- | --------------------- |
| c1 | 100 | 15 |
| c2 | 50 | 20 |
| c3 | 25 | 25 |
| c4 | 25 | 30 |

The figure below illustrates the potential categories as listed in the above table.

<img src='_static/westeros_renewable_resource_potentials.png' width='500'>

The capacity factor of the `wind_ppl` will remain unchanged and will be reflected in the parametrization of the `renewable_resources`.

The following steps are required:
1. Add level and commodity:
   - Specify a new level and commodity which accounts for the wind potentials and which serves as input to the `wind_ppl`
   - Specify which level is a `level_renewable`
2. Modify the existing renewable technology:
   - Specify which technology is classified as a `type_tec` renewable (optional)
   - Modify the input of the `wind_ppl`
3. Add potentials and corresponding capacity factors:
   - Add grades
   - Add `renewable_potentials`
   - Add `renewable_capacity_factor`

### 1 Define new level and commodity

The level and commodity which we add will allow us to account for the wind potentials.

```
scen.add_set('level', ['renewable'])
scen.add_set('commodity', ['wind_onshore'])
scen.add_set('level_renewable', ['renewable'])
```

### 2.1 Define a new technology category `renewable`

We will add `wind_ppl` to this newly defined `type_tec`. This can be used, for example, to simplify the reporting code, where results can be retrieved for all technologies within a given set as opposed to specifying individual technologies.

```
scen.add_set('type_tec', ['renewable'])
df = pd.DataFrame({'type_tec': ['renewable'],
                   'technology': ['wind_ppl']})
scen.add_set('cat_tec', df)
```

### 2.2 Add `input` parameter for `wind_ppl`

We will add the parameter `input` for `wind_ppl`, thereby establishing a connection to the newly defined `renewable_potential` categories.

```
df = pd.DataFrame({
    'node_loc': country,
    'technology': 'wind_ppl',
    'year_vtg': vintage_years,
    'year_act': act_years,
    'mode': 'standard',
    'node_origin': country,
    'commodity': 'wind_onshore',
    'level': 'renewable',
    'time': 'year',
    'time_origin': 'year',
    'value': 1,
    'unit': '%'})
scen.add_par('input', df)
```

### 3.1 Add new resource potential categories

Each renewable potential category is defined as a separate `grade`.

```
grades = ['c1', 'c2', 'c3', 'c4']
scen.add_set('grade', grades)
```

### 3.2 Add resource potentials

Note that unlike fossil resources, which are finite, renewable potentials must be defined for each year.
```
# renewable_potential has the following index structure
scen.idx_names('renewable_potential')

idx = pd.MultiIndex.from_product([[country], ['wind_onshore'], grades, ['renewable'], model_horizon, ['GWa']],
                                 names=['node', 'commodity', 'grade', 'level', 'year', 'unit'])
df = pd.DataFrame({'value': sorted([100, 50, 25, 25] * len(model_horizon), reverse=True)}, idx).reset_index()
scen.add_par('renewable_potential', df)
```

### 3.3 Add `renewable_capacity_factor`

```
# renewable_capacity_factor has the following index structure
scen.idx_names('renewable_capacity_factor')

idx = pd.MultiIndex.from_product([[country], ['wind_onshore'], grades, ['renewable'], model_horizon, ['-']],
                                 names=['node', 'commodity', 'grade', 'level', 'year', 'unit'])
df = pd.DataFrame({'value': sorted([.15, .20, .25, .30] * len(model_horizon))}, idx).reset_index()
scen.add_par('renewable_capacity_factor', df)
```

## Commit and solve

```
scen.commit(comment='define parameters for renewable implementation')
scen.set_as_default()
scen.solve()
scen.var('OBJ')['lvl']
```

## Plotting Results

```
from message_ix.reporting import Reporter
from message_ix.util.tutorial import prepare_plots

rep_base = Reporter.from_scenario(base)
prepare_plots(rep_base)

rep_scen = Reporter.from_scenario(scen)
prepare_plots(rep_scen)
```

### Activity
***
When comparing the results of the original scenario without the renewable potentials ('*carbon_tax*') with the results of our newly modified scenario ('*renewable_potential*'), we can observe that for the same carbon price the activity of the `wind_ppl` has substantially decreased. This is because, by adding potentials with corresponding plant factors, the `wind_ppl` has become economically unattractive and, despite the carbon tax, is not used. Note that the `coal_ppl` still has a plant factor of 1 and has no resource constraints; thus, in order to further improve the model, the parameters for the `coal_ppl` would need to be adjusted.

#### Scenario: '*carbon_tax*'

```
rep_base.set_filters(t=["coal_ppl", "wind_ppl"])
rep_base.get("plot activity")
```

#### Scenario: '*renewable_potential*'

```
rep_scen.set_filters(t=["coal_ppl", "wind_ppl"])
rep_scen.get("plot activity")
```

### Capacity
***
The behavior observed for the activity of the two electricity generation technologies is reflected in the capacity. No further capacity is built for the `wind_ppl`, which is thus phased out by 720.

#### Scenario: '*carbon_tax*'

```
rep_base.get("plot capacity")
```

#### Scenario: '*renewable_potential*'

```
rep_scen.get("plot capacity")
```

### Prices
***
Especially in the earlier model time periods, the price of electricity, and therefore the price of light, increases dramatically. The increase in 720 is due to the emission taxes associated with the operation of the `coal_ppl`.

#### Scenario: '*carbon_tax*'

```
rep_base.set_filters(t=None, c=["light"])
rep_base.get("plot prices")
```

#### Scenario: '*renewable_potential*'

```
rep_scen.set_filters(t=None, c=["light"])
rep_scen.get("plot prices")

mp.close_db()
```

<div class="alert alert-block alert-success">

***Additional exercise***

The renewable potential categories have been defined such that the `capacity_factor` decreases with increasing potential. The model will thus first make use of the renewable potential with the highest capacity factor and, once it is saturated, proceed with the next highest capacity factor. Typically, the potentials of better quality are not necessarily located close to the demand centers.
As an exercise in a separate tutorial, add costs to these potentials by adding one technology for each grade, called something like "connection_to_grid_\<potential grade name\>", with variable costs as shown in the table below.

| Resource Category | Potential \[GWa\] | Capacity Factor \[%\] | Variable OM Cost in \[USD/kWa\] |
| ----------------- | ----------------- | --------------------- | ------------------------------- |
| c1 | 100 | 15 | 1 |
| c2 | 50 | 20 | 15 |
| c3 | 25 | 25 | 10 |
| c4 | 25 | 30 | 30 |

Remember that each of the renewable potential categories will require an individual `commodity` for the `wind_ppl`.

<img src='_static/renewable_resource_res_exercise.png' width='900'>
</div>
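A minimal sketch of how the variable costs from this table could be attached to per-grade grid-connection technologies is shown below. It is not part of the original tutorial: the technology names and the use of the `var_cost` parameter are assumptions for illustration, and a complete solution would also need the per-grade commodities plus matching `input`/`output` entries before the scenario can be solved.

```
# Sketch only -- assumes the `scen`, `country`, `vintage_years` and `act_years`
# objects defined earlier in this tutorial; the technology names are hypothetical.
import pandas as pd

grade_var_cost = {'c1': 1, 'c2': 15, 'c3': 10, 'c4': 30}  # USD/kWa, from the table above

for grade, cost in grade_var_cost.items():
    tec = 'connection_to_grid_{}'.format(grade)
    scen.add_set('technology', tec)
    # Variable O&M cost of delivering wind from this resource grade
    df = pd.DataFrame({
        'node_loc': country,
        'technology': tec,
        'year_vtg': vintage_years,
        'year_act': act_years,
        'mode': 'standard',
        'time': 'year',
        'value': cost,
        'unit': 'USD/kWa'})
    scen.add_par('var_cost', df)
```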
# TP N°5 - Circuitos trifásicos __U.N.L.Z. - Facultad de Ingeniería__ __Electrotecnia__ __Alumno:__ Daniel Antonio Lorenzo <mark><strong>(Resolución en python3)</strong></mark> <a href="https://colab.research.google.com/github/daniel-lorenzo/Electrotecnia/blob/master/Ejercitacion/TP5-2.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a> ## Problema N°2 Un sistema trifásico CBA (secuencia inversa o indirecta 3x380V (tensión eficaz compuesta o de línea), de 4 conductores (tetrafilar), alimenta una carga trifásica conectada en estrella. El valor de cada impedancia es de 20 Ohm con ángulo de desfasaje de 30° capacitivo. 1. Calcular las corrientes $I_r, \, I_s, \, I_t, \, I_\mathrm{neutro}$, representar diagrama fasorial de tensiones y corrientes. 2. Determinar la potencia por fase y trifásica. <img src="img/tp5ej2.png"> ### Solución * Las tensiones de generación las denominaremos con la letra E, $E_{an}, \, E_{bn}, \, E_{cn}$ * Las caídas de tensión en las impedancias de carga las denominamos con la letra U, $U_{ao}, \; U_{bo}, \, U_{co}$ * Las corrientes que circulan por la carga las denominamos con la letra I, $I_a, \, I_b \, I_c$ La corriente que circula por el cuarto conductor, que une el centro de estrella del generador (n) y el neutro de la carga (o), se denomina corriente de neutro, $I_\mathrm{neutro}$ ``` import numpy as np import cmath # Datos E_lin = 380 # V Tensión de línea E_fase = E_lin/np.sqrt(3) # Tensión de fase # Tensiones de generación Ean = cmath.rect(E_fase, np.deg2rad(0) ) Ebn = cmath.rect(E_fase, np.deg2rad(120)) Ecn = cmath.rect(E_fase, np.deg2rad(240)) # Tensiones en las impedancias de carga Uao = Ean Ubo = Ebn Uco = Ecn # Impedancias de carga Za = cmath.rect(20, np.deg2rad(-30) ) Zb = Za Zc = Za # Cálculo de corrientes de fase Ia = Uao/Za Ib = Ubo/Zb Ic = Uco/Zc # Corriente de neutro (sistema balanceado) I_neutro = Ia + Ib + Ic # Potencia aparente Sa = Uao*Ia.conjugate() Sb = Ubo*Ib.conjugate() Sc = Uco*Ic.conjugate() # Potencia real Pa = Sa.real Pb = Sb.real Pc = Sc.real # Potencia reactiva Qa = Sa.imag Qb = Sb.imag Qc = Sc.imag # Potencia trifásica aparente Strif = Sa + Sb + Sc # Potencia trifásica real Ptrif = Strif.real # Potencia trifásica reactiva Qtrif = Strif.imag print('Corrientes de fase:') print('Ia = (%.2f ∠ %.2f°) A'%(abs(Ia) , np.rad2deg( cmath.phase(Ia) ) )) print('Ib = (%.2f ∠ %.2f°) A'%(abs(Ib) , np.rad2deg( cmath.phase(Ib) ) )) print('Ic = (%.2f ∠ %.2f°) A'%(abs(Ic) , np.rad2deg( cmath.phase(Ic) ) )) print('Corriente de neutro:') print('I_neutro = %.2f A'%abs(I_neutro)) print('Potencia aparente:') print('Sa = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sa)%(abs(Sa) , np.rad2deg( cmath.phase(Sa) ) )) print('Sb = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sb)%(abs(Sb) , np.rad2deg( cmath.phase(Sb) ) )) print('Sc = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sc)%(abs(Sc) , np.rad2deg( cmath.phase(Sc) ) )) print('Potencia activa:') print('Pa = %.2f W'%Pa) print('Pb = %.2f W'%Pb) print('Pc = %.2f W'%Pc) print('Potencia reactiva:') print('Qa = %.2f VAr'%Qa) print('Qb = %.2f VAr'%Qb) print('Qc = %.2f VAr'%Qc) print('Potencia trifásica aparente:') print('Strif = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Strif)%(abs(Strif) , np.rad2deg( cmath.phase(Strif) ) )) print('Potencia trifásica activa:') print('Ptrif = %.2f W'%Ptrif) print('Potencia trifásica reactiva:') print('Qtrif = %.2f VAr'%Qtrif) import numpy as np import matplotlib.pyplot as plt %matplotlib inline 
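# The plot below draws the power triangle for one phase:
#   red arrow   -> complex power S_a = P_a + j*Q_a
#   blue arrow  -> active power P_a along the real axis
#   green arrow -> reactive power Q_a, drawn from the tip of P_a
# The red arc marks the angle phi between S_a and the real axis
# (negative here, since the load is capacitive).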
plt.figure(figsize=(7,7)) ax = plt.gca() ax.quiver(0,0,Pa,Qa,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(0,0,Pa,0,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(Pa,0,0,Qa,angles='xy',scale_units='xy',scale=1,color='green') plt.text(Pa - 300, Qa, r'$\vec S$', fontsize=18, color='red', fontweight='bold') plt.text(Pa - 200, 100, r'$\vec P$', fontsize=18, color='blue', fontweight='bold') plt.text(Pa + 100, Qa, r'$\vec Q$', fontsize=18, color='green', fontweight='bold') plt.text(550, -200, r'$\varphi =$ %.2f°'%( np.rad2deg( cmath.phase(Sa) ) ), fontsize=14) theta = np.linspace(0, cmath.phase(Sa), 100) x1 = 500 * np.cos(theta) x2 = 500 * np.sin(theta) plt.plot(x1, x2, color='red') ax.set_xlim([0,2500]) ax.set_ylim([-1500,500]) ax.set_aspect('equal') plt.title('Triángulo de potencias por fase', fontsize=18) plt.xlabel('Re (Eje real)', fontsize=16) plt.ylabel('Im (Eje imaginario)', fontsize=16) plt.grid(linestyle=":") ax.set_axisbelow(True) plt.draw() plt.show() ``` A cada una de las fases le corresponde un valor de potencia monofásica. ``` %reset -s -f ``` ## Problema N°3 Un sistema trifásico de secuencia CBA 3x380V de 4 conductores, alimenta una carga trifásica en estrella. $Z_a = 6 \, \Omega$ con ángulo de desfasaje 0° $Z_b = 6 \, \Omega$ con ángulo de desfasaje 30° inductivo $Z_c = 5 \, \Omega$ con ángulo de desfasaje 45° inductivo 1. Calcular las corrientes $I_r, \, I_s, \ I_t, \, I_\mathrm{neutro}$, representar el diagrama de tensiones y corrientes. 2. Determinar la potencia en cada fase y la potencia trifásica. <img src="img/tp5ej2.png"> ### Solución ``` import numpy as np import cmath # Datos: E_lin = 380 # V (tensión de línea) E_fase = E_lin/np.sqrt(3) # V (tensión de fase) # Tensiones de generación: Ean = cmath.rect(E_fase, np.deg2rad(0) ) Ebn = cmath.rect(E_fase, np.deg2rad(120)) Ecn = cmath.rect(E_fase, np.deg2rad(240)) # Caídas de tensión en las cargas Uao = Ean Ubo = Ebn Uco = Ecn # Impedancias de carga Za = cmath.rect(6,0) Zb = cmath.rect(6, np.deg2rad(30) ) Zc = cmath.rect(5, np.deg2rad(45) ) ``` <div class="alert-danger"> <strong>La carga trifásica NO es equilibrada</strong>, conectada en estrella, eso significa que las tres impedancias <strong>NO SON iguales</strong> entre sí. </div> ``` # Cálculo de las intensidades de corriente en fase Ia = Uao/Za Ib = Ubo/Zb Ic = Uco/Zc # Cálculo de intensidad de corriente de Neutro (4to. 
conductor) In = Ia + Ib +Ic # Cálculo de potencia aparente Sa = Uao*Ia.conjugate() Sb = Ubo*Ib.conjugate() Sc = Uco*Ic.conjugate() # Cálculo de potencia activa Pa = Sa.real Pb = Sb.real Pc = Sc.real # Cálculo de potencia reactiva Qa = Sa.imag Qb = Sb.imag Qc = Sc.imag # Cálculo de potencia trifásica aparente Strif = Sa + Sb + Sc # Potencia trifásica activa Ptrif = Strif.real # Potencia trifásica reactiva Qtrif = Strif.imag print('Corrientes de fase:') print('Ia = (%.2f ∠ %.2f°) A'%(abs(Ia) , np.rad2deg( cmath.phase(Ia) ) )) print('Ib = (%.2f ∠ %.2f°) A'%(abs(Ib) , np.rad2deg( cmath.phase(Ib) ) )) print('Ic = (%.2f ∠ %.2f°) A'%(abs(Ic) , np.rad2deg( cmath.phase(Ic) ) )) print('Corriente de neutro:') print('In = (%.2f ∠ %.2f°) A'%(abs(In) , np.rad2deg( cmath.phase(In) ) )) print('Potencia aparente:') print('Sa = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sa)%(abs(Sa) , np.rad2deg( cmath.phase(Sa) ) )) print('Sb = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sb)%(abs(Sb) , np.rad2deg( cmath.phase(Sb) ) )) print('Sc = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sc)%(abs(Sc) , np.rad2deg( cmath.phase(Sc) ) )) print('Potencia activa:') print('Pa = %.2f W'%Pa) print('Pb = %.2f W'%Pb) print('Pc = %.2f W'%Pc) print('Potencia reactiva:') print('Qa = %.2f VAr'%Qa) print('Qb = %.2f VAr'%Qb) print('Qc = %.2f VAr'%Qc) print('Potencia trifásica aparente:') print('Strif = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Strif)%(abs(Strif) , np.rad2deg( cmath.phase(Strif) ) )) print('Potencia trifásica activa:') print('Ptrif = %.2f W'%Ptrif) print('Potencia trifásica reactiva:') print('Qtrif = %.2f VAr'%Qtrif) %reset -s -f ``` ## Problema N°4 Un sistema trifásico de secuencia ABC (secuencia directa), 3x380V (tensiones de línea en valor eficaz), de 3 conductores (sistema trifilar), alimente una carga trifásica __equilibrada y simétrica (perfecta)__ conectada en triángulo. El valor de cada impedancia es de 5 Ohm con ángulo de desfasaje de 45° inductivo. 1. Calcular las corrientes $I_r, \, I_s, \, I_t$, representar el diagrama fasorial de tensiones y corrientes. 2. Determinar las potencias por fase y trifásica (P,Q,S) <img src="img/tp5ej4.png"> > Las impedancias de carga son iguales entre sí, (cargatrifásica equilibrada y simétrica perfecta). 
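Before the detailed phasor solution below, a quick magnitude check (a minimal sketch, independent of the notebook's variables): for a balanced delta-connected load the branch current is U_line/|Z|, the line current is √3 times larger, and the three-phase apparent power is 3·U_line·I_branch.

```
import numpy as np

U_line = 380.0   # V, line-to-line voltage
Z_mag = 5.0      # ohm, magnitude of each delta branch impedance

I_branch = U_line / Z_mag            # expected |Iab| = |Ibc| = |Ica|
I_line = np.sqrt(3) * I_branch       # expected |Ir| = |Is| = |It|
S_total = 3 * U_line * I_branch      # three-phase apparent power, VA

print('|I_branch| = %.1f A, |I_line| = %.1f A, |S| = %.0f VA' % (I_branch, I_line, S_total))
# -> 76.0 A, 131.6 A and 86640 VA, which the full solution below reproduces
```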
### Solución ``` import numpy as np import cmath # Datos: # Tensiones de línea Eab = cmath.rect(380, np.deg2rad(30) ) Ebc = cmath.rect(380, np.deg2rad(30-120) ) Eca = cmath.rect(380, np.deg2rad(30+120) ) # Caídas de tensión compuesta en las impedancias de cargas Uab = Eab Ubc = Ebc Uca = Eca # Impedancias de carga Zab = cmath.rect(5, np.deg2rad(45) ) Zbc = Zab Zca = Zbc # Cálculo de corriente de línea o compuesta Iab = Uab/Zab Ibc = Ubc/Zbc Ica = Uca/Zca # Cálculo de corrientes de fase Ir = Iab - Ica Is = Ibc - Iab It = Ica - Ibc Ia = Ir ; Ib = Is ; Ic = It # Cálculo de potencia aparente Sab = Uab*Iab.conjugate() Sbc = Ubc*Ibc.conjugate() Sca = Uca*Ica.conjugate() # Potencia activa Pab = Sab.real Pbc = Sbc.real Pca = Sca.real # Potencia reactiva Qab = Sab.imag Qbc = Sab.imag Qca = Sca.imag # Potencia trifásica aparente Strif = Sab + Sbc + Sca # Potencia trifásica activa Ptrif = Strif.real # Potencia trifásica reactiva Qtrif = Strif.imag print('Corrientes de línea o compuesta:') print('Iab = (%.2f ∠ %.2f) A'%(abs(Iab) , np.rad2deg( cmath.phase(Iab) ) )) print('Ibc = (%.2f ∠ %.2f) A'%(abs(Ibc) , np.rad2deg( cmath.phase(Ibc) ) )) print('Ica = (%.2f ∠ %.2f) A'%(abs(Ica) , np.rad2deg( cmath.phase(Ica) ) )) print('Corrientes de fase:') print('Ir = Ia = (%.2f ∠ %.2f) A'%(abs(Ir) , np.rad2deg( cmath.phase(Ir) ) )) print('Is = Ib = (%.2f ∠ %.2f) A'%(abs(Is) , np.rad2deg( cmath.phase(Is) ) )) print('It = Ic = (%.2f ∠ %.2f) A'%(abs(It) , np.rad2deg( cmath.phase(It) ) )) print('Potencia aparente:') print('|Sab| = %.1f VA'%abs(Sab)) print('|Sbc| = %.1f VA'%abs(Sbc)) print('|Sca| = %.1f VA'%abs(Sca)) print('Sab = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sab)%(abs(Sab), np.rad2deg( cmath.phase(Sab) ) )) print('Sbc = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sbc)%(abs(Sbc), np.rad2deg( cmath.phase(Sbc) ) )) print('Sca = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sca)%(abs(Sca), np.rad2deg( cmath.phase(Sca) ) )) print('Potencia activa:') print('Pab = %.2f W'%Pab) print('Pbc = %.2f W'%Pbc) print('Pca = %.2f W'%Pca) print('Potencia reactiva:') print('Qab = %.2f VAr'%Qab) print('Qbc = %.2f VAr'%Qbc) print('Qca = %.2f VAr'%Qca) print('Potencia trifásica aparente:') print('|Strif| = %.2f VA'%abs(Strif)) print('Strif = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Strif)%(abs(Strif) , np.rad2deg( cmath.phase(Sab) ) )) print('Potencia trifásica activa:') print('Ptrif = %.2f W'%Ptrif) print('Potencia trifásica reactiva:') print('Qtrif = %.2f VAr'%Qtrif) %reset -s -f ``` ## Problema 5 Un sistema trifásico de secuencia ABC 3x380V, de 3 conductores, alimenta una carga trifásica conectada en triángulo. * $Z_{ab} = 10 \, \Omega$ con ángulo de desfasaje 0° * $Z_{bc} = 10 \, \Omega$ con ángulo de desfasaje 30° inductivo * $Z_{ca} = 15 \, \Omega$ con ángulo de desfasaje 30° capacitivo 1. Calcular las corrientes $I_r, \, I_s, \, I_t$, representar diagrama fasorial de tensiones y corrientes. 2. 
Determinar las potencias en cada fase y las potencias trifásicas (P,Q,S) <img src="img/tp5ej5.png"> ### Solución ``` import numpy as np import cmath # Datos: # Tensiones de línea Eab = cmath.rect(380 , np.deg2rad(30) ) Ebc = cmath.rect(380 , np.deg2rad(30-120) ) Eca = cmath.rect(380 , np.deg2rad(30+120) ) # Caídas de tensión compuesta en las impedancias de cargas Uab = Eab Ubc = Ebc Uca = Eca # Impedancias de carga Zab = cmath.rect(10 , 0) Zbc = cmath.rect(10 , np.deg2rad(30) ) Zca = cmath.rect(15 , np.deg2rad(-30) ) # Cálculo de corrientes de línea o compuesta Iab = Uab/Zab Ibc = Ubc/Zbc Ica = Uca/Zca # Cálculo de corrientes de fase Ir = Iab - Ica Is = Ibc - Iab It = Ica - Ibc Ia = Ir ; Ib = Is ; Ic = It # Cálculo de potencia aparente Sab = Uab*Iab.conjugate() Sbc = Ubc*Ibc.conjugate() Sca = Uca*Ica.conjugate() # Potencia activa Pab = Sab.real Pbc = Sbc.real Pca = Sca.real # Potencia reactiva Qab = Sab.imag Qbc = Sbc.imag Qca = Sca.imag # Cálculo de potencia trifásica aparente Strif = Sab + Sbc + Sca Ptrif = Strif.real Qtrif = Strif.imag print('Corrientes de línea o compuesta:') print('Iab = (%.2f ∠ %.2f) A'%(abs(Iab) , np.rad2deg( cmath.phase(Iab) ) )) print('Ibc = (%.2f ∠ %.2f) A'%(abs(Ibc) , np.rad2deg( cmath.phase(Ibc) ) )) print('Ica = (%.2f ∠ %.2f) A'%(abs(Ica) , np.rad2deg( cmath.phase(Ica) ) )) print('Corrientes de fase:') print('Ir = Ia = (%.2f ∠ %.2f) A'%(abs(Ir) , np.rad2deg( cmath.phase(Ir) ) )) print('Is = Ib = (%.2f ∠ %.2f) A'%(abs(Is) , np.rad2deg( cmath.phase(Is) ) )) print('It = Ic = (%.2f ∠ %.2f) A'%(abs(It) , np.rad2deg( cmath.phase(It) ) )) print('Potencia aparente:') print('|Sab| = %.1f VA'%abs(Sab)) print('|Sbc| = %.1f VA'%abs(Sbc)) print('|Sca| = %.1f VA'%abs(Sca)) print('Sab = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sab)%(abs(Sab), np.rad2deg( cmath.phase(Sab) ) )) print('Sbc = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sbc)%(abs(Sbc), np.rad2deg( cmath.phase(Sbc) ) )) print('Sca = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sca)%(abs(Sca), np.rad2deg( cmath.phase(Sca) ) )) print('Potencia activa:') print('Pab = %.2f W'%Pab) print('Pbc = %.2f W'%Pbc) print('Pca = %.2f W'%Pca) print('Potencia reactiva:') print('Qab = %.2f VAr'%Qab) print('Qbc = %.2f VAr'%Qbc) print('Qca = %.2f VAr'%Qca) print('Potencia trifásica aparente:') print('|Strif| = %.2f VA'%abs(Strif)) print('Strif = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Strif)%(abs(Strif) , np.rad2deg( cmath.phase(Sab) ) )) print('Potencia trifásica activa:') print('Ptrif = %.2f W'%Ptrif) print('Potencia trifásica reactiva:') print('Qtrif = %.2f VAr'%Qtrif) import matplotlib import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=(7,7)) ax = plt.gca() ax.quiver(0,0,Uab.real,Uab.imag,width=0.003,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(0,0,Ubc.real,Ubc.imag,width=0.003,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(0,0,Uca.real,Uca.imag,width=0.003,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(0,0,Ia.real,Ia.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(0,0,Ib.real,Ib.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(0,0,Ic.real,Ic.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(0,0,Iab.real,Iab.imag,angles='xy',scale_units='xy',scale=1,color='yellow') ax.quiver(0,0,Ibc.real,Ibc.imag,angles='xy',scale_units='xy',scale=1,color='yellow') ax.quiver(0,0,Ica.real,Ica.imag,angles='xy',scale_units='xy',scale=1,color='yellow') plt.text(Ia.real, Ia.imag, r'$I_a ∠ %.2f°$'%(np.rad2deg(cmath.phase(Ia))), fontsize=14, color='red') 
plt.text(Ib.real - 5 , Ib.imag - 5, r'$I_b ∠ %.2f$°'%(np.rad2deg(cmath.phase(Ib))), fontsize=14, color='red') plt.text(Ic.real, Ic.imag, r'$I_c ∠ %.2f$°'%(np.rad2deg(cmath.phase(Ic))), fontsize=14, color='red') plt.text(Iab.real, Iab.imag, r'$I_{ab}$', fontsize=18, fontweight='bold') plt.text(Ibc.real - 5 , Ibc.imag - 5, r'$I_{bc}$', fontsize=18, fontweight='bold') plt.text(Ica.real - 12, Ica.imag, r'$I_{ca}$', fontsize=18, fontweight='bold') #plt.axhline(y=0, xmin=0.5, xmax=1, linestyle="--") ax.set_aspect('equal') plt.title('Diagrama fasorial de corrientes en la carga', fontsize=16) plt.xlabel('Re (Eje real)', fontsize=16) plt.ylabel('Im (Eje imaginario)', fontsize=16) plt.grid(linestyle=":") ax.set_axisbelow(True) ax.set_xlim([-100,100]) ax.set_ylim([-100,100]) #plt.draw() plt.show() print('Ia = (%.2f < %.2f°) A'%(abs(Ir), np.rad2deg( cmath.phase(Ir) ) )) print('Ib = (%.2f < %.2f°) A'%(abs(Is), np.rad2deg( cmath.phase(Is) ) )) print('Ic = (%.2f < %.2f°) A'%(abs(It), np.rad2deg( cmath.phase(It) ) )) print('Iab = (%.2f < %.2f°) A'%(abs(Iab), np.rad2deg( cmath.phase(Iab) ) )) print('Ibc = (%.2f < %.2f°) A'%(abs(Ibc), np.rad2deg( cmath.phase(Ibc) ) )) print('Ica = (%.2f < %.2f°) A'%(abs(Ica), np.rad2deg( cmath.phase(Ica) ) )) print('Uab = (%.2f < %.2f°) V'%(abs(Uab), np.rad2deg( cmath.phase(Uab) ) )) print('Ubc = (%.2f < %.2f°) V'%(abs(Ubc), np.rad2deg( cmath.phase(Ubc) ) )) print('Uca = (%.2f < %.2f°) V'%(abs(Uca), np.rad2deg( cmath.phase(Uca) ) )) %reset -s -f ``` ## Problema 6 Un sistema trifásico de secuencia TSR (cba), 3x380V, de 3 conductores alimenta una carga trifásica conectada en estrella. $Z_a =6 \, \Omega$ con ángulo de desfasaje 0° $Z_b = 6 \, \Omega$ con ángulo de desfasaje 30° inductivo $Z_c = 5 \, \Omega$ con ángulo de desfasaje 45° inductivo Construis el triángulo de tensiones y determinar la tensión de desplazamiento del neutro $V_{on}$ <img src="img/tp5ej6.png"> ### Solución ``` import numpy as np import cmath # Datos: Ean = cmath.rect(220 , 0) Ebn = cmath.rect(220 , np.deg2rad(120)) Ecn = cmath.rect(220 , np.deg2rad(240)) Za = cmath.rect(6 , 0) Zb = cmath.rect(6 , np.deg2rad(30)) Zc = cmath.rect(5 , np.deg2rad(45)) # Cálculo de admitancias Ya = 1/Za Yb = 1/Zb Yc = 1/Zc # Cálculo de tensión de neutro Von = (Ean*Ya + Ebn*Yb + Ecn*Yc)/(Ya + Yb + Yc) # Cálculo de tensiones de fase Uao = Ean - Von Ubo = Ebn - Von Uco = Ecn - Von # Cálculo de corrientes de fase Ia = Uao/Za Ib = Ubo/Zb Ic = Uco/Zc print('Admitancias:') print('Ya = {:.3f} Ohm^-1 = (%.3f ∠ %.2f°) Ohm^-1'.format(Ya)%(abs(Ya), np.rad2deg(cmath.phase(Ya)) )) print('Yb = {:.3f} Ohm^-1 = (%.3f ∠ %.2f°) Ohm^-1'.format(Yb)%(abs(Yb), np.rad2deg(cmath.phase(Yb)) )) print('Yc = {:.3f} Ohm^-1 = (%.3f ∠ %.2f°) Ohm^-1'.format(Yc)%(abs(Yc), np.rad2deg(cmath.phase(Yc)) )) print('Tensión de desplazamiento de neutro:') print('Von = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Von)%(abs(Von), np.rad2deg(cmath.phase(Von)) )) print('Tensiones de fase:') print('Uao = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uao)%(abs(Uao), np.rad2deg(cmath.phase(Uao)) )) print('Ubo = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ubo)%(abs(Ubo), np.rad2deg(cmath.phase(Ubo)) )) print('Uco = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uco)%(abs(Uco), np.rad2deg(cmath.phase(Uco)) )) print('Corrientes de fase:') print('Ia = {:.3f} A = (%.3f ∠ %.2f) A'.format(Ia)%(abs(Ia), np.rad2deg(cmath.phase(Ia)) )) print('Ib = {:.3f} A = (%.3f ∠ %.2f) A'.format(Ib)%(abs(Ib), np.rad2deg(cmath.phase(Ib)) )) print('Ic = {:.3f} A = (%.3f ∠ %.2f) A'.format(Ic)%(abs(Ic), np.rad2deg(cmath.phase(Ic)) )) Uab 
= Ebn - Ean Ubc = Ecn - Ebn Uca = Ean - Ecn import matplotlib import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=(8,8)) ax = plt.gca() ax.quiver(0,0,Ean.real,Ean.imag,angles='xy',scale_units='xy',scale=1) ax.quiver(0,0,Ebn.real,Ebn.imag,angles='xy',scale_units='xy',scale=1) ax.quiver(0,0,Ecn.real,Ecn.imag,angles='xy',scale_units='xy',scale=1) ax.quiver(Von.real,Von.imag,Uao.real,Uao.imag,width=0.005,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(Von.real,Von.imag,Ubo.real,Ubo.imag,width=0.005,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(Von.real,Von.imag,Uco.real,Uco.imag,width=0.005,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(0,0,Von.real,Von.imag,angles='xy',scale_units='xy',scale=1,color='green') ax.quiver(Ean.real,Ean.imag,Uab.real,Uab.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(Ecn.real,Ecn.imag,Uca.real,Uca.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(Ebn.real,Ebn.imag,Ubc.real,Ubc.imag,angles='xy',scale_units='xy',scale=1,color='red') plt.text(Ean.real, Ean.imag, r'$E_{an} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ean))), fontsize=14) plt.text(Ebn.real, Ebn.imag + 10, r'$E_{bn} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ebn))), fontsize=14) plt.text(Ecn.real, Ecn.imag - 20, r'$E_{cn} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ecn))), fontsize=14) plt.text(Ean.real/2, Ebn.imag/2, r'$U_{ab} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Uab))), fontsize=14,color='red') plt.text(Ean.real/2, Ecn.imag/2, r'$U_{ca} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Uca))), fontsize=14,color='red') plt.text(Ebn.real - 50, 0, r'$U_{bc} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ubc))), fontsize=14,color='red') plt.text(Von.real + 20, Von.imag, r'$V_{on} < %.1f°$'%(np.rad2deg(cmath.phase(Von))), fontsize=14,color='green') plt.text(Uao.real + Von.real - 15, Uao.imag + Von.imag + 20, r'$U_{ao} < %.1f°$'%(np.rad2deg(cmath.phase(Uao))), fontsize=14,color='blue') plt.text(Ubo.real + Von.real, Ubo.imag + Von.imag + 30, r'$U_{bo} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ubo))), fontsize=14,color='blue') plt.text(Uco.real + Von.real + 20, Uco.imag + Von.imag, r'$U_{co} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Uco))), fontsize=14,color='blue') plt.text(0, -20, r'$N$', fontsize=14,color='green') ax.set_aspect('equal') plt.title('Tensiones de fase y compuesta', fontsize=16) plt.xlabel('Re (Eje real)', fontsize=16) plt.ylabel('Im (Eje imaginario)', fontsize=16) plt.grid(linestyle=":") ax.set_axisbelow(True) ax.set_xlim([-200,300]) ax.set_ylim([-250,250]) #plt.draw() plt.show() print('Tensiones de generación:') print('Ean = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ean)%(abs(Ean), np.rad2deg(cmath.phase(Ean)) )) print('Ebn = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ebn)%(abs(Ebn), np.rad2deg(cmath.phase(Ebn)) )) print('Ecn = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ecn)%(abs(Ecn), np.rad2deg(cmath.phase(Ecn)) )) print('Tensiones compuestas:') print('Uab = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uab)%(abs(Uab), np.rad2deg(cmath.phase(Uab)) )) print('Ubc = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ubc)%(abs(Ubc), np.rad2deg(cmath.phase(Ubc)) )) print('Uca = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uca)%(abs(Ecn), np.rad2deg(cmath.phase(Uca)) )) print('Tensión de desplazamiento de neutro:') print('Von = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Von)%(abs(Von), np.rad2deg(cmath.phase(Von)) )) print('Tensiones de fase:') print('Uao = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uao)%(abs(Uao), np.rad2deg(cmath.phase(Uao)) )) print('Ubo = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ubo)%(abs(Ubo), np.rad2deg(cmath.phase(Ubo)) )) 
print('Uco = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uco)%(abs(Uco), np.rad2deg(cmath.phase(Uco)) )) ``` ----------- <a href="https://colab.research.google.com/github/daniel-lorenzo/Electrotecnia/blob/master/Ejercitacion/TP5-2.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
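The notebook formats every phasor by hand inside each `print` call; a small helper (a sketch, not part of the original notebook) condenses this pattern and can be used to re-check, for example, the neutral-displacement voltage of Problema 6:

```
import cmath
import numpy as np

def polar(z, unit='', dec=2):
    """Format a complex value as '(magnitude ∠ angle°) unit'."""
    return '(%.*f ∠ %.*f°) %s' % (dec, abs(z), dec, np.degrees(cmath.phase(z)), unit)

# Re-check Von for Problema 6 with the same Millman-type formula used above
Ean, Ebn, Ecn = (cmath.rect(220, np.deg2rad(a)) for a in (0, 120, 240))
Ya, Yb, Yc = 1 / cmath.rect(6, 0), 1 / cmath.rect(6, np.deg2rad(30)), 1 / cmath.rect(5, np.deg2rad(45))
Von = (Ean * Ya + Ebn * Yb + Ecn * Yc) / (Ya + Yb + Yc)
print('Von =', polar(Von, 'V'))
```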
github_jupyter
import numpy as np import cmath # Datos E_lin = 380 # V Tensión de línea E_fase = E_lin/np.sqrt(3) # Tensión de fase # Tensiones de generación Ean = cmath.rect(E_fase, np.deg2rad(0) ) Ebn = cmath.rect(E_fase, np.deg2rad(120)) Ecn = cmath.rect(E_fase, np.deg2rad(240)) # Tensiones en las impedancias de carga Uao = Ean Ubo = Ebn Uco = Ecn # Impedancias de carga Za = cmath.rect(20, np.deg2rad(-30) ) Zb = Za Zc = Za # Cálculo de corrientes de fase Ia = Uao/Za Ib = Ubo/Zb Ic = Uco/Zc # Corriente de neutro (sistema balanceado) I_neutro = Ia + Ib + Ic # Potencia aparente Sa = Uao*Ia.conjugate() Sb = Ubo*Ib.conjugate() Sc = Uco*Ic.conjugate() # Potencia real Pa = Sa.real Pb = Sb.real Pc = Sc.real # Potencia reactiva Qa = Sa.imag Qb = Sb.imag Qc = Sc.imag # Potencia trifásica aparente Strif = Sa + Sb + Sc # Potencia trifásica real Ptrif = Strif.real # Potencia trifásica reactiva Qtrif = Strif.imag print('Corrientes de fase:') print('Ia = (%.2f ∠ %.2f°) A'%(abs(Ia) , np.rad2deg( cmath.phase(Ia) ) )) print('Ib = (%.2f ∠ %.2f°) A'%(abs(Ib) , np.rad2deg( cmath.phase(Ib) ) )) print('Ic = (%.2f ∠ %.2f°) A'%(abs(Ic) , np.rad2deg( cmath.phase(Ic) ) )) print('Corriente de neutro:') print('I_neutro = %.2f A'%abs(I_neutro)) print('Potencia aparente:') print('Sa = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sa)%(abs(Sa) , np.rad2deg( cmath.phase(Sa) ) )) print('Sb = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sb)%(abs(Sb) , np.rad2deg( cmath.phase(Sb) ) )) print('Sc = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sc)%(abs(Sc) , np.rad2deg( cmath.phase(Sc) ) )) print('Potencia activa:') print('Pa = %.2f W'%Pa) print('Pb = %.2f W'%Pb) print('Pc = %.2f W'%Pc) print('Potencia reactiva:') print('Qa = %.2f VAr'%Qa) print('Qb = %.2f VAr'%Qb) print('Qc = %.2f VAr'%Qc) print('Potencia trifásica aparente:') print('Strif = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Strif)%(abs(Strif) , np.rad2deg( cmath.phase(Strif) ) )) print('Potencia trifásica activa:') print('Ptrif = %.2f W'%Ptrif) print('Potencia trifásica reactiva:') print('Qtrif = %.2f VAr'%Qtrif) import numpy as np import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=(7,7)) ax = plt.gca() ax.quiver(0,0,Pa,Qa,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(0,0,Pa,0,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(Pa,0,0,Qa,angles='xy',scale_units='xy',scale=1,color='green') plt.text(Pa - 300, Qa, r'$\vec S$', fontsize=18, color='red', fontweight='bold') plt.text(Pa - 200, 100, r'$\vec P$', fontsize=18, color='blue', fontweight='bold') plt.text(Pa + 100, Qa, r'$\vec Q$', fontsize=18, color='green', fontweight='bold') plt.text(550, -200, r'$\varphi =$ %.2f°'%( np.rad2deg( cmath.phase(Sa) ) ), fontsize=14) theta = np.linspace(0, cmath.phase(Sa), 100) x1 = 500 * np.cos(theta) x2 = 500 * np.sin(theta) plt.plot(x1, x2, color='red') ax.set_xlim([0,2500]) ax.set_ylim([-1500,500]) ax.set_aspect('equal') plt.title('Triángulo de potencias por fase', fontsize=18) plt.xlabel('Re (Eje real)', fontsize=16) plt.ylabel('Im (Eje imaginario)', fontsize=16) plt.grid(linestyle=":") ax.set_axisbelow(True) plt.draw() plt.show() %reset -s -f import numpy as np import cmath # Datos: E_lin = 380 # V (tensión de línea) E_fase = E_lin/np.sqrt(3) # V (tensión de fase) # Tensiones de generación: Ean = cmath.rect(E_fase, np.deg2rad(0) ) Ebn = cmath.rect(E_fase, np.deg2rad(120)) Ecn = cmath.rect(E_fase, np.deg2rad(240)) # Caídas de tensión en las cargas Uao = Ean Ubo = Ebn Uco = Ecn # Impedancias de carga Za = cmath.rect(6,0) Zb = cmath.rect(6, np.deg2rad(30) ) Zc = cmath.rect(5, 
np.deg2rad(45) ) # Cálculo de las intensidades de corriente en fase Ia = Uao/Za Ib = Ubo/Zb Ic = Uco/Zc # Cálculo de intensidad de corriente de Neutro (4to. conductor) In = Ia + Ib +Ic # Cálculo de potencia aparente Sa = Uao*Ia.conjugate() Sb = Ubo*Ib.conjugate() Sc = Uco*Ic.conjugate() # Cálculo de potencia activa Pa = Sa.real Pb = Sb.real Pc = Sc.real # Cálculo de potencia reactiva Qa = Sa.imag Qb = Sb.imag Qc = Sc.imag # Cálculo de potencia trifásica aparente Strif = Sa + Sb + Sc # Potencia trifásica activa Ptrif = Strif.real # Potencia trifásica reactiva Qtrif = Strif.imag print('Corrientes de fase:') print('Ia = (%.2f ∠ %.2f°) A'%(abs(Ia) , np.rad2deg( cmath.phase(Ia) ) )) print('Ib = (%.2f ∠ %.2f°) A'%(abs(Ib) , np.rad2deg( cmath.phase(Ib) ) )) print('Ic = (%.2f ∠ %.2f°) A'%(abs(Ic) , np.rad2deg( cmath.phase(Ic) ) )) print('Corriente de neutro:') print('In = (%.2f ∠ %.2f°) A'%(abs(In) , np.rad2deg( cmath.phase(In) ) )) print('Potencia aparente:') print('Sa = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sa)%(abs(Sa) , np.rad2deg( cmath.phase(Sa) ) )) print('Sb = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sb)%(abs(Sb) , np.rad2deg( cmath.phase(Sb) ) )) print('Sc = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sc)%(abs(Sc) , np.rad2deg( cmath.phase(Sc) ) )) print('Potencia activa:') print('Pa = %.2f W'%Pa) print('Pb = %.2f W'%Pb) print('Pc = %.2f W'%Pc) print('Potencia reactiva:') print('Qa = %.2f VAr'%Qa) print('Qb = %.2f VAr'%Qb) print('Qc = %.2f VAr'%Qc) print('Potencia trifásica aparente:') print('Strif = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Strif)%(abs(Strif) , np.rad2deg( cmath.phase(Strif) ) )) print('Potencia trifásica activa:') print('Ptrif = %.2f W'%Ptrif) print('Potencia trifásica reactiva:') print('Qtrif = %.2f VAr'%Qtrif) %reset -s -f import numpy as np import cmath # Datos: # Tensiones de línea Eab = cmath.rect(380, np.deg2rad(30) ) Ebc = cmath.rect(380, np.deg2rad(30-120) ) Eca = cmath.rect(380, np.deg2rad(30+120) ) # Caídas de tensión compuesta en las impedancias de cargas Uab = Eab Ubc = Ebc Uca = Eca # Impedancias de carga Zab = cmath.rect(5, np.deg2rad(45) ) Zbc = Zab Zca = Zbc # Cálculo de corriente de línea o compuesta Iab = Uab/Zab Ibc = Ubc/Zbc Ica = Uca/Zca # Cálculo de corrientes de fase Ir = Iab - Ica Is = Ibc - Iab It = Ica - Ibc Ia = Ir ; Ib = Is ; Ic = It # Cálculo de potencia aparente Sab = Uab*Iab.conjugate() Sbc = Ubc*Ibc.conjugate() Sca = Uca*Ica.conjugate() # Potencia activa Pab = Sab.real Pbc = Sbc.real Pca = Sca.real # Potencia reactiva Qab = Sab.imag Qbc = Sab.imag Qca = Sca.imag # Potencia trifásica aparente Strif = Sab + Sbc + Sca # Potencia trifásica activa Ptrif = Strif.real # Potencia trifásica reactiva Qtrif = Strif.imag print('Corrientes de línea o compuesta:') print('Iab = (%.2f ∠ %.2f) A'%(abs(Iab) , np.rad2deg( cmath.phase(Iab) ) )) print('Ibc = (%.2f ∠ %.2f) A'%(abs(Ibc) , np.rad2deg( cmath.phase(Ibc) ) )) print('Ica = (%.2f ∠ %.2f) A'%(abs(Ica) , np.rad2deg( cmath.phase(Ica) ) )) print('Corrientes de fase:') print('Ir = Ia = (%.2f ∠ %.2f) A'%(abs(Ir) , np.rad2deg( cmath.phase(Ir) ) )) print('Is = Ib = (%.2f ∠ %.2f) A'%(abs(Is) , np.rad2deg( cmath.phase(Is) ) )) print('It = Ic = (%.2f ∠ %.2f) A'%(abs(It) , np.rad2deg( cmath.phase(It) ) )) print('Potencia aparente:') print('|Sab| = %.1f VA'%abs(Sab)) print('|Sbc| = %.1f VA'%abs(Sbc)) print('|Sca| = %.1f VA'%abs(Sca)) print('Sab = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sab)%(abs(Sab), np.rad2deg( cmath.phase(Sab) ) )) print('Sbc = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sbc)%(abs(Sbc), np.rad2deg( 
cmath.phase(Sbc) ) )) print('Sca = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sca)%(abs(Sca), np.rad2deg( cmath.phase(Sca) ) )) print('Potencia activa:') print('Pab = %.2f W'%Pab) print('Pbc = %.2f W'%Pbc) print('Pca = %.2f W'%Pca) print('Potencia reactiva:') print('Qab = %.2f VAr'%Qab) print('Qbc = %.2f VAr'%Qbc) print('Qca = %.2f VAr'%Qca) print('Potencia trifásica aparente:') print('|Strif| = %.2f VA'%abs(Strif)) print('Strif = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Strif)%(abs(Strif) , np.rad2deg( cmath.phase(Sab) ) )) print('Potencia trifásica activa:') print('Ptrif = %.2f W'%Ptrif) print('Potencia trifásica reactiva:') print('Qtrif = %.2f VAr'%Qtrif) %reset -s -f import numpy as np import cmath # Datos: # Tensiones de línea Eab = cmath.rect(380 , np.deg2rad(30) ) Ebc = cmath.rect(380 , np.deg2rad(30-120) ) Eca = cmath.rect(380 , np.deg2rad(30+120) ) # Caídas de tensión compuesta en las impedancias de cargas Uab = Eab Ubc = Ebc Uca = Eca # Impedancias de carga Zab = cmath.rect(10 , 0) Zbc = cmath.rect(10 , np.deg2rad(30) ) Zca = cmath.rect(15 , np.deg2rad(-30) ) # Cálculo de corrientes de línea o compuesta Iab = Uab/Zab Ibc = Ubc/Zbc Ica = Uca/Zca # Cálculo de corrientes de fase Ir = Iab - Ica Is = Ibc - Iab It = Ica - Ibc Ia = Ir ; Ib = Is ; Ic = It # Cálculo de potencia aparente Sab = Uab*Iab.conjugate() Sbc = Ubc*Ibc.conjugate() Sca = Uca*Ica.conjugate() # Potencia activa Pab = Sab.real Pbc = Sbc.real Pca = Sca.real # Potencia reactiva Qab = Sab.imag Qbc = Sbc.imag Qca = Sca.imag # Cálculo de potencia trifásica aparente Strif = Sab + Sbc + Sca Ptrif = Strif.real Qtrif = Strif.imag print('Corrientes de línea o compuesta:') print('Iab = (%.2f ∠ %.2f) A'%(abs(Iab) , np.rad2deg( cmath.phase(Iab) ) )) print('Ibc = (%.2f ∠ %.2f) A'%(abs(Ibc) , np.rad2deg( cmath.phase(Ibc) ) )) print('Ica = (%.2f ∠ %.2f) A'%(abs(Ica) , np.rad2deg( cmath.phase(Ica) ) )) print('Corrientes de fase:') print('Ir = Ia = (%.2f ∠ %.2f) A'%(abs(Ir) , np.rad2deg( cmath.phase(Ir) ) )) print('Is = Ib = (%.2f ∠ %.2f) A'%(abs(Is) , np.rad2deg( cmath.phase(Is) ) )) print('It = Ic = (%.2f ∠ %.2f) A'%(abs(It) , np.rad2deg( cmath.phase(It) ) )) print('Potencia aparente:') print('|Sab| = %.1f VA'%abs(Sab)) print('|Sbc| = %.1f VA'%abs(Sbc)) print('|Sca| = %.1f VA'%abs(Sca)) print('Sab = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sab)%(abs(Sab), np.rad2deg( cmath.phase(Sab) ) )) print('Sbc = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sbc)%(abs(Sbc), np.rad2deg( cmath.phase(Sbc) ) )) print('Sca = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Sca)%(abs(Sca), np.rad2deg( cmath.phase(Sca) ) )) print('Potencia activa:') print('Pab = %.2f W'%Pab) print('Pbc = %.2f W'%Pbc) print('Pca = %.2f W'%Pca) print('Potencia reactiva:') print('Qab = %.2f VAr'%Qab) print('Qbc = %.2f VAr'%Qbc) print('Qca = %.2f VAr'%Qca) print('Potencia trifásica aparente:') print('|Strif| = %.2f VA'%abs(Strif)) print('Strif = {:.2f} VA = (%.2f ∠ %.2f°) VA'.format(Strif)%(abs(Strif) , np.rad2deg( cmath.phase(Sab) ) )) print('Potencia trifásica activa:') print('Ptrif = %.2f W'%Ptrif) print('Potencia trifásica reactiva:') print('Qtrif = %.2f VAr'%Qtrif) import matplotlib import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=(7,7)) ax = plt.gca() ax.quiver(0,0,Uab.real,Uab.imag,width=0.003,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(0,0,Ubc.real,Ubc.imag,width=0.003,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(0,0,Uca.real,Uca.imag,width=0.003,angles='xy',scale_units='xy',scale=1,color='blue') 
ax.quiver(0,0,Ia.real,Ia.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(0,0,Ib.real,Ib.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(0,0,Ic.real,Ic.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(0,0,Iab.real,Iab.imag,angles='xy',scale_units='xy',scale=1,color='yellow') ax.quiver(0,0,Ibc.real,Ibc.imag,angles='xy',scale_units='xy',scale=1,color='yellow') ax.quiver(0,0,Ica.real,Ica.imag,angles='xy',scale_units='xy',scale=1,color='yellow') plt.text(Ia.real, Ia.imag, r'$I_a ∠ %.2f°$'%(np.rad2deg(cmath.phase(Ia))), fontsize=14, color='red') plt.text(Ib.real - 5 , Ib.imag - 5, r'$I_b ∠ %.2f$°'%(np.rad2deg(cmath.phase(Ib))), fontsize=14, color='red') plt.text(Ic.real, Ic.imag, r'$I_c ∠ %.2f$°'%(np.rad2deg(cmath.phase(Ic))), fontsize=14, color='red') plt.text(Iab.real, Iab.imag, r'$I_{ab}$', fontsize=18, fontweight='bold') plt.text(Ibc.real - 5 , Ibc.imag - 5, r'$I_{bc}$', fontsize=18, fontweight='bold') plt.text(Ica.real - 12, Ica.imag, r'$I_{ca}$', fontsize=18, fontweight='bold') #plt.axhline(y=0, xmin=0.5, xmax=1, linestyle="--") ax.set_aspect('equal') plt.title('Diagrama fasorial de corrientes en la carga', fontsize=16) plt.xlabel('Re (Eje real)', fontsize=16) plt.ylabel('Im (Eje imaginario)', fontsize=16) plt.grid(linestyle=":") ax.set_axisbelow(True) ax.set_xlim([-100,100]) ax.set_ylim([-100,100]) #plt.draw() plt.show() print('Ia = (%.2f < %.2f°) A'%(abs(Ir), np.rad2deg( cmath.phase(Ir) ) )) print('Ib = (%.2f < %.2f°) A'%(abs(Is), np.rad2deg( cmath.phase(Is) ) )) print('Ic = (%.2f < %.2f°) A'%(abs(It), np.rad2deg( cmath.phase(It) ) )) print('Iab = (%.2f < %.2f°) A'%(abs(Iab), np.rad2deg( cmath.phase(Iab) ) )) print('Ibc = (%.2f < %.2f°) A'%(abs(Ibc), np.rad2deg( cmath.phase(Ibc) ) )) print('Ica = (%.2f < %.2f°) A'%(abs(Ica), np.rad2deg( cmath.phase(Ica) ) )) print('Uab = (%.2f < %.2f°) V'%(abs(Uab), np.rad2deg( cmath.phase(Uab) ) )) print('Ubc = (%.2f < %.2f°) V'%(abs(Ubc), np.rad2deg( cmath.phase(Ubc) ) )) print('Uca = (%.2f < %.2f°) V'%(abs(Uca), np.rad2deg( cmath.phase(Uca) ) )) %reset -s -f import numpy as np import cmath # Datos: Ean = cmath.rect(220 , 0) Ebn = cmath.rect(220 , np.deg2rad(120)) Ecn = cmath.rect(220 , np.deg2rad(240)) Za = cmath.rect(6 , 0) Zb = cmath.rect(6 , np.deg2rad(30)) Zc = cmath.rect(5 , np.deg2rad(45)) # Cálculo de admitancias Ya = 1/Za Yb = 1/Zb Yc = 1/Zc # Cálculo de tensión de neutro Von = (Ean*Ya + Ebn*Yb + Ecn*Yc)/(Ya + Yb + Yc) # Cálculo de tensiones de fase Uao = Ean - Von Ubo = Ebn - Von Uco = Ecn - Von # Cálculo de corrientes de fase Ia = Uao/Za Ib = Ubo/Zb Ic = Uco/Zc print('Admitancias:') print('Ya = {:.3f} Ohm^-1 = (%.3f ∠ %.2f°) Ohm^-1'.format(Ya)%(abs(Ya), np.rad2deg(cmath.phase(Ya)) )) print('Yb = {:.3f} Ohm^-1 = (%.3f ∠ %.2f°) Ohm^-1'.format(Yb)%(abs(Yb), np.rad2deg(cmath.phase(Yb)) )) print('Yc = {:.3f} Ohm^-1 = (%.3f ∠ %.2f°) Ohm^-1'.format(Yc)%(abs(Yc), np.rad2deg(cmath.phase(Yc)) )) print('Tensión de desplazamiento de neutro:') print('Von = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Von)%(abs(Von), np.rad2deg(cmath.phase(Von)) )) print('Tensiones de fase:') print('Uao = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uao)%(abs(Uao), np.rad2deg(cmath.phase(Uao)) )) print('Ubo = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ubo)%(abs(Ubo), np.rad2deg(cmath.phase(Ubo)) )) print('Uco = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uco)%(abs(Uco), np.rad2deg(cmath.phase(Uco)) )) print('Corrientes de fase:') print('Ia = {:.3f} A = (%.3f ∠ %.2f) A'.format(Ia)%(abs(Ia), np.rad2deg(cmath.phase(Ia)) )) print('Ib = {:.3f} A = (%.3f ∠ 
%.2f) A'.format(Ib)%(abs(Ib), np.rad2deg(cmath.phase(Ib)) )) print('Ic = {:.3f} A = (%.3f ∠ %.2f) A'.format(Ic)%(abs(Ic), np.rad2deg(cmath.phase(Ic)) )) Uab = Ebn - Ean Ubc = Ecn - Ebn Uca = Ean - Ecn import matplotlib import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=(8,8)) ax = plt.gca() ax.quiver(0,0,Ean.real,Ean.imag,angles='xy',scale_units='xy',scale=1) ax.quiver(0,0,Ebn.real,Ebn.imag,angles='xy',scale_units='xy',scale=1) ax.quiver(0,0,Ecn.real,Ecn.imag,angles='xy',scale_units='xy',scale=1) ax.quiver(Von.real,Von.imag,Uao.real,Uao.imag,width=0.005,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(Von.real,Von.imag,Ubo.real,Ubo.imag,width=0.005,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(Von.real,Von.imag,Uco.real,Uco.imag,width=0.005,angles='xy',scale_units='xy',scale=1,color='blue') ax.quiver(0,0,Von.real,Von.imag,angles='xy',scale_units='xy',scale=1,color='green') ax.quiver(Ean.real,Ean.imag,Uab.real,Uab.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(Ecn.real,Ecn.imag,Uca.real,Uca.imag,angles='xy',scale_units='xy',scale=1,color='red') ax.quiver(Ebn.real,Ebn.imag,Ubc.real,Ubc.imag,angles='xy',scale_units='xy',scale=1,color='red') plt.text(Ean.real, Ean.imag, r'$E_{an} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ean))), fontsize=14) plt.text(Ebn.real, Ebn.imag + 10, r'$E_{bn} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ebn))), fontsize=14) plt.text(Ecn.real, Ecn.imag - 20, r'$E_{cn} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ecn))), fontsize=14) plt.text(Ean.real/2, Ebn.imag/2, r'$U_{ab} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Uab))), fontsize=14,color='red') plt.text(Ean.real/2, Ecn.imag/2, r'$U_{ca} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Uca))), fontsize=14,color='red') plt.text(Ebn.real - 50, 0, r'$U_{bc} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ubc))), fontsize=14,color='red') plt.text(Von.real + 20, Von.imag, r'$V_{on} < %.1f°$'%(np.rad2deg(cmath.phase(Von))), fontsize=14,color='green') plt.text(Uao.real + Von.real - 15, Uao.imag + Von.imag + 20, r'$U_{ao} < %.1f°$'%(np.rad2deg(cmath.phase(Uao))), fontsize=14,color='blue') plt.text(Ubo.real + Von.real, Ubo.imag + Von.imag + 30, r'$U_{bo} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Ubo))), fontsize=14,color='blue') plt.text(Uco.real + Von.real + 20, Uco.imag + Von.imag, r'$U_{co} ∠ %.1f°$'%(np.rad2deg(cmath.phase(Uco))), fontsize=14,color='blue') plt.text(0, -20, r'$N$', fontsize=14,color='green') ax.set_aspect('equal') plt.title('Tensiones de fase y compuesta', fontsize=16) plt.xlabel('Re (Eje real)', fontsize=16) plt.ylabel('Im (Eje imaginario)', fontsize=16) plt.grid(linestyle=":") ax.set_axisbelow(True) ax.set_xlim([-200,300]) ax.set_ylim([-250,250]) #plt.draw() plt.show() print('Tensiones de generación:') print('Ean = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ean)%(abs(Ean), np.rad2deg(cmath.phase(Ean)) )) print('Ebn = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ebn)%(abs(Ebn), np.rad2deg(cmath.phase(Ebn)) )) print('Ecn = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ecn)%(abs(Ecn), np.rad2deg(cmath.phase(Ecn)) )) print('Tensiones compuestas:') print('Uab = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uab)%(abs(Uab), np.rad2deg(cmath.phase(Uab)) )) print('Ubc = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ubc)%(abs(Ubc), np.rad2deg(cmath.phase(Ubc)) )) print('Uca = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uca)%(abs(Ecn), np.rad2deg(cmath.phase(Uca)) )) print('Tensión de desplazamiento de neutro:') print('Von = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Von)%(abs(Von), np.rad2deg(cmath.phase(Von)) )) print('Tensiones de fase:') print('Uao = {:.2f} V = (%.2f ∠ %.2f°) 
V'.format(Uao)%(abs(Uao), np.rad2deg(cmath.phase(Uao)) )) print('Ubo = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Ubo)%(abs(Ubo), np.rad2deg(cmath.phase(Ubo)) )) print('Uco = {:.2f} V = (%.2f ∠ %.2f°) V'.format(Uco)%(abs(Uco), np.rad2deg(cmath.phase(Uco)) ))
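As a cross-check on the neutral-displacement calculation above (not part of the original notebook): in a three-wire wye connection there is no neutral conductor, so Kirchhoff's current law forces the three load currents to sum to zero, and the Millman-style formula used for Von enforces exactly that. The sketch below restates the same source voltages and load impedances from the example and verifies the identity numerically.

```
import cmath
import numpy as np

# Same data as the unbalanced-wye example above
Ean = cmath.rect(220, 0)
Ebn = cmath.rect(220, np.deg2rad(120))
Ecn = cmath.rect(220, np.deg2rad(240))
Za = cmath.rect(6, 0)
Zb = cmath.rect(6, np.deg2rad(30))
Zc = cmath.rect(5, np.deg2rad(45))

Ya, Yb, Yc = 1/Za, 1/Zb, 1/Zc

# Neutral displacement voltage (Millman's theorem)
Von = (Ean*Ya + Ebn*Yb + Ecn*Yc) / (Ya + Yb + Yc)

# Load currents (equal to the line currents in a wye connection),
# computed from the voltage actually applied to each impedance
Ia = (Ean - Von)/Za
Ib = (Ebn - Von)/Zb
Ic = (Ecn - Von)/Zc

# With a floating neutral, KCL requires Ia + Ib + Ic = 0 (up to rounding)
print('|Ia + Ib + Ic| = %.2e A' % abs(Ia + Ib + Ic))
```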
# Implementing an Image Classification App in One Minute In this tutorial, constructing a LeNet5 model, downloading dataset, training, starting the server and making predictions of the model using TinyMS API will be demonstrated. ## Prerequisite - Ubuntu: `18.04` - Python: `3.7.x` - Flask: `1.1.2` - MindSpore: `CPU-1.1.1` - TinyMS: `0.1.0` - numpy: `1.17.5` - Pillow: `8.1.0` - pip: `21.0.1` - requests: `2.18.4` ## Introduction TinyMS is a high-level API which is designed for amateur of deep learning. It minimizes the number of actions of users required to construct, train, evaluate and serve a model. TinyMS also provides tutorials and documentations for developers. This tutorial consists of six parts, `constructing the model`, `downloading dataset`, `training`, `define servable json`, `starting server` and `making predictions` in which the server will be run in a sub process. ``` import os import json import tinyms.optimizers as opt from PIL import Image from tinyms import context from tinyms.data import MnistDataset, download_dataset from tinyms.vision import mnist_transform, ImageViewer from tinyms.model import Model, lenet5 from tinyms.serving import start_server, predict, list_servables, shutdown, server_started from tinyms.metrics import Accuracy from tinyms.losses import SoftmaxCrossEntropyWithLogits from tinyms.callbacks import ModelCheckpoint, CheckpointConfig, LossMonitor ``` ### 1. Construct the model TinyMS encapsulates init and construct of the LeNet5 model, the line of the code is reduced to construct the LeNet5 model: ``` # build the network net = lenet5(class_num=10) model = Model(net) ``` ### 2. Download dataset The MNIST dataset will be downloaded if `mnist` folder didn't exist at the root. If `mnist` folder already exists, this step will not be performed. ``` # download the dataset mnist_path = '/root/mnist' if not os.path.exists(mnist_path): download_dataset('mnist', '/root') print('************Download complete*************') else: print('************Dataset already exists.**************') ``` ### 3. Train the model & evaluation The dataset for both training and evaluation will be defined here, and the parameters for training also set in this block. 
A trained ckpt file will be saved to `/etc/tinyms/serving/lenet5` folder for later use, meanwhile the evaluation will be performed and the `Accuracy` can be checked ``` # check lenet folder exists or not ckpt_folder = '/etc/tinyms/serving/lenet5' ckpt_path = '/etc/tinyms/serving/lenet5/lenet5.ckpt' if not os.path.exists(ckpt_folder): !mkdir -p /etc/tinyms/serving/lenet5 else: print('lenet5 ckpt folder already exists') # set environment parameters device_target = "CPU" context.set_context(mode=context.GRAPH_MODE, device_target=device_target) dataset_sink_mode = False # define the training and evaluation dataset train_dataset = MnistDataset(os.path.join(mnist_path, "train"), shuffle=True) train_dataset = mnist_transform.apply_ds(train_dataset) eval_dataset = MnistDataset(os.path.join(mnist_path, "test"), shuffle=True) eval_dataset = mnist_transform.apply_ds(eval_dataset) # parameters for training lr = 0.01 momentum = 0.9 epoch_size = 1 batch_size = 32 # define the loss function net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') # define the optimizer net_opt = opt.Momentum(net.trainable_params(), lr, momentum) net_metrics={"Accuracy": Accuracy()} model.compile(loss_fn=net_loss, optimizer=net_opt, metrics=net_metrics) print('************************Start training*************************') ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)) model.train(epoch_size, train_dataset, callbacks=[ckpoint_cb, LossMonitor()],dataset_sink_mode=dataset_sink_mode) print('************************Finished training*************************') model.save_checkpoint(ckpt_path) model.load_checkpoint(ckpt_path) print('************************Start evaluation*************************') acc = model.eval(eval_dataset, dataset_sink_mode=dataset_sink_mode) print("============== Accuracy:{} ==============".format(acc)) ``` ### 4. Define servable.json Define the lenet5 servable json file for model name, format and number of classes for serving. ``` servable_json = [{'name': 'lenet5', 'description': 'This servable hosts a lenet5 model predicting numbers', 'model': { "name": "lenet5", "format": "ckpt", "class_num": 10}}] os.chdir("/etc/tinyms/serving") json_data = json.dumps(servable_json, indent=4) with open('servable.json', 'w') as json_file: json_file.write(json_data) ``` ### 5. Start server #### 5.1 Introduction TinyMS Serving is a C/S(client/server) structure. TinyMS using [Flask](https://flask.palletsprojects.com/en/1.1.x/) which is a micro web framework written in python as the C/S communication tool. In order to serve a model, user must start server first. If successfully started, the server will be run in a subprocess and listening to POST requests from 127.0.0.1 port 5000 sent by client and handle the requests using MindSpore backend which constructs the model, run the prediction and send the result back to the client. #### 5.2 Start server Run the following code block to start the server: ``` start_server() ``` ### 6. Make predictions #### 6.1 Upload the pic A picture of a single digit number is required to be the input. The picture we use in this tutorial can be found [HERE](https://ascend-tutorials.obs.cn-north-4.myhuaweicloud.com/tinyms-test-pics/numbers/7.png), then save the picture to the root folder, and rename it to `7.png` (or any other name you like). 
Or run the following code to download the pic for this tutorial: ``` if not os.path.exists('/root/7.png'): !wget -P /root/ https://ascend-tutorials.obs.cn-north-4.myhuaweicloud.com/tinyms-test-pics/numbers/7.png else: print('7.png already exists') ``` #### 6.2 List servables Use the `list_servables` function to check which model is currently being served. ``` list_servables() ``` If the output `description` shows that it is a `lenet5` model, we can continue to the next step and send our request. #### 6.3 Send the request and get the result Run the `predict` function to send the request, choosing between the `TOP1_CLASS` and `TOP5_CLASS` strategies: ``` image_path = "/root/7.png" strategy = "TOP1_CLASS" # predict(image_path, servable_name, dataset='mnist', strategy='TOP1_CLASS') if server_started() is True: img_viewer = ImageViewer(Image.open(image_path), image_path) img_viewer.show() print(predict(image_path, 'lenet5', 'mnist', strategy)) else: print("Server not started") ``` If the output looks similar to this: ``` TOP1: 7, score: 0.99934917688369750977 ``` then the prediction was performed successfully. ## Shutdown server ``` shutdown() ```
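As a variation on step 6.3, the tutorial notes that `predict` also accepts a `TOP5_CLASS` strategy. The sketch below (run it before calling `shutdown()`, while the server is still up) simply reuses the calls already shown above with that strategy; the exact formatting of the returned scores is determined by TinyMS.

```
from tinyms.serving import predict, server_started

image_path = "/root/7.png"

# Same request as before, but ask for the five highest-scoring digits
if server_started() is True:
    print(predict(image_path, 'lenet5', 'mnist', 'TOP5_CLASS'))
else:
    print("Server not started")
```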
[View in Colaboratory](https://colab.research.google.com/github/pilar260/buenosaires2018/blob/master/1_intro_to_python.ipynb) # Programming in Python In this part of the workshop, we will introduce some basic programming concepts in Python. We will then explore how these concepts allow us to carry out an analysis that can be reproduced. ## Working with variables You can get output from Python by typing math into a code cell. Try executing a sum below (for example: 3 + 5). ``` 3+5 971+54 ``` However, to do anything useful, we will need to assign values to `variables`. Assign a height in cm to a variable in the cell below. ``` height_cm = 180 x=20 ``` Now that the value has been assigned to our variable, we can print it in the console with `print`. ``` print('Height in cm is:', height_cm) ``` We can also do arithmetic with the variable. Convert the height in cm to metres, then print the new value as before (Warning! In Python 2, dividing an integer by an integer will return an integer.) ``` height_m = height_cm / 100 print('height in metres:',height_m) ``` We can check which variables are available in memory with the special command: `%whos` ``` %whos ``` We can see that each of our variables has a type (in this case `int` and `float`), describing the type of data held by the variable. We can use `type` to check the data type of a variable. ``` type(height_cm) ``` Another data type is a `list`, which can hold a series of items. For example, we might measure a patient's heart rate several times over a period. ``` heartrate = [66,64,63,62,66,69,70,75,76] type(heartrate) ``` ## Repeating actions in loops We can access individual items in a list using an index (note, in Python, indexing begins with 0!). For example, let's view the first `[0]` and second `[1]` heart rate measurements. ``` print(heartrate[0]) print(heartrate[1]) ``` We can iterate through a list with the help of a `for` loop. Let's try looping over our list of heart rates, printing each item as we go. ``` for hr in heartrate: print('the heart rate is:',hr) ``` ## Making choices Sometimes we want to take different actions depending on a set of conditions. We can do this using an `if/else` statement. Let's write a statement to test if a mean arterial pressure (`meanpressure`) is high or low. ``` meanpressure = 70 if meanpressure < 60: print('Low pressure') elif meanpressure > 100: print('High pressure') else: print('Normal pressure') ``` ## Writing our own functions To help organise our code and to avoid replicating the same code again and again, we can create functions. Let's create a function to convert temperature in fahrenheit to celsius, using the following formula: `celsius = (fahrenheit - 32) * 5/9` ``` def fahr_to_celsius(temp): celsius = (temp - 32) * 5/9 return celsius ``` Now we can call the function `fahr_to_celsius` to convert a temperature from fahrenheit to celsius. ``` body_temp_f = 98.6 body_temp_c = fahr_to_celsius(body_temp_f) print('Patient body temperature is:', body_temp_c, 'celsius') ``` As an exercise, try writing the inverse function, `celsius_to_fahr` (a sketch is given at the end of this notebook). ## Reusing code with libraries Python has a rich ecosystem of libraries; here we will use `pandas` to work with tabular data. ``` # let's assign pandas an alias, pd, for brevity import pandas as pd ``` We have shared a demo dataset online containing physiological data relating to 1000 patients admitted to an intensive care unit in Boston, Massachusetts, USA. Let's load this data into our new data structure.
``` url="https://raw.githubusercontent.com/tompollard/tableone/master/data/pn2012_demo.csv" data=pd.read_csv(url) print(data) ``` The variable `data` should now contain our new dataset. Let's view the first few rows using `head()`. Note: parentheses `"()"` are generally required when we are performing an action/operation. In this case, the action is to select a limited number of rows. ``` data.head(10) ``` We can perform other operations on the dataframe. For example, using `mean()` to get an average of the columns. If we are unsure of the meaning of a method, we can check by adding `?` after the method. For example, what is `max`? ``` data.max() ``` We can access a single column in the data by specifying the column name after the variable. For example, we can select a list of ages with `data.Age`, and then find the mean for this column in a similar way to before. ``` print('The mean age of patients is:', data.Age.mean()) ``` Pandas also provides a convenient method `plot` for plotting data. Let's plot a distribution of the patient ages in our dataset. ``` data.Age.plot(kind='kde', title='Age of patients in years') ```
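Returning to the earlier exercise: `celsius_to_fahr` was left unfinished in the workshop. A minimal completion simply inverts the formula used in `fahr_to_celsius`:

```
def celsius_to_fahr(temp):
    # Invert celsius = (fahrenheit - 32) * 5/9
    return temp * 9/5 + 32

# Quick check: normal body temperature
print(celsius_to_fahr(37.0))  # approximately 98.6
```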
# Simple RNN Simple RNN that predicts the next character. Based on chapter 8 of Dive to Deep learning. ## Preparing dataset For dataset here I used 3 books by Verne. They are contained in dataset directory as text files. In order to use this dataset for training we need to do the following: - Load files into memory. - Split string into tokens (in this case characters). - Encode characters into numbers. First let's load the files. ``` %matplotlib inline import collections import re import glob import random import torch import math import matplotlib.pyplot as plt from tqdm.notebook import tqdm from torch import nn from torch.nn import functional as torch_fn device = "cuda" if torch.cuda.is_available() else "cpu" DATASET_DIR = "dataset" dataset_lines = [] for filename in glob.iglob(f'{DATASET_DIR}/*.txt'): print(f"Loading {filename} ...") with open(filename, 'r') as f: lines = f.readlines() file_lines = [re.sub('[^A-Za-z0-9]+', ' ', line).strip().lower() for line in lines] dataset_lines.extend(file_lines) len(dataset_lines) dataset_lines[:10] ``` Now we will tokenize and flatten the entire dataset. ``` tokenized_dataset = [list(line) for line in dataset_lines] tokenized_dataset = [token for line in tokenized_dataset for token in line] len(tokenized_dataset) print(tokenized_dataset[:100]) ``` This is a simple solution for loading text files but it does have a small flaw. Since text files were stiched the sequence at the point of stiching will not make sense. However this only represents a small fraction of the dataset so this shouldn't have a significant impact on the result. Now let's construct a dictionary that will be used to encode characters into numbers. ``` class Vocabulary: def __init__(self, tokens): counter = collections.Counter(tokenized_dataset) self.vocab = {} for i, c in enumerate(counter): self.vocab[c] = i self.key_list = list(self.vocab.keys()) self.val_list = list(self.vocab.values()) self.size = len(self.key_list) def tokens_to_indexes(self, tokens): indexes = [] for token in tokens: indexes.append(self.vocab[token]) return indexes def indexes_to_tokens(self, indexes): tokens = [] for indx in indexes: tokens.append(self.key_list[self.val_list.index(indx)]) return tokens vocab = Vocabulary(tokenized_dataset) dataset = vocab.tokens_to_indexes(tokenized_dataset) print(dataset[:100]) print(vocab.indexes_to_tokens(dataset[:100])) ``` ## Data loader Now we will need to create a data loader. During training process we will try to predict the next character in the sequence. So in order to train a network we need a batch of sequences and corresponding sequences of labels. Each sequence will be sampled from the dataset using Sequential Partitioning. This means that we sample the sequences randomly with a constrain that subsequences from two adjacent minibatches during iteration are adjacent on the original sequence. Here is the implementation of the loader. ``` class SeqDataLoader: def __init__(self, corpus, batch_size, seq_len, device): self.corpus, self.b, self.n, self.d = corpus, batch_size, seq_len, device def __iter__(self): # Randomly drop the first d tokens. corpus = self.corpus[random.randint(0, self.n - 1):] # No. of subsequences. Subtract 1 to account for labels. m = (len(corpus)-1) // self.n # The starting indices for input sequences. initial_indices = list(range(0, m*self.n, self.n)) random.shuffle(initial_indices) for i in range(0, m // self.b): # The randomized starting indices for this minibatch. 
batch_indicies = initial_indices[i*self.b : (i+1) * self.b] X = [corpus[j : j+self.n] for j in batch_indicies] Y = [corpus[j+1 : j+1+self.n] for j in batch_indicies] yield torch.tensor(X, dtype=torch.int16, device=self.d), \ torch.tensor(Y, dtype=torch.int16, device=self.d) data_loader = SeqDataLoader(dataset, 2, 40, device) x,y = next(iter(data_loader)) x,y ``` ## Model Model that we will use here is a simple one layer RNN with a hidden state. ![image.png](attachment:1630868a-ba4c-40b8-8d0b-55cf1d2ca4be.png) The following equations are used to compute output and new hidden state: ![image.png](attachment:9ecb31af-63c8-421a-872e-e29820f56624.png) ![image.png](attachment:4cd0466e-4c85-429b-8e19-25185aa09bff.png) Layer that implements first equation is provided by torch.nn.RNN. Second equation is just a linear classifier. Activation function for the recursive layer is going to be tanh. Each character will be encoded as one hot vector. Recursive layer implementation is provided by torch.nn.RNN. This function takes 2 parameters: - Input tensor - Initial state for each element in the batch Each character in the sequence is a one hot vector, so the entire sequence is represente as a matrix and batch is a 3D tensor with shape $(N,L,H_{in})$ Initial state for each sequence is a 1D vector so initial hidden state for all elements in the batch is a matrix with shape $(1, N, H_{out})$. Where - $N$ - batch size - $L$ - length of the sequence - $H_{in}$ - size of the input (size of the one hot vector) - $H_{out}$ - size of the hidden state vector. Here is the implementation of the model: ``` class RNN(nn.Module): def __init__(self, hidden_state_size, vocab_size, device, **kwargs): super(RNN, self).__init__(**kwargs) self.hidden_state_size = hidden_state_size self.vocab_size = vocab_size self.device = device self.recursive_layer = nn.RNN(vocab_size, hidden_state_size) self.classifier_layer = nn.Linear(hidden_state_size, vocab_size) def forward(self, inputs, initial_state): X = torch_fn.one_hot(inputs.T.long(), self.vocab_size) X = X.to(torch.float32) Y, state = self.recursive_layer(X, initial_state) # The fully connected layer will first change the shape of `Y` to # (`num_steps` * `batch_size`, `num_hiddens`). Its output shape is # (`num_steps` * `batch_size`, `vocab_size`). output = self.classifier_layer(Y.reshape((-1, Y.shape[-1]))) return output, state def gen_initial_state(self, batch_size): return torch.zeros((1, batch_size, self.hidden_state_size), device=self.device) net = RNN(256, vocab.size, device) net = net.to(device) initial_state = net.gen_initial_state(256) data_loader = SeqDataLoader(dataset, 256, 40, device) x,y = next(iter(data_loader)) output, state = net(x, initial_state) output.shape output y.shape ``` Output is predictions for all sequences in a batch consolidated into one. ## Making predictions Now let's build a function that will allow us to extend sentence with predictions from the network. ``` def gen_predictions(net, device, vocabulary, input_str, preds_count): torch.set_grad_enabled(False) tokens = [token for token in input_str] indexes = vocabulary.tokens_to_indexes(tokens) net_input = torch.tensor(indexes, dtype=torch.int16, device=device) net_input = net_input.expand(1, -1) initial_state = net.gen_initial_state(1) # Warm up with the provided string. outputs, state = net(net_input, initial_state) get_idx = lambda logits: logits.argmax().expand(1, 1) to_token = lambda idx_tensor: int(idx_tensor[0][0].cpu()) # Get output last_index = get_idx(outputs[-1:]) # Generate new result. 
output_tokens = [to_token(last_index)] for _ in range(preds_count): outputs, state = net(last_index, state) last_index = get_idx(outputs) output_tokens.extend([to_token(last_index)]) output_chars = vocabulary.indexes_to_tokens(output_tokens) for char in output_chars: input_str += char torch.set_grad_enabled(True) return input_str gen_predictions(net, device, vocab, "journey", 80) ``` As expected untrained network is not doing well. Now let's train it. ## Perplexity But first we will create a metric that will tell us how well the model is doing so we can monitor this metric during training. The standard quantity used for language models is called perplexity and it is defined by the following formula: ![image.png](attachment:ecdfa77f-614c-476a-9b6a-91bd8789c129.png) Perplexity can be best understood as the harmonic mean of the number of real choices that we have when deciding which token to pick next. Let us look at a number of cases: - In the best case scenario, the model always perfectly estimates the probability of the label token as 1. In this case the perplexity of the model is 1. - In the worst case scenario, the model always predicts the probability of the label token as 0. In this situation, the perplexity is positive infinity. - At the baseline, the model predicts a uniform distribution over all the available tokens of the vocabulary. In this case, the perplexity equals the number of unique tokens of the vocabulary. In fact, if we were to store the sequence without any compression, this would be the best we could do to encode it. Hence, this provides a nontrivial upper bound that any useful model must beat. ## Training loop When writing training loop we have to consider two things. First we need to handle the internal state in between batches. Because we are using Sequential Partitioning we are going to initialize internal state at the beginning of each epoch and then preseve it between minibatches. This means that we will need to detach the internal state from the computational graph otherwise graph will continue to grow as we compute more and more minibatches. Second consideration is that we are multiplying state vector by the same matrix many times. This means that we will almost certanly see exploding gradients problem and optimization will become unstable. To this we will apply gradient clipping. Function will clip the gradients so that they norm will not exceed specified threshold. Here is the function for performing gradient clipping. ``` def clip_gradients(model, threshold): params = [p for p in model.parameters() if p.requires_grad] norm = torch.sqrt(sum(torch.sum((p.grad**2)) for p in params)) if norm > threshold: for param in params: param.grad[:] *= threshold / norm ``` THis will however not solve the problem of vanishing gradients. And here is the training loop. For the loss function we will use cross entropy loss since we want to maximize the probability that predictions of next characters are correct and we are returning logits. For optimizer we will use SGD. ``` def train_model(net, dataset, optimizer, batch_size, seq_len, epochs): loss = nn.CrossEntropyLoss() data_loader = SeqDataLoader(dataset, batch_size, seq_len, device) loss_history = [] perplexity_history = [] for epoch in tqdm(range(epochs)): state = None total_loss = 0. 
total_sample_number = 0 for X,Y in data_loader: if state is None: state = net.gen_initial_state(batch_size) else: state.detach_() y_hat, state = net(X, state) # Loss will be computed for each sequence we compute mean loss # across all sentences. y = Y.T.reshape(-1) l = loss(y_hat, y.long()).mean() optimizer.zero_grad() l.backward() clip_gradients(net, 1) optimizer.step() with torch.no_grad(): total_loss += l.cpu() * y.cpu().numel() total_sample_number += y.cpu().numel() loss_avg = total_loss / total_sample_number perplexity_avg = math.exp(loss_avg) loss_history.append(loss_avg) perplexity_history.append(perplexity_avg) return {"loss": loss_history, "perplexity": perplexity_history} lr = 1 batch_size = 256 sequence_len = 40 epochs = 200 net = RNN(512, vocab.size, device) net = net.to(device) optimizer = torch.optim.SGD(net.parameters(), lr) history = train_model(net, dataset, optimizer, batch_size, sequence_len, epochs) plt.title("Loss history") plt.plot(history["loss"]) plt.xlabel("epoch") plt.ylabel("loss") plt.title("Perplexity history") plt.plot(history["perplexity"]) plt.xlabel("epoch") plt.ylabel("perplexity") ``` ## Some examples and conclusions Now let's have some fun and give network diferent sentences to extend. ``` gen_predictions(net, device, vocab, "journey to the", 200) gen_predictions(net, device, vocab, "before starting afresh i thought a wash would do me good", 200) gen_predictions(net, device, vocab, "towards four oclock", 200) gen_predictions(net, device, vocab, "captain nemo", 200) ``` As we can see AI learned to generate words and some basic language structures quite well but it does not have the ability to generate coherent sentences. Interesting observation is RNN can overfit like any other neural network. When that happens it will start to generate very similar sentences in response to different starting sequences. It also tends to gravitate towards certain sentences. Another interesting observation is that network can get stuck and start to generate the same sentence over and over again.
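One way to reduce the repetition described above (not something the notebook does) is to replace greedy decoding with sampling: instead of always taking the argmax in `gen_predictions`, draw the next character from the softmax distribution, optionally sharpened or flattened by a temperature. Below is a sketch of a drop-in replacement for the `get_idx` helper, assuming the same tensor shapes used above:

```
import torch

def sample_idx(logits, temperature=1.0):
    # logits has shape (1, vocab_size); sample the next character index
    # from the softmax distribution instead of taking the argmax.
    probs = torch.softmax(logits.squeeze() / temperature, dim=-1)
    idx = torch.multinomial(probs, num_samples=1)
    # Match the (1, 1) shape that the network expects as its next input.
    return idx.reshape(1, 1)
```

Swapping `get_idx` for `sample_idx` (and tuning `temperature`) usually gives more varied continuations, at the cost of occasional misspelled words.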
# Mozilla TTS on CPU Real-Time Speech Synthesis We use Tacotron2 and MultiBand-Melgan models and LJSpeech dataset. Tacotron2 is trained using [Double Decoder Consistency](https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency/) (DDC) only for 130K steps (3 days) with a single GPU. MultiBand-Melgan is trained 1.45M steps with real spectrograms. Note that both model performances can be improved with more training. ### Download Models ``` !gdown --id 1dntzjWFg7ufWaTaFy80nRz-Tu02xWZos -O tts_model.pth.tar !gdown --id 18CQ6G6tBEOfvCHlPqP8EBI4xWbrr9dBc -O config.json !gdown --id 1Ty5DZdOc0F7OTGj9oJThYbL5iVu_2G0K -O vocoder_model.pth.tar !gdown --id 1Rd0R_nRCrbjEdpOwq6XwZAktvugiBvmu -O config_vocoder.json !gdown --id 11oY3Tv0kQtxK_JPgxrfesa99maVXHNxU -O scale_stats.npy ``` ### Setup Libraries ``` #! sudo apt-get install espeak for linux !brew install espeak !git clone https://github.com/mozilla/TTS %cd TTS !git checkout b1935c97 !pip install -r requirements.txt !python setup.py install %cd .. ``` ### Define TTS function ``` def tts(model, text, CONFIG, use_cuda, ap, use_gl, figures=True): t_1 = time.time() waveform, alignment, mel_spec, mel_postnet_spec, stop_tokens, inputs = synthesis(model, text, CONFIG, use_cuda, ap, speaker_id, style_wav=None, truncated=False, enable_eos_bos_chars=CONFIG.enable_eos_bos_chars) # mel_postnet_spec = ap._denormalize(mel_postnet_spec.T) if not use_gl: waveform = vocoder_model.inference(torch.FloatTensor(mel_postnet_spec.T).unsqueeze(0)) waveform = waveform.flatten() if use_cuda: waveform = waveform.cpu() waveform = waveform.numpy() rtf = (time.time() - t_1) / (len(waveform) / ap.sample_rate) tps = (time.time() - t_1) / len(waveform) print(waveform.shape) print(" > Run-time: {}".format(time.time() - t_1)) print(" > Real-time factor: {}".format(rtf)) print(" > Time per step: {}".format(tps)) IPython.display.display(IPython.display.Audio(waveform, rate=CONFIG.audio['sample_rate'])) return alignment, mel_postnet_spec, stop_tokens, waveform !pip install inflect ``` ### Load Models ``` import os import torch import time import IPython from TTS.utils.generic_utils import setup_model from TTS.utils.io import load_config from TTS.utils.text.symbols import symbols, phonemes from TTS.utils.audio import AudioProcessor from TTS.utils.synthesis import synthesis # runtime settings use_cuda = False # model paths TTS_MODEL = "tts_model.pth.tar" TTS_CONFIG = "config.json" VOCODER_MODEL = "vocoder_model.pth.tar" VOCODER_CONFIG = "config_vocoder.json" # load configs TTS_CONFIG = load_config(TTS_CONFIG) VOCODER_CONFIG = load_config(VOCODER_CONFIG) # load the audio processor ap = AudioProcessor(**TTS_CONFIG.audio) # LOAD TTS MODEL # multi speaker speaker_id = None speakers = [] # load the model num_chars = len(phonemes) if TTS_CONFIG.use_phonemes else len(symbols) model = setup_model(num_chars, len(speakers), TTS_CONFIG) # load model state cp = torch.load(TTS_MODEL, map_location=torch.device('cpu')) # load the model model.load_state_dict(cp['model']) if use_cuda: model.cuda() model.eval() # set model stepsize if 'r' in cp: model.decoder.set_r(cp['r']) from TTS.vocoder.utils.generic_utils import setup_generator # LOAD VOCODER MODEL vocoder_model = setup_generator(VOCODER_CONFIG) vocoder_model.load_state_dict(torch.load(VOCODER_MODEL, map_location="cpu")["model"]) vocoder_model.remove_weight_norm() vocoder_model.inference_padding = 0 ap_vocoder = AudioProcessor(**VOCODER_CONFIG['audio']) if use_cuda: vocoder_model.cuda() vocoder_model.eval() ``` ## 
Run Inference ``` sentence = "William got in the habit of asking himself “Is that thought true?” and if he wasn’t absolutely certain it was, he just let it go." align, spec, stop_tokens, wav = tts(model, sentence, TTS_CONFIG, use_cuda, ap, use_gl=False, figures=True) ```
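If you want to keep the synthesized audio rather than only playing it inline, the waveform returned by `tts` (here `wav`, assumed to be a float NumPy array as produced by the cells above) can be written to a WAV file. This is a minimal sketch that assumes SciPy is installed; the output filename is arbitrary:

```
import numpy as np
from scipy.io import wavfile

# Write the synthesized waveform to disk at the model's sample rate
wavfile.write("tts_output.wav", TTS_CONFIG.audio['sample_rate'], wav.astype(np.float32))
```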
# Generative Adversarial Networks (GANs) So far in CS231N, all the applications of neural networks that we have explored have been **discriminative models** that take an input and are trained to produce a labeled output. This has ranged from straightforward classification of image categories to sentence generation (which was still phrased as a classification problem, our labels were in vocabulary space and we’d learned a recurrence to capture multi-word labels). In this notebook, we will expand our repetoire, and build **generative models** using neural networks. Specifically, we will learn how to build models which generate novel images that resemble a set of training images. ### What is a GAN? In 2014, [Goodfellow et al.](https://arxiv.org/abs/1406.2661) presented a method for training generative models called Generative Adversarial Networks (GANs for short). In a GAN, we build two different neural networks. Our first network is a traditional classification network, called the **discriminator**. We will train the discriminator to take images, and classify them as being real (belonging to the training set) or fake (not present in the training set). Our other network, called the **generator**, will take random noise as input and transform it using a neural network to produce images. The goal of the generator is to fool the discriminator into thinking the images it produced are real. We can think of this back and forth process of the generator ($G$) trying to fool the discriminator ($D$), and the discriminator trying to correctly classify real vs. fake as a minimax game: $$\underset{G}{\text{minimize}}\; \underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$ where $z \sim p(z)$ are the random noise samples, $G(z)$ are the generated images using the neural network generator $G$, and $D$ is the output of the discriminator, specifying the probability of an input being real. In [Goodfellow et al.](https://arxiv.org/abs/1406.2661), they analyze this minimax game and show how it relates to minimizing the Jensen-Shannon divergence between the training data distribution and the generated samples from $G$. To optimize this minimax game, we will aternate between taking gradient *descent* steps on the objective for $G$, and gradient *ascent* steps on the objective for $D$: 1. update the **generator** ($G$) to minimize the probability of the __discriminator making the correct choice__. 2. update the **discriminator** ($D$) to maximize the probability of the __discriminator making the correct choice__. While these updates are useful for analysis, they do not perform well in practice. Instead, we will use a different objective when we update the generator: maximize the probability of the **discriminator making the incorrect choice**. This small change helps to allevaiate problems with the generator gradient vanishing when the discriminator is confident. This is the standard update used in most GAN papers, and was used in the original paper from [Goodfellow et al.](https://arxiv.org/abs/1406.2661). In this assignment, we will alternate the following updates: 1. Update the generator ($G$) to maximize the probability of the discriminator making the incorrect choice on generated data: $$\underset{G}{\text{maximize}}\; \mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$ 2. 
Update the discriminator ($D$), to maximize the probability of the discriminator making the correct choice on real and generated data: $$\underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$ ### What else is there? Since 2014, GANs have exploded into a huge research area, with massive [workshops](https://sites.google.com/site/nips2016adversarial/), and [hundreds of new papers](https://github.com/hindupuravinash/the-gan-zoo). Compared to other approaches for generative models, they often produce the highest quality samples but are some of the most difficult and finicky models to train (see [this github repo](https://github.com/soumith/ganhacks) that contains a set of 17 hacks that are useful for getting models working). Improving the stabiilty and robustness of GAN training is an open research question, with new papers coming out every day! For a more recent tutorial on GANs, see [here](https://arxiv.org/abs/1701.00160). There is also some even more recent exciting work that changes the objective function to Wasserstein distance and yields much more stable results across model architectures: [WGAN](https://arxiv.org/abs/1701.07875), [WGAN-GP](https://arxiv.org/abs/1704.00028). GANs are not the only way to train a generative model! For other approaches to generative modeling check out the [deep generative model chapter](http://www.deeplearningbook.org/contents/generative_models.html) of the Deep Learning [book](http://www.deeplearningbook.org). Another popular way of training neural networks as generative models is Variational Autoencoders (co-discovered [here](https://arxiv.org/abs/1312.6114) and [here](https://arxiv.org/abs/1401.4082)). Variatonal autoencoders combine neural networks with variationl inference to train deep generative models. These models tend to be far more stable and easier to train but currently don't produce samples that are as pretty as GANs. Here's an example of what your outputs from the 3 different models you're going to train should look like... note that GANs are sometimes finicky, so your outputs might not look exactly like this... 
this is just meant to be a *rough* guideline of the kind of quality you can expect: ![caption](gan_outputs_pytorch.png) ## Setup ``` import torch import torch.nn as nn from torch.nn import init import torchvision import torchvision.transforms as T import torch.optim as optim from torch.utils.data import DataLoader from torch.utils.data import sampler import torchvision.datasets as dset import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' def show_images(images): images = np.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D) sqrtn = int(np.ceil(np.sqrt(images.shape[0]))) sqrtimg = int(np.ceil(np.sqrt(images.shape[1]))) fig = plt.figure(figsize=(sqrtn, sqrtn)) gs = gridspec.GridSpec(sqrtn, sqrtn) gs.update(wspace=0.05, hspace=0.05) for i, img in enumerate(images): ax = plt.subplot(gs[i]) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_aspect('equal') plt.imshow(img.reshape([sqrtimg,sqrtimg])) return def preprocess_img(x): return 2 * x - 1.0 def deprocess_img(x): return (x + 1.0) / 2.0 def rel_error(x,y): return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) def count_params(model): """Count the number of parameters in the current TensorFlow graph """ param_count = np.sum([np.prod(p.size()) for p in model.parameters()]) return param_count answers = dict(np.load('gan-checks-tf.npz')) ``` ## Dataset GANs are notoriously finicky with hyperparameters, and also require many training epochs. In order to make this assignment approachable without a GPU, we will be working on the MNIST dataset, which is 60,000 training and 10,000 test images. Each picture contains a centered image of white digit on black background (0 through 9). This was one of the first datasets used to train convolutional neural networks and it is fairly easy -- a standard CNN model can easily exceed 99% accuracy. To simplify our code here, we will use the PyTorch MNIST wrapper, which downloads and loads the MNIST dataset. See the [documentation](https://github.com/pytorch/vision/blob/master/torchvision/datasets/mnist.py) for more information about the interface. The default parameters will take 5,000 of the training examples and place them into a validation dataset. The data will be saved into a folder called `MNIST_data`. ``` class ChunkSampler(sampler.Sampler): """Samples elements sequentially from some offset. 
Arguments: num_samples: # of desired datapoints start: offset where we should start selecting from """ def __init__(self, num_samples, start=0): self.num_samples = num_samples self.start = start def __iter__(self): return iter(range(self.start, self.start + self.num_samples)) def __len__(self): return self.num_samples NUM_TRAIN = 50000 NUM_VAL = 5000 NOISE_DIM = 96 batch_size = 128 mnist_train = dset.MNIST('./cs231n/datasets/MNIST_data', train=True, download=True, transform=T.ToTensor()) loader_train = DataLoader(mnist_train, batch_size=batch_size, sampler=ChunkSampler(NUM_TRAIN, 0)) mnist_val = dset.MNIST('./cs231n/datasets/MNIST_data', train=True, download=True, transform=T.ToTensor()) loader_val = DataLoader(mnist_val, batch_size=batch_size, sampler=ChunkSampler(NUM_VAL, NUM_TRAIN)) imgs = loader_train.__iter__().next()[0].view(batch_size, 784).numpy().squeeze() show_images(imgs) ``` ## Random Noise Generate uniform noise from -1 to 1 with shape `[batch_size, dim]`. Hint: use `torch.rand`. ``` def sample_noise(batch_size, dim): """ Generate a PyTorch Tensor of uniform random noise. Input: - batch_size: Integer giving the batch size of noise to generate. - dim: Integer giving the dimension of noise to generate. Output: - A PyTorch Tensor of shape (batch_size, dim) containing uniform random noise in the range (-1, 1). """ return 2*torch.rand(batch_size, dim)-1 ``` Make sure noise is the correct shape and type: ``` def test_sample_noise(): batch_size = 3 dim = 4 torch.manual_seed(231) z = sample_noise(batch_size, dim) np_z = z.cpu().numpy() assert np_z.shape == (batch_size, dim) assert torch.is_tensor(z) assert np.all(np_z >= -1.0) and np.all(np_z <= 1.0) assert np.any(np_z < 0.0) and np.any(np_z > 0.0) print('All tests passed!') test_sample_noise() ``` ## Flatten Recall our Flatten operation from previous notebooks... this time we also provide an Unflatten, which you might want to use when implementing the convolutional generator. We also provide a weight initializer (and call it for you) that uses Xavier initialization instead of PyTorch's uniform default. ``` class Flatten(nn.Module): def forward(self, x): N, C, H, W = x.size() # read in N, C, H, W return x.view(N, -1) # "flatten" the C * H * W values into a single vector per image class Unflatten(nn.Module): """ An Unflatten module receives an input of shape (N, C*H*W) and reshapes it to produce an output of shape (N, C, H, W). """ def __init__(self, N=-1, C=128, H=7, W=7): super(Unflatten, self).__init__() self.N = N self.C = C self.H = H self.W = W def forward(self, x): return x.view(self.N, self.C, self.H, self.W) def initialize_weights(m): if isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose2d): init.xavier_uniform_(m.weight.data) ``` ## CPU / GPU By default all code will run on CPU. GPUs are not needed for this assignment, but will help you to train your models faster. If you do want to run the code on a GPU, then change the `dtype` variable in the following cell. ``` dtype = torch.FloatTensor #dtype = torch.cuda.FloatTensor ## UNCOMMENT THIS LINE IF YOU'RE ON A GPU! ``` # Discriminator Our first step is to build a discriminator. Fill in the architecture as part of the `nn.Sequential` constructor in the function below. All fully connected layers should include bias terms. 
The architecture is: * Fully connected layer with input size 784 and output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with input_size 256 and output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with input size 256 and output size 1 Recall that the Leaky ReLU nonlinearity computes $f(x) = \max(\alpha x, x)$ for some fixed constant $\alpha$; for the LeakyReLU nonlinearities in the architecture above we set $\alpha=0.01$. The output of the discriminator should have shape `[batch_size, 1]`, and contain real numbers corresponding to the scores that each of the `batch_size` inputs is a real image. ``` def discriminator(): """ Build and return a PyTorch model implementing the architecture above. """ model = nn.Sequential( Flatten(), nn.Linear(784, 256), nn.LeakyReLU(), nn.Linear(256, 256), nn.LeakyReLU(), nn.Linear(256, 1) ) return model ``` Test to make sure the number of parameters in the discriminator is correct: ``` def test_discriminator(true_count=267009): model = discriminator() cur_count = count_params(model) if cur_count != true_count: print('Incorrect number of parameters in discriminator. Check your achitecture.') else: print('Correct number of parameters in discriminator.') test_discriminator() ``` # Generator Now to build the generator network: * Fully connected layer from noise_dim to 1024 * `ReLU` * Fully connected layer with size 1024 * `ReLU` * Fully connected layer with size 784 * `TanH` (to clip the image to be in the range of [-1,1]) ``` def generator(noise_dim=NOISE_DIM): """ Build and return a PyTorch model implementing the architecture above. """ model = nn.Sequential( nn.Linear(noise_dim, 1024), nn.ReLU(), nn.Linear(1024, 1024), nn.ReLU(), nn.Linear(1024, 784), nn.Tanh() ) return model ``` Test to make sure the number of parameters in the generator is correct: ``` def test_generator(true_count=1858320): model = generator(4) cur_count = count_params(model) if cur_count != true_count: print('Incorrect number of parameters in generator. Check your achitecture.') else: print('Correct number of parameters in generator.') test_generator() ``` # GAN Loss Compute the generator and discriminator loss. The generator loss is: $$\ell_G = -\mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$ and the discriminator loss is: $$ \ell_D = -\mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] - \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$ Note that these are negated from the equations presented earlier as we will be *minimizing* these losses. **HINTS**: You should use the `bce_loss` function defined below to compute the binary cross entropy loss which is needed to compute the log probability of the true label given the logits output from the discriminator. Given a score $s\in\mathbb{R}$ and a label $y\in\{0, 1\}$, the binary cross entropy loss is $$ bce(s, y) = -y * \log(s) - (1 - y) * \log(1 - s) $$ A naive implementation of this formula can be numerically unstable, so we have provided a numerically stable implementation for you below. You will also need to compute labels corresponding to real or fake and use the logit arguments to determine their size. Make sure you cast these labels to the correct data type using the global `dtype` variable, for example: `true_labels = torch.ones(size).type(dtype)` Instead of computing the expectation of $\log D(G(z))$, $\log D(x)$ and $\log \left(1-D(G(z))\right)$, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing. 
``` def bce_loss(input, target): """ Numerically stable version of the binary cross-entropy loss function. As per https://github.com/pytorch/pytorch/issues/751 See the TensorFlow docs for a derivation of this formula: https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits Inputs: - input: PyTorch Tensor of shape (N, ) giving scores. - target: PyTorch Tensor of shape (N,) containing 0 and 1 giving targets. Returns: - A PyTorch Tensor containing the mean BCE loss over the minibatch of input data. """ neg_abs = - input.abs() loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() return loss.mean() def discriminator_loss(logits_real, logits_fake): """ Computes the discriminator loss described above. Inputs: - logits_real: PyTorch Tensor of shape (N,) giving scores for the real data. - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data. Returns: - loss: PyTorch Tensor containing (scalar) the loss for the discriminator. """ logits_real = logits_real.type(dtype) logits_fake = logits_fake.type(dtype) loss = bce_loss(logits_real, 1) + bce_loss(logits_fake, 0) return loss def generator_loss(logits_fake): """ Computes the generator loss described above. Inputs: - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data. Returns: - loss: PyTorch Tensor containing the (scalar) loss for the generator. """ logits_fake = logits_fake.type(dtype) loss = bce_loss(logits_fake, 1) return loss ``` Test your generator and discriminator loss. You should see errors < 1e-7. ``` def test_discriminator_loss(logits_real, logits_fake, d_loss_true): d_loss = discriminator_loss(torch.Tensor(logits_real).type(dtype), torch.Tensor(logits_fake).type(dtype)).cpu().numpy() print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss)) test_discriminator_loss(answers['logits_real'], answers['logits_fake'], answers['d_loss_true']) def test_generator_loss(logits_fake, g_loss_true): g_loss = generator_loss(torch.Tensor(logits_fake).type(dtype)).cpu().numpy() print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss)) test_generator_loss(answers['logits_fake'], answers['g_loss_true']) ``` # Optimizing our loss Make a function that returns an `optim.Adam` optimizer for the given model with a 1e-3 learning rate, beta1=0.5, beta2=0.999. You'll use this to construct optimizers for the generators and discriminators for the rest of the notebook. ``` def get_optimizer(model): """ Construct and return an Adam optimizer for the model with learning rate 1e-3, beta1=0.5, and beta2=0.999. Input: - model: A PyTorch model that we want to optimize. Returns: - An Adam optimizer for the model with the desired hyperparameters. """ optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.5, 0.999)) return optimizer ``` # Training a GAN! We provide you the main training loop... you won't need to change this function, but we encourage you to read through and understand it. ``` def run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss, show_every=250, batch_size=128, noise_size=96, num_epochs=10): """ Train a GAN! Inputs: - D, G: PyTorch models for the discriminator and generator - D_solver, G_solver: torch.optim Optimizers to use for training the discriminator and generator. - discriminator_loss, generator_loss: Functions to use for computing the generator and discriminator loss, respectively. - show_every: Show samples after every show_every iterations. - batch_size: Batch size to use for training. 
- noise_size: Dimension of the noise to use as input to the generator. - num_epochs: Number of epochs over the training dataset to use for training. """ iter_count = 0 for epoch in range(num_epochs): for x, _ in loader_train: if len(x) != batch_size: continue D_solver.zero_grad() real_data = x.type(dtype) logits_real = D(2* (real_data - 0.5)).type(dtype) g_fake_seed = sample_noise(batch_size, noise_size).type(dtype) fake_images = G(g_fake_seed).detach() logits_fake = D(fake_images.view(batch_size, 1, 28, 28)) d_total_error = discriminator_loss(logits_real, logits_fake) d_total_error.backward() D_solver.step() G_solver.zero_grad() g_fake_seed = sample_noise(batch_size, noise_size).type(dtype) fake_images = G(g_fake_seed) gen_logits_fake = D(fake_images.view(batch_size, 1, 28, 28)) g_error = generator_loss(gen_logits_fake) g_error.backward() G_solver.step() if (iter_count % show_every == 0): print('Iter: {}, D: {:.4}, G:{:.4}'.format(iter_count,d_total_error.item(),g_error.item())) imgs_numpy = fake_images.data.cpu().numpy() show_images(imgs_numpy[0:16]) plt.show() print() iter_count += 1 # Make the discriminator D = discriminator().type(dtype) # Make the generator G = generator().type(dtype) # Use the function you wrote earlier to get optimizers for the Discriminator and the Generator D_solver = get_optimizer(D) G_solver = get_optimizer(G) # Run it! run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss) ``` Well that wasn't so hard, was it? In the iterations in the low 100s you should see black backgrounds, fuzzy shapes as you approach iteration 1000, and decent shapes, about half of which will be sharp and clearly recognizable as we pass 3000. # Least Squares GAN We'll now look at [Least Squares GAN](https://arxiv.org/abs/1611.04076), a newer, more stable alernative to the original GAN loss function. For this part, all we have to do is change the loss function and retrain the model. We'll implement equation (9) in the paper, with the generator loss: $$\ell_G = \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[\left(D(G(z))-1\right)^2\right]$$ and the discriminator loss: $$ \ell_D = \frac{1}{2}\mathbb{E}_{x \sim p_\text{data}}\left[\left(D(x)-1\right)^2\right] + \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[ \left(D(G(z))\right)^2\right]$$ **HINTS**: Instead of computing the expectation, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing. When plugging in for $D(x)$ and $D(G(z))$ use the direct output from the discriminator (`scores_real` and `scores_fake`). ``` def ls_discriminator_loss(scores_real, scores_fake): """ Compute the Least-Squares GAN loss for the discriminator. Inputs: - scores_real: PyTorch Tensor of shape (N,) giving scores for the real data. - scores_fake: PyTorch Tensor of shape (N,) giving scores for the fake data. Outputs: - loss: A PyTorch Tensor containing the loss. """ scores_real = scores_real.type(dtype) scores_fake = scores_fake.type(dtype) loss = 0.5 * (torch.mean((scores_real-1)**2) + torch.mean((scores_fake)**2)) return loss def ls_generator_loss(scores_fake): """ Computes the Least-Squares GAN loss for the generator. Inputs: - scores_fake: PyTorch Tensor of shape (N,) giving scores for the fake data. Outputs: - loss: A PyTorch Tensor containing the loss. 
""" scores_fake = scores_fake.type(dtype) loss = 0.5 * torch.mean((scores_fake-1)**2) return loss ``` Before running a GAN with our new loss function, let's check it: ``` def test_lsgan_loss(score_real, score_fake, d_loss_true, g_loss_true): score_real = torch.Tensor(score_real).type(dtype) score_fake = torch.Tensor(score_fake).type(dtype) d_loss = ls_discriminator_loss(score_real, score_fake).cpu().numpy() g_loss = ls_generator_loss(score_fake).cpu().numpy() print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss)) print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss)) test_lsgan_loss(answers['logits_real'], answers['logits_fake'], answers['d_loss_lsgan_true'], answers['g_loss_lsgan_true']) ``` Run the following cell to train your model! ``` D_LS = discriminator().type(dtype) G_LS = generator().type(dtype) D_LS_solver = get_optimizer(D_LS) G_LS_solver = get_optimizer(G_LS) run_a_gan(D_LS, G_LS, D_LS_solver, G_LS_solver, ls_discriminator_loss, ls_generator_loss) ``` # Deeply Convolutional GANs In the first part of the notebook, we implemented an almost direct copy of the original GAN network from Ian Goodfellow. However, this network architecture allows no real spatial reasoning. It is unable to reason about things like "sharp edges" in general because it lacks any convolutional layers. Thus, in this section, we will implement some of the ideas from [DCGAN](https://arxiv.org/abs/1511.06434), where we use convolutional networks #### Discriminator We will use a discriminator inspired by the TensorFlow MNIST classification tutorial, which is able to get above 99% accuracy on the MNIST dataset fairly quickly. * Reshape into image tensor (Use Unflatten!) * Conv2D: 32 Filters, 5x5, Stride 1 * Leaky ReLU(alpha=0.01) * Max Pool 2x2, Stride 2 * Conv2D: 64 Filters, 5x5, Stride 1 * Leaky ReLU(alpha=0.01) * Max Pool 2x2, Stride 2 * Flatten * Fully Connected with output size 4 x 4 x 64 * Leaky ReLU(alpha=0.01) * Fully Connected with output size 1 ``` def build_dc_classifier(): """ Build and return a PyTorch model for the DCGAN discriminator implementing the architecture above. """ return nn.Sequential( Unflatten(batch_size, 1, 28, 28), nn.Conv2d(1, 32, 5), nn.LeakyReLU(), nn.MaxPool2d(2, 2), nn.Conv2d(32, 64, 5), nn.LeakyReLU(), nn.MaxPool2d(2, 2), Flatten(), nn.Linear(4*4*64, 4*4*64), nn.LeakyReLU(), nn.Linear(4*4*64, 1) ) data = next(enumerate(loader_train))[-1][0].type(dtype) b = build_dc_classifier().type(dtype) out = b(data) print(out.size()) ``` Check the number of parameters in your classifier as a sanity check: ``` def test_dc_classifer(true_count=1102721): model = build_dc_classifier() cur_count = count_params(model) if cur_count != true_count: print('Incorrect number of parameters in generator. Check your achitecture.') else: print('Correct number of parameters in generator.') test_dc_classifer() ``` #### Generator For the generator, we will copy the architecture exactly from the [InfoGAN paper](https://arxiv.org/pdf/1606.03657.pdf). See Appendix C.1 MNIST. See the documentation for [tf.nn.conv2d_transpose](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d_transpose). We are always "training" in GAN mode. 
* Fully connected with output size 1024 * `ReLU` * BatchNorm * Fully connected with output size 7 x 7 x 128 * ReLU * BatchNorm * Reshape into Image Tensor of shape 7, 7, 128 * Conv2D^T (Transpose): 64 filters of 4x4, stride 2, 'same' padding * `ReLU` * BatchNorm * Conv2D^T (Transpose): 1 filter of 4x4, stride 2, 'same' padding * `TanH` * Should have a 28x28x1 image, reshape back into 784 vector ``` def build_dc_generator(noise_dim=NOISE_DIM): """ Build and return a PyTorch model implementing the DCGAN generator using the architecture described above. """ return nn.Sequential( nn.Linear(noise_dim, 1024), nn.ReLU(), nn.BatchNorm1d(1024), nn.Linear(1024, 7*7*128), nn.ReLU(), nn.BatchNorm1d(7*7*128), Unflatten(-1, 128, 7, 7), nn.ConvTranspose2d(128, 64, 4, 2, 1, 0), nn.ReLU(), nn.BatchNorm2d(64), nn.ConvTranspose2d(64, 1, 4, 2, 1, 0), nn.Tanh(), Flatten() ) test_g_gan = build_dc_generator().type(dtype) test_g_gan.apply(initialize_weights) fake_seed = torch.randn(batch_size, NOISE_DIM).type(dtype) fake_images = test_g_gan.forward(fake_seed) fake_images.size() ``` Check the number of parameters in your generator as a sanity check: ``` def test_dc_generator(true_count=6580801): model = build_dc_generator(4) cur_count = count_params(model) if cur_count != true_count: print('Incorrect number of parameters in generator. Check your achitecture.') else: print('Correct number of parameters in generator.') test_dc_generator() D_DC = build_dc_classifier().type(dtype) D_DC.apply(initialize_weights) G_DC = build_dc_generator().type(dtype) G_DC.apply(initialize_weights) D_DC_solver = get_optimizer(D_DC) G_DC_solver = get_optimizer(G_DC) run_a_gan(D_DC, G_DC, D_DC_solver, G_DC_solver, discriminator_loss, generator_loss, num_epochs=5) ``` ## INLINE QUESTION 1 We will look at an example to see why alternating minimization of the same objective (like in a GAN) can be tricky business. Consider $f(x,y)=xy$. What does $\min_x\max_y f(x,y)$ evaluate to? (Hint: minmax tries to minimize the maximum value achievable.) Now try to evaluate this function numerically for 6 steps, starting at the point $(1,1)$, by using alternating gradient (first updating y, then updating x) with step size $1$. You'll find that writing out the update step in terms of $x_t,y_t,x_{t+1},y_{t+1}$ will be useful. Record the six pairs of explicit values for $(x_t,y_t)$ in the table below. ### Your answer: $y_0$ | $y_1$ | $y_2$ | $y_3$ | $y_4$ | $y_5$ | $y_6$ ----- | ----- | ----- | ----- | ----- | ----- | ----- 1 | 2 | 1 | -1 | -2 | -1 | 1 $x_0$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ 1 | -1 | -2 | -1 | 1 | 2 | 1 ## INLINE QUESTION 2 Using this method, will we ever reach the optimal value? Why or why not? ### Your answer: In this specific example, we arrived at the value we started from. For $\forall{n}$, $y_n = y_{n+6}$ and $x_n = x_{n+6}$. The value never converges. ## INLINE QUESTION 3 If the generator loss decreases during training while the discriminator loss stays at a constant high value from the start, is this a good sign? Why or why not? A qualitative answer is sufficient ### Your answer: It could be the case that the generator is just generating purely random images that the discriminator cannot classify correctly at all. The generator keeps on generating stronger gibberish (generator loss decreases) and the discriminator accuracy never improves (constant discriminator loss).
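As a quick numerical sanity check on the alternating-gradient table in Inline Question 1, here is a minimal sketch in plain Python. It assumes the update order described in the question (a gradient ascent step on y, then a gradient descent step on x at the freshly updated y, both with step size 1):

```
# Verify the six alternating-gradient steps for f(x, y) = x * y, starting at (1, 1).
# df/dy = x (ascent step for the max player), df/dx = y (descent step for the min player).
x, y = 1.0, 1.0
history = [(x, y)]
for t in range(6):
    y = y + x      # maximize over y: y_{t+1} = y_t + step * x_t
    x = x - y      # minimize over x: x_{t+1} = x_t - step * y_{t+1}
    history.append((x, y))

print(history)
# [(1.0, 1.0), (-1.0, 2.0), (-2.0, 1.0), (-1.0, -1.0), (1.0, -2.0), (2.0, -1.0), (1.0, 1.0)]
```

The iterates return to the starting point after six steps, which matches the table above and the answer to Inline Question 2.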
# Dynamic Profile Scrape

In this example, we will subclass and extend instascrape.Profile to dynamically scrape all the posts of a profile using Selenium. Additionally, all posts will be loaded as instascrape.Post objects, which will give us the ability to scrape individual posts for all of their data as well.

```
import pandas as pd
import matplotlib.pyplot as plt

from dynamic_profile import DynamicProfile
from selenium.webdriver import Chrome, ChromeOptions  # pip3 install selenium
```

### Download Chrome WebDriver

Besides Selenium, you also need Chrome WebDriver for this example. Download and extract it from the link below.

Download: https://sites.google.com/a/chromium.org/chromedriver/downloads

Set `webdriver_executable` to the absolute path of the chromedriver executable.

```
webdriver_executable = 'path/to/webdriver/chromedriver'
```

### Scraping the data

First, we'll scrape the data with our DynamicProfile subclass of instascrape.Profile. To get an understanding of how this class works, take a look at its source in dynamic_profile.py included in this folder. For the purpose of this exercise, it will make requests synchronously, but if you wanted to speed it up, you could rewrite it asynchronously.

```
username = 'realpython'
max_posts_to_load = 50

profile = DynamicProfile.from_username(username)
profile.load()  # get basic profile info (e.g. followers, following)
profile.dynamic_load(Chrome(webdriver_executable), max_posts=max_posts_to_load)  # get posts
```

### Plotting the data

Now that the data has been scraped, we can get into analyzing it! Let's clean it up a little, create a DataFrame, and get going with plotting this data.

First, let's build a list of tuples where each tuple holds a post's upload date, the number of likes the post got, and the number of comments.

```
data_arr = []
for post in profile.posts:
    try:
        data_arr.append((post.upload_date, post.likes, post.comments))
    except AttributeError as e:
        pass
```

With that list of tuples, we can instantiate a pandas.DataFrame to make working with our data more manageable.

```
dataframe = pd.DataFrame(data_arr, columns=['datetime', 'likes', 'comments'])
dataframe = dataframe.sort_values(by=['datetime']).reset_index(drop=True)  #Sort by date
dataframe.head()  #Show first few data points
```

Now that we have our DataFrame, we can begin exploring this profile's data. Let's make a simple scatter plot to see if there are any trends right off the bat.

```
from pandas.plotting import register_matplotlib_converters
plt.style.use('ggplot')

#Draw the scatter plot
plt.scatter(dataframe['datetime'], dataframe['likes'])
fig = plt.gcf()
ax = plt.gca()
fig.set_size_inches(16, 6)

#Write text where applicable
print(profile.followers)
description = f"followers={profile.followers: ,}\n"
description += f"following={profile.following: ,}\n"
description += f"posts={len(profile.posts): ,}"
plt.text(0.05, 0.8, description, transform=ax.transAxes, fontsize=14)

#Write labels
plt.xlabel('Datetime', fontsize=16)
plt.ylabel('Likes', fontsize=16)
plt.title(f'@{username} Instagram time series', fontsize=20)
# plt.legend(loc="upper left")

plt.show()
```
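The raw scatter can be noisy. As a small follow-up, here is a sketch that aggregates likes by month; it assumes `post.upload_date` parses as a calendar datetime (if it comes back as a unix timestamp instead, pass `unit='s'` to `pd.to_datetime`):

```
# Monthly average likes from the same DataFrame built above.
# Coerce the 'datetime' column in case it is not already a datetime dtype.
dataframe['datetime'] = pd.to_datetime(dataframe['datetime'])

monthly_likes = (
    dataframe.set_index('datetime')['likes']
             .resample('M')   # one bucket per calendar month
             .mean()
)

monthly_likes.plot(figsize=(16, 6), title=f'@{username} average likes per month')
plt.ylabel('Average likes')
plt.show()
```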
# Goals ### In the previous tutorial you studied the role of freezing models on a small dataset. ### Understand the role of freezing models in transfer learning on a fairly large dataset ### Why freeze/unfreeze base models in transfer learning ### Use comparison feature to appropriately set this parameter on custom dataset ### You will be using lego bricks dataset to train the classifiers # What is freezing base network - To recap you have two parts in your network - One that already existed, the pretrained one, the base network - The new sub-network or a single layer you added -The hyper-parameter we can see here: Freeze base network - Freezing base network makes the base network untrainable - The base network now acts as a feature extractor and only the next half is trained - If you do not freeze the base network the entire network is trained # Table of Contents ## [0. Install](#0) ## [1. Freeze Base network in densenet121 and train a classifier](#1) ## [2. Unfreeze base network in densenet121 and train another classifier](#2) ## [3. Compare both the experiment](#3) <a id='0'></a> # Install Monk - git clone https://github.com/Tessellate-Imaging/monk_v1.git - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt - (Select the requirements file as per OS and CUDA version) ``` !git clone https://github.com/Tessellate-Imaging/monk_v1.git # Select the requirements file as per OS and CUDA version !cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt ``` ## Dataset - LEGO Classification - https://www.kaggle.com/joosthazelzet/lego-brick-images/ ``` ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MRC58-oCdR1agFTWreDFqevjEOIWDnYZ' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MRC58-oCdR1agFTWreDFqevjEOIWDnYZ" -O skin_cancer_mnist_dataset.zip && rm -rf /tmp/cookies.txt ! unzip -qq skin_cancer_mnist_dataset.zip ``` # Imports ``` # Monk import os import sys sys.path.append("monk_v1/monk/"); #Using pytorch backend from pytorch_prototype import prototype ``` <a id='1'></a> # Freeze Base network in densenet121 and train a classifier ## Creating and managing experiments - Provide project name - Provide experiment name - For a specific data create a single project - Inside each project multiple experiments can be created - Every experiment can be have diferent hyper-parameters attached to it ``` gtf = prototype(verbose=1); gtf.Prototype("Project", "Freeze_Base_Network"); ``` ### This creates files and directories as per the following structure workspace | |--------Project | | |-----Freeze_Base_Network | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) ## Set dataset and select the model ## Quick mode training - Using Default Function - dataset_path - model_name - freeze_base_network - num_epochs ## Sample Dataset folder structure parent_directory | | |------cats | |------img1.jpg |------img2.jpg |------.... (and so on) |------dogs | |------img1.jpg |------img2.jpg |------.... 
(and so on) ## Modifyable params - dataset_path: path to data - model_name: which pretrained model to use - freeze_base_network: Retrain already trained network or not - num_epochs: Number of epochs to train for ``` gtf.Default(dataset_path="skin_cancer_mnist_dataset/images", path_to_csv="skin_cancer_mnist_dataset/train_labels.csv", model_name="densenet121", freeze_base_network=True, # Set this param as true num_epochs=5); #Read the summary generated once you run this cell. ``` ## From the summary above - Model Params Model name: densenet121 Use Gpu: True Use pretrained: True Freeze base network: True ## Another thing to notice from summary Model Details Loading pretrained model Model Loaded on device Model name: densenet121 Num of potentially trainable layers: 242 Num of actual trainable layers: 1 ### There are a total of 242 layers ### Since we have freezed base network only 1 is trainable, the final layer ## Train the classifier ``` #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed ``` ### Best validation Accuracy achieved - 74.77 % (You may get a different result) <a id='2'></a> # Unfreeze Base network in densenet121 and train a classifier ## Creating and managing experiments - Provide project name - Provide experiment name - For a specific data create a single project - Inside each project multiple experiments can be created - Every experiment can be have diferent hyper-parameters attached to it ``` gtf = prototype(verbose=1); gtf.Prototype("Project", "Unfreeze_Base_Network"); ``` ### This creates files and directories as per the following structure workspace | |--------Project | | |-----Freeze_Base_Network (Previously created) | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) | | |-----Unfreeze_Base_Network (Created Now) | |-----experiment-state.json | |-----output | |------logs (All training logs and graphs saved here) | |------models (all trained models saved here) ## Set dataset and select the model ## Quick mode training - Using Default Function - dataset_path - model_name - freeze_base_network - num_epochs ## Sample Dataset folder structure parent_directory | | |------cats | |------img1.jpg |------img2.jpg |------.... (and so on) |------dogs | |------img1.jpg |------img2.jpg |------.... (and so on) ## Modifyable params - dataset_path: path to data - model_name: which pretrained model to use - freeze_base_network: Retrain already trained network or not - num_epochs: Number of epochs to train for ``` gtf.Default(dataset_path="skin_cancer_mnist_dataset/images", path_to_csv="skin_cancer_mnist_dataset/train_labels.csv", model_name="densenet121", freeze_base_network=False, # Set this param as false num_epochs=5); #Read the summary generated once you run this cell. 
``` ## From the summary above - Model Params Model name: densenet121 Use Gpu: True Use pretrained: True Freeze base network: False ## Another thing to notice from summary Model Details Loading pretrained model Model Loaded on device Model name: densenet121 Num of potentially trainable layers: 242 Num of actual trainable layers: 242 ### There are a total of 242 layers ### Since we have unfreezed base network all 242 layers are trainable including the final layer ## Train the classifier ``` #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed ``` ### Best Val Accuracy achieved - 81.33 % (You may get a different result) <a id='3'></a> # Compare both the experiment ``` # Invoke the comparison class from compare_prototype import compare ``` ### Creating and managing comparison experiments - Provide project name ``` # Create a project gtf = compare(verbose=1); gtf.Comparison("Compare-effect-of-freezing"); ``` ### This creates files and directories as per the following structure workspace | |--------comparison | | |-----Compare-effect-of-freezing | |------stats_best_val_acc.png |------stats_max_gpu_usage.png |------stats_training_time.png |------train_accuracy.png |------train_loss.png |------val_accuracy.png |------val_loss.png | |-----comparison.csv (Contains necessary details of all experiments) ### Add the experiments - First argument - Project name - Second argument - Experiment name ``` gtf.Add_Experiment("Project", "Freeze_Base_Network"); gtf.Add_Experiment("Project", "Unfreeze_Base_Network"); ``` ### Run Analysis ``` gtf.Generate_Statistics(); ``` ## Visualize and study comparison metrics ### Training Accuracy Curves ``` from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-freezing/train_accuracy.png") ``` ### Training Loss Curves ``` from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-freezing/train_loss.png") ``` ### Validation Accuracy Curves ``` from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-freezing/val_accuracy.png") ``` ### Validation loss curves ``` from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-freezing/val_loss.png") ``` ## Accuracies achieved on validation dataset ### With freezing base network - 74.77 % ### Without freezing base network - 81.33 % #### For this classifier, keeping the base network trainable seems to be a good option. Thus for other data it may result in overfitting the training data (You may get a different result)
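Monk handles the freeze/unfreeze switch for us, but since the backend here is PyTorch, it may help to see what freezing amounts to at that level. Below is a minimal sketch using plain torchvision (not Monk's internals); the 7-class output size is chosen only for illustration:

```
import torch.nn as nn
import torchvision.models as models

# On newer torchvision versions, use weights='DEFAULT' instead of pretrained=True
model = models.densenet121(pretrained=True)

# Freeze the base network: its weights act as a fixed feature extractor.
for param in model.parameters():
    param.requires_grad = False

# Replace the final classifier; only this new layer will receive gradient updates.
num_features = model.classifier.in_features      # 1024 for densenet121
model.classifier = nn.Linear(num_features, 7)    # 7 classes, chosen only for illustration

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable params: {trainable:,} / {total:,}")
# With the base frozen, only the new classifier's weight and bias are trainable,
# which is why the summary above reports 1 trainable layer out of 242.
```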
```
!pip install eli5

import pandas as pd
import numpy as np

from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import median_absolute_error
from sklearn.model_selection import cross_val_score

import eli5
from eli5.sklearn import PermutationImportance

from ast import literal_eval
from tqdm import tqdm_notebook
from eli5.sklearn import permutation_importance

cd '/content/drive/My Drive/Colab Notebooks/dw_matrix'

ls data

df = pd.read_csv('data/men_shoes.csv', low_memory=False)

def run_model(feats, model=DecisionTreeRegressor(max_depth=5)):
    # Cross-validated mean absolute error for the given feature set
    x = df[feats].values
    y = df['prices_amountmin'].values

    scores = cross_val_score(model, x, y, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)

df['brand_cat'] = df['brand'].map(lambda x: str(x).lower()).factorize()[0]

run_model(['brand_cat'])

model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
run_model(['brand_cat'], model)

df.features.head().values

# Example of a single raw `features` string, used to test the parsing logic
str_dict = '[{"key":"Color","value":["Multicolor"]},{"key":"Manufacturer Part Number","value":["8190-W-NAVY-7.5"]},{"key":"Brand","value":["Josmo"]}]'
literal_eval(str_dict)[0]['value'][0]

def parse_features(x):
    output_dict = {}
    if str(x) == 'nan': return output_dict

    features = literal_eval(x.replace('\\"', '"'))
    for item in features:
        key = item['key'].lower().strip()
        value = item['value'][0].lower().strip()
        output_dict[key] = value
    return output_dict

df['features_parsed'] = df['features'].map(parse_features)
df['features_parsed'].head().values

keys = set()
df['features_parsed'].map(lambda x: keys.update(x.keys()))
len(keys)

def get_name_feat(key):
    return 'feat_' + key

for key in tqdm_notebook(keys):
    df[get_name_feat(key)] = df.features_parsed.map(lambda feats: feats[key] if key in feats else np.nan)

df.columns

# Share of rows (in %) for which each parsed feature is present
keys_stat = {}
for key in keys:
    keys_stat[key] = df[False == df[get_name_feat(key)].isnull()].shape[0] / df.shape[0] * 100

{k: v for k, v in keys_stat.items() if v > 30}

df['feat_brand_cat'] = df['feat_brand'].factorize()[0]
df['feat_color_cat'] = df['feat_color'].factorize()[0]
df['feat_gender_cat'] = df['feat_gender'].factorize()[0]
df['feat_manufacturer part number_cat'] = df['feat_manufacturer part number'].factorize()[0]
df['feat_material_cat'] = df['feat_material'].factorize()[0]
df['feat_sport_cat'] = df['feat_sport'].factorize()[0]
df['feat_style_cat'] = df['feat_style'].factorize()[0]

for key in keys:
    df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0]

df['brand'] = df['brand'].map(lambda x: str(x).lower())
df[df.brand == df.feat_brand][['brand', 'feat_brand']].head()

model = RandomForestRegressor(max_depth=5, n_estimators=100)
run_model(['brand_cat'], model)

feats_cat = [x for x in df.columns if '_cat' in x]
feats_cat

feats = ['brand_cat', 'feat_metal type_cat', 'feat_shape_cat', 'feat_brand_cat', 'feat_gender_cat', 'feat_material_cat', 'feat_sport_cat', 'feat_style_cat']
#feats += feats_cat
#feats = list(set(feats))

model = RandomForestRegressor(max_depth=5, n_estimators=100)
result = run_model(feats, model)

X = df[feats].values
y = df['prices_amountmin'].values

m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
m.fit(X, y)

print(result)

perm = PermutationImportance(m, random_state=1).fit(X, y)
eli5.show_weights(perm, feature_names=feats)

df[df['brand'] == 'nike'].features_parsed.sample(5).values
```
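eli5's `PermutationImportance` works well here. As an alternative sketch, the same check can be done with scikit-learn's own `permutation_importance` (available in `sklearn.inspection` since version 0.22), reusing the `m`, `X`, `y`, and `feats` defined above:

```
from sklearn.inspection import permutation_importance as sk_permutation_importance

# Permute each feature column in turn and measure how much the error worsens.
result_pi = sk_permutation_importance(m, X, y, scoring='neg_mean_absolute_error',
                                      n_repeats=5, random_state=1)

# Print features from most to least important
for idx in result_pi.importances_mean.argsort()[::-1]:
    print(f"{feats[idx]:<40} {result_pi.importances_mean[idx]:.4f} "
          f"+/- {result_pi.importances_std[idx]:.4f}")
```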
```
import pandas as pd
import requests
from requests_html import HTML
import time

base_url = 'https://stackoverflow.com/questions/tagged/'
tag = 'python'
url = f'{base_url}{tag}'
url

def parse_tagged_page(html):
    question_summaries = html.find('.question-summary')
    key_names = ['question', 'votes', 'tags']
    classes_needed = ['.question-hyperlink', '.vote', '.tags']

    data_list = []
    for quest_ele in question_summaries:
        question_data = {}
        for i, _class in enumerate(classes_needed):
            sub_ele = quest_ele.find(_class, first=True)
            keyname = key_names[i]
            question_data[keyname] = clean_scraped_data(sub_ele.text, keyname=keyname)
        data_list.append(question_data)
    return data_list

def extract_data_from_url(url):
    response_ = requests.get(url)
    if response_.status_code not in range(200, 299):
        return []
    html_text = response_.text
    html = HTML(html=html_text)
    data = parse_tagged_page(html)
    return data

def scrape_tag(tag='python', query_filter='Votes', max_pages=1, pagesize=25):
    base_url = 'https://stackoverflow.com/questions/tagged/'
    data_ = []
    for p in range(max_pages):
        page_num = p + 1
        url = f'{base_url}{tag}?tab={query_filter}&page={page_num}&pagesize={pagesize}'
        data_ += extract_data_from_url(url)
        time.sleep(1.2)
    return data_

data = scrape_tag(tag='python')
df = pd.DataFrame(data)

df.head()
df.shape
df.to_csv('python_votes.csv', index=False)

base_url = 'https://stackoverflow.com/questions/tagged/'
tag = 'python'
url = f'{base_url}{tag}'
url

response_ = requests.get(url)
print(response_.status_code)

html_text = response_.text
html = HTML(html=html_text)
html

# Exploring the raw page: first try the old '.question-summary' selectors
question_summaries = html.find('.question-summary')
key_names = ['question', 'votes', 'tags']
classes_needed = ['.question-hyperlink', '.vote', '.tags']
data_list = []
for quest_ele in question_summaries:
    print(quest_ele)
    # question_data = {}
    # for i, _class in enumerate(classes_needed):
    #     sub_ele = quest_ele.find(_class, first=True)
    #     keyname = key_names[i]
    #     question_data[keyname] = clean_scraped_data(sub_ele.text, keyname=keyname)
    # data_list.append(question_data)
# data_list

# Trying the newer '.s-post-summary' classes instead
question_summaries = html.find('.s-post-summary')
votes = html.find('.s-post-summary--stats-item-number')

for object_ in html.find('.s-post-summary--stats-item-number'):
    first = object_.find('.s-post-summary--stats-item-number', first=True)
first.html

# Sample of the markup behind these elements (note the new class names):
# <span class="s-post-summary--stats-item-number mr4">220</span>
# <span class="s-post-summary--stats-item-number mr4">5</span>
# <div class="s-post-summary--stats-item has-answers has-accepted-answer"
#      title="one of the answers was accepted as the correct answer">
```
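Note that `parse_tagged_page` above calls `clean_scraped_data`, which is never defined in this notebook (it presumably lived in an earlier cell or a separate module). A minimal placeholder, written purely as an assumption about the intended behavior (trim whitespace, split tag lists, pull the leading number out of vote strings), could look like this:

```
def clean_scraped_data(text, keyname=None):
    """Hypothetical helper: tidy the raw text pulled from each element.

    The real implementation is not included in this notebook; this version
    only guesses at the intent.
    """
    if text is None:
        return None
    text = text.strip()
    if keyname == 'tags':
        return text.split()                      # e.g. 'python pandas' -> ['python', 'pandas']
    if keyname == 'votes':
        tokens = text.split()
        first_token = tokens[0] if tokens else ''
        return int(first_token) if first_token.lstrip('-').isdigit() else text
    return text
```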
<img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/7/7d/Copper_Price_History_USD.png" width="600px" height="400px" /> # Descarga y manipulación de precios históricos *Objetivos:* - Aprender a importar datos desde archivos separados por comas (extensión `.csv`). - Descargar el paquete `pandas-datareader`. - Aprender a descargar datos desde fuentes remotas. **Referencias:** - http://pandas.pydata.org/ - https://pandas-datareader.readthedocs.io/en/latest/ ___ ## 1. Importar datos desde archivos locales <img style="float: left; margin: 0px 0px 15px 15px;" src="https://1000marcas.net/wp-content/uploads/2020/12/Microsoft-Excel-Logo.png" width="300px" height="125px" /> <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/0/0a/Python.svg" width="300px" height="125px" /> ### 1.1. ¿Porqué? - Muchas veces tenemos bases de datos proporcionadas como archivos locales. - Para poder analizar, procesar y tomar decisiones con estos datos, es necesario importarlos a python. - Ejemplos de archivos donde comúnmente se guardan bases de datos son: - `.xls` o `.xlsx` - `.cvs` - Excel es ampliamente usado en distintos campos de aplicación en todo el mundo. - Nos guste o no, esto también aplica a ciencia de datos (ingeniería financiera). - Muchos de ustedes en su futuro académico y profesional tendrán que trabajar con estas hojas de cálculo, pero no siempre querrán trabajar directamente con ellas si tienen que hacer un análisis un poco más avanzado de los datos. - Por eso en Python se han implementado herramientas para leer, escribir y manipular este tipo de archivos. En esta clase veremos cómo podemos trabajar con Excel y Python de manera básica utilizando la librería *pandas*. ### 1.2. Reglas básicas para antes de leer hojas de cálculo Antes de comenzar a leer una hoja de cálculo en Python (o cualquier otro programa), debemos considerar el ajustar nuestro archivo para cumplir ciertos principios, como: - La primer fila de la hoja de cálculo se reserva para los títulos, mientras que la primer columna se usa para identificar la unidad de muestreo o indización de los datos (tiempo, fecha, eventos...) - Evitar nombres, valores o campos con espacios en blanco. De otra manera, cada palabra se interpreta como variable separada y resultan errores relacionados con el número de elementos por línea. - Los nombres cortos se prefieren sobre nombre largos. - Evite símbolos como ?, $, %, ^, &, *, (,),-,#, ?, ,,<,>, /, |, \, [ ,] , {, y }. - Borre cualquier tipo de comentario que haya hecho en su archivo para evitar columnas extras. - Asegúrese de que cualquier valor inexistente esté indicado como NA. Si se hizo algún cambio, estar seguro de guardarlo. Si estás trabajando con Microsoft Excel, verás que hay muchas opciones para guardar archivos, a parte de las extensiones por defecto .xls or .xlsx. Para esto ir a “Save As” y seleccionar una de las extensiones listadas en “Save as Type”. La extensión más común es .csv (archivos de texto separados por comas). **Actividad.** Descargar precios de acciones de Apple (AAPL) de Yahoo Finance, con una ventana de tiempo desde el 01-01-2015 al 31-12-2017 y frecuencia diaria. - Ir a https://finance.yahoo.com/. - Buscar cada una de las compañías solicitadas. - Dar click en la pestaña *'Historical Data'*. - Cambiar las fechas en *'Time Period'*, click en *'Apply'* y, finalmente, click en *'Download Data'*. - **¡POR FAVOR! 
GUARDAR ESTOS ARCHIVOS EN UNA CARPETA LLAMADA precios EN EL MISMO DIRECTORIO DONDE TIENEN ESTE ARCHIVO**. ### 1.3. Carguemos archivos .csv como ventanas de datos de pandas Ahora podemos comenzar a importar nuestros archivos. Una de las formas más comunes de trabajar con análisis de datos es en pandas. Esto es debido a que pandas está construido sobre NumPy y provee estructuras de datos y herramientas de análisis fáciles de usar. ``` import numpy as np import scipy.stats as stats import matplotlib.pyplot as plt # Importamos pandas import pandas as pd #algunas opciones para Pandas # pd.set_option('display.notebook_repr_html', False) # pd.set_option('display.max_columns', 6) # pd.set_option('display.max_rows', 10) # pd.set_option('display.width', 78) # pd.set_option('precision', 3) pd.set_option('display.max_rows', 10) ``` Para leer archivos `.csv`, utilizaremos la función `read_csv` de pandas: ``` # Función read_csv help(pd.read_csv) # Cargamos hoja de calculo en un dataframe file_name = 'Precios/AAPL.csv' aapl = pd.read_csv(file_name) aapl ``` #### Anotación #1 - Quisieramos indizar por fecha. ``` # Cargamos hoja de calculo en un dataframe aapl = pd.read_csv(file_name, index_col=['Date']) aapl # Graficar precios de cierre y precios de cierre ajustados import matplotlib.pyplot as plt %matplotlib inline aapl[['Close', 'Adj Close']].plot(figsize=(8,8)) plt.show() ``` #### Anotación #2 - Para nuestra aplicación solo nos interesan los precios de cierre de las acciones (columna Adj Close). ``` # Cargamos hoja de calculo en un dataframe aapl = pd.read_csv(file_name, index_col=['Date'], usecols=['Date', 'Adj Close']) aapl.columns = ['AAPL'] aapl ``` **Actividad.** Importen todos los archivos .csv como acabamos de hacerlo con el de apple. Además, crear un solo DataFrame que cuyos encabezados por columna sean los nombres respectivos (AAPL, AMZN,...) y contengan los datos de precio de cierre. > Leer archivos usando el paquete `os`: [link](https://realpython.com/working-with-files-in-python/) ``` import os # List all files in a directory using os.listdir ---> os.path.isfile check if is a file basepath = 'Precios' # Poner en una lista todos los nombres de los archivos files = files # Read the data of Adj Close for each file and concatenate each one data = # Rename columns data.columns = data # Graficar los precios de AAPL y AMZN en una sóla gráfica ``` ## 2. Descargar los datos remotamente Para esto utilizaremos el paquete *pandas_datareader*. **Nota**: Usualmente, las distribuciones de Python no cuentan, por defecto, con el paquete *pandas_datareader*. Por lo que será necesario instalarlo aparte: - buscar en inicio "Anaconda prompt" y ejecutarlo como administrador; - el siguiente comando instala el paquete en Anaconda: **conda install pandas-datareader**; - una vez finalice la instalación correr el comando: *conda list*, y buscar que sí se haya instalado pandas-datareader ``` # Importar el modulo data del paquete pandas_datareader. La comunidad lo importa con el nombre de web import pandas as pd import pandas_datareader.data as web from datetime import datetime ``` El módulo data del paquete pandas_datareader contiene la funcion `DataReader`: ``` # Función DataReader help(web.DataReader) ``` - A esta función le podemos especificar la fuente de los datos para que se use la api específica para la descarga de datos de cada fuente. - Fuentes: - Google Finance: se tiene acceso a su api a través de Stooq Index Data. - Quandl: solo permite descargar datos de equities estadounidenses de manera gratuita. 
Es la base de datos más completa. Si se desea usar hay que crear una cuenta para autenticarse en la API. - IEX: los datos tienen antiguedad máxima de 5 años y de equities estadounidenses. - Yahoo! Finance: su api ha tenido cambios significativos y ya no es posible usarla desde DataReader. Sin embargo permite obtener datos de distintas bolsas (incluida la mexicana), por eso le haremos la luchita. > Enlace de las API disponibles de DataReader [link](https://pandas-datareader.readthedocs.io/en/latest/remote_data.html) ``` datetime.today() # Ejemplo google finance ticker = 'AAPL' source = 'stooq' start = '2015-01-01' end = datetime.today() aapl_goo = web.DataReader(ticker, source) aapl_goo ``` ## - Precios desde `quandl` >Página oficial de `quandl` para crear cuenta y tutorial de instalación de su api > Recuerden que cuando se usa anaconda no se debe de usar el comando `pip` o `pip3` sino `conda`, por ejemplo en este caso sería `conda install quandl` > https://docs.quandl.com/docs/python-installation ![image.png](attachment:image.png) Tu api_key lo encuentras en los detalles de tu cuenta después de haber creado un usuario ``` # Ejemplo quandl import quandl ######################### USar la api key que les arroja la página de quandl quandl.ApiConfig.api_key = "YOURAPIKEY " ticker = ['AAPL', 'MSFT','KO'] date = { 'gte': '2016-01-01', 'lte': datetime.today() } column = { 'columns': ['ticker', 'date', 'Adj_close']} data = quandl.get_table('WIKI/PRICES', qopts=column, ticker=ticker, date=date)# ticker = 'WIKI/AAPL' #'AAPL.US' # Poner los índices como las fechas # Seleccionar los ADJ_CLOSE de ticker y renombrar las columnas data # Gráfica de precios ``` ### Uso de Pandas para bajar datos de Yahoo! Finance * Intentamos con la función YahooDailyReader y con la función DataReader ``` help(web.YahooDailyReader) # YahooDailyReader ticker = 'AEROMEX.MX' start = '2015-01-01' end = datetime.today() aapl_yah = web.YahooDailyReader(ticker, start, end, interval='d').read() aapl_yah help(web.DataReader) # Librería DataReader # Observar que se puede usar las dos librerías closes = web.DataReader(name=ticker, data_source='yahoo', start=start, end=end) closes ``` Para efectos del curso y debido a que en yahoo finance podemos tener acceso a activos de la bolsa méxicana vamos a utilizar de acá en adelante el paquete de DataReader y la siguiente función para descargar precios de distintos activos: ``` # Función para descargar precios de cierre ajustados: def get_adj_closes(tickers, start_date=None, end_date=None): # Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today) # Descargamos DataFrame con todos los datos closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date) # Solo necesitamos los precios ajustados en el cierre closes = closes['Adj Close'] # Se ordenan los índices de manera ascendente closes.sort_index(inplace=True) return closes # Ejemplo: 'AAPL', 'MSFT', 'NVDA', '^GSPC' ticker = ['AAPL', 'MSFT', 'NVDA', '^GSPC'] start = '2018-01-01' end = None closes = get_adj_closes(tickers=ticker, start_date=start, end_date=end) closes # Gráfica de datos ``` **Nota**: Para descargar datos de la bolsa mexicana de valores (BMV), el ticker debe tener la extensión MX. Por ejemplo: *MEXCHEM.MX*, *LABB.MX*, *GFINBURO.MX* y *GFNORTEO.MX*. Como se puede notar, en este caso se consideran tres activos - Nvidia:NVDA - Apple: AAPL - Microsoft: MSFT y, el índice - Standard & Poor's: 500S&P500. Todos almacenados en la variable *closes*. 
El objeto *assets* tiene la característica *items*. Con estos, se pueden verificar los registros almacenados ``` closes.columns ``` Acceder a alguna posición específica de la variable *closes* ``` # Uso de la función iloc ``` Si deseamos encontrar los precios de cierre en una fecha específica usamos ``` # Uso de la función loc ``` O, finalmente, los valores del S&P500 ``` # Selección de alguna columna ``` ### Actividad Obtener datos históricos de - GRUPO CARSO, S.A.B. DE C.V. - GRUPO FINANCIERO INBURSA, S.A.B. DE C.V. - GRUPO FINANCIERO BANORTE, S.A.B DE C.V. - GRUPO AEROMÉXICO, S.A.B. DE C.V. en el año 2014. 1. ¿Qué compañía reportó precios de cierre más altos en *2014-07-14*? 2. Obtener los precios de cierre de cada compañía en todo el año. 3. Comparar, para cada compañía, los precios de cierre entre *2014-01-02* y *2014-12-31*. > Revisar los nombres de estas acciones en yahoo: https://finance.yahoo.com/ ``` # nombre de los activos mexícanos en yahoo ticker_mx = [] start = '2014-01-02' end = '2014-12-31' # assets_mx = get_adj_closes(tickers=ticker_mx, start_date=start, end_date=end) # assets_mx # Encontrar los precios en la fecha 2014-07-14 assets_mx_20140714 # Encontrar la acción que reportó mayor valor en la fecha 2014-07-14 assets_mx_20140714 # Acceder a algunas filas particulares de los precios (iloc) #encontrar la diferencias entre dos filas en particular ``` # 2. Graficos de las series de datos En primer lugar, se toma como ejemplo la serie de precios `AEROMEX.MX`, así como el volumen de transacciones. ``` ticker = 'AEROMEX.MX' start = '2015-01-01' end = datetime.today() aero_mx = web.DataReader(ticker, data_source='yahoo', start=start, end=end) # Se extraen los precios de cierre y los volúmenes de transacción clos_aero_mx = aero_mx['Adj Close'] # Se extraen los volúmenes de transacción vol_aero_mx = aero_mx['Volume'] # Se verifican las dimensiones clos_aero_mx ``` El gráfico de esta serie se obtiene de forma simple mediante el siguiente comando De forma similar, se grafica la serie de volúmenes de transacción Usualmente, es conveniente graficar al precio de cierre de una acción en conjunto con su volumen de transacciones. El siguiente es un ejemplo de esta clase de graficas para el caso de Aeroméxico. 
``` ############## Forma de graficar 1 top = plt.subplot2grid((4,4), (0, 0), rowspan=2, colspan=4) top.plot(clos_aero_mx.index, clos_aero_mx, label='Precio ajustado en el cierre') plt.title('Aeroméxico: Precio ajustado en el cierre 2014 - 2016') plt.legend(loc='best') bottom = plt.subplot2grid((4,4), (2, 0), rowspan=1, colspan=4) bottom.bar(vol_aero_mx.index, vol_aero_mx) plt.title('Aeroméxico: Volumen diario de transacción de la acción') plt.gcf().set_size_inches(12,8) plt.subplots_adjust(hspace=0.75) ############## Otra forma de graficar # plt.figure(figsize=(10,10)) # plt.subplot(2,1,1) # plt.plot(clos_aero_mx.index, clos_aero_mx, label='Precio ajustado en el cierre') # plt.title('Aeroméxico: Precio ajustado en el cierre 2014 - 2016') # plt.legend(loc='best') # plt.xlim([clos_aero_mx.index[0],clos_aero_mx.index[-1]]) # plt.show() # plt.figure(figsize=(10,5)) # plt.subplot(2,1,2) # plt.bar(vol_aero_mx.index, vol_aero_mx) # plt.title('Aeroméxico: Volumen diario de transacción de la acción') # plt.xlabel('Date') # plt.xlim([vol_aero_mx.index[0],vol_aero_mx.index[-1]]) # plt.ylim([0,.8e7]) # plt.show() ``` ### Graficar usando paquete `plotly` En el caso que deseen compilar los gráficos usando la paquetería ploty deben instalar está con el siguiete comando ` conda install -c plotly plotly ` Documentación [subplots](https://plotly.com/python/subplots/) ``` pd.options.plotting.backend = "plotly" from plotly.subplots import make_subplots import plotly.graph_objs as go fig = make_subplots(rows=2, cols=1) fig.add_trace( go.Scatter(x=clos_aero_mx.index, y=clos_aero_mx.values, name='Adj Closes'), row=1, col=1 ) fig.add_trace( go.Scatter(x=vol_aero_mx.index, y=vol_aero_mx.values, name='Volume'), row=2, col=1 ) fig.update_layout(height=600, width=600, title_text="Stacked Subplots") fig.show() ``` Otro procedimiento que se efectúa con frecuencia, es el cálculo de promedios y desviaciones móviles para la serie de precios. Los promedios móviles se calculan mediante: ``` # Realizar una media móvil con ventana de 20 y 100 para los precios de cierre ajustado short_rollmean_AM_AC = clos_aero_mx.rolling(window=20).mean() short_rollmean_AM_AC ``` Grafiquemos los precios junto con las medias móviles que acabamos de calcular ``` # Poner por defecto nuevamente matplotlib pd.options.plotting.backend = "matplotlib" # Gráfica de los precios de cierre ajustados y sus medias móviles fig, ax = plt.subplots(1,1, figsize=(10,9)) clos_aero_mx.plot(ax=ax, label='d') short_rollmean_AM_AC.plot(ax=ax) plt.legend() ``` Las desviaciones estándar móviles se calculan con ``` short_rollstd_AM_AC long_rollstd_AM_AC ``` y los gráficos... 
``` fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(1,1,1) ax.plot(clos_aero_mx.index, clos_aero_mx, label = 'Precios de Aeroméxico') ax.plot(clos_aero_mx.index, clos_aero_mx+short_rollstd_AM_AC, label = '+ Desviación ventana 20 días') ax.plot(clos_aero_mx.index, clos_aero_mx-short_rollstd_AM_AC, label = '- Desviación ventana 20 días') ax.set_xlabel('Fecha') ax.set_ylabel('Precios Aeroméxico en 2014-2016') ax.legend(loc='best') fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(1,1,1) ax.plot(clos_aero_mx.index, clos_aero_mx, label = 'Precios de Aeroméxico') ax.plot(clos_aero_mx.index, clos_aero_mx+long_rollstd_AM_AC, label = '+ Desviación ventana 100 días') ax.plot(clos_aero_mx.index, clos_aero_mx-long_rollstd_AM_AC, label = '- Desviación ventana 100 días') ax.set_xlabel('Fecha') ax.set_ylabel('Precios Aeroméxico en 2014-2016') ax.legend(loc='best') ``` Podemos graficar los precios de las acciones americanas Sin embargo, vemos que los precios de cierre del índice S&P500 están muy por encima de los precios de cierre de los activos, lo cual dificulta la visualización. Entonces, obtenemos el gráfico de solo los activos <script> $(document).ready(function(){ $('div.prompt').hide(); $('div.back-to-top').hide(); $('nav#menubar').hide(); $('.breadcrumb').hide(); $('.hidden-print').hide(); }); </script> <footer id="attribution" style="float:right; color:#808080; background:#fff;"> Created with Jupyter by Esteban Jiménez Rodríguez and modified by Oscar Jaramillo Z. </footer>
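The band plots above use `short_rollstd_AM_AC` and `long_rollstd_AM_AC`, but the cells that should compute the rolling standard deviations are left blank. A minimal sketch, assuming the same 20-day and 100-day windows used for the moving averages:

```
# Rolling standard deviations of the adjusted close series
# (window sizes assumed to match the 20- and 100-day moving averages above)
short_rollstd_AM_AC = clos_aero_mx.rolling(window=20).std()
long_rollstd_AM_AC = clos_aero_mx.rolling(window=100).std()

# Long-window rolling mean, analogous to short_rollmean_AM_AC
long_rollmean_AM_AC = clos_aero_mx.rolling(window=100).mean()
```

With these series defined, the deviation-band figures in the last cell run as written.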
<table> <tr> <td width=15%><img src="./img/UGA.png"></img></td> <td><center><h1>Introduction to Python for Data Sciences</h1></center></td> <td width=15%><a href="http://www.iutzeler.org" style="font-size: 16px; font-weight: bold">Franck Iutzeler</a><br/> 2017/2018 </td> </tr> </table> <br/><br/><div id="top"></div> <center><a style="font-size: 40pt; font-weight: bold">Chap. 4 - Machine Learning with ScikitLearn </a></center> <br/> # ``2. Supervised Learning`` --- <a href="#style"><b>Package check and Styling</b></a><br/><br/><b>Outline</b><br/><br/> &nbsp;&nbsp;&nbsp; a) <a href="#supCla"> Classification</a><br/>&nbsp;&nbsp;&nbsp; b) <a href="#supReg"> Regression</a><br/>&nbsp;&nbsp;&nbsp; c) <a href="#supExo"> Exercises </a><br/> <div class="warn"><b>Warning:</b> In the session, we will investigate <i>examples</i> on how to deal with popular learning problems using standard algorithms. Many other problems and algorithms exist so this course is not at all exhaustive. </div> ## <a id="supCla"> a) Classification</a> <p style="text-align: right; font-size: 10px;"><a href="#top">Go to top</a></p> ``` import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_blobs %matplotlib inline # we create 40 separable points in R^2 around 2 centers (random_state=6 is a seed so that the set is separable) X, y = make_blobs(n_samples=40, n_features=2, centers=2 , random_state=6) print(X[:5,:],y[:5]) # print the first 5 points and labels plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) ``` Support Vector Machines (SVM) are based on learning a vector $w$ and an intercept $b$ such that the hyperplane $w^T x - b = 0$ separates the data i.e. $a$ belongs to one class if $w^T a - b > 0$ and the other elsewhere. They were later extended to *Kernel methods* that is $\kappa(w, a) - b = 0$ is now the separating *curve* where $\kappa$ is the *kernel*, typically: * linear: $\kappa(x,y)= x^T y$ (original SVM) * polynomial: $\kappa(x,y)= (x^T y)^d$ * Gaussian radial basis function (rfb): $\kappa(x,y)= \exp( - \gamma \| x - y \|^2 )$ ``` from sklearn.svm import SVC # Support vector classifier i.e. Classifier by SVM modelSVMLinear = SVC(kernel="linear") modelSVMLinear.fit(X,y) ``` The following illustration can be found in the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas. ``` def plot_svc_decision_function(model, ax=None, plot_support=True): """Plot the decision function for a 2D SVC""" if ax is None: ax = plt.gca() xlim = ax.get_xlim() ylim = ax.get_ylim() # create grid to evaluate model x = np.linspace(xlim[0], xlim[1], 30) y = np.linspace(ylim[0], ylim[1], 30) Y, X = np.meshgrid(y, x) xy = np.vstack([X.ravel(), Y.ravel()]).T P = model.decision_function(xy).reshape(X.shape) # plot decision boundary and margins ax.contour(X, Y, P, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) # plot support vectors if plot_support: ax.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1], s=300, linewidth=1, facecolors='none'); ax.set_xlim(xlim) ax.set_ylim(ylim) plt.scatter(X[:, 0], X[:, 1], c=y , cmap=plt.cm.Paired) plot_svc_decision_function(modelSVMLinear) ``` We see clearly that the linear SVM seeks at maximizing the *margin* between the hyperplane and the two well defined classes from the data. ### Non-separable data In real cases, the data is usually not linearly separable as before. 
``` # we create points in R^2 around 2 centers (random_state=48443 is a seed so that the set is *not* separable) X, y = make_blobs(n_samples=100, n_features=2, centers=2 , random_state=48443) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) ``` Let us use the *same* linear SVM classifier. Obviously, there are *misclassified points*, the model is thus learnt not by maximizing the margin (which does not exist anymore) but by minimizing a penalty over misclassified data. This penalty takes the form of an allowance margin controlled by a parameter $C$. The smaller $C$ the more inclusive the margin. Finding a good value for $C$ is up to the data scientist. ``` try: from sklearn.model_selection import train_test_split # sklearn > ... except: from sklearn.cross_validation import train_test_split # sklearn < ... XTrain, XTest, yTrain, yTest = train_test_split(X,y,test_size = 0.5) # split data in two model1 = SVC(kernel="linear",C=0.01) model1.fit(XTrain,yTrain) model2 = SVC(kernel="linear",C=100) model2.fit(XTrain,yTrain) plt.scatter(XTrain[:, 0], XTrain[:, 1], c=yTrain , cmap=plt.cm.Paired) plot_svc_decision_function(model1) plt.title("C = 0.01") plt.scatter(XTrain[:, 0], XTrain[:, 1], c=yTrain , cmap=plt.cm.Paired) plot_svc_decision_function(model2) plt.title("C = 100") ``` To find out which value of $C$ to use or globally the performance of the classifier, one can use Scikit Learn's [classification metrics](http://scikit-learn.org/stable/modules/model_evaluation.html#classification-metrics), for instance the confusion matrix. ``` from sklearn.metrics import confusion_matrix yFit1 = model1.predict(XTest) yFit2 = model2.predict(XTest) mat1 = confusion_matrix(yTest, yFit1) mat2 = confusion_matrix(yTest, yFit2) print('Model with C = 0.01') print(mat1) print("Model with C = 100") print(mat2) ``` It can also be plotted in a fancier way with seaborn. ``` import seaborn as sns sns.heatmap(mat1, square=True, annot=True ,cbar=False) plt.ylabel('true label') plt.xlabel('predicted label') ``` ### Kernels When the separation between classes is not *linear*, kernels may be used to draw separating curves instead of lines. The most popular is the Gaussian rbf. ``` from sklearn.datasets import make_moons X,y = make_moons(noise=0.1) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) modelLinear = SVC(kernel="linear") modelLinear.fit(X,y) modelRbf = SVC(kernel="rbf") modelRbf.fit(X,y) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plot_svc_decision_function(modelLinear) plot_svc_decision_function(modelRbf) plt.title("The two models superposed") ``` Let us compare the linear and rbf training error using the zero one loss (the proportion of misclassified examples). ``` from sklearn.metrics import zero_one_loss yFitLinear = modelLinear.predict(X) yFitRbf = modelRbf.predict(X) print("0/1 loss -- Linear: {:.3f} Rbf: {:.3f}".format(zero_one_loss(y, yFitLinear),zero_one_loss(y, yFitRbf))) ``` ### Multiple classes Where there are multiples classes (as in the *iris* dataset of the Pandas notebook), different strategies can be adopted: * Transforming the multiclass problem into a binary one by looking at the *one-vs-rest* problem (for each class construct a binary classifier between it and the rest) or the *one-vs-one* one (where each couple of classes is considered separately). After this transformation, standard binary classifiers can be used. 
* Using dedicated algorithms such as *decision trees* The corresponding algorithms can be found in the [multiclass module documentation](http://scikit-learn.org/stable/modules/multiclass.html). We are going to illustrate this by the iris 3-class classification problem using only the 2 petal features (width and length, this is only so that the feature vector is 2D and easy to visualize). ``` import pandas as pd import numpy as np iris = pd.read_csv('data/iris.csv') classes = pd.DataFrame(iris["species"]) features = iris.drop(["species","sepal_length","sepal_width"],axis=1) classes.sample(6) features.sample(6) XTrain, XTest, yTrain, yTest = train_test_split(features,classes,test_size = 0.5) from sklearn.multiclass import OneVsRestClassifier yPred = OneVsRestClassifier(SVC()).fit(XTrain, yTrain).predict(XTest) print(yPred) # Note the classes are not number but everything went as expected class_labels= ['virginica' , 'setosa' , 'versicolor'] sns.heatmap(confusion_matrix(yTest, yPred), square=True, annot=True ,cbar=False, xticklabels= class_labels, yticklabels=class_labels) plt.ylabel('true label') plt.xlabel('predicted label') ``` ### Other classifiers The main classifiers from Scikit learn are: *Linear SVM, RBF SVM (as already seen), Nearest Neighbors, Gaussian Process, Decision Tree, Random Forest, Neural Net, AdaBoost, Naive Bayes, QDA*. Use is: from sklearn.neural_network import MLPClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis classifiers = [ KNeighborsClassifier(3), SVC(kernel="linear", C=0.025), SVC(gamma=2, C=1), GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True), DecisionTreeClassifier(max_depth=5), RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), MLPClassifier(alpha=1), AdaBoostClassifier(), GaussianNB(), QuadraticDiscriminantAnalysis()] ## <a id="supReg"> b) Regression</a> <p style="text-align: right; font-size: 10px;"><a href="#top">Go to top</a></p> Let consider the problem of predicting real values from a set of features. We will consider the <a href="http://archive.ics.uci.edu/ml/datasets/Student+Performance">student performance</a> dataset. The goal is to predict the final grade from the other information, we get from the documentation: ``` import pandas as pd import numpy as np student = pd.read_csv('data/student-mat.csv') student.head() target = pd.DataFrame(student["G3"]) features = student.drop(["G3"],axis=1) ``` One immediate problem here is that the features are not *numeric* (not floats). Thankfully, Scikit Learn provides [encoders](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html#sklearn.preprocessing.LabelEncoder) to convert categorical (aka nominal, discrete) features to numerical ones. ``` from sklearn.preprocessing import LabelEncoder lenc = LabelEncoder() num_features = features.apply(lenc.fit_transform) num_features.head() ``` Even numerical values were encoded, as we are going to normalize, it is not really important. The normalization is done by removing the mean and equalizing the variance per feature, in addition, we are going to add an intercept. 
``` from sklearn.preprocessing import StandardScaler, add_dummy_feature scaler = StandardScaler() normFeatures = add_dummy_feature(scaler.fit_transform(num_features)) preproData = pd.DataFrame(normFeatures , columns=[ "intercept" ] + list(num_features.columns) ) preproData.describe().T ``` ### Regression and Feature selection with the Lasso The lasso problem is finding a regressor $w$ such that minimizes $$ \frac{1}{2 n_{samples}} \|X w - y ||^2_2 + \alpha \|w\|_1 $$ and is popular for prediction as it simultaneously *selects features* thanks to the $\ell_1$-term. The greater $\alpha$ the fewer features. ``` try: from sklearn.model_selection import train_test_split # sklearn > ... except: from sklearn.cross_validation import train_test_split # sklearn < ... from sklearn.linear_model import Lasso XTrain, XTest, yTrain, yTest = train_test_split(preproData,target,test_size = 0.25) model = Lasso(alpha=0.1) model.fit(XTrain,yTrain) ``` We can observe the regressor $w$ provided by the model, notice the sparsity. ``` model.coef_ ``` We can observe which coefficients are put to $0$ and which ones are positively/negatively correlated. ``` print("Value Feature") for idx,val in enumerate(model.coef_): print("{:6.3f} {}".format(val,preproData.columns[idx])) ``` Let us take a look at our predictions. ``` targetPred = model.predict(XTest) print("Predicted True") for idx,val in enumerate(targetPred): print("{:4.1f} {:.0f}".format(val,float(yTest.iloc[idx]))) ``` ### Regularization path Selecting a good parameter $\alpha$ is the role of the data scientist. For instance, a easy way to do is the following. ``` n_test = 15 alpha_tab = np.logspace(-10,1,base=2,num = n_test) print(alpha_tab) trainError = np.zeros(n_test) testError = np.zeros(n_test) featureNum = np.zeros(n_test) for idx,alpha in enumerate(alpha_tab): model = Lasso(alpha=alpha) model.fit(XTrain,yTrain) yPredTrain = model.predict(XTrain) yPredTest = model.predict(XTest) trainError[idx] = np.linalg.norm(yPredTrain-yTrain["G3"].values)/yTrain.count() testError[idx] = np.linalg.norm(yPredTest-yTest["G3"].values)/yTest.count() featureNum[idx] = sum(model.coef_!=0) alpha_opt = alpha_tab[np.argmin(testError)] import matplotlib.pyplot as plt import seaborn as sns sns.set() %matplotlib inline plt.subplot(311) plt.xscale("log") plt.plot(alpha_tab, trainError,label="train error") plt.xlim([min(alpha_tab),max(alpha_tab)]) plt.legend() plt.xticks([]) plt.axvline(x=alpha_opt) plt.ylabel("error") plt.subplot(312) plt.xscale("log") plt.plot(alpha_tab, testError,'r',label="test error") plt.xlim([min(alpha_tab),max(alpha_tab)]) #plt.ylim([0.19, 0.21]) plt.legend() plt.axvline(x=alpha_opt) plt.xticks([]) plt.ylabel("error") plt.subplot(313) plt.xscale("log") plt.scatter(alpha_tab, featureNum) plt.xlim([min(alpha_tab),max(alpha_tab)]) plt.ylim([0,28]) plt.axvline(x=alpha_opt) plt.ylabel("nb. of features") plt.xlabel("alpha") ``` ## <a id="supExo"> c) Exercises </a> <p style="text-align: right; font-size: 10px;"><a href="#top">Go to top</a></p> <div class="exo"> <b>Exercise 4.2.1:</b> a very popular binary classification exercise is the <a href="https://www.kaggle.com/c/titanic">survival prediction from Titanic shipwreck on Kaggle</a>. <br/><br/> <i> The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. 
This sensational tragedy shocked the international community and led to better safety regulations for ships.<br/> One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class.<br/> In this challenge, we ask you to complete the analysis of what sorts of people were likely to survive. In particular, we ask you to apply the tools of machine learning to predict which passengers survived the tragedy.<br/><br/><br/></i> The data - taken from <a href="https://www.kaggle.com/c/titanic">Kaggle</a> - is located in <tt>data/titanic/train.csv</tt> and has the following form: <table> <tbody> <tr><th><b>Feature</b></th><th><b>Definition</b></th><th><b>Comment</b></th></tr> <tr> <td>PassengerId</td> <td>ID</td> <td>numeric</td> </tr> <tr> <td>Survival</td> <td>Survival of the passenger</td> <td>0 = No, 1 = Yes <b>target to predict</b></td> </tr> <tr> <td>Pclass</td> <td>Ticket class</td> <td>1 = 1st, 2 = 2nd, 3 = 3rd</td> </tr> <tr> <td>Name</td> <td>Full name w/ Mr. Mrs. etc.</td> <td>string</td> </tr> <tr> <td>Sex</td> <td>Sex</td> <td><tt>male</tt> or <tt>female</tt></td> </tr> <tr> <td>Age</td> <td>Age in years</td> <td>numeric</td> </tr> <tr> <td>SibSp</td> <td># of siblings / spouses aboard the Titanic</td> <td>numeric</td> </tr> <tr> <td>Parch</td> <td># of parents / children aboard the Titanic</td> <td></td> </tr> <tr> <td>Ticket</td> <td>Ticket number</td> <td>quite messy</td> </tr> <tr> <td>Fare</td> <td>Passenger fare</td> <td></td> </tr> <tr> <td>cabin</td> <td>Cabin number</td> <td>letter + number (e.g. C85), often missing</td> </tr> <tr> <td>Embarked</td> <td>Port of Embarkation</td> <td>C = Cherbourg, Q = Queenstown, S = Southampton</td> </tr> </tbody> </table> <ul> <li> Load the dataset and preprocess the features. (you can remove features that seem uninteresting to you). <li> Perform binary classification to predict the survival of a passenger depending on its information and validate you approach. <li> Perform some feature engineering to improve the performance of you classifier (see e.g. <a href="https://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/">here</a>) </ul> </div> <div class="exo"> <b>Exercise 4.2.2:</b> a very popular regression exercise is the <a href="https://www.kaggle.com/c/house-prices-advanced-regression-techniques">house price prediction in Ames, Iowa on Kaggle</a>. <br/><br/> The data - taken from <a href="https://www.kaggle.com/c/house-prices-advanced-regression-techniques">Kaggle</a> - is located in <tt>data/house_prices/train.csv</tt>. <ul> <li> Try to reach the best accurracy in terms of mean absolute error on the log of the prices ($Error = \frac{1}{n} \sum_{i=1}^n | \log(predicted_i) - \log(true_i) |$). <li> Which features (original or made up) are the most relevant? </ul> </div> --- <div id="style"></div> ### Package Check and Styling <p style="text-align: right; font-size: 10px;"><a href="#top">Go to top</a></p> ``` import lib.notebook_setting as nbs packageList = ['IPython', 'numpy', 'scipy', 'matplotlib', 'cvxopt', 'pandas', 'seaborn', 'sklearn', 'tensorflow'] nbs.packageCheck(packageList) nbs.cssStyling() ```
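The regularization path in section b) selects `alpha` by scanning a grid against a single train/test split. A hedged alternative is scikit-learn's `LassoCV`, which runs the same kind of search with cross-validation; the grid below reuses the notebook's `logspace` values, while the fold count and `max_iter` are illustrative choices.

```
from sklearn.linear_model import LassoCV
import numpy as np

# Cross-validated alpha search over the same logarithmic grid as above
alphas = np.logspace(-10, 1, base=2, num=15)
lasso_cv = LassoCV(alphas=alphas, cv=5, max_iter=10000)
lasso_cv.fit(XTrain, yTrain.values.ravel())   # ravel() flattens the (n, 1) target frame

print("alpha selected by CV:", lasso_cv.alpha_)
print("non-zero coefficients:", (lasso_cv.coef_ != 0).sum())
```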
# Link labeldata to window cut-outs

```
# %pip install rioxarray
# %pip install geopandas
import numpy as np
import rasterio
from rasterio.features import shapes, geometry_mask
import rioxarray
import json

dataPath = '/Users/maaikeizeboud/Documents/Data/test/'
imName = 'S2_comp_first.tif'
# labName = 'S2_20190131_-100p7_-75p0.geojson'
labName = 'output.geojson'
```

## Load Image (copied from Meierts 'rasterize_labeled_data.ipynb')

```
bands = rioxarray.open_rasterio(dataPath + imName)
bands.rio.bounds()
bands.rio.crs
bands.spatial_ref.crs_wkt
```

## Load label data

labeldata.geojson contains both Polygon and MultiLine features. NB: the label data is stored in the EPSG:4326 projection, i.e. (lat, lon) values. It was converted to the EPSG:3031 projection (Antarctic polar stereographic) BEFORE loading here. This was done in a terminal, not in this notebook, using GDAL's ogr2ogr:

``ogr2ogr -s_srs EPSG:4326 -t_srs EPSG:3031 output.geojson input.geojson``

```
with open(dataPath + labName) as f:
    gj = json.load(f)
features = gj['features'][0]['geometry']  # select one feature polygon for testing
len(gj)
```

### Check that the geometry is a valid GeoJSON geometry for rasterio.features

Even though the feature is recognised as is_valid_geom()=True, using it as input for rasterio.features.geometry_mask(geometries,...) (see a later cell) yields: ``ValueError: No valid geometry objects found for rasterize``

```
print(rasterio.features.is_valid_geom(features))
```

### Convert the polygon to a georegistered polygon

We would rather skip this step if we can get geometry_mask to work with the 'features' geometry directly. ATTENTION: this should also be tested for MultiLine (currently only Polygon).

```
from geopandas import GeoSeries
from shapely import geometry
from shapely.geometry import shape, mapping, MultiPolygon

# poly1 = geometry.Polygon([[p.x,p.y] for p in plist1])
poly = np.squeeze(features['coordinates'])  # ndarray
poly1 = geometry.Polygon(poly)
polys = GeoSeries([poly1], crs=bands.spatial_ref.crs_wkt)
type(polys)
```

## Create mask with rasterio.features.geometry_mask

### (1) Use the georegistered polygon -- works

Create a mask based on the geometry and invert it to select pixels WITHIN the bounds. ATTENTION: it is possible to select on touch or on center inclusion. polys: dtype geometry / geopandas.geoseries.GeoSeries

```
mmask = geometry_mask(polys, out_shape=(len(bands.y), len(bands.x)), transform=bands.rio.transform(), invert=True)
# Inspect data type of mask -> ndarray
mmask = np.expand_dims(mmask, axis=0)
mmask.shape
```

### (2) Use a GeoJSON-like object -- doesn't work

https://rasterio.readthedocs.io/en/latest/api/rasterio.features.html GeoJSON-like objects should work, and would save some conversion from geojson > polygon > georegistered polygon.

```
mmask = geometry_mask(features, out_shape=(len(bands.y), len(bands.x)), transform=bands.rio.transform(), invert=True)
mmask = np.expand_dims(mmask, axis=0)
mmask.shape

m2mask = mmask.astype(np.dtype('uint16'))
```

Inspect the mask:

```
import matplotlib.pyplot as plt

# imshow(amask[0])
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 10))
ax2.imshow(m2mask[0])
ax1.imshow(bands[0, :, :])
ax2.set_title('labeldata');
ax1.set_title('image (1/3 bnd)');
```

### Convert the mask to integer and add it as a band to the image

The mask is boolean; convert it to an integer representation (True==1, False==0), then convert the mask to a DataArray, importing the coordinates from `bands`.

```
import xarray

amask = xarray.DataArray(data=m2mask, dims=['band', 'y', 'x'], coords={'band': [0], 'y': bands[0].coords['y'], 'x': bands[0].coords['x']})

from rioxarray.rioxarray import _add_attrs_proj
_add_attrs_proj(amask, bands[0])

out = xarray.concat([bands, amask], 'band')
out
```
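A likely reason approach (2) fails is that `rasterio.features.geometry_mask` expects an *iterable* of GeoJSON-like geometries, so passing the single `features` dict makes rasterio iterate over its keys and reject them. A minimal sketch of the fix, wrapping the geometry in a list (treat this as an assumption to verify; it also assumes `features` is already in the raster's CRS, EPSG:3031):

```
# geometry_mask iterates over its first argument, so a bare GeoJSON dict is
# seen as its keys ('type', 'coordinates') and raises
# "No valid geometry objects found for rasterize". Wrap it in a list instead.
mmask = geometry_mask(
    [features],                                   # note the list
    out_shape=(len(bands.y), len(bands.x)),
    transform=bands.rio.transform(),
    invert=True,
)
mmask = np.expand_dims(mmask, axis=0)
m2mask = mmask.astype('uint16')
```

If this works, it removes the geojson > shapely > GeoSeries conversion step entirely, which is what section (2) set out to do.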
# Uber 交通数据可视化 [![Licensed with MIT!](https://img.shields.io/github/license/Dragon1573/Python-Analysis?color=blue&label=License&style=flat-square)](https://github.com/Dragon1573/Python-Analysis/blob/master/LICENSE) [![Datasets from Kaggle](https://img.shields.io/badge/Kaggle-118KB-blue?style=flat-square&logo=Kaggle)](https://www.kaggle.com/shobhit18th/uber-traffic-data-visualization) ## 背景 &emsp;&emsp;班加罗尔是印度南部城市,卡纳塔克邦的首府,印度第五大城市,人口约1050万人。印度在1947年独立以后,班加罗尔发展成重工业的中心。高科技公司在班加罗尔的成功建立使其成为印度信息科技的中心,被誉为“亚洲的硅谷”。班加罗尔是印度科技研究的枢纽,其中的印度科学学院是印度历史最为悠久的大学和研究所。 &emsp;&emsp;人们漫步在美国的硅谷,都会由衷地赞叹那儿优美的自然环境。但班加罗尔市给人的印象并不是世界科技的中心、美国的“后台办公室”。低矮的房屋绵绵不断,人畜并行的现象随处可见。在城市交通方面,豪瑟大道已成为班加罗尔市交通事故死亡率最高的地方,平均每年约有800个生灵葬身于车轮之下。身处这儿,你会发现各式各样的机动车、非机动车和行人在狭窄的街道上挤成一团。尤其在市区南部高新技术公司比较集中的地方,交通高峰期道路更是拥挤不堪。 &emsp;&emsp;据统计,班加罗尔人口数量从1992年的450万猛增到2002年的650万。人口增长间接造成社会车辆的增加,目前班加罗尔市机动车保有量约为200万辆,远超市政道路设施的承受力。由于印度法律严格保护私有财产,使得在市区内进行拆迁和道路改造工作举步维艰,高架桥建设进展缓慢,结果造成交通堵塞现象日益严重。班加罗尔的交通问题已经严重影响了当地公司的工作和生产效率,引发许多公司的抱怨和不满,一些大公司甚至开始在其他城市建起新的研发和生产基地。 &emsp;&emsp;目前,邦政府已经制定了一个详细的基础设施发展纲要,计划开始修建两条穿城而过的轨道交通线路,增加高架桥和立交桥的修建,拓宽部分道路,同时鼓励新公司到交通状况较好的北部地区建厂,以缓解南部的交通压力。 ## 内容 &emsp;&emsp;现代城市日新月异,机动车交通的兴起改变了我们的城市设计。了解一座城市的交通流量和峰谷时段变化至关重要,因此**分析交通数据并从中提取关键信息**非常重要。我们邀请数据科学家、相关分析人员和存在研究兴趣的社会人士来分析**班加罗尔市**的交通数据,并帮助邦政府提出符合实际、行之有效的交通调度和城市规划方案。 &emsp;&emsp;数据下载自 [Kaggle](https://www.kaggle.com/shobhit18th/uber-traffic-data-visualization) ,从 [MachineHack](https://www.machinehack.com/) 转载而来,最初来源为 [Uber Movement](https://movement.uber.com/) 。 ## 题目及任务 ### 步骤一 下载数据集并导入所需程序包 ```bash # 激活 Anaconda 环境 activate # 确认 Numpy 是否已安装 python -m pip list | grep 'numpy' > /dev/null if [ $? == 1 ]; then echo 'Installing Numpy ...' python -m pip install numpy fi echo 'Numpy has successfully installed!' # 确认 Pandas 是否已安装 python -m pip list | grep 'pandas' > /dev/null if [ $? == 1 ]; then echo 'Installing Pandas ...' python -m pip install numpy fi echo 'Pandas has successfully installed!' # 确认 Scikit-learn 是否已安装 python -m pip list | grep 'sklearn' > /dev/null if [ $? == 1 ]; then echo 'Installing Scikit-learn ...' python -m pip install numpy fi echo 'Scikit-learn has successfully installed!' # 确认 Matplotlib 是否已经安装 python -m pip list | grep 'matplotlib' > /dev/null if [ $? == 1]; then echo 'Installing Matplotlib ...' python -m pip install matplotlib fi echo 'Matplotlib has successfully installed!' # 确认 kaggle 是否已安装 python -m pip list | grep 'kaggle' > /dev/null if [ $? == 1 ]; then echo 'Installing Kaggle ...' python -m pip install kaggle fi echo 'Kaggle has successfully installed!' # 检查数据集是否存在 if [ -f 'Final_Majestic_to_AIM_jan-2016tomarch-2018.xlsx' ]; then echo 'Datasets has successfully downloaded!' else echo 'Downloading datasets ...' python -m kaggle datasets download -d shobhit18th/uber-traffic-data-visualization -q --unzip echo 'Download complete!' 
fi ``` ``` # 数据框 import pandas # pandas时间转换器 from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() # 数学运算工具集 import numpy # 绘图工具集 from matplotlib import pyplot # 聚类分析 from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans ``` ### 步骤二 数据预处理 ``` # 从 Excel 读入数据 table = pandas.read_excel('Final_Majestic_to_AIM_jan-2016tomarch-2018.xlsx', encoding='UTF-8', index_col='Date') ``` &emsp;&emsp;通过`Microsoft Excel`直接查看数据,可以发现数据中存在个别标签列,我们需要进行哑元沉默处理。 ``` # 哑变量处理 table = pandas.get_dummies(table) ``` &emsp;&emsp;哑元处理完成后,我们发现被沉默的哑元标签其实只有1种,即只存在1个相应的哑元列且哑元列所有元素均为1。 &emsp;&emsp;对于一个元素完全相同的数据集,其标准差为0。我们可以根据这个特点剔除没有实际意义的列。 &emsp;&emsp;剔除完成后的数据仍有18列,我们仅选取各参数的平均数据(`* Mean Travel Time (Seconds)`)进行研究。 ``` # 提取需要研究的列 table = table[[ 'AM Mean Travel Time (Seconds)', 'PM Mean Travel Time (Seconds)', 'Midday Mean Travel Time (Seconds)', 'Evening Mean Travel Time (Seconds)', 'Early Morning Mean Travel Time (Seconds)' ]] table.columns = ['Morning', 'Afternoon', 'Midday', 'Evening', 'Midnight'] ``` &emsp;&emsp;由于`Microsoft Excel`的数据解析格式与`pandas`不同,导致原始数据的时间列排序混乱。我们需要在`pandas`中按照正确的时间匹配规则重新对时间列进行提取转换。 ``` # 转换为时刻索引和时段列 table.index = pandas.to_datetime(table.index) table.sort_index(inplace=True) ``` &emsp;&emsp;通过`Microsoft Excel`观察原始数据,我们发现数据集整体共计821行,仅有1行存在空缺单元格,残缺行占总数据规模的$0.125\%$。 &emsp;&emsp;由于残缺行占比很小,可以直接删除残缺行。 ``` # 删除数据残缺行 table.dropna(axis=0, how='any', inplace=True) ``` &emsp;&emsp;原始数据中存在不少的异常数据,我们需要对这些异常值进行处理。 ``` def outliersScaler(series: pandas.Series) -> pandas.Series: ''' 异常值清洗方法 @param series - 数据列 @return 数据列 ''' QL = series.quantile(0.25) QU = series.quantile(0.75) IQR = QU - QL series.loc[series > (QU + 1.5 * IQR)] = QU series.loc[series < (QL - 1.5 * IQR)] = QL return series # 清洗异常值 for column in table.columns: table[column] = outliersScaler(table[column]) ``` ### 步骤三 数据可视化 #### 一.箱线图 &emsp;&emsp;箱线图是由数据的5项统计性指标绘制而成的,这5个指标分别为`上边缘`、`上四分位数`、`中位数`、`下四分位数`、`下边缘`。它的功能并不局限于识别异常值,它还能反映数据的分布特征,并用于多组数据间的特征比较。 ``` %matplotlib inline pyplot.figure(figsize=(10, 8)) pyplot.boxplot(table.to_numpy(), notch=True, labels=table.columns, meanline=True) pyplot.ylabel('Duration (Seconds)') pyplot.savefig('imgs/Boxplot.svg') pyplot.show() ``` &emsp;&emsp;根据以上绘制的箱线图,我们可以获得以下结论:在2016年1月~2018年12月间, - 下午是每日交通的高峰期,平均通勤时长超过 `3600 sec`(`1 hr`) ;而凌晨是每日交通的低谷期,平均通勤时长在 `1500 sec`(`25 min`),其他时段基本处于每日的正常通勤状况。 - 在凌晨,城市的交通情况最为优秀,乘客通勤时间最稳定;而在其他时段,城市的交通情况较为复杂,乘客通勤过程中频繁出现提前或滞后很长时间到达。 - 在早晨,较多乘客会比平均预期通勤时间更早到达;而在其他时段,乘客通勤时长分布比例比较平均。 #### 二.移动平均数 &emsp;&emsp;移动平均数是一种用于描述特定时间段内数据变化趋势的平滑曲线,我们可以根据移动平均数并辅以回归分析,对未来数据进行可靠地预测。 &emsp;&emsp;此处,我们选择的移动窗口大小为360天,即绘制 `MA360` 曲线。 ``` %matplotlib inline for i in range(len(table.columns)): # 创建画布 pyplot.figure(figsize=(10, 4)) # 生成移动平均数据集 series = numpy.zeros_like(table.iloc[:, i]) for j in range(360, len(series)): # 使用 MA60 作为移动平均曲线 series[j] = numpy.average(table.iloc[j - 360:j, i]) series[series == 0] = numpy.NaN pyplot.plot(table.index, series) pyplot.title('MA360 of ' + table.columns[i] + ' Travel Time') # 保存子图 pyplot.savefig('imgs/MA10-%d.svg' % i) pyplot.show() ``` &emsp;&emsp;通过以上6幅曲线图,我们可以得出以下结论: - 在2017年初~2018年底的2年时间里,班加罗尔市早晨、下午的通勤时长都出现了非常剧烈的波动。这2个时段都是城市交通最为繁忙的,它们分别包含了早高峰和晚高峰,高峰时段内的交通状况最为复杂,通勤时长难以固定。 - 班加罗尔市正午和晚间时段的交通状况这2年中变化趋势明显,正午通勤时长显著缩短,而晚间通勤时间在不断延长。 #### 三.K-Means(K阶平均值)聚类 &emsp;&emsp;聚类分析能帮助我们对数据进行分类。在此例中,我们将数据分为3类,并分别标记为 `Good`、`Normal`、`Worse` 。通过对2016年初~2018年底共3年时间820条数据进行聚类,我们可以近似描绘班加罗尔市3年来的整体交通概况。 ``` # 对数据进行标准差标准化 tableScaled = StandardScaler().fit_transform(table) table['Cluster'] = KMeans(n_clusters=3, 
## Conclusions and proposed improvements

The analysis above shows that the overall quality of road traffic in Bangalore is fairly poor: congestion is especially severe during the morning and evening rush hours, and the pressure only eases from the evening into the small hours.

Building urban rail transit, vigorously promoting public transport, and controlling and then gradually reducing the number of private motor vehicles in the city would be an effective remedy. Bangalore's roads are narrow and ill-suited to heavy private-vehicle traffic; with public transport, the authorities can schedule city-wide traffic far more tightly and keep surface traffic flowing normally.

## Acknowledgements

1. MachineHack: <https://www.machinehack.com/>
2. Uber Movement: <https://movement.uber.com/>
```
import ibm_db
```

When the command above completes, the `ibm_db` library is loaded in your notebook.

## Identify the database connection credentials

Connecting to a dashDB or DB2 database requires the following information:

* Driver Name
* Database name
* Host DNS name or IP address
* Host port
* Connection protocol
* User ID (or username)
* User Password

__Notice:__ To obtain credentials please refer to the instructions given in the first Lab of this course.

Now enter your database credentials below and execute the cell with `Shift` + `Enter`.

```
#Replace the placeholder values with your actual Db2 hostname, username, and password:
dsn_hostname = "dashdb-entry-yp-dal09-08.services.dal.bluemix.net" # e.g.: "dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net"
dsn_uid = "dash100038"       # e.g. "abc12345"
dsn_pwd = "n_3w3vgSGW_M"     # e.g. "7dBZ3wWt9XN6$o0J"

dsn_driver = "{IBM DB2 ODBC DRIVER}"
dsn_database = "BLUDB"       # e.g. "BLUDB"
dsn_port = "50000"           # e.g. "50000"
dsn_protocol = "TCPIP"       # i.e. "TCPIP"
```

## Create the DB2 database connection

The ibm_db API uses the IBM Data Server Driver for ODBC and CLI APIs to connect to IBM DB2 and Informix.

Let's build the dsn connection string using the credentials you entered above.

```
#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter
#Create the dsn connection string
dsn = (
    "DRIVER={0};"
    "DATABASE={1};"
    "HOSTNAME={2};"
    "PORT={3};"
    "PROTOCOL={4};"
    "UID={5};"
    "PWD={6};").format(dsn_driver, dsn_database, dsn_hostname, dsn_port, dsn_protocol, dsn_uid, dsn_pwd)

#print the connection string to check correct values are specified
print(dsn)
```

Now establish the connection to the database.

```
#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter
#Create database connection
try:
    conn = ibm_db.connect(dsn, "", "")
    print ("Connected to database: ", dsn_database, "as user: ", dsn_uid, "on host: ", dsn_hostname)
except:
    print ("Unable to connect: ", ibm_db.conn_errormsg() )
```

Congratulations if you were able to connect successfully. Otherwise check the error and try again.

```
#Retrieve Metadata for the Database Server
server = ibm_db.server_info(conn)

print ("DBMS_NAME: ", server.DBMS_NAME)
print ("DBMS_VER: ", server.DBMS_VER)
print ("DB_NAME: ", server.DB_NAME)

#Retrieve Metadata for the Database Client / Driver
client = ibm_db.client_info(conn)

print ("DRIVER_NAME: ", client.DRIVER_NAME)
print ("DRIVER_VER: ", client.DRIVER_VER)
print ("DATA_SOURCE_NAME: ", client.DATA_SOURCE_NAME)
print ("DRIVER_ODBC_VER: ", client.DRIVER_ODBC_VER)
print ("ODBC_VER: ", client.ODBC_VER)
print ("ODBC_SQL_CONFORMANCE: ", client.ODBC_SQL_CONFORMANCE)
print ("APPL_CODEPAGE: ", client.APPL_CODEPAGE)
print ("CONN_CODEPAGE: ", client.CONN_CODEPAGE)
```

## Close the Connection

We free all resources by closing the connection. Remember that it is always important to close connections so that we can avoid unused connections taking up resources.

```
ibm_db.close(conn)
```

## Summary

In this tutorial you established a connection to a DB2 on Cloud database from a Python notebook using the ibm_db API.

Copyright &copy; 2017 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
```
from matplotlib import pyplot
from utils.mnist_reader import load_mnist
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.optimizers import Adam
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D
from keras.layers.advanced_activations import LeakyReLU

# Load the character images with the MNIST-style reader and reshape them
# into 64x64 single-channel arrays
X_train, y_train = load_mnist('data', kind='train')
X_test, y_test = load_mnist('data', kind='test')
X_train = X_train.reshape(X_train.shape[0], 64, 64, 1)
X_test = X_test.reshape(X_test.shape[0], 64, 64, 1)

# Scale pixel values to [0, 1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

# One-hot encode the 26 class labels
number_of_classes = 26
Y_train = np_utils.to_categorical(y_train, number_of_classes)
Y_test = np_utils.to_categorical(y_test, number_of_classes)

# Model: three convolution/pooling blocks followed by a dense classifier
model = Sequential()

model.add(Conv2D(32, (5, 5), input_shape=(64, 64, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Conv2D(128, (1, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(number_of_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Augment the training images; leave the test images untouched
gen = ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2,
                         shear_range=0.2, zoom_range=0.2)
test_gen = ImageDataGenerator()
train_generator = gen.flow(X_train, Y_train, batch_size=32)
test_generator = test_gen.flow(X_test, Y_test, batch_size=32)

# Train on augmented batches and validate against the test generator
model.fit_generator(train_generator, steps_per_epoch=2549//32, epochs=80,
                    validation_data=test_generator)

# Evaluate on the untouched test set
score = model.evaluate(X_test, Y_test)
print()
print('Test accuracy: ', score[1])

# Compare predicted and actual labels
import pandas as pd
predictions = list(model.predict_classes(X_test))
actuals = list(y_test)
sub = pd.DataFrame({'Actual': actuals, 'Predictions': predictions})
sub

# Persist the trained model and its weights
model.save_weights("chars74k_weights_5.h5")
model.save('chars74k_5.h5')

# Visualise a single test image and its one-hot label
import numpy as np
first_image = X_test[2]
print(Y_test[2])
first_image = np.array(first_image, dtype='float')
pixels = first_image.reshape((64, 64))

from matplotlib import pyplot as plt
%matplotlib inline
plt.imshow(pixels, cmap='gray')

model.summary()
```
# Positive vs. Negative Sentiment Classification

Here we demonstrate how to explain a sentiment classification model for movie reviews (positive vs. negative sentiment).

```
import transformers
import datasets
import shap
import numpy as np
```

## Load the IMDB movie review dataset

```
dataset = datasets.load_dataset("imdb", split="test")

# shorten the strings to fit into the pipeline model
short_data = [v[:500] for v in dataset["text"][:20]]
```

## Load and run a sentiment analysis pipeline

```
classifier = transformers.pipeline('sentiment-analysis', return_all_scores=True)

classifier(short_data[:2])
```

## Explain the sentiment analysis pipeline

```
# define the explainer
explainer = shap.Explainer(classifier)

# explain the predictions of the pipeline on the first two samples
shap_values = explainer(short_data[:2])

shap.plots.text(shap_values[:,:,"POSITIVE"])
```

## Wrap the pipeline manually

SHAP requires tensor outputs from the classifier, and explanations work best in additive spaces, so we transform the probabilities into logit values (information values instead of probabilities); a short sketch of this transform is given at the end of this notebook.

### Create a TransformersPipeline wrapper

```
pmodel = shap.models.TransformersPipeline(classifier, rescale_to_logits=False)
pmodel(short_data[:2])

pmodel = shap.models.TransformersPipeline(classifier, rescale_to_logits=True)
pmodel(short_data[:2])

explainer2 = shap.Explainer(pmodel)
shap_values2 = explainer2(short_data[:2])
shap.plots.text(shap_values2[:,:,1])
```

### Pass a tokenizer as the masker object

```
explainer2 = shap.Explainer(pmodel, classifier.tokenizer)
shap_values2 = explainer2(short_data[:2])
shap.plots.text(shap_values2[:,:,1])
```

### Build a Text masker explicitly

```
masker = shap.maskers.Text(classifier.tokenizer)
explainer2 = shap.Explainer(pmodel, masker)
shap_values2 = explainer2(short_data[:2])
shap.plots.text(shap_values2[:,:,1])
```

## Explore how the Text masker works

```
masker.shape("I like this movie.")

model_args = masker(np.array([True, True, True, True, True, True, True]), "I like this movie.")
model_args

pmodel(*model_args)

model_args = masker(np.array([True, True, False, False, True, True, True]), "I like this movie.")
model_args

pmodel(*model_args)

masker2 = shap.maskers.Text(classifier.tokenizer, mask_token="...", collapse_mask_token=True)
model_args2 = masker2(np.array([True, True, False, False, True, True, True]), "I like this movie.")
model_args2

pmodel(*model_args2)
```

## Plot summary statistics and bar charts

```
# explain the predictions of the pipeline on the first twenty samples
shap_values = explainer(short_data[:20])

shap.plots.bar(shap_values[0,:,"POSITIVE"])

shap.plots.bar(shap_values[:, :, "POSITIVE"].mean(0))

shap.plots.bar(shap_values[:, :, "POSITIVE"].mean(0), order=shap.Explanation.argsort)
```
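For reference, the `rescale_to_logits=True` option used above rescales the pipeline's probabilities into log-odds, as described in the wrapping section; the tiny sketch below shows that mapping on a few hypothetical probabilities (the numbers are illustrative, not model output).

```
import numpy as np

# Logit (log-odds) transform: p in (0, 1) -> log(p / (1 - p)).
# Logits are additive, which is why the explanations are computed in this space.
p = np.array([0.1, 0.5, 0.9])   # hypothetical class probabilities
logits = np.log(p / (1 - p))
print(logits)                   # roughly [-2.197, 0.0, 2.197]
```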
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/FeatureCollection/clipping.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/clipping.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/clipping.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('Installing geemap ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) import ee import geemap ``` ## Create an interactive map The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function. ``` Map = geemap.Map(center=[40,-100], zoom=4) Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset roi = ee.Geometry.Polygon( [[[-73.99891354682285, 40.74560250077625], [-73.99891354682285, 40.74053023068626], [-73.98749806525547, 40.74053023068626], [-73.98749806525547, 40.74560250077625]]]) fc = ee.FeatureCollection('TIGER/2016/Roads').filterBounds(roi) clipped = fc.map(lambda f: f.intersection(roi)) Map.centerObject(ee.FeatureCollection(roi), 17) Map.addLayer(ee.Image().paint(roi, 0, 2), {'palette': 'yellow'}, 'ROI') # Map.setCenter(-73.9596, 40.7688, 12) Map.addLayer(ee.Image().paint(clipped, 0, 3), {'palette': 'red'}, 'clipped') Map.addLayer(fc, {}, 'Census roads', False) ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
``` import pandas as pd import numpy as np from sklearn.model_selection import train_test_split, KFold, GridSearchCV, cross_val_score from sklearn import metrics from sklearn.metrics import mean_squared_error, r2_score from sklearn import preprocessing from sklearn.preprocessing import StandardScaler scaler = StandardScaler() import xgboost as xgb newTrain = pd.read_csv('Datacsv/New_trainData.csv') avg_delay = pd.read_csv('Datacsv/Avg_Arr_Dep_Delay.csv') carrier_delay = pd.read_csv('Datacsv/avg_op_unique_carrier_delay.csv') flights_test = pd.read_csv('Datacsv/flights_test_data.csv') #Here we are adding Average Arrival Delay relative to the month #Start by changing the date from object to datetime avg_delay['fl_date'] = pd.to_datetime(avg_delay['fl_date'], format='%Y-%m-%d') #Groupby to compare monthly averages in delays #NOTE: Negative values (early arrivals) ARE INCLUDED month_arr = avg_delay.groupby(avg_delay['fl_date'].dt.strftime('%m'))['avg_arr_delay'].mean() month_arr = month_arr.to_frame() month_dep = avg_delay.groupby(avg_delay['fl_date'].dt.strftime('%m'))['avg_dep_delay'].mean() month_dep = month_dep.to_frame() #Resetting the index month_arr = month_arr.reset_index() month_dep = month_dep.reset_index() #Creating 2 copies of fl_date, extracting the month in order to replace the month with its respective Average Arrival and/or Departure Delay newTrain['Month_Avg_Arr_Delay'] = pd.to_datetime(newTrain.fl_date , format="%Y-%m-%d").dt.month newTrain['Month_Avg_Dep_Delay'] = pd.to_datetime(newTrain.fl_date , format="%Y-%m-%d").dt.month #Creating a dictionary containing descriptive statistics with months as keys and their respective average arrival/departure delay as values month_arr_dict = dict(zip(month_arr.fl_date, month_arr.avg_arr_delay)) month_dep_dict = dict(zip(month_dep.fl_date, month_dep.avg_dep_delay)) #Replacing the values of the copied fl_date features with their respective average arrival/departure delays newTrain.replace({'Month_Avg_Arr_Delay': {1: 3.9281951759577782, 2: 6.670705822847316, 3: 2.854581405409215, 4: 4.177950054675787, 5: 6.416833084409337, 6: 10.393455353404956, 7: 8.910038151256863, 8: 8.847961842961464, 9: 1.5852627540712663, 10: 2.7923909776573588, 11: 2.757202900691894, 12: 4.815971225866452}}, inplace=True) newTrain.replace({'Month_Avg_Dep_Delay': {1: 9.82808600285777, 2: 11.689433403570048, 3: 8.45752678421839, 4: 9.375029826488923, 5: 11.283686509030298, 6: 14.629757423341372, 7: 13.770582924983167, 8: 13.279282347021876, 9: 6.900262796528355, 10: 7.502918821697483, 11: 8.049444482526964, 12: 10.62795705344142}}, inplace=True) #Groupby to account for taxi in/out based on carriers which appeared to have the largest cross variance Avg_Taxi_Out_Carrier = newTrain['taxi_out'].groupby(newTrain['op_unique_carrier']).mean().reset_index() Avg_Taxi_In_Carrier = newTrain['taxi_in'].groupby(newTrain['op_unique_carrier']).mean().reset_index() #Create dictionary filled with op_unique_carrier as keys and the mean taxi in and out times as values taxi_out_dict = dict(zip(Avg_Taxi_Out_Carrier.op_unique_carrier, Avg_Taxi_Out_Carrier.taxi_out)) taxi_in_dict = dict(zip(Avg_Taxi_In_Carrier.op_unique_carrier, Avg_Taxi_In_Carrier.taxi_in)) #Creating two copies of op_unique_carrier to replace the values with the carrier's respective average taxi in and out time newTrain['Avg_Taxi_In_Carrier'] = newTrain['op_unique_carrier'] newTrain['Avg_Taxi_Out_Carrier'] = newTrain['op_unique_carrier'] #Replacing the Carrier codes in copied features with their respective average taxi in 
and out times. newTrain.replace({'Avg_Taxi_In_Carrier': {'9E': 7.360715045754416, '9K': 4.714285714285714, 'AA': 9.445789265313048, 'AS': 8.082283095510885, 'AX': 7.877306903622693, 'B6': 7.36336976806185, 'C5': 8.20173646578141, 'CP': 9.47292817679558, 'DL': 7.542487551418056, 'EM': 4.005050505050505, 'EV': 8.146282587705182, 'F9': 10.15011596036264, 'G4': 6.785416666666666, 'G7': 7.6468788249694, 'HA': 7.200770960488275, 'KS': 3.617021276595745, 'MQ': 8.747318339100346, 'NK': 9.849809617825413, 'OH': 8.452057416267943, 'OO': 7.693122041031036, 'PT': 8.16294088425236, 'QX': 5.72971114167813, 'UA': 7.847001223990208, 'VX': 8.774086378737541, 'WN': 5.293501334008452, 'YV': 7.493231100994369, 'YX': 8.656821963394343, 'ZW': 8.605810234541577}}, inplace=True) newTrain.replace({'Avg_Taxi_Out_Carrier': {'9E': 21.49329644605235, '9K': 8.785714285714286, 'AA': 18.694389457609862, 'AS': 18.991042599729195, 'AX': 20.173615857826384, 'B6': 17.75419888029859, 'C5': 24.258426966292134, 'CP': 18.9292817679558, 'DL': 17.24063650140723, 'EM': 8.146464646464647, 'EV': 20.229320888316703, 'F9': 16.60278304870335, 'G4': 13.095052083333334, 'G7': 19.86689106487148, 'HA': 11.959524574365563, 'KS': 5.872340425531915, 'MQ': 18.889359861591696, 'NK': 15.177690029615006, 'OH': 17.736363636363638, 'OO': 19.763907154129406, 'PT': 20.783904619970194, 'QX': 13.661393856029344, 'UA': 19.814797619550077, 'VX': 21.036544850498338, 'WN': 12.319694638649244, 'YV': 17.57553612076195, 'YX': 21.11281198003328, 'ZW': 19.840618336886994}}, inplace=True) #Create 4 copies of origin_city_name feature to replace the current values with their respective longtitude and latitude values newTrain['originLat'] = newTrain['origin_city_name'] newTrain['originLong'] = newTrain['origin_city_name'] newTrain['destLat'] = newTrain['dest_city_name'] newTrain['destLong'] = newTrain['dest_city_name'] #Replacing the City names with their longitude and latitude values #Geopy (from geopy.geocoders import Nominatim) was used in the aggregation of these values, but some had to manually encoded due to API call limits newTrain.replace({'originLat': {'Aberdeen, SD': 45.4649805, 'Abilene, TX': 32.44645, 'Adak Island, AK': 51.7961654, 'Aguadilla, PR': 18.4274359, 'Akron, OH': 41.083064, 'Albany, GA': 42.7439143, 'Albany, NY': 42.6511674, 'Albuquerque, NM': 35.0841034, 'Alexandria, LA': 31.199004, 'Allentown/Bethlehem/Easton, PA': 40.651163100000005, 'Alpena, MI': 45.0176181, 'Amarillo, TX': 35.2072185, 'Anchorage, AK': 61.2163129, 'Appleton, WI': 44.2611337, 'Arcata/Eureka, CA': 40.8033073, 'Asheville, NC': 35.6009498, 'Ashland, WV': 37.4084488, 'Aspen, CO': 39.1911128, 'Atlanta, GA': 33.7489924, 'Atlantic City, NJ': 39.3642852, 'Augusta, GA': 48.3689438, 'Austin, TX': 30.2711286, 'Bakersfield, CA': 35.3738712, 'Baltimore, MD': 39.2908816, 'Bangor, ME': 44.8011821, 'Barrow, AK': 71.387113, 'Baton Rouge, LA': 30.4459596, 'Beaumont/Port Arthur, TX': 29.954324, 'Belleville, IL': 48.8176714, 'Bellingham, WA': 48.7544012, 'Bemidji, MN': 47.4785418, 'Bend/Redmond, OR': 44.2165084, 'Bethel, AK': 60.7922222, 'Billings, MT': 45.7874957, 'Binghamton, NY': 42.096968, 'Birmingham, AL': 52.4459629, 'Bismarck/Mandan, ND': 46.8101709, 'Bloomington/Normal, IL': 40.508752, 'Boise, ID': 43.6166163, 'Boston, MA': 42.3602534, 'Bozeman, MT': 45.6794293, 'Brainerd, MN': 46.3580221, 'Branson, MO': 36.6411357, 'Brownsville, TX': 25.9140256, 'Brunswick, GA': 52.3175903, 'Buffalo, NY': 42.8867166, 'Bullhead City, AZ': 35.1477774, 'Burbank, CA': 34.1816482, 'Burlington, VT': 
44.4761601, 'Butte, MT': 39.6519275, 'Cape Girardeau, MO': 37.3034933, 'Casper, WY': 42.849709, 'Cedar City, UT': 37.6774238, 'Cedar Rapids/Iowa City, IA': 41.9758872, 'Champaign/Urbana, IL': 40.1157948, 'Charleston/Dunbar, WV': 38.3616659, 'Charleston, SC': 32.7876012, 'Charlotte Amalie, VI': 18.341137, 'Charlotte, NC': 35.2272086, 'Charlottesville, VA': 38.0360726, 'Chattanooga, TN': 35.0457219, 'Cheyenne, WY': 41.139981, 'Chicago, IL': 41.8755616, 'Christiansted, VI': 17.7439481, 'Cincinnati, OH': 39.1014537, 'Clarksburg/Fairmont, WV': 39.2798118, 'Cleveland, OH': 41.5051613, 'Cody, WY': 44.5263107, 'Colorado Springs, CO': 38.8339578, 'Columbia, MO': 38.951883, 'Columbia, SC': 34.0007493, 'Columbus, GA': 40.0838862, 'Columbus, MS': 33.4956744, 'Columbus, OH': 39.9622601, 'Concord, NC': 35.4094178, 'Cordova, AK': 60.5439444, 'Corpus Christi, TX': 27.7477253, 'Dallas/Fort Worth, TX': 32.7476308, 'Dallas, TX': 32.7762719, 'Daytona Beach, FL': 29.2108147, 'Dayton, OH': 39.7589478, 'Deadhorse, AK': 70.2006973, 'Del Rio, TX': 29.3655405, 'Denver, CO': 5.3428475, 'Des Moines, IA': 41.5910323, 'Detroit, MI': 42.3315509, 'Devils Lake, ND': 48.112779, 'Dickinson, ND': 46.8791756, 'Dillingham, AK': 59.0397222, 'Dothan, AL': 31.2237434, 'Dubuque, IA': 42.5006217, 'Duluth, MN': 46.7729322, 'Durango, CO': 24.833333, 'Eagle, CO': 39.6161124, 'Eau Claire, WI': 44.811349, 'Elko, NV': 41.1958128, 'Elmira/Corning, NY': 42.1608441, 'El Paso, TX': 31.7754152, 'Erie, PA': 42.1294712, 'Escanaba, MI': 45.7455707, 'Eugene, OR': 44.0505054, 'Evansville, IN': 37.9386712, 'Everett, WA': 47.9673056, 'Fairbanks, AK': 64.837845, 'Fargo, ND': 46.877229, 'Fayetteville, AR': 36.0625843, 'Fayetteville, NC': 35.0525759, 'Flagstaff, AZ': 35.1816047, 'Flint, MI': 43.0161693, 'Florence, SC': 34.1984435, 'Fort Lauderdale, FL': 26.1223084, 'Fort Myers, FL': 26.640628, 'Fort Smith, AR': 35.3872218, 'Fort Wayne, IN': 41.0799898, 'Fresno, CA': 36.7394421, 'Gainesville, FL': 29.6519684, 'Garden City, KS': 37.9716898, 'Gillette, WY': 44.290635, 'Grand Forks, ND': 47.9078244, 'Grand Island, NE': 40.924271, 'Grand Junction, CO': 39.063956, 'Grand Rapids, MI': 42.9632405, 'Great Falls, MT': 47.5048851, 'Green Bay, WI': 44.5126379, 'Greensboro/High Point, NC': 36.0726355, 'Greenville, NC': 35.613224, 'Greer, SC': 34.9381361, 'Guam, TT': 13.486490199999999, 'Gulfport/Biloxi, MS': 30.4900534, 'Gunnison, CO': 38.6476702, 'Gustavus, AK': 58.4128377, 'Hagerstown, MD': 39.6419219, 'Hancock/Houghton, MI': 47.126871, 'Harrisburg, PA': 40.2663107, 'Hartford, CT': 41.7655582, 'Hayden, CO': 47.7725145, 'Hays, KS': 38.8791783, 'Helena, MT': 46.5927425, 'Hibbing, MN': 47.427155, 'Hilo, HI': 19.7073734, 'Hilton Head, SC': 32.3836213, 'Hobbs, NM': 32.707667, 'Honolulu, HI': 21.304547, 'Hoolehua, HI': 21.1590908, 'Houston, TX': 29.7589382, 'Huntsville, AL': 34.729847, 'Hyannis, MA': 41.651513, 'Idaho Falls, ID': 43.4935245, 'Indianapolis, IN': 39.9164009, 'International Falls, MN': 48.601033, 'Islip, NY': 40.7304311, 'Ithaca/Cortland, NY': 42.4415242, 'Jackson/Vicksburg, MS': 32.3520532, 'Jacksonville/Camp Lejeune, NC': 34.7338577, 'Jacksonville, FL': 30.3321838, 'Jackson, WY': 32.2990384, 'Jamestown, ND': 46.910544, 'Joplin, MO': 37.08418, 'Juneau, AK': 58.3019496, 'Kahului, HI': 20.8747708, 'Kalamazoo, MI': 42.291707, 'Kalispell, MT': 48.2022563, 'Kansas City, MO': 39.100105, 'Kapalua, HI': 20.99490395, 'Kearney, NE': 40.4906216, 'Ketchikan, AK': 55.3430696, 'Key West, FL': 24.5625566, 'Killeen, TX': 31.1171441, 'King Salmon, AK': 58.7551615, 
'Knoxville, TN': 35.9603948, 'Kodiak, AK': 57.79, 'Kona, HI': 19.743906, 'Kotzebue, AK': 66.8982057, 'La Crosse, WI': 43.8014053, 'Lafayette, LA': 30.2240897, 'Lake Charles, LA': 30.2265949, 'Lanai, HI': 20.830544099999997, 'Lansing, MI': 42.7337712, 'Laramie, WY': 41.311367, 'Laredo, TX': 27.5199841, 'Las Vegas, NV': 36.1672559, 'Latrobe, PA': 40.317287, 'Lawton/Fort Sill, OK': 34.6172103, 'Lewisburg, WV': 37.8017879, 'Lewiston, ID': 46.4195913, 'Lexington, KY': 38.0464066, 'Liberal, KS': 37.0430812, 'Lihue, HI': 21.9769622, 'Lincoln, NE': 40.8088861, 'Little Rock, AR': 34.7464809, 'Long Beach, CA': 33.7690164, 'Longview, TX': 32.5007031, 'Los Angeles, CA': 34.0536909, 'Louisville, KY': 38.2542376, 'Lubbock, TX': 33.5635206, 'Lynchburg, VA': 37.4137536, 'Madison, WI': 43.074761, 'Mammoth Lakes, CA': 37.6432525, 'Manchester, NH': 42.9956397, 'Manhattan/Ft. Riley, KS': 40.8576918, 'Marquette, MI': 46.4481521, "Martha's Vineyard, MA": 41.3918832, 'Medford, OR': 42.3264181, 'Melbourne, FL': 28.106471, 'Memphis, TN': 35.1490215, 'Meridian, MS': 32.3643098, 'Miami, FL': 25.7741728, 'Midland/Odessa, TX': 31.8329723, 'Milwaukee, WI': 43.0349931, 'Minneapolis, MN': 44.9772995, 'Minot, ND': 48.23251, 'Missoula, MT': 46.8701049, 'Moab, UT': 38.5738096, 'Mobile, AL': 30.6943566, 'Moline, IL': 41.5067003, 'Monroe, LA': 38.2722313, 'Monterey, CA': 36.2231079, 'Montgomery, AL': 32.379952849999995, 'Montrose/Delta, CO': 38.8777609, 'Mosinee, WI': 44.7927298, 'Muskegon, MI': 43.2341813, 'Myrtle Beach, SC': 33.6956461, 'Nantucket, MA': 41.316911450000006, 'Nashville, TN': 36.1622296, 'Newark, NJ': 40.735657, 'New Haven, CT': 41.298434349999994, 'New Orleans, LA': 29.9499323, 'New York, NY': 40.7127281, 'Niagara Falls, NY': 43.08436, 'Nome, AK': 64.4989922, 'Norfolk, VA': 52.56365215, 'North Bend/Coos Bay, OR': 43.4065089, 'North Platte, NE': 41.1238873, 'Oakland, CA': 37.8044557, 'Ogdensburg, NY': 44.694285, 'Ogden, UT': 41.2230048, 'Oklahoma City, OK': 35.4729886, 'Omaha, NE': 41.2587459, 'Ontario, CA': 50.000678, 'Orlando, FL': 28.5421109, 'Owensboro, KY': 37.7742152, 'Paducah, KY': 37.0833893, 'Pago Pago, TT': -14.2754786, 'Palm Springs, CA': 33.772179449999996, 'Panama City, FL': 30.1600827, 'Pasco/Kennewick/Richland, WA': 46.1736015, 'Pellston, MI': 45.552789, 'Pensacola, FL': 30.421309, 'Peoria, IL': 40.6938609, 'Petersburg, AK': 56.8127965, 'Philadelphia, PA': 39.9527237, 'Phoenix, AZ': 33.4484367, 'Pierre, SD': 44.3683644, 'Pittsburgh, PA': 40.4416941, 'Plattsburgh, NY': 44.69282, 'Pocatello, ID': 42.8688613, 'Ponce, PR': 18.0039949, 'Portland, ME': 43.6610277, 'Portland, OR': 45.5202471, 'Portsmouth, NH': 43.0702223, 'Prescott, AZ': 34.5399962, 'Presque Isle/Houlton, ME': 46.661867799999996, 'Providence, RI': 41.8239891, 'Provo, UT': 40.2338438, 'Pueblo, CO': 10.961033, 'Pullman, WA': 46.7304268, 'Punta Gorda, FL': 26.9297836, 'Quincy, IL': 39.9356016, 'Raleigh/Durham, NC': 35.9217839, 'Rapid City, SD': 44.0869329, 'Redding, CA': 40.5863563, 'Reno, NV': 39.5261206, 'Rhinelander, WI': 45.636623, 'Richmond, VA': 49.1977086, 'Roanoke, VA': 37.270973, 'Rochester, MN': 44.0234387, 'Rochester, NY': 43.157285, 'Rockford, IL': 42.2713945, 'Rock Springs, WY': 41.5869225, 'Roswell, NM': 33.3943282, 'Rota, TT': 66.947975, 'Sacramento, CA': 38.5810606, 'Saipan, TT': 7.0698398, 'Salina, KS': 38.8402805, 'Salisbury, MD': 38.3662114, 'Salt Lake City, UT': 40.7596198, 'San Angelo, TX': 31.4648357, 'San Antonio, TX': 29.4246002, 'San Diego, CA': 32.7174202, 'Sanford, FL': 28.8117297, 'San Francisco, CA': 
37.7790262, 'San Jose, CA': 37.3361905, 'San Juan, PR': -25.4206759, 'San Luis Obispo, CA': 35.3540209, 'Santa Ana, CA': 33.7494951, 'Santa Barbara, CA': 34.4221319, 'Santa Fe, NM': 35.6869996, 'Santa Maria, CA': 34.9531295, 'Santa Rosa, CA': 38.4404925, 'Sarasota/Bradenton, FL': 27.499764300000002, 'Sault Ste. Marie, MI': 46.490586, 'Savannah, GA': 9.7568312, 'Scottsbluff, NE': 41.862302, 'Scranton/Wilkes-Barre, PA': 41.33709205, 'Seattle, WA': 47.6038321, 'Shreveport, LA': 32.5221828, 'Sioux City, IA': 42.4966815, 'Sioux Falls, SD': 43.549973, 'Sitka, AK': 57.0524973, 'South Bend, IN': 38.622348, 'Spokane, WA': 47.6571934, 'Springfield, IL': 39.7990175, 'Springfield, MO': 37.2166779, 'State College, PA': 40.7944504, 'Staunton, VA': 38.1357949, 'St. Cloud, MN': 45.5616075, 'St. George, UT': 37.104153, 'Stillwater, OK': 36.1156306, 'St. Louis, MO': 38.6529545, 'Stockton, CA': 37.9577016, 'St. Petersburg, FL': 27.7703796, 'Syracuse, NY': 43.0481221, 'Tallahassee, FL': 30.4380832, 'Tampa, FL': 27.9477595, 'Texarkana, AR': 33.4254684, 'Toledo, OH': 41.6529143, 'Traverse City, MI': 44.7606441, 'Trenton, NJ': 40.2170575, 'Tucson, AZ': 32.2228765, 'Tulsa, OK': 36.1556805, 'Twin Falls, ID': 42.5704456, 'Tyler, TX': 32.3512601, 'Unalaska, AK': 53.8722824, 'Valdosta, GA': 30.8327022, 'Valparaiso, FL': 30.5085309, 'Vernal, UT': 40.4556825, 'Waco, TX': 31.549333, 'Walla Walla, WA': 46.0667277, 'Washington, DC': 38.8949924, 'Waterloo, IA': 42.4979693, 'Watertown, NY': 43.9747838, 'Watertown, SD': 44.899211, 'Wenatchee, WA': 47.4234599, 'West Palm Beach/Palm Beach, FL': 26.715364, 'West Yellowstone, MT': 44.664290199999996, 'White Plains, NY': 41.0339862, 'Wichita Falls, TX': 33.9137085, 'Wichita, KS': 37.6922361, 'Williamsport, PA': 41.2493292, 'Williston, ND': 48.1465457, 'Wilmington, NC': 34.2257282, 'Worcester, MA': 42.2761217, 'Wrangell, AK': 56.4706022, 'Yakima, WA': 46.601557, 'Yakutat, AK': 59.572734499999996, 'Youngstown/Warren, OH': 41.22497, 'Yuma, AZ': 32.665135, 'Bristol/Johnson City/Kingsport, TN': 36.475201, 'Mission/McAllen/Edinburg, TX': 26.203407, 'New Bern/Morehead/Beaufort, NC': 35.108494, 'Hattiesburg/Laurel, MS': 31.467, 'Iron Mountain/Kingsfd, MI': 45.8146, 'Newburgh/Poughkeepsie, NY': 41.66598, 'College Station/Bryan, TX': 30.601389, 'Saginaw/Bay City/Midland, MI': 43.4195, 'Newport News/Williamsburg, VA': 37.131900, 'Harlingen/San Benito, TX': 26.1326, 'Sun Valley/Hailey/Ketchum, ID': 43.504398}}, inplace=True) newTrain.replace({'originLong': {'Aberdeen, SD': -98.487813, 'Abilene, TX': -99.7475905, 'Adak Island, AK': -176.5734916431957, 'Aguadilla, PR': -67.1541343, 'Akron, OH': -81.518485, 'Albany, GA': -73.8016558, 'Albany, NY': -73.754968, 'Albuquerque, NM': -106.6509851, 'Alexandria, LA': 29.894378, 'Allentown/Bethlehem/Easton, PA': -75.44225386838299, 'Alpena, MI': -83.6670019, 'Amarillo, TX': -101.8338246, 'Anchorage, AK': -149.894852, 'Appleton, WI': -88.4067604, 'Arcata/Eureka, CA': -124.1535049, 'Asheville, NC': -82.5540161, 'Ashland, WV': -81.3526017, 'Aspen, CO': -106.8235606, 'Atlanta, GA': -84.3902644, 'Atlantic City, NJ': -74.4229351, 'Augusta, GA': 10.8933327, 'Austin, TX': -97.7436995, 'Bakersfield, CA': -119.0194639, 'Baltimore, MD': -76.610759, 'Bangor, ME': -68.7778138, 'Barrow, AK': -156.4809618, 'Baton Rouge, LA': -91.18738, 'Beaumont/Port Arthur, TX': -93.985972, 'Belleville, IL': 6.0982683, 'Bellingham, WA': -122.4788361, 'Bemidji, MN': -94.8907869, 'Bend/Redmond, OR': -121.2150324, 'Bethel, AK': -161.7558333, 'Billings, MT': -108.49607, 'Binghamton, NY': 
-75.914341, 'Birmingham, AL': -1.8237251, 'Bismarck/Mandan, ND': -100.8363564, 'Bloomington/Normal, IL': -88.9844947, 'Boise, ID': -116.200886, 'Boston, MA': -71.0582912, 'Bozeman, MT': -111.044047, 'Brainerd, MN': -94.2008288, 'Branson, MO': -93.2175285, 'Brownsville, TX': -97.4890856, 'Brunswick, GA': 10.560215, 'Buffalo, NY': -78.8783922, 'Bullhead City, AZ': -114.5682983, 'Burbank, CA': -118.3258554, 'Burlington, VT': -73.212906, 'Butte, MT': -121.5858444, 'Cape Girardeau, MO': -89.5230357, 'Casper, WY': -106.3254928, 'Cedar City, UT': -113.0618277, 'Cedar Rapids/Iowa City, IA': -91.6704053, 'Champaign/Urbana, IL': -88.241194, 'Charleston/Dunbar, WV': -81.7207214, 'Charleston, SC': -79.9402728, 'Charlotte Amalie, VI': -64.932789, 'Charlotte, NC': -80.8430827, 'Charlottesville, VA': -78.49973472559668, 'Chattanooga, TN': -85.3094883, 'Cheyenne, WY': -104.820246, 'Chicago, IL': -87.6244212, 'Christiansted, VI': -64.7079823, 'Cincinnati, OH': -84.5124602, 'Clarksburg/Fairmont, WV': -80.3300893, 'Cleveland, OH': -81.6934446, 'Cody, WY': -109.0563923, 'Colorado Springs, CO': -104.8253485, 'Columbia, MO': -92.3337366, 'Columbia, SC': -81.0343313, 'Columbus, GA': -83.0765043, 'Columbus, MS': -88.4272627, 'Columbus, OH': -83.0007065, 'Concord, NC': -80.5800049, 'Cordova, AK': -145.7589103, 'Corpus Christi, TX': -97.4014129, 'Dallas/Fort Worth, TX': -97.3135971, 'Dallas, TX': -96.7968559, 'Daytona Beach, FL': -81.0228331, 'Dayton, OH': -84.1916069, 'Deadhorse, AK': -148.4598151, 'Del Rio, TX': -100.8946984, 'Denver, CO': -72.3959849, 'Des Moines, IA': -93.6046655, 'Detroit, MI': -83.0466403, 'Devils Lake, ND': -98.86512, 'Dickinson, ND': -102.7896242, 'Dillingham, AK': -158.4575, 'Dothan, AL': -85.3933906, 'Dubuque, IA': -90.6647967, 'Duluth, MN': -92.1251218, 'Durango, CO': -104.833333, 'Eagle, CO': -106.7172844, 'Eau Claire, WI': -91.4984941, 'Elko, NV': -115.3272864, 'Elmira/Corning, NY': -76.89199038453467, 'El Paso, TX': -106.464634, 'Erie, PA': -80.0852695, 'Escanaba, MI': -87.0647434, 'Eugene, OR': -123.0950506, 'Evansville, IN': -87.518899, 'Everett, WA': -122.2013998, 'Fairbanks, AK': -147.716675, 'Fargo, ND': -96.789821, 'Fayetteville, AR': -94.1574328, 'Fayetteville, NC': -78.878292, 'Flagstaff, AZ': -111.6165953319917, 'Flint, MI': -83.6900211, 'Florence, SC': -79.7671658, 'Fort Lauderdale, FL': -80.1433786, 'Fort Myers, FL': -81.8723084, 'Fort Smith, AR': -94.4248983, 'Fort Wayne, IN': -85.1386015, 'Fresno, CA': -119.7848307, 'Gainesville, FL': -82.3249846, 'Garden City, KS': -100.8726618, 'Gillette, WY': -105.501876, 'Grand Forks, ND': -97.0592028, 'Grand Island, NE': -98.338685, 'Grand Junction, CO': -108.5507317, 'Grand Rapids, MI': -85.6678639, 'Great Falls, MT': -111.29189, 'Green Bay, WI': -88.0125794, 'Greensboro/High Point, NC': -79.7919754, 'Greenville, NC': -77.3724593, 'Greer, SC': -82.2272119, 'Guam, TT': 144.80206025352555, 'Gulfport/Biloxi, MS': -89.0290044, 'Gunnison, CO': -107.0603126, 'Gustavus, AK': -135.7375654, 'Hagerstown, MD': -77.7202641, 'Hancock/Houghton, MI': -88.580956, 'Harrisburg, PA': -76.8861122, 'Hartford, CT': -72.69061276146614, 'Hayden, CO': -116.82675375791398, 'Hays, KS': -99.3267702, 'Helena, MT': -112.036277, 'Hibbing, MN': -92.937689, 'Hilo, HI': -155.0815803, 'Hilton Head, SC': -99.748119, 'Hobbs, NM': -103.1311314, 'Honolulu, HI': -157.8556764, 'Hoolehua, HI': -157.09484723911947, 'Houston, TX': -95.3676974, 'Huntsville, AL': -86.5859011, 'Hyannis, MA': -70.2825918, 'Idaho Falls, ID': -112.0400919, 'Indianapolis, IN': -86.0519568269157, 
'International Falls, MN': -93.4105904, 'Islip, NY': -73.2108618, 'Ithaca/Cortland, NY': -76.4580207, 'Jackson/Vicksburg, MS': -90.8730418, 'Jacksonville/Camp Lejeune, NC': -77.4457643, 'Jacksonville, FL': -81.655651, 'Jackson, WY': -90.1847691, 'Jamestown, ND': -98.708436, 'Joplin, MO': -94.51323, 'Juneau, AK': -134.419734, 'Kahului, HI': -156.4529879461996, 'Kalamazoo, MI': -85.5872286, 'Kalispell, MT': -114.316711, 'Kansas City, MO': -94.5781416, 'Kapalua, HI': -156.6562339558182, 'Kearney, NE': -98.9472344, 'Ketchikan, AK': -131.6466819, 'Key West, FL': -81.7724368, 'Killeen, TX': -97.727796, 'King Salmon, AK': -156.5192469940953, 'Knoxville, TN': -83.9210261, 'Kodiak, AK': -152.4072222, 'Kona, HI': -156.0422959812206, 'Kotzebue, AK': -162.5977621, 'La Crosse, WI': -91.2395429, 'Lafayette, LA': -92.0198427, 'Lake Charles, LA': -93.2173759, 'Lanai, HI': -156.9029492509114, 'Lansing, MI': -84.5553805, 'Laramie, WY': -105.591101, 'Laredo, TX': -99.4953764, 'Las Vegas, NV': -115.1485163, 'Latrobe, PA': -79.3840301, 'Lawton/Fort Sill, OK': -98.4037888, 'Lewisburg, WV': -80.4456303, 'Lewiston, ID': -117.0216144, 'Lexington, KY': -84.4970393, 'Liberal, KS': -100.920999, 'Lihue, HI': -159.3687721, 'Lincoln, NE': -96.7077751, 'Little Rock, AR': -92.2895948, 'Long Beach, CA': -118.191604, 'Longview, TX': -94.74049, 'Los Angeles, CA': -118.242766, 'Louisville, KY': -85.759407, 'Lubbock, TX': -101.879336, 'Lynchburg, VA': -79.1422464, 'Madison, WI': -89.3837613, 'Mammoth Lakes, CA': -118.9668509, 'Manchester, NH': -71.4547891, 'Manhattan/Ft. Riley, KS': -73.9222899, 'Marquette, MI': -87.6305899, "Martha's Vineyard, MA": -70.62085427857699, 'Medford, OR': -122.8718605, 'Melbourne, FL': -80.6371513, 'Memphis, TN': -90.0516285, 'Meridian, MS': -88.703656, 'Miami, FL': -80.19362, 'Midland/Odessa, TX': -102.3606957, 'Milwaukee, WI': -87.922497, 'Minneapolis, MN': -93.2654692, 'Minot, ND': -101.296273, 'Missoula, MT': -113.995267, 'Moab, UT': -109.5462146, 'Mobile, AL': -88.0430541, 'Moline, IL': -90.5151342, 'Monroe, LA': -90.1792484, 'Monterey, CA': -121.3877428, 'Montgomery, AL': -86.3107669425032, 'Montrose/Delta, CO': -108.226467, 'Mosinee, WI': -89.7035959, 'Muskegon, MI': -86.2483921, 'Myrtle Beach, SC': -78.8900409, 'Nantucket, MA': -70.14287301528347, 'Nashville, TN': -86.7743531, 'Newark, NJ': -74.1723667, 'New Haven, CT': -72.93102342707913, 'New Orleans, LA': -90.0701156, 'New York, NY': -74.0060152, 'Niagara Falls, NY': -79.0614686, 'Nome, AK': -165.39879944316317, 'Norfolk, VA': 1.2623608080231654, 'North Bend/Coos Bay, OR': -124.2242824, 'North Platte, NE': -100.7654232, 'Oakland, CA': -122.2713563, 'Ogdensburg, NY': -75.486374, 'Ogden, UT': -111.9738429, 'Oklahoma City, OK': -97.5170536, 'Omaha, NE': -95.9383758, 'Ontario, CA': -86.000977, 'Orlando, FL': -81.3790304, 'Owensboro, KY': -87.1133304, 'Paducah, KY': -88.6000478, 'Pago Pago, TT': -170.7048298, 'Palm Springs, CA': -116.49529769785079, 'Panama City, FL': -85.6545729, 'Pasco/Kennewick/Richland, WA': -119.0664001, 'Pellston, MI': -84.783936, 'Pensacola, FL': -87.2169149, 'Peoria, IL': -89.5891008, 'Petersburg, AK': -132.95547, 'Philadelphia, PA': -75.1635262, 'Phoenix, AZ': -112.0741417, 'Pierre, SD': -100.3511367, 'Pittsburgh, PA': -79.9900861, 'Plattsburgh, NY': -73.45562, 'Pocatello, ID': -112.4401098, 'Ponce, PR': -66.6169509, 'Portland, ME': -70.2548596, 'Portland, OR': -122.6741949, 'Portsmouth, NH': -70.7548621, 'Prescott, AZ': -112.4687616, 'Presque Isle/Houlton, ME': -68.01074889363161, 'Providence, RI': -71.4128343, 
'Provo, UT': -111.6585337, 'Pueblo, CO': -74.84053554739253, 'Pullman, WA': -117.173895, 'Punta Gorda, FL': -82.0453664, 'Quincy, IL': -91.4098727, 'Raleigh/Durham, NC': -78.76087880585929, 'Rapid City, SD': -103.2274481, 'Redding, CA': -122.3916754, 'Reno, NV': -119.8126581, 'Rhinelander, WI': -89.412075, 'Richmond, VA': -123.1912406, 'Roanoke, VA': -79.9414313, 'Rochester, MN': -92.4630182, 'Rochester, NY': -77.615214, 'Rockford, IL': -89.093966, 'Rock Springs, WY': -109.2047867, 'Roswell, NM': -104.5229518, 'Rota, TT': 13.553736, 'Sacramento, CA': -121.4938951, 'Saipan, TT': 125.5116649, 'Salina, KS': -97.6114237, 'Salisbury, MD': -75.6008881, 'Salt Lake City, UT': -111.8867975, 'San Angelo, TX': -100.4398442, 'San Antonio, TX': -98.4951405, 'San Diego, CA': -117.1627728, 'Sanford, FL': -81.2680345, 'San Francisco, CA': -122.4199061, 'San Jose, CA': -121.890583, 'San Juan, PR': -49.2687428522959, 'San Luis Obispo, CA': -120.3757163, 'Santa Ana, CA': -117.8732213, 'Santa Barbara, CA': -119.7026673, 'Santa Fe, NM': -105.9377997, 'Santa Maria, CA': -120.4358577, 'Santa Rosa, CA': -122.7141049, 'Sarasota/Bradenton, FL': -82.56510160912002, 'Sault Ste. Marie, MI': -84.359269, 'Savannah, GA': -2.4962, 'Scottsbluff, NE': -103.6627088, 'Scranton/Wilkes-Barre, PA': -75.72257122928625, 'Seattle, WA': -122.3300624, 'Shreveport, LA': -93.7651944, 'Sioux City, IA': -96.4058782, 'Sioux Falls, SD': -96.7003324, 'Sitka, AK': -135.337612, 'South Bend, IN': -105.518825, 'Spokane, WA': -117.4235106, 'Springfield, IL': -89.6439575, 'Springfield, MO': -93.2920373, 'State College, PA': -77.8616386, 'Staunton, VA': -79.08927008810585, 'St. Cloud, MN': -94.1642004, 'St. George, UT': -113.5841313, 'Stillwater, OK': -97.0585717, 'St. Louis, MO': -90.24111656024635, 'Stockton, CA': -121.2907796, 'St. 
Petersburg, FL': -82.6695085, 'Syracuse, NY': -76.1474244, 'Tallahassee, FL': -84.2809332, 'Tampa, FL': -82.458444, 'Texarkana, AR': -94.0430977, 'Toledo, OH': -83.5378173, 'Traverse City, MI': -85.6165301, 'Trenton, NJ': -74.7429463, 'Tucson, AZ': -110.9748477, 'Tulsa, OK': -95.9929113, 'Twin Falls, ID': -114.4602554, 'Tyler, TX': -95.3010624, 'Unalaska, AK': -166.5272262, 'Valdosta, GA': -83.2784851, 'Valparaiso, FL': -86.5027282, 'Vernal, UT': -109.5284741, 'Waco, TX': -97.1466695, 'Walla Walla, WA': -118.3393456, 'Washington, DC': -77.0365581, 'Waterloo, IA': -92.3329637, 'Watertown, NY': -75.9107565, 'Watertown, SD': -97.115289, 'Wenatchee, WA': -120.3103494, 'West Palm Beach/Palm Beach, FL': -80.0532942, 'West Yellowstone, MT': -111.10513722509046, 'White Plains, NY': -73.7629097, 'Wichita Falls, TX': -98.4933873, 'Wichita, KS': -97.3375448, 'Williamsport, PA': -77.0027671, 'Williston, ND': -103.621814, 'Wilmington, NC': -77.9447107, 'Worcester, MA': -71.8058232, 'Wrangell, AK': -132.3829431, 'Yakima, WA': -120.5108421, 'Yakutat, AK': -139.57831243878087, 'Youngstown/Warren, OH': -80.789606, 'Yuma, AZ': -114.47603157249804, 'Bristol/Johnson City/Kingsport, TN': -82.407401, 'Mission/McAllen/Edinburg, TX': -98.230011, 'New Bern/Morehead/Beaufort, NC': -77.044113, 'Hattiesburg/Laurel, MS': -89.3331, 'Iron Mountain/Kingsfd, MI': -88.1186, 'Newburgh/Poughkeepsie, NY': -73.884201, 'College Station/Bryan, TX': -96.314445, 'Saginaw/Bay City/Midland, MI': -83.9508, 'Newport News/Williamsburg, VA': -76.492996, 'Harlingen/San Benito, TX': -97.6311, 'Sun Valley/Hailey/Ketchum, ID': -114.2959976}}, inplace=True) newTrain.replace({'destLat': {'Aberdeen, SD': 45.4649805, 'Abilene, TX': 32.44645, 'Adak Island, AK': 51.7961654, 'Aguadilla, PR': 18.4274359, 'Akron, OH': 41.083064, 'Albany, GA': 42.7439143, 'Albany, NY': 42.6511674, 'Albuquerque, NM': 35.0841034, 'Alexandria, LA': 31.199004, 'Allentown/Bethlehem/Easton, PA': 40.651163100000005, 'Alpena, MI': 45.0176181, 'Amarillo, TX': 35.2072185, 'Anchorage, AK': 61.2163129, 'Appleton, WI': 44.2611337, 'Arcata/Eureka, CA': 40.8033073, 'Asheville, NC': 35.6009498, 'Ashland, WV': 37.4084488, 'Aspen, CO': 39.1911128, 'Atlanta, GA': 33.7489924, 'Atlantic City, NJ': 39.3642852, 'Augusta, GA': 48.3689438, 'Austin, TX': 30.2711286, 'Bakersfield, CA': 35.3738712, 'Baltimore, MD': 39.2908816, 'Bangor, ME': 44.8011821, 'Barrow, AK': 71.387113, 'Baton Rouge, LA': 30.4459596, 'Beaumont/Port Arthur, TX': 29.954324, 'Belleville, IL': 48.8176714, 'Bellingham, WA': 48.7544012, 'Bemidji, MN': 47.4785418, 'Bend/Redmond, OR': 44.2165084, 'Bethel, AK': 60.7922222, 'Billings, MT': 45.7874957, 'Binghamton, NY': 42.096968, 'Birmingham, AL': 52.4459629, 'Bismarck/Mandan, ND': 46.8101709, 'Bloomington/Normal, IL': 40.508752, 'Boise, ID': 43.6166163, 'Boston, MA': 42.3602534, 'Bozeman, MT': 45.6794293, 'Brainerd, MN': 46.3580221, 'Branson, MO': 36.6411357, 'Brownsville, TX': 25.9140256, 'Brunswick, GA': 52.3175903, 'Buffalo, NY': 42.8867166, 'Bullhead City, AZ': 35.1477774, 'Burbank, CA': 34.1816482, 'Burlington, VT': 44.4761601, 'Butte, MT': 39.6519275, 'Cape Girardeau, MO': 37.3034933, 'Casper, WY': 42.849709, 'Cedar City, UT': 37.6774238, 'Cedar Rapids/Iowa City, IA': 41.9758872, 'Champaign/Urbana, IL': 40.1157948, 'Charleston/Dunbar, WV': 38.3616659, 'Charleston, SC': 32.7876012, 'Charlotte Amalie, VI': 18.341137, 'Charlotte, NC': 35.2272086, 'Charlottesville, VA': 38.0360726, 'Chattanooga, TN': 35.0457219, 'Cheyenne, WY': 41.139981, 'Chicago, IL': 41.8755616, 
'Christiansted, VI': 17.7439481, 'Cincinnati, OH': 39.1014537, 'Clarksburg/Fairmont, WV': 39.2798118, 'Cleveland, OH': 41.5051613, 'Cody, WY': 44.5263107, 'Colorado Springs, CO': 38.8339578, 'Columbia, MO': 38.951883, 'Columbia, SC': 34.0007493, 'Columbus, GA': 40.0838862, 'Columbus, MS': 33.4956744, 'Columbus, OH': 39.9622601, 'Concord, NC': 35.4094178, 'Cordova, AK': 60.5439444, 'Corpus Christi, TX': 27.7477253, 'Dallas/Fort Worth, TX': 32.7476308, 'Dallas, TX': 32.7762719, 'Daytona Beach, FL': 29.2108147, 'Dayton, OH': 39.7589478, 'Deadhorse, AK': 70.2006973, 'Del Rio, TX': 29.3655405, 'Denver, CO': 5.3428475, 'Des Moines, IA': 41.5910323, 'Detroit, MI': 42.3315509, 'Devils Lake, ND': 48.112779, 'Dickinson, ND': 46.8791756, 'Dillingham, AK': 59.0397222, 'Dothan, AL': 31.2237434, 'Dubuque, IA': 42.5006217, 'Duluth, MN': 46.7729322, 'Durango, CO': 24.833333, 'Eagle, CO': 39.6161124, 'Eau Claire, WI': 44.811349, 'Elko, NV': 41.1958128, 'Elmira/Corning, NY': 42.1608441, 'El Paso, TX': 31.7754152, 'Erie, PA': 42.1294712, 'Escanaba, MI': 45.7455707, 'Eugene, OR': 44.0505054, 'Evansville, IN': 37.9386712, 'Everett, WA': 47.9673056, 'Fairbanks, AK': 64.837845, 'Fargo, ND': 46.877229, 'Fayetteville, AR': 36.0625843, 'Fayetteville, NC': 35.0525759, 'Flagstaff, AZ': 35.1816047, 'Flint, MI': 43.0161693, 'Florence, SC': 34.1984435, 'Fort Lauderdale, FL': 26.1223084, 'Fort Myers, FL': 26.640628, 'Fort Smith, AR': 35.3872218, 'Fort Wayne, IN': 41.0799898, 'Fresno, CA': 36.7394421, 'Gainesville, FL': 29.6519684, 'Garden City, KS': 37.9716898, 'Gillette, WY': 44.290635, 'Grand Forks, ND': 47.9078244, 'Grand Island, NE': 40.924271, 'Grand Junction, CO': 39.063956, 'Grand Rapids, MI': 42.9632405, 'Great Falls, MT': 47.5048851, 'Green Bay, WI': 44.5126379, 'Greensboro/High Point, NC': 36.0726355, 'Greenville, NC': 35.613224, 'Greer, SC': 34.9381361, 'Guam, TT': 13.486490199999999, 'Gulfport/Biloxi, MS': 30.4900534, 'Gunnison, CO': 38.6476702, 'Gustavus, AK': 58.4128377, 'Hagerstown, MD': 39.6419219, 'Hancock/Houghton, MI': 47.126871, 'Harrisburg, PA': 40.2663107, 'Hartford, CT': 41.7655582, 'Hayden, CO': 47.7725145, 'Hays, KS': 38.8791783, 'Helena, MT': 46.5927425, 'Hibbing, MN': 47.427155, 'Hilo, HI': 19.7073734, 'Hilton Head, SC': 32.3836213, 'Hobbs, NM': 32.707667, 'Honolulu, HI': 21.304547, 'Hoolehua, HI': 21.1590908, 'Houston, TX': 29.7589382, 'Huntsville, AL': 34.729847, 'Hyannis, MA': 41.651513, 'Idaho Falls, ID': 43.4935245, 'Indianapolis, IN': 39.9164009, 'International Falls, MN': 48.601033, 'Islip, NY': 40.7304311, 'Ithaca/Cortland, NY': 42.4415242, 'Jackson/Vicksburg, MS': 32.3520532, 'Jacksonville/Camp Lejeune, NC': 34.7338577, 'Jacksonville, FL': 30.3321838, 'Jackson, WY': 32.2990384, 'Jamestown, ND': 46.910544, 'Joplin, MO': 37.08418, 'Juneau, AK': 58.3019496, 'Kahului, HI': 20.8747708, 'Kalamazoo, MI': 42.291707, 'Kalispell, MT': 48.2022563, 'Kansas City, MO': 39.100105, 'Kapalua, HI': 20.99490395, 'Kearney, NE': 40.4906216, 'Ketchikan, AK': 55.3430696, 'Key West, FL': 24.5625566, 'Killeen, TX': 31.1171441, 'King Salmon, AK': 58.7551615, 'Knoxville, TN': 35.9603948, 'Kodiak, AK': 57.79, 'Kona, HI': 19.743906, 'Kotzebue, AK': 66.8982057, 'La Crosse, WI': 43.8014053, 'Lafayette, LA': 30.2240897, 'Lake Charles, LA': 30.2265949, 'Lanai, HI': 20.830544099999997, 'Lansing, MI': 42.7337712, 'Laramie, WY': 41.311367, 'Laredo, TX': 27.5199841, 'Las Vegas, NV': 36.1672559, 'Latrobe, PA': 40.317287, 'Lawton/Fort Sill, OK': 34.6172103, 'Lewisburg, WV': 37.8017879, 'Lewiston, ID': 46.4195913, 
'Lexington, KY': 38.0464066, 'Liberal, KS': 37.0430812, 'Lihue, HI': 21.9769622, 'Lincoln, NE': 40.8088861, 'Little Rock, AR': 34.7464809, 'Long Beach, CA': 33.7690164, 'Longview, TX': 32.5007031, 'Los Angeles, CA': 34.0536909, 'Louisville, KY': 38.2542376, 'Lubbock, TX': 33.5635206, 'Lynchburg, VA': 37.4137536, 'Madison, WI': 43.074761, 'Mammoth Lakes, CA': 37.6432525, 'Manchester, NH': 42.9956397, 'Manhattan/Ft. Riley, KS': 40.8576918, 'Marquette, MI': 46.4481521, "Martha's Vineyard, MA": 41.3918832, 'Medford, OR': 42.3264181, 'Melbourne, FL': 28.106471, 'Memphis, TN': 35.1490215, 'Meridian, MS': 32.3643098, 'Miami, FL': 25.7741728, 'Midland/Odessa, TX': 31.8329723, 'Milwaukee, WI': 43.0349931, 'Minneapolis, MN': 44.9772995, 'Minot, ND': 48.23251, 'Missoula, MT': 46.8701049, 'Moab, UT': 38.5738096, 'Mobile, AL': 30.6943566, 'Moline, IL': 41.5067003, 'Monroe, LA': 38.2722313, 'Monterey, CA': 36.2231079, 'Montgomery, AL': 32.379952849999995, 'Montrose/Delta, CO': 38.8777609, 'Mosinee, WI': 44.7927298, 'Muskegon, MI': 43.2341813, 'Myrtle Beach, SC': 33.6956461, 'Nantucket, MA': 41.316911450000006, 'Nashville, TN': 36.1622296, 'Newark, NJ': 40.735657, 'New Haven, CT': 41.298434349999994, 'New Orleans, LA': 29.9499323, 'New York, NY': 40.7127281, 'Niagara Falls, NY': 43.08436, 'Nome, AK': 64.4989922, 'Norfolk, VA': 52.56365215, 'North Bend/Coos Bay, OR': 43.4065089, 'North Platte, NE': 41.1238873, 'Oakland, CA': 37.8044557, 'Ogdensburg, NY': 44.694285, 'Ogden, UT': 41.2230048, 'Oklahoma City, OK': 35.4729886, 'Omaha, NE': 41.2587459, 'Ontario, CA': 50.000678, 'Orlando, FL': 28.5421109, 'Owensboro, KY': 37.7742152, 'Paducah, KY': 37.0833893, 'Pago Pago, TT': -14.2754786, 'Palm Springs, CA': 33.772179449999996, 'Panama City, FL': 30.1600827, 'Pasco/Kennewick/Richland, WA': 46.1736015, 'Pellston, MI': 45.552789, 'Pensacola, FL': 30.421309, 'Peoria, IL': 40.6938609, 'Petersburg, AK': 56.8127965, 'Philadelphia, PA': 39.9527237, 'Phoenix, AZ': 33.4484367, 'Pierre, SD': 44.3683644, 'Pittsburgh, PA': 40.4416941, 'Plattsburgh, NY': 44.69282, 'Pocatello, ID': 42.8688613, 'Ponce, PR': 18.0039949, 'Portland, ME': 43.6610277, 'Portland, OR': 45.5202471, 'Portsmouth, NH': 43.0702223, 'Prescott, AZ': 34.5399962, 'Presque Isle/Houlton, ME': 46.661867799999996, 'Providence, RI': 41.8239891, 'Provo, UT': 40.2338438, 'Pueblo, CO': 10.961033, 'Pullman, WA': 46.7304268, 'Punta Gorda, FL': 26.9297836, 'Quincy, IL': 39.9356016, 'Raleigh/Durham, NC': 35.9217839, 'Rapid City, SD': 44.0869329, 'Redding, CA': 40.5863563, 'Reno, NV': 39.5261206, 'Rhinelander, WI': 45.636623, 'Richmond, VA': 49.1977086, 'Roanoke, VA': 37.270973, 'Rochester, MN': 44.0234387, 'Rochester, NY': 43.157285, 'Rockford, IL': 42.2713945, 'Rock Springs, WY': 41.5869225, 'Roswell, NM': 33.3943282, 'Rota, TT': 66.947975, 'Sacramento, CA': 38.5810606, 'Saipan, TT': 7.0698398, 'Salina, KS': 38.8402805, 'Salisbury, MD': 38.3662114, 'Salt Lake City, UT': 40.7596198, 'San Angelo, TX': 31.4648357, 'San Antonio, TX': 29.4246002, 'San Diego, CA': 32.7174202, 'Sanford, FL': 28.8117297, 'San Francisco, CA': 37.7790262, 'San Jose, CA': 37.3361905, 'San Juan, PR': -25.4206759, 'San Luis Obispo, CA': 35.3540209, 'Santa Ana, CA': 33.7494951, 'Santa Barbara, CA': 34.4221319, 'Santa Fe, NM': 35.6869996, 'Santa Maria, CA': 34.9531295, 'Santa Rosa, CA': 38.4404925, 'Sarasota/Bradenton, FL': 27.499764300000002, 'Sault Ste. 
Marie, MI': 46.490586, 'Savannah, GA': 9.7568312, 'Scottsbluff, NE': 41.862302, 'Scranton/Wilkes-Barre, PA': 41.33709205, 'Seattle, WA': 47.6038321, 'Shreveport, LA': 32.5221828, 'Sioux City, IA': 42.4966815, 'Sioux Falls, SD': 43.549973, 'Sitka, AK': 57.0524973, 'South Bend, IN': 38.622348, 'Spokane, WA': 47.6571934, 'Springfield, IL': 39.7990175, 'Springfield, MO': 37.2166779, 'State College, PA': 40.7944504, 'Staunton, VA': 38.1357949, 'St. Cloud, MN': 45.5616075, 'St. George, UT': 37.104153, 'Stillwater, OK': 36.1156306, 'St. Louis, MO': 38.6529545, 'Stockton, CA': 37.9577016, 'St. Petersburg, FL': 27.7703796, 'Syracuse, NY': 43.0481221, 'Tallahassee, FL': 30.4380832, 'Tampa, FL': 27.9477595, 'Texarkana, AR': 33.4254684, 'Toledo, OH': 41.6529143, 'Traverse City, MI': 44.7606441, 'Trenton, NJ': 40.2170575, 'Tucson, AZ': 32.2228765, 'Tulsa, OK': 36.1556805, 'Twin Falls, ID': 42.5704456, 'Tyler, TX': 32.3512601, 'Unalaska, AK': 53.8722824, 'Valdosta, GA': 30.8327022, 'Valparaiso, FL': 30.5085309, 'Vernal, UT': 40.4556825, 'Waco, TX': 31.549333, 'Walla Walla, WA': 46.0667277, 'Washington, DC': 38.8949924, 'Waterloo, IA': 42.4979693, 'Watertown, NY': 43.9747838, 'Watertown, SD': 44.899211, 'Wenatchee, WA': 47.4234599, 'West Palm Beach/Palm Beach, FL': 26.715364, 'West Yellowstone, MT': 44.664290199999996, 'White Plains, NY': 41.0339862, 'Wichita Falls, TX': 33.9137085, 'Wichita, KS': 37.6922361, 'Williamsport, PA': 41.2493292, 'Williston, ND': 48.1465457, 'Wilmington, NC': 34.2257282, 'Worcester, MA': 42.2761217, 'Wrangell, AK': 56.4706022, 'Yakima, WA': 46.601557, 'Yakutat, AK': 59.572734499999996, 'Youngstown/Warren, OH': 41.22497, 'Yuma, AZ': 32.665135, 'Bristol/Johnson City/Kingsport, TN': 36.475201, 'Mission/McAllen/Edinburg, TX': 26.203407, 'New Bern/Morehead/Beaufort, NC': 35.108494, 'Hattiesburg/Laurel, MS': 31.467, 'Iron Mountain/Kingsfd, MI': 45.8146,'Newburgh/Poughkeepsie, NY': 41.66598, 'College Station/Bryan, TX': 30.601389, 'Saginaw/Bay City/Midland, MI': 43.4195, 'Newport News/Williamsburg, VA': 37.131900, 'Harlingen/San Benito, TX': 26.1326, 'Sun Valley/Hailey/Ketchum, ID': 43.504398}}, inplace=True) newTrain.replace({'destLong': {'Aberdeen, SD': -98.487813, 'Abilene, TX': -99.7475905, 'Adak Island, AK': -176.5734916431957, 'Aguadilla, PR': -67.1541343, 'Akron, OH': -81.518485, 'Albany, GA': -73.8016558, 'Albany, NY': -73.754968, 'Albuquerque, NM': -106.6509851, 'Alexandria, LA': 29.894378, 'Allentown/Bethlehem/Easton, PA': -75.44225386838299, 'Alpena, MI': -83.6670019, 'Amarillo, TX': -101.8338246, 'Anchorage, AK': -149.894852, 'Appleton, WI': -88.4067604, 'Arcata/Eureka, CA': -124.1535049, 'Asheville, NC': -82.5540161, 'Ashland, WV': -81.3526017, 'Aspen, CO': -106.8235606, 'Atlanta, GA': -84.3902644, 'Atlantic City, NJ': -74.4229351, 'Augusta, GA': 10.8933327, 'Austin, TX': -97.7436995, 'Bakersfield, CA': -119.0194639, 'Baltimore, MD': -76.610759, 'Bangor, ME': -68.7778138, 'Barrow, AK': -156.4809618, 'Baton Rouge, LA': -91.18738, 'Beaumont/Port Arthur, TX': -93.985972, 'Belleville, IL': 6.0982683, 'Bellingham, WA': -122.4788361, 'Bemidji, MN': -94.8907869, 'Bend/Redmond, OR': -121.2150324, 'Bethel, AK': -161.7558333, 'Billings, MT': -108.49607, 'Binghamton, NY': -75.914341, 'Birmingham, AL': -1.8237251, 'Bismarck/Mandan, ND': -100.8363564, 'Bloomington/Normal, IL': -88.9844947, 'Boise, ID': -116.200886, 'Boston, MA': -71.0582912, 'Bozeman, MT': -111.044047, 'Brainerd, MN': -94.2008288, 'Branson, MO': -93.2175285, 'Brownsville, TX': -97.4890856, 'Brunswick, GA': 10.560215, 
'Buffalo, NY': -78.8783922, 'Bullhead City, AZ': -114.5682983, 'Burbank, CA': -118.3258554, 'Burlington, VT': -73.212906, 'Butte, MT': -121.5858444, 'Cape Girardeau, MO': -89.5230357, 'Casper, WY': -106.3254928, 'Cedar City, UT': -113.0618277, 'Cedar Rapids/Iowa City, IA': -91.6704053, 'Champaign/Urbana, IL': -88.241194, 'Charleston/Dunbar, WV': -81.7207214, 'Charleston, SC': -79.9402728, 'Charlotte Amalie, VI': -64.932789, 'Charlotte, NC': -80.8430827, 'Charlottesville, VA': -78.49973472559668, 'Chattanooga, TN': -85.3094883, 'Cheyenne, WY': -104.820246, 'Chicago, IL': -87.6244212, 'Christiansted, VI': -64.7079823, 'Cincinnati, OH': -84.5124602, 'Clarksburg/Fairmont, WV': -80.3300893, 'Cleveland, OH': -81.6934446, 'Cody, WY': -109.0563923, 'Colorado Springs, CO': -104.8253485, 'Columbia, MO': -92.3337366, 'Columbia, SC': -81.0343313, 'Columbus, GA': -83.0765043, 'Columbus, MS': -88.4272627, 'Columbus, OH': -83.0007065, 'Concord, NC': -80.5800049, 'Cordova, AK': -145.7589103, 'Corpus Christi, TX': -97.4014129, 'Dallas/Fort Worth, TX': -97.3135971, 'Dallas, TX': -96.7968559, 'Daytona Beach, FL': -81.0228331, 'Dayton, OH': -84.1916069, 'Deadhorse, AK': -148.4598151, 'Del Rio, TX': -100.8946984, 'Denver, CO': -72.3959849, 'Des Moines, IA': -93.6046655, 'Detroit, MI': -83.0466403, 'Devils Lake, ND': -98.86512, 'Dickinson, ND': -102.7896242, 'Dillingham, AK': -158.4575, 'Dothan, AL': -85.3933906, 'Dubuque, IA': -90.6647967, 'Duluth, MN': -92.1251218, 'Durango, CO': -104.833333, 'Eagle, CO': -106.7172844, 'Eau Claire, WI': -91.4984941, 'Elko, NV': -115.3272864, 'Elmira/Corning, NY': -76.89199038453467, 'El Paso, TX': -106.464634, 'Erie, PA': -80.0852695, 'Escanaba, MI': -87.0647434, 'Eugene, OR': -123.0950506, 'Evansville, IN': -87.518899, 'Everett, WA': -122.2013998, 'Fairbanks, AK': -147.716675, 'Fargo, ND': -96.789821, 'Fayetteville, AR': -94.1574328, 'Fayetteville, NC': -78.878292, 'Flagstaff, AZ': -111.6165953319917, 'Flint, MI': -83.6900211, 'Florence, SC': -79.7671658, 'Fort Lauderdale, FL': -80.1433786, 'Fort Myers, FL': -81.8723084, 'Fort Smith, AR': -94.4248983, 'Fort Wayne, IN': -85.1386015, 'Fresno, CA': -119.7848307, 'Gainesville, FL': -82.3249846, 'Garden City, KS': -100.8726618, 'Gillette, WY': -105.501876, 'Grand Forks, ND': -97.0592028, 'Grand Island, NE': -98.338685, 'Grand Junction, CO': -108.5507317, 'Grand Rapids, MI': -85.6678639, 'Great Falls, MT': -111.29189, 'Green Bay, WI': -88.0125794, 'Greensboro/High Point, NC': -79.7919754, 'Greenville, NC': -77.3724593, 'Greer, SC': -82.2272119, 'Guam, TT': 144.80206025352555, 'Gulfport/Biloxi, MS': -89.0290044, 'Gunnison, CO': -107.0603126, 'Gustavus, AK': -135.7375654, 'Hagerstown, MD': -77.7202641, 'Hancock/Houghton, MI': -88.580956, 'Harrisburg, PA': -76.8861122, 'Hartford, CT': -72.69061276146614, 'Hayden, CO': -116.82675375791398, 'Hays, KS': -99.3267702, 'Helena, MT': -112.036277, 'Hibbing, MN': -92.937689, 'Hilo, HI': -155.0815803, 'Hilton Head, SC': -99.748119, 'Hobbs, NM': -103.1311314, 'Honolulu, HI': -157.8556764, 'Hoolehua, HI': -157.09484723911947, 'Houston, TX': -95.3676974, 'Huntsville, AL': -86.5859011, 'Hyannis, MA': -70.2825918, 'Idaho Falls, ID': -112.0400919, 'Indianapolis, IN': -86.0519568269157, 'International Falls, MN': -93.4105904, 'Islip, NY': -73.2108618, 'Ithaca/Cortland, NY': -76.4580207, 'Jackson/Vicksburg, MS': -90.8730418, 'Jacksonville/Camp Lejeune, NC': -77.4457643, 'Jacksonville, FL': -81.655651, 'Jackson, WY': -90.1847691, 'Jamestown, ND': -98.708436, 'Joplin, MO': -94.51323, 'Juneau, AK': 
-134.419734, 'Kahului, HI': -156.4529879461996, 'Kalamazoo, MI': -85.5872286, 'Kalispell, MT': -114.316711, 'Kansas City, MO': -94.5781416, 'Kapalua, HI': -156.6562339558182, 'Kearney, NE': -98.9472344, 'Ketchikan, AK': -131.6466819, 'Key West, FL': -81.7724368, 'Killeen, TX': -97.727796, 'King Salmon, AK': -156.5192469940953, 'Knoxville, TN': -83.9210261, 'Kodiak, AK': -152.4072222, 'Kona, HI': -156.0422959812206, 'Kotzebue, AK': -162.5977621, 'La Crosse, WI': -91.2395429, 'Lafayette, LA': -92.0198427, 'Lake Charles, LA': -93.2173759, 'Lanai, HI': -156.9029492509114, 'Lansing, MI': -84.5553805, 'Laramie, WY': -105.591101, 'Laredo, TX': -99.4953764, 'Las Vegas, NV': -115.1485163, 'Latrobe, PA': -79.3840301, 'Lawton/Fort Sill, OK': -98.4037888, 'Lewisburg, WV': -80.4456303, 'Lewiston, ID': -117.0216144, 'Lexington, KY': -84.4970393, 'Liberal, KS': -100.920999, 'Lihue, HI': -159.3687721, 'Lincoln, NE': -96.7077751, 'Little Rock, AR': -92.2895948, 'Long Beach, CA': -118.191604, 'Longview, TX': -94.74049, 'Los Angeles, CA': -118.242766, 'Louisville, KY': -85.759407, 'Lubbock, TX': -101.879336, 'Lynchburg, VA': -79.1422464, 'Madison, WI': -89.3837613, 'Mammoth Lakes, CA': -118.9668509, 'Manchester, NH': -71.4547891, 'Manhattan/Ft. Riley, KS': -73.9222899, 'Marquette, MI': -87.6305899, "Martha's Vineyard, MA": -70.62085427857699, 'Medford, OR': -122.8718605, 'Melbourne, FL': -80.6371513, 'Memphis, TN': -90.0516285, 'Meridian, MS': -88.703656, 'Miami, FL': -80.19362, 'Midland/Odessa, TX': -102.3606957, 'Milwaukee, WI': -87.922497, 'Minneapolis, MN': -93.2654692, 'Minot, ND': -101.296273, 'Missoula, MT': -113.995267, 'Moab, UT': -109.5462146, 'Mobile, AL': -88.0430541, 'Moline, IL': -90.5151342, 'Monroe, LA': -90.1792484, 'Monterey, CA': -121.3877428, 'Montgomery, AL': -86.3107669425032, 'Montrose/Delta, CO': -108.226467, 'Mosinee, WI': -89.7035959, 'Muskegon, MI': -86.2483921, 'Myrtle Beach, SC': -78.8900409, 'Nantucket, MA': -70.14287301528347, 'Nashville, TN': -86.7743531, 'Newark, NJ': -74.1723667, 'New Haven, CT': -72.93102342707913, 'New Orleans, LA': -90.0701156, 'New York, NY': -74.0060152, 'Niagara Falls, NY': -79.0614686, 'Nome, AK': -165.39879944316317, 'Norfolk, VA': 1.2623608080231654, 'North Bend/Coos Bay, OR': -124.2242824, 'North Platte, NE': -100.7654232, 'Oakland, CA': -122.2713563, 'Ogdensburg, NY': -75.486374, 'Ogden, UT': -111.9738429, 'Oklahoma City, OK': -97.5170536, 'Omaha, NE': -95.9383758, 'Ontario, CA': -86.000977, 'Orlando, FL': -81.3790304, 'Owensboro, KY': -87.1133304, 'Paducah, KY': -88.6000478, 'Pago Pago, TT': -170.7048298, 'Palm Springs, CA': -116.49529769785079, 'Panama City, FL': -85.6545729, 'Pasco/Kennewick/Richland, WA': -119.0664001, 'Pellston, MI': -84.783936, 'Pensacola, FL': -87.2169149, 'Peoria, IL': -89.5891008, 'Petersburg, AK': -132.95547, 'Philadelphia, PA': -75.1635262, 'Phoenix, AZ': -112.0741417, 'Pierre, SD': -100.3511367, 'Pittsburgh, PA': -79.9900861, 'Plattsburgh, NY': -73.45562, 'Pocatello, ID': -112.4401098, 'Ponce, PR': -66.6169509, 'Portland, ME': -70.2548596, 'Portland, OR': -122.6741949, 'Portsmouth, NH': -70.7548621, 'Prescott, AZ': -112.4687616, 'Presque Isle/Houlton, ME': -68.01074889363161, 'Providence, RI': -71.4128343, 'Provo, UT': -111.6585337, 'Pueblo, CO': -74.84053554739253, 'Pullman, WA': -117.173895, 'Punta Gorda, FL': -82.0453664, 'Quincy, IL': -91.4098727, 'Raleigh/Durham, NC': -78.76087880585929, 'Rapid City, SD': -103.2274481, 'Redding, CA': -122.3916754, 'Reno, NV': -119.8126581, 'Rhinelander, WI': -89.412075, 'Richmond, 
VA': -123.1912406, 'Roanoke, VA': -79.9414313, 'Rochester, MN': -92.4630182, 'Rochester, NY': -77.615214, 'Rockford, IL': -89.093966, 'Rock Springs, WY': -109.2047867, 'Roswell, NM': -104.5229518, 'Rota, TT': 13.553736, 'Sacramento, CA': -121.4938951, 'Saipan, TT': 125.5116649, 'Salina, KS': -97.6114237, 'Salisbury, MD': -75.6008881, 'Salt Lake City, UT': -111.8867975, 'San Angelo, TX': -100.4398442, 'San Antonio, TX': -98.4951405, 'San Diego, CA': -117.1627728, 'Sanford, FL': -81.2680345, 'San Francisco, CA': -122.4199061, 'San Jose, CA': -121.890583, 'San Juan, PR': -49.2687428522959, 'San Luis Obispo, CA': -120.3757163, 'Santa Ana, CA': -117.8732213, 'Santa Barbara, CA': -119.7026673, 'Santa Fe, NM': -105.9377997, 'Santa Maria, CA': -120.4358577, 'Santa Rosa, CA': -122.7141049, 'Sarasota/Bradenton, FL': -82.56510160912002, 'Sault Ste. Marie, MI': -84.359269, 'Savannah, GA': -2.4962, 'Scottsbluff, NE': -103.6627088, 'Scranton/Wilkes-Barre, PA': -75.72257122928625, 'Seattle, WA': -122.3300624, 'Shreveport, LA': -93.7651944, 'Sioux City, IA': -96.4058782, 'Sioux Falls, SD': -96.7003324, 'Sitka, AK': -135.337612, 'South Bend, IN': -105.518825, 'Spokane, WA': -117.4235106, 'Springfield, IL': -89.6439575, 'Springfield, MO': -93.2920373, 'State College, PA': -77.8616386, 'Staunton, VA': -79.08927008810585, 'St. Cloud, MN': -94.1642004, 'St. George, UT': -113.5841313, 'Stillwater, OK': -97.0585717, 'St. Louis, MO': -90.24111656024635, 'Stockton, CA': -121.2907796, 'St. Petersburg, FL': -82.6695085, 'Syracuse, NY': -76.1474244, 'Tallahassee, FL': -84.2809332, 'Tampa, FL': -82.458444, 'Texarkana, AR': -94.0430977, 'Toledo, OH': -83.5378173, 'Traverse City, MI': -85.6165301, 'Trenton, NJ': -74.7429463, 'Tucson, AZ': -110.9748477, 'Tulsa, OK': -95.9929113, 'Twin Falls, ID': -114.4602554, 'Tyler, TX': -95.3010624, 'Unalaska, AK': -166.5272262, 'Valdosta, GA': -83.2784851, 'Valparaiso, FL': -86.5027282, 'Vernal, UT': -109.5284741, 'Waco, TX': -97.1466695, 'Walla Walla, WA': -118.3393456, 'Washington, DC': -77.0365581, 'Waterloo, IA': -92.3329637, 'Watertown, NY': -75.9107565, 'Watertown, SD': -97.115289, 'Wenatchee, WA': -120.3103494, 'West Palm Beach/Palm Beach, FL': -80.0532942, 'West Yellowstone, MT': -111.10513722509046, 'White Plains, NY': -73.7629097, 'Wichita Falls, TX': -98.4933873, 'Wichita, KS': -97.3375448, 'Williamsport, PA': -77.0027671, 'Williston, ND': -103.621814, 'Wilmington, NC': -77.9447107, 'Worcester, MA': -71.8058232, 'Wrangell, AK': -132.3829431, 'Yakima, WA': -120.5108421, 'Yakutat, AK': -139.57831243878087, 'Youngstown/Warren, OH': -80.789606, 'Yuma, AZ': -114.47603157249804, 'Bristol/Johnson City/Kingsport, TN': -82.407401, 'Mission/McAllen/Edinburg, TX': -98.230011, 'New Bern/Morehead/Beaufort, NC': -77.044113, 'Hattiesburg/Laurel, MS': -89.3331, 'Iron Mountain/Kingsfd, MI': -88.1186,'Newburgh/Poughkeepsie, NY': -73.884201, 'College Station/Bryan, TX': -96.314445, 'Saginaw/Bay City/Midland, MI': -83.9508, 'Newport News/Williamsburg, VA': -76.492996, 'Harlingen/San Benito, TX': -97.6311, 'Sun Valley/Hailey/Ketchum, ID': -114.2959976}}, inplace=True) #Converting the planned departure time from 24 hours to a more catagorical variable, which captures an newTrain['crs_dep_time'] = (newTrain['crs_dep_time']/100).astype(int) newTrain['crs_arr_time'] = (newTrain['crs_arr_time']/100).astype(int) #Convert fl_date to Datetime, then just month number to account for higher delays within certain months monthDummies = pd.get_dummies(pd.to_datetime(newTrain.fl_date , 
format="%Y-%m-%d").dt.strftime('%B'))
dayDummies = pd.get_dummies(pd.to_datetime(newTrain.fl_date , format="%Y-%m-%d").dt.strftime('%A'))

#Creating dummy variables for carriers to account for delays related to certain carriers, then concat these dummies onto newTrain
mktCarrierDummies = pd.get_dummies(newTrain['mkt_unique_carrier'])
# opCarrierDummies = pd.get_dummies(newTrain['op_unique_carrier'])
newTrain = pd.concat([newTrain, mktCarrierDummies, monthDummies, dayDummies], axis=1)
#Test without these dummies, then swap and check results
#op dummies was giving better results than mkt dummies

newTrain['distanceSQ'] = newTrain['distance']**2
newTrain['originLong*Lat'] = newTrain['originLong']*newTrain['originLat']
newTrain['originLongSQ'] = newTrain['originLong']**2
newTrain['originLatSQ'] = newTrain['originLat']**2
newTrain['Month_Avg_Arr_DelaySQ'] = newTrain['Month_Avg_Arr_Delay']**2

#Assign X & y
y = newTrain['arr_delay'].values.reshape(-1,1)
X = newTrain.drop(columns = ['fl_date', 'origin', 'dep_time', 'mkt_unique_carrier','op_unique_carrier','arr_delay','origin_city_name','dest','dest_city_name', 'arr_time', 'taxi_out', 'taxi_in', 'VX'])
X

#Scale both X and y due to differing units of measurement between features
XScaled = scaler.fit_transform(X)
yScaled = scaler.fit_transform(y)

#Split data into train and test data
X_train, X_test, y_train, y_test = train_test_split(XScaled, yScaled, train_size = 0.75, random_state=5)

xg_reg = xgb.XGBRegressor(objective ='reg:squarederror', learning_rate = 0.04, max_depth = 5, alpha = 20, n_estimators = 800)
xg_reg.fit(X_train, y_train)
y_pred = xg_reg.predict(X_test)

print('MSE: ', mean_squared_error(y_test, y_pred))
print('R^2 Score: ', r2_score(y_test, y_pred))
print('R^2 Adj-Score: ', 1-(1-r2_score(y_test, y_pred))*((len(X_test)-1)/(len(X_test)-len(X_test[0])-1)))

#Here we are adding Average Arrival Delay relative to the month
#Start by changing the date from object to datetime
avg_delay['fl_date'] = pd.to_datetime(avg_delay['fl_date'], format='%Y-%m-%d')

#Groupby to compare monthly averages in delays
#NOTE: Negative values (early arrivals) ARE INCLUDED
month_arr = avg_delay.groupby(avg_delay['fl_date'].dt.strftime('%m'))['avg_arr_delay'].mean()
month_arr = month_arr.to_frame()
month_dep = avg_delay.groupby(avg_delay['fl_date'].dt.strftime('%m'))['avg_dep_delay'].mean()
month_dep = month_dep.to_frame()

#Resetting the index
month_arr = month_arr.reset_index()
month_dep = month_dep.reset_index()

#Creating 2 copies of fl_date, extracting the month in order to replace the month with its respective Average Arrival and/or Departure Delay
flights_test['Month_Avg_Arr_Delay'] = pd.to_datetime(flights_test.fl_date , format="%Y-%m-%d").dt.month
flights_test['Month_Avg_Dep_Delay'] = pd.to_datetime(flights_test.fl_date , format="%Y-%m-%d").dt.month

#Creating a dictionary containing descriptive statistics with months as keys and their respective average arrival/departure delay as values
month_arr_dict = dict(zip(month_arr.fl_date, month_arr.avg_arr_delay))
month_dep_dict = dict(zip(month_dep.fl_date, month_dep.avg_dep_delay))

#Replacing the values of the copied fl_date features with their respective average arrival/departure delays
flights_test.replace({'Month_Avg_Arr_Delay': {1: 3.9281951759577782, 2: 6.670705822847316, 3: 2.854581405409215, 4: 4.177950054675787, 5: 6.416833084409337, 6: 10.393455353404956, 7: 8.910038151256863, 8: 8.847961842961464, 9: 1.5852627540712663, 10: 2.7923909776573588, 11: 2.757202900691894, 12: 4.815971225866452}}, inplace=True)
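# A minimal sketch (a hypothetical helper, defined but not called): the month values hard-coded
# above presumably come from month_arr_dict / month_dep_dict computed earlier. Those dicts are
# keyed by the '%m' strings ('01'..'12'), while the copied columns hold integer months from
# .dt.month, so converting the keys once lets Series.map perform the same substitution.
def map_month_average_delays(df, arr_map, dep_map):
    """Replace the month numbers in the copied fl_date columns with their average delays."""
    arr_by_month = {int(k): v for k, v in arr_map.items()}
    dep_by_month = {int(k): v for k, v in dep_map.items()}
    df['Month_Avg_Arr_Delay'] = df['Month_Avg_Arr_Delay'].map(arr_by_month)
    df['Month_Avg_Dep_Delay'] = df['Month_Avg_Dep_Delay'].map(dep_by_month)
    return df
# e.g. flights_test = map_month_average_delays(flights_test, month_arr_dict, month_dep_dict)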
flights_test.replace({'Month_Avg_Dep_Delay': {1: 9.82808600285777, 2: 11.689433403570048, 3: 8.45752678421839, 4: 9.375029826488923, 5: 11.283686509030298, 6: 14.629757423341372, 7: 13.770582924983167, 8: 13.279282347021876, 9: 6.900262796528355, 10: 7.502918821697483, 11: 8.049444482526964, 12: 10.62795705344142}}, inplace=True) #Groupby to account for taxi in/out based on carriers which appeared to have the largest cross variance Avg_Taxi_Out_Carrier = newTrain['taxi_out'].groupby(newTrain['op_unique_carrier']).mean().reset_index() Avg_Taxi_In_Carrier = newTrain['taxi_in'].groupby(newTrain['op_unique_carrier']).mean().reset_index() #Create dictionary filled with op_unique_carrier as keys and the mean taxi in and out times as values taxi_out_dict = dict(zip(Avg_Taxi_Out_Carrier.op_unique_carrier, Avg_Taxi_Out_Carrier.taxi_out)) taxi_in_dict = dict(zip(Avg_Taxi_In_Carrier.op_unique_carrier, Avg_Taxi_In_Carrier.taxi_in)) #Creating two copies of op_unique_carrier to replace the values with the carrier's respective average taxi in and out time flights_test['Avg_Taxi_In_Carrier'] = flights_test['op_unique_carrier'] flights_test['Avg_Taxi_Out_Carrier'] = flights_test['op_unique_carrier'] #Replacing the Carrier codes in copied features with their respective average taxi in and out times. flights_test.replace({'Avg_Taxi_In_Carrier': {'9E': 7.360715045754416, '9K': 4.714285714285714, 'AA': 9.445789265313048, 'AS': 8.082283095510885, 'AX': 7.877306903622693, 'B6': 7.36336976806185, 'C5': 8.20173646578141, 'CP': 9.47292817679558, 'DL': 7.542487551418056, 'EM': 4.005050505050505, 'EV': 8.146282587705182, 'F9': 10.15011596036264, 'G4': 6.785416666666666, 'G7': 7.6468788249694, 'HA': 7.200770960488275, 'KS': 3.617021276595745, 'MQ': 8.747318339100346, 'NK': 9.849809617825413, 'OH': 8.452057416267943, 'OO': 7.693122041031036, 'PT': 8.16294088425236, 'QX': 5.72971114167813, 'UA': 7.847001223990208, 'VX': 8.774086378737541, 'WN': 5.293501334008452, 'YV': 7.493231100994369, 'YX': 8.656821963394343, 'ZW': 8.605810234541577}}, inplace=True) flights_test.replace({'Avg_Taxi_Out_Carrier': {'9E': 21.49329644605235, '9K': 8.785714285714286, 'AA': 18.694389457609862, 'AS': 18.991042599729195, 'AX': 20.173615857826384, 'B6': 17.75419888029859, 'C5': 24.258426966292134, 'CP': 18.9292817679558, 'DL': 17.24063650140723, 'EM': 8.146464646464647, 'EV': 20.229320888316703, 'F9': 16.60278304870335, 'G4': 13.095052083333334, 'G7': 19.86689106487148, 'HA': 11.959524574365563, 'KS': 5.872340425531915, 'MQ': 18.889359861591696, 'NK': 15.177690029615006, 'OH': 17.736363636363638, 'OO': 19.763907154129406, 'PT': 20.783904619970194, 'QX': 13.661393856029344, 'UA': 19.814797619550077, 'VX': 21.036544850498338, 'WN': 12.319694638649244, 'YV': 17.57553612076195, 'YX': 21.11281198003328, 'ZW': 19.840618336886994}}, inplace=True) #Create 4 copies of origin_city_name feature to replace the current values with their respective longtitude and latitude values flights_test['originLat'] = flights_test['origin_city_name'] flights_test['originLong'] = flights_test['origin_city_name'] flights_test['destLat'] = flights_test['dest_city_name'] flights_test['destLong'] = flights_test['dest_city_name'] #Replacing the City names with their longitude and latitude values #Geopy (from geopy.geocoders import Nominatim) was used in the aggregation of these values, but some had to manually encoded due to API call limits flights_test.replace({'originLat': {'Aberdeen, SD': 45.4649805, 'Abilene, TX': 32.44645, 'Adak Island, AK': 51.7961654, 'Aguadilla, PR': 
18.4274359, 'Akron, OH': 41.083064, 'Albany, GA': 42.7439143, 'Albany, NY': 42.6511674, 'Albuquerque, NM': 35.0841034, 'Alexandria, LA': 31.199004, 'Allentown/Bethlehem/Easton, PA': 40.651163100000005, 'Alpena, MI': 45.0176181, 'Amarillo, TX': 35.2072185, 'Anchorage, AK': 61.2163129, 'Appleton, WI': 44.2611337, 'Arcata/Eureka, CA': 40.8033073, 'Asheville, NC': 35.6009498, 'Ashland, WV': 37.4084488, 'Aspen, CO': 39.1911128, 'Atlanta, GA': 33.7489924, 'Atlantic City, NJ': 39.3642852, 'Augusta, GA': 48.3689438, 'Austin, TX': 30.2711286, 'Bakersfield, CA': 35.3738712, 'Baltimore, MD': 39.2908816, 'Bangor, ME': 44.8011821, 'Barrow, AK': 71.387113, 'Baton Rouge, LA': 30.4459596, 'Beaumont/Port Arthur, TX': 29.954324, 'Belleville, IL': 48.8176714, 'Bellingham, WA': 48.7544012, 'Bemidji, MN': 47.4785418, 'Bend/Redmond, OR': 44.2165084, 'Bethel, AK': 60.7922222, 'Billings, MT': 45.7874957, 'Binghamton, NY': 42.096968, 'Birmingham, AL': 52.4459629, 'Bismarck/Mandan, ND': 46.8101709, 'Bloomington/Normal, IL': 40.508752, 'Boise, ID': 43.6166163, 'Boston, MA': 42.3602534, 'Bozeman, MT': 45.6794293, 'Brainerd, MN': 46.3580221, 'Branson, MO': 36.6411357, 'Brownsville, TX': 25.9140256, 'Brunswick, GA': 52.3175903, 'Buffalo, NY': 42.8867166, 'Bullhead City, AZ': 35.1477774, 'Burbank, CA': 34.1816482, 'Burlington, VT': 44.4761601, 'Butte, MT': 39.6519275, 'Cape Girardeau, MO': 37.3034933, 'Casper, WY': 42.849709, 'Cedar City, UT': 37.6774238, 'Cedar Rapids/Iowa City, IA': 41.9758872, 'Champaign/Urbana, IL': 40.1157948, 'Charleston/Dunbar, WV': 38.3616659, 'Charleston, SC': 32.7876012, 'Charlotte Amalie, VI': 18.341137, 'Charlotte, NC': 35.2272086, 'Charlottesville, VA': 38.0360726, 'Chattanooga, TN': 35.0457219, 'Cheyenne, WY': 41.139981, 'Chicago, IL': 41.8755616, 'Christiansted, VI': 17.7439481, 'Cincinnati, OH': 39.1014537, 'Clarksburg/Fairmont, WV': 39.2798118, 'Cleveland, OH': 41.5051613, 'Cody, WY': 44.5263107, 'Colorado Springs, CO': 38.8339578, 'Columbia, MO': 38.951883, 'Columbia, SC': 34.0007493, 'Columbus, GA': 40.0838862, 'Columbus, MS': 33.4956744, 'Columbus, OH': 39.9622601, 'Concord, NC': 35.4094178, 'Cordova, AK': 60.5439444, 'Corpus Christi, TX': 27.7477253, 'Dallas/Fort Worth, TX': 32.7476308, 'Dallas, TX': 32.7762719, 'Daytona Beach, FL': 29.2108147, 'Dayton, OH': 39.7589478, 'Deadhorse, AK': 70.2006973, 'Del Rio, TX': 29.3655405, 'Denver, CO': 5.3428475, 'Des Moines, IA': 41.5910323, 'Detroit, MI': 42.3315509, 'Devils Lake, ND': 48.112779, 'Dickinson, ND': 46.8791756, 'Dillingham, AK': 59.0397222, 'Dothan, AL': 31.2237434, 'Dubuque, IA': 42.5006217, 'Duluth, MN': 46.7729322, 'Durango, CO': 24.833333, 'Eagle, CO': 39.6161124, 'Eau Claire, WI': 44.811349, 'Elko, NV': 41.1958128, 'Elmira/Corning, NY': 42.1608441, 'El Paso, TX': 31.7754152, 'Erie, PA': 42.1294712, 'Escanaba, MI': 45.7455707, 'Eugene, OR': 44.0505054, 'Evansville, IN': 37.9386712, 'Everett, WA': 47.9673056, 'Fairbanks, AK': 64.837845, 'Fargo, ND': 46.877229, 'Fayetteville, AR': 36.0625843, 'Fayetteville, NC': 35.0525759, 'Flagstaff, AZ': 35.1816047, 'Flint, MI': 43.0161693, 'Florence, SC': 34.1984435, 'Fort Lauderdale, FL': 26.1223084, 'Fort Myers, FL': 26.640628, 'Fort Smith, AR': 35.3872218, 'Fort Wayne, IN': 41.0799898, 'Fresno, CA': 36.7394421, 'Gainesville, FL': 29.6519684, 'Garden City, KS': 37.9716898, 'Gillette, WY': 44.290635, 'Grand Forks, ND': 47.9078244, 'Grand Island, NE': 40.924271, 'Grand Junction, CO': 39.063956, 'Grand Rapids, MI': 42.9632405, 'Great Falls, MT': 47.5048851, 'Green Bay, WI': 44.5126379, 
'Greensboro/High Point, NC': 36.0726355, 'Greenville, NC': 35.613224, 'Greer, SC': 34.9381361, 'Guam, TT': 13.486490199999999, 'Gulfport/Biloxi, MS': 30.4900534, 'Gunnison, CO': 38.6476702, 'Gustavus, AK': 58.4128377, 'Hagerstown, MD': 39.6419219, 'Hancock/Houghton, MI': 47.126871, 'Harrisburg, PA': 40.2663107, 'Hartford, CT': 41.7655582, 'Hayden, CO': 47.7725145, 'Hays, KS': 38.8791783, 'Helena, MT': 46.5927425, 'Hibbing, MN': 47.427155, 'Hilo, HI': 19.7073734, 'Hilton Head, SC': 32.3836213, 'Hobbs, NM': 32.707667, 'Honolulu, HI': 21.304547, 'Hoolehua, HI': 21.1590908, 'Houston, TX': 29.7589382, 'Huntsville, AL': 34.729847, 'Hyannis, MA': 41.651513, 'Idaho Falls, ID': 43.4935245, 'Indianapolis, IN': 39.9164009, 'International Falls, MN': 48.601033, 'Islip, NY': 40.7304311, 'Ithaca/Cortland, NY': 42.4415242, 'Jackson/Vicksburg, MS': 32.3520532, 'Jacksonville/Camp Lejeune, NC': 34.7338577, 'Jacksonville, FL': 30.3321838, 'Jackson, WY': 32.2990384, 'Jamestown, ND': 46.910544, 'Joplin, MO': 37.08418, 'Juneau, AK': 58.3019496, 'Kahului, HI': 20.8747708, 'Kalamazoo, MI': 42.291707, 'Kalispell, MT': 48.2022563, 'Kansas City, MO': 39.100105, 'Kapalua, HI': 20.99490395, 'Kearney, NE': 40.4906216, 'Ketchikan, AK': 55.3430696, 'Key West, FL': 24.5625566, 'Killeen, TX': 31.1171441, 'King Salmon, AK': 58.7551615, 'Knoxville, TN': 35.9603948, 'Kodiak, AK': 57.79, 'Kona, HI': 19.743906, 'Kotzebue, AK': 66.8982057, 'La Crosse, WI': 43.8014053, 'Lafayette, LA': 30.2240897, 'Lake Charles, LA': 30.2265949, 'Lanai, HI': 20.830544099999997, 'Lansing, MI': 42.7337712, 'Laramie, WY': 41.311367, 'Laredo, TX': 27.5199841, 'Las Vegas, NV': 36.1672559, 'Latrobe, PA': 40.317287, 'Lawton/Fort Sill, OK': 34.6172103, 'Lewisburg, WV': 37.8017879, 'Lewiston, ID': 46.4195913, 'Lexington, KY': 38.0464066, 'Liberal, KS': 37.0430812, 'Lihue, HI': 21.9769622, 'Lincoln, NE': 40.8088861, 'Little Rock, AR': 34.7464809, 'Long Beach, CA': 33.7690164, 'Longview, TX': 32.5007031, 'Los Angeles, CA': 34.0536909, 'Louisville, KY': 38.2542376, 'Lubbock, TX': 33.5635206, 'Lynchburg, VA': 37.4137536, 'Madison, WI': 43.074761, 'Mammoth Lakes, CA': 37.6432525, 'Manchester, NH': 42.9956397, 'Manhattan/Ft. 
Riley, KS': 40.8576918, 'Marquette, MI': 46.4481521, "Martha's Vineyard, MA": 41.3918832, 'Medford, OR': 42.3264181, 'Melbourne, FL': 28.106471, 'Memphis, TN': 35.1490215, 'Meridian, MS': 32.3643098, 'Miami, FL': 25.7741728, 'Midland/Odessa, TX': 31.8329723, 'Milwaukee, WI': 43.0349931, 'Minneapolis, MN': 44.9772995, 'Minot, ND': 48.23251, 'Missoula, MT': 46.8701049, 'Moab, UT': 38.5738096, 'Mobile, AL': 30.6943566, 'Moline, IL': 41.5067003, 'Monroe, LA': 38.2722313, 'Monterey, CA': 36.2231079, 'Montgomery, AL': 32.379952849999995, 'Montrose/Delta, CO': 38.8777609, 'Mosinee, WI': 44.7927298, 'Muskegon, MI': 43.2341813, 'Myrtle Beach, SC': 33.6956461, 'Nantucket, MA': 41.316911450000006, 'Nashville, TN': 36.1622296, 'Newark, NJ': 40.735657, 'New Haven, CT': 41.298434349999994, 'New Orleans, LA': 29.9499323, 'New York, NY': 40.7127281, 'Niagara Falls, NY': 43.08436, 'Nome, AK': 64.4989922, 'Norfolk, VA': 52.56365215, 'North Bend/Coos Bay, OR': 43.4065089, 'North Platte, NE': 41.1238873, 'Oakland, CA': 37.8044557, 'Ogdensburg, NY': 44.694285, 'Ogden, UT': 41.2230048, 'Oklahoma City, OK': 35.4729886, 'Omaha, NE': 41.2587459, 'Ontario, CA': 50.000678, 'Orlando, FL': 28.5421109, 'Owensboro, KY': 37.7742152, 'Paducah, KY': 37.0833893, 'Pago Pago, TT': -14.2754786, 'Palm Springs, CA': 33.772179449999996, 'Panama City, FL': 30.1600827, 'Pasco/Kennewick/Richland, WA': 46.1736015, 'Pellston, MI': 45.552789, 'Pensacola, FL': 30.421309, 'Peoria, IL': 40.6938609, 'Petersburg, AK': 56.8127965, 'Philadelphia, PA': 39.9527237, 'Phoenix, AZ': 33.4484367, 'Pierre, SD': 44.3683644, 'Pittsburgh, PA': 40.4416941, 'Plattsburgh, NY': 44.69282, 'Pocatello, ID': 42.8688613, 'Ponce, PR': 18.0039949, 'Portland, ME': 43.6610277, 'Portland, OR': 45.5202471, 'Portsmouth, NH': 43.0702223, 'Prescott, AZ': 34.5399962, 'Presque Isle/Houlton, ME': 46.661867799999996, 'Providence, RI': 41.8239891, 'Provo, UT': 40.2338438, 'Pueblo, CO': 10.961033, 'Pullman, WA': 46.7304268, 'Punta Gorda, FL': 26.9297836, 'Quincy, IL': 39.9356016, 'Raleigh/Durham, NC': 35.9217839, 'Rapid City, SD': 44.0869329, 'Redding, CA': 40.5863563, 'Reno, NV': 39.5261206, 'Rhinelander, WI': 45.636623, 'Richmond, VA': 49.1977086, 'Roanoke, VA': 37.270973, 'Rochester, MN': 44.0234387, 'Rochester, NY': 43.157285, 'Rockford, IL': 42.2713945, 'Rock Springs, WY': 41.5869225, 'Roswell, NM': 33.3943282, 'Rota, TT': 66.947975, 'Sacramento, CA': 38.5810606, 'Saipan, TT': 7.0698398, 'Salina, KS': 38.8402805, 'Salisbury, MD': 38.3662114, 'Salt Lake City, UT': 40.7596198, 'San Angelo, TX': 31.4648357, 'San Antonio, TX': 29.4246002, 'San Diego, CA': 32.7174202, 'Sanford, FL': 28.8117297, 'San Francisco, CA': 37.7790262, 'San Jose, CA': 37.3361905, 'San Juan, PR': -25.4206759, 'San Luis Obispo, CA': 35.3540209, 'Santa Ana, CA': 33.7494951, 'Santa Barbara, CA': 34.4221319, 'Santa Fe, NM': 35.6869996, 'Santa Maria, CA': 34.9531295, 'Santa Rosa, CA': 38.4404925, 'Sarasota/Bradenton, FL': 27.499764300000002, 'Sault Ste. Marie, MI': 46.490586, 'Savannah, GA': 9.7568312, 'Scottsbluff, NE': 41.862302, 'Scranton/Wilkes-Barre, PA': 41.33709205, 'Seattle, WA': 47.6038321, 'Shreveport, LA': 32.5221828, 'Sioux City, IA': 42.4966815, 'Sioux Falls, SD': 43.549973, 'Sitka, AK': 57.0524973, 'South Bend, IN': 38.622348, 'Spokane, WA': 47.6571934, 'Springfield, IL': 39.7990175, 'Springfield, MO': 37.2166779, 'State College, PA': 40.7944504, 'Staunton, VA': 38.1357949, 'St. Cloud, MN': 45.5616075, 'St. George, UT': 37.104153, 'Stillwater, OK': 36.1156306, 'St. 
Louis, MO': 38.6529545, 'Stockton, CA': 37.9577016, 'St. Petersburg, FL': 27.7703796, 'Syracuse, NY': 43.0481221, 'Tallahassee, FL': 30.4380832, 'Tampa, FL': 27.9477595, 'Texarkana, AR': 33.4254684, 'Toledo, OH': 41.6529143, 'Traverse City, MI': 44.7606441, 'Trenton, NJ': 40.2170575, 'Tucson, AZ': 32.2228765, 'Tulsa, OK': 36.1556805, 'Twin Falls, ID': 42.5704456, 'Tyler, TX': 32.3512601, 'Unalaska, AK': 53.8722824, 'Valdosta, GA': 30.8327022, 'Valparaiso, FL': 30.5085309, 'Vernal, UT': 40.4556825, 'Waco, TX': 31.549333, 'Walla Walla, WA': 46.0667277, 'Washington, DC': 38.8949924, 'Waterloo, IA': 42.4979693, 'Watertown, NY': 43.9747838, 'Watertown, SD': 44.899211, 'Wenatchee, WA': 47.4234599, 'West Palm Beach/Palm Beach, FL': 26.715364, 'West Yellowstone, MT': 44.664290199999996, 'White Plains, NY': 41.0339862, 'Wichita Falls, TX': 33.9137085, 'Wichita, KS': 37.6922361, 'Williamsport, PA': 41.2493292, 'Williston, ND': 48.1465457, 'Wilmington, NC': 34.2257282, 'Worcester, MA': 42.2761217, 'Wrangell, AK': 56.4706022, 'Yakima, WA': 46.601557, 'Yakutat, AK': 59.572734499999996, 'Youngstown/Warren, OH': 41.22497, 'Yuma, AZ': 32.665135, 'Bristol/Johnson City/Kingsport, TN': 36.475201, 'Mission/McAllen/Edinburg, TX': 26.203407, 'New Bern/Morehead/Beaufort, NC': 35.108494, 'Hattiesburg/Laurel, MS': 31.467, 'Iron Mountain/Kingsfd, MI': 45.8146, 'Newburgh/Poughkeepsie, NY': 41.66598, 'College Station/Bryan, TX': 30.601389, 'Saginaw/Bay City/Midland, MI': 43.4195, 'Newport News/Williamsburg, VA': 37.131900, 'Harlingen/San Benito, TX': 26.1326, 'Sun Valley/Hailey/Ketchum, ID': 43.504398}}, inplace=True) flights_test.replace({'originLong': {'Aberdeen, SD': -98.487813, 'Abilene, TX': -99.7475905, 'Adak Island, AK': -176.5734916431957, 'Aguadilla, PR': -67.1541343, 'Akron, OH': -81.518485, 'Albany, GA': -73.8016558, 'Albany, NY': -73.754968, 'Albuquerque, NM': -106.6509851, 'Alexandria, LA': 29.894378, 'Allentown/Bethlehem/Easton, PA': -75.44225386838299, 'Alpena, MI': -83.6670019, 'Amarillo, TX': -101.8338246, 'Anchorage, AK': -149.894852, 'Appleton, WI': -88.4067604, 'Arcata/Eureka, CA': -124.1535049, 'Asheville, NC': -82.5540161, 'Ashland, WV': -81.3526017, 'Aspen, CO': -106.8235606, 'Atlanta, GA': -84.3902644, 'Atlantic City, NJ': -74.4229351, 'Augusta, GA': 10.8933327, 'Austin, TX': -97.7436995, 'Bakersfield, CA': -119.0194639, 'Baltimore, MD': -76.610759, 'Bangor, ME': -68.7778138, 'Barrow, AK': -156.4809618, 'Baton Rouge, LA': -91.18738, 'Beaumont/Port Arthur, TX': -93.985972, 'Belleville, IL': 6.0982683, 'Bellingham, WA': -122.4788361, 'Bemidji, MN': -94.8907869, 'Bend/Redmond, OR': -121.2150324, 'Bethel, AK': -161.7558333, 'Billings, MT': -108.49607, 'Binghamton, NY': -75.914341, 'Birmingham, AL': -1.8237251, 'Bismarck/Mandan, ND': -100.8363564, 'Bloomington/Normal, IL': -88.9844947, 'Boise, ID': -116.200886, 'Boston, MA': -71.0582912, 'Bozeman, MT': -111.044047, 'Brainerd, MN': -94.2008288, 'Branson, MO': -93.2175285, 'Brownsville, TX': -97.4890856, 'Brunswick, GA': 10.560215, 'Buffalo, NY': -78.8783922, 'Bullhead City, AZ': -114.5682983, 'Burbank, CA': -118.3258554, 'Burlington, VT': -73.212906, 'Butte, MT': -121.5858444, 'Cape Girardeau, MO': -89.5230357, 'Casper, WY': -106.3254928, 'Cedar City, UT': -113.0618277, 'Cedar Rapids/Iowa City, IA': -91.6704053, 'Champaign/Urbana, IL': -88.241194, 'Charleston/Dunbar, WV': -81.7207214, 'Charleston, SC': -79.9402728, 'Charlotte Amalie, VI': -64.932789, 'Charlotte, NC': -80.8430827, 'Charlottesville, VA': -78.49973472559668, 'Chattanooga, TN': 
-85.3094883, 'Cheyenne, WY': -104.820246, 'Chicago, IL': -87.6244212, 'Christiansted, VI': -64.7079823, 'Cincinnati, OH': -84.5124602, 'Clarksburg/Fairmont, WV': -80.3300893, 'Cleveland, OH': -81.6934446, 'Cody, WY': -109.0563923, 'Colorado Springs, CO': -104.8253485, 'Columbia, MO': -92.3337366, 'Columbia, SC': -81.0343313, 'Columbus, GA': -83.0765043, 'Columbus, MS': -88.4272627, 'Columbus, OH': -83.0007065, 'Concord, NC': -80.5800049, 'Cordova, AK': -145.7589103, 'Corpus Christi, TX': -97.4014129, 'Dallas/Fort Worth, TX': -97.3135971, 'Dallas, TX': -96.7968559, 'Daytona Beach, FL': -81.0228331, 'Dayton, OH': -84.1916069, 'Deadhorse, AK': -148.4598151, 'Del Rio, TX': -100.8946984, 'Denver, CO': -72.3959849, 'Des Moines, IA': -93.6046655, 'Detroit, MI': -83.0466403, 'Devils Lake, ND': -98.86512, 'Dickinson, ND': -102.7896242, 'Dillingham, AK': -158.4575, 'Dothan, AL': -85.3933906, 'Dubuque, IA': -90.6647967, 'Duluth, MN': -92.1251218, 'Durango, CO': -104.833333, 'Eagle, CO': -106.7172844, 'Eau Claire, WI': -91.4984941, 'Elko, NV': -115.3272864, 'Elmira/Corning, NY': -76.89199038453467, 'El Paso, TX': -106.464634, 'Erie, PA': -80.0852695, 'Escanaba, MI': -87.0647434, 'Eugene, OR': -123.0950506, 'Evansville, IN': -87.518899, 'Everett, WA': -122.2013998, 'Fairbanks, AK': -147.716675, 'Fargo, ND': -96.789821, 'Fayetteville, AR': -94.1574328, 'Fayetteville, NC': -78.878292, 'Flagstaff, AZ': -111.6165953319917, 'Flint, MI': -83.6900211, 'Florence, SC': -79.7671658, 'Fort Lauderdale, FL': -80.1433786, 'Fort Myers, FL': -81.8723084, 'Fort Smith, AR': -94.4248983, 'Fort Wayne, IN': -85.1386015, 'Fresno, CA': -119.7848307, 'Gainesville, FL': -82.3249846, 'Garden City, KS': -100.8726618, 'Gillette, WY': -105.501876, 'Grand Forks, ND': -97.0592028, 'Grand Island, NE': -98.338685, 'Grand Junction, CO': -108.5507317, 'Grand Rapids, MI': -85.6678639, 'Great Falls, MT': -111.29189, 'Green Bay, WI': -88.0125794, 'Greensboro/High Point, NC': -79.7919754, 'Greenville, NC': -77.3724593, 'Greer, SC': -82.2272119, 'Guam, TT': 144.80206025352555, 'Gulfport/Biloxi, MS': -89.0290044, 'Gunnison, CO': -107.0603126, 'Gustavus, AK': -135.7375654, 'Hagerstown, MD': -77.7202641, 'Hancock/Houghton, MI': -88.580956, 'Harrisburg, PA': -76.8861122, 'Hartford, CT': -72.69061276146614, 'Hayden, CO': -116.82675375791398, 'Hays, KS': -99.3267702, 'Helena, MT': -112.036277, 'Hibbing, MN': -92.937689, 'Hilo, HI': -155.0815803, 'Hilton Head, SC': -99.748119, 'Hobbs, NM': -103.1311314, 'Honolulu, HI': -157.8556764, 'Hoolehua, HI': -157.09484723911947, 'Houston, TX': -95.3676974, 'Huntsville, AL': -86.5859011, 'Hyannis, MA': -70.2825918, 'Idaho Falls, ID': -112.0400919, 'Indianapolis, IN': -86.0519568269157, 'International Falls, MN': -93.4105904, 'Islip, NY': -73.2108618, 'Ithaca/Cortland, NY': -76.4580207, 'Jackson/Vicksburg, MS': -90.8730418, 'Jacksonville/Camp Lejeune, NC': -77.4457643, 'Jacksonville, FL': -81.655651, 'Jackson, WY': -90.1847691, 'Jamestown, ND': -98.708436, 'Joplin, MO': -94.51323, 'Juneau, AK': -134.419734, 'Kahului, HI': -156.4529879461996, 'Kalamazoo, MI': -85.5872286, 'Kalispell, MT': -114.316711, 'Kansas City, MO': -94.5781416, 'Kapalua, HI': -156.6562339558182, 'Kearney, NE': -98.9472344, 'Ketchikan, AK': -131.6466819, 'Key West, FL': -81.7724368, 'Killeen, TX': -97.727796, 'King Salmon, AK': -156.5192469940953, 'Knoxville, TN': -83.9210261, 'Kodiak, AK': -152.4072222, 'Kona, HI': -156.0422959812206, 'Kotzebue, AK': -162.5977621, 'La Crosse, WI': -91.2395429, 'Lafayette, LA': -92.0198427, 'Lake Charles, 
LA': -93.2173759, 'Lanai, HI': -156.9029492509114, 'Lansing, MI': -84.5553805, 'Laramie, WY': -105.591101, 'Laredo, TX': -99.4953764, 'Las Vegas, NV': -115.1485163, 'Latrobe, PA': -79.3840301, 'Lawton/Fort Sill, OK': -98.4037888, 'Lewisburg, WV': -80.4456303, 'Lewiston, ID': -117.0216144, 'Lexington, KY': -84.4970393, 'Liberal, KS': -100.920999, 'Lihue, HI': -159.3687721, 'Lincoln, NE': -96.7077751, 'Little Rock, AR': -92.2895948, 'Long Beach, CA': -118.191604, 'Longview, TX': -94.74049, 'Los Angeles, CA': -118.242766, 'Louisville, KY': -85.759407, 'Lubbock, TX': -101.879336, 'Lynchburg, VA': -79.1422464, 'Madison, WI': -89.3837613, 'Mammoth Lakes, CA': -118.9668509, 'Manchester, NH': -71.4547891, 'Manhattan/Ft. Riley, KS': -73.9222899, 'Marquette, MI': -87.6305899, "Martha's Vineyard, MA": -70.62085427857699, 'Medford, OR': -122.8718605, 'Melbourne, FL': -80.6371513, 'Memphis, TN': -90.0516285, 'Meridian, MS': -88.703656, 'Miami, FL': -80.19362, 'Midland/Odessa, TX': -102.3606957, 'Milwaukee, WI': -87.922497, 'Minneapolis, MN': -93.2654692, 'Minot, ND': -101.296273, 'Missoula, MT': -113.995267, 'Moab, UT': -109.5462146, 'Mobile, AL': -88.0430541, 'Moline, IL': -90.5151342, 'Monroe, LA': -90.1792484, 'Monterey, CA': -121.3877428, 'Montgomery, AL': -86.3107669425032, 'Montrose/Delta, CO': -108.226467, 'Mosinee, WI': -89.7035959, 'Muskegon, MI': -86.2483921, 'Myrtle Beach, SC': -78.8900409, 'Nantucket, MA': -70.14287301528347, 'Nashville, TN': -86.7743531, 'Newark, NJ': -74.1723667, 'New Haven, CT': -72.93102342707913, 'New Orleans, LA': -90.0701156, 'New York, NY': -74.0060152, 'Niagara Falls, NY': -79.0614686, 'Nome, AK': -165.39879944316317, 'Norfolk, VA': 1.2623608080231654, 'North Bend/Coos Bay, OR': -124.2242824, 'North Platte, NE': -100.7654232, 'Oakland, CA': -122.2713563, 'Ogdensburg, NY': -75.486374, 'Ogden, UT': -111.9738429, 'Oklahoma City, OK': -97.5170536, 'Omaha, NE': -95.9383758, 'Ontario, CA': -86.000977, 'Orlando, FL': -81.3790304, 'Owensboro, KY': -87.1133304, 'Paducah, KY': -88.6000478, 'Pago Pago, TT': -170.7048298, 'Palm Springs, CA': -116.49529769785079, 'Panama City, FL': -85.6545729, 'Pasco/Kennewick/Richland, WA': -119.0664001, 'Pellston, MI': -84.783936, 'Pensacola, FL': -87.2169149, 'Peoria, IL': -89.5891008, 'Petersburg, AK': -132.95547, 'Philadelphia, PA': -75.1635262, 'Phoenix, AZ': -112.0741417, 'Pierre, SD': -100.3511367, 'Pittsburgh, PA': -79.9900861, 'Plattsburgh, NY': -73.45562, 'Pocatello, ID': -112.4401098, 'Ponce, PR': -66.6169509, 'Portland, ME': -70.2548596, 'Portland, OR': -122.6741949, 'Portsmouth, NH': -70.7548621, 'Prescott, AZ': -112.4687616, 'Presque Isle/Houlton, ME': -68.01074889363161, 'Providence, RI': -71.4128343, 'Provo, UT': -111.6585337, 'Pueblo, CO': -74.84053554739253, 'Pullman, WA': -117.173895, 'Punta Gorda, FL': -82.0453664, 'Quincy, IL': -91.4098727, 'Raleigh/Durham, NC': -78.76087880585929, 'Rapid City, SD': -103.2274481, 'Redding, CA': -122.3916754, 'Reno, NV': -119.8126581, 'Rhinelander, WI': -89.412075, 'Richmond, VA': -123.1912406, 'Roanoke, VA': -79.9414313, 'Rochester, MN': -92.4630182, 'Rochester, NY': -77.615214, 'Rockford, IL': -89.093966, 'Rock Springs, WY': -109.2047867, 'Roswell, NM': -104.5229518, 'Rota, TT': 13.553736, 'Sacramento, CA': -121.4938951, 'Saipan, TT': 125.5116649, 'Salina, KS': -97.6114237, 'Salisbury, MD': -75.6008881, 'Salt Lake City, UT': -111.8867975, 'San Angelo, TX': -100.4398442, 'San Antonio, TX': -98.4951405, 'San Diego, CA': -117.1627728, 'Sanford, FL': -81.2680345, 'San Francisco, CA': 
-122.4199061, 'San Jose, CA': -121.890583, 'San Juan, PR': -49.2687428522959, 'San Luis Obispo, CA': -120.3757163, 'Santa Ana, CA': -117.8732213, 'Santa Barbara, CA': -119.7026673, 'Santa Fe, NM': -105.9377997, 'Santa Maria, CA': -120.4358577, 'Santa Rosa, CA': -122.7141049, 'Sarasota/Bradenton, FL': -82.56510160912002, 'Sault Ste. Marie, MI': -84.359269, 'Savannah, GA': -2.4962, 'Scottsbluff, NE': -103.6627088, 'Scranton/Wilkes-Barre, PA': -75.72257122928625, 'Seattle, WA': -122.3300624, 'Shreveport, LA': -93.7651944, 'Sioux City, IA': -96.4058782, 'Sioux Falls, SD': -96.7003324, 'Sitka, AK': -135.337612, 'South Bend, IN': -105.518825, 'Spokane, WA': -117.4235106, 'Springfield, IL': -89.6439575, 'Springfield, MO': -93.2920373, 'State College, PA': -77.8616386, 'Staunton, VA': -79.08927008810585, 'St. Cloud, MN': -94.1642004, 'St. George, UT': -113.5841313, 'Stillwater, OK': -97.0585717, 'St. Louis, MO': -90.24111656024635, 'Stockton, CA': -121.2907796, 'St. Petersburg, FL': -82.6695085, 'Syracuse, NY': -76.1474244, 'Tallahassee, FL': -84.2809332, 'Tampa, FL': -82.458444, 'Texarkana, AR': -94.0430977, 'Toledo, OH': -83.5378173, 'Traverse City, MI': -85.6165301, 'Trenton, NJ': -74.7429463, 'Tucson, AZ': -110.9748477, 'Tulsa, OK': -95.9929113, 'Twin Falls, ID': -114.4602554, 'Tyler, TX': -95.3010624, 'Unalaska, AK': -166.5272262, 'Valdosta, GA': -83.2784851, 'Valparaiso, FL': -86.5027282, 'Vernal, UT': -109.5284741, 'Waco, TX': -97.1466695, 'Walla Walla, WA': -118.3393456, 'Washington, DC': -77.0365581, 'Waterloo, IA': -92.3329637, 'Watertown, NY': -75.9107565, 'Watertown, SD': -97.115289, 'Wenatchee, WA': -120.3103494, 'West Palm Beach/Palm Beach, FL': -80.0532942, 'West Yellowstone, MT': -111.10513722509046, 'White Plains, NY': -73.7629097, 'Wichita Falls, TX': -98.4933873, 'Wichita, KS': -97.3375448, 'Williamsport, PA': -77.0027671, 'Williston, ND': -103.621814, 'Wilmington, NC': -77.9447107, 'Worcester, MA': -71.8058232, 'Wrangell, AK': -132.3829431, 'Yakima, WA': -120.5108421, 'Yakutat, AK': -139.57831243878087, 'Youngstown/Warren, OH': -80.789606, 'Yuma, AZ': -114.47603157249804, 'Bristol/Johnson City/Kingsport, TN': -82.407401, 'Mission/McAllen/Edinburg, TX': -98.230011, 'New Bern/Morehead/Beaufort, NC': -77.044113, 'Hattiesburg/Laurel, MS': -89.3331, 'Iron Mountain/Kingsfd, MI': -88.1186, 'Newburgh/Poughkeepsie, NY': -73.884201, 'College Station/Bryan, TX': -96.314445, 'Saginaw/Bay City/Midland, MI': -83.9508, 'Newport News/Williamsburg, VA': -76.492996, 'Harlingen/San Benito, TX': -97.6311, 'Sun Valley/Hailey/Ketchum, ID': -114.2959976}}, inplace=True) flights_test.replace({'destLat': {'Aberdeen, SD': 45.4649805, 'Abilene, TX': 32.44645, 'Adak Island, AK': 51.7961654, 'Aguadilla, PR': 18.4274359, 'Akron, OH': 41.083064, 'Albany, GA': 42.7439143, 'Albany, NY': 42.6511674, 'Albuquerque, NM': 35.0841034, 'Alexandria, LA': 31.199004, 'Allentown/Bethlehem/Easton, PA': 40.651163100000005, 'Alpena, MI': 45.0176181, 'Amarillo, TX': 35.2072185, 'Anchorage, AK': 61.2163129, 'Appleton, WI': 44.2611337, 'Arcata/Eureka, CA': 40.8033073, 'Asheville, NC': 35.6009498, 'Ashland, WV': 37.4084488, 'Aspen, CO': 39.1911128, 'Atlanta, GA': 33.7489924, 'Atlantic City, NJ': 39.3642852, 'Augusta, GA': 48.3689438, 'Austin, TX': 30.2711286, 'Bakersfield, CA': 35.3738712, 'Baltimore, MD': 39.2908816, 'Bangor, ME': 44.8011821, 'Barrow, AK': 71.387113, 'Baton Rouge, LA': 30.4459596, 'Beaumont/Port Arthur, TX': 29.954324, 'Belleville, IL': 48.8176714, 'Bellingham, WA': 48.7544012, 'Bemidji, MN': 47.4785418, 
'Bend/Redmond, OR': 44.2165084, 'Bethel, AK': 60.7922222, 'Billings, MT': 45.7874957, 'Binghamton, NY': 42.096968, 'Birmingham, AL': 52.4459629, 'Bismarck/Mandan, ND': 46.8101709, 'Bloomington/Normal, IL': 40.508752, 'Boise, ID': 43.6166163, 'Boston, MA': 42.3602534, 'Bozeman, MT': 45.6794293, 'Brainerd, MN': 46.3580221, 'Branson, MO': 36.6411357, 'Brownsville, TX': 25.9140256, 'Brunswick, GA': 52.3175903, 'Buffalo, NY': 42.8867166, 'Bullhead City, AZ': 35.1477774, 'Burbank, CA': 34.1816482, 'Burlington, VT': 44.4761601, 'Butte, MT': 39.6519275, 'Cape Girardeau, MO': 37.3034933, 'Casper, WY': 42.849709, 'Cedar City, UT': 37.6774238, 'Cedar Rapids/Iowa City, IA': 41.9758872, 'Champaign/Urbana, IL': 40.1157948, 'Charleston/Dunbar, WV': 38.3616659, 'Charleston, SC': 32.7876012, 'Charlotte Amalie, VI': 18.341137, 'Charlotte, NC': 35.2272086, 'Charlottesville, VA': 38.0360726, 'Chattanooga, TN': 35.0457219, 'Cheyenne, WY': 41.139981, 'Chicago, IL': 41.8755616, 'Christiansted, VI': 17.7439481, 'Cincinnati, OH': 39.1014537, 'Clarksburg/Fairmont, WV': 39.2798118, 'Cleveland, OH': 41.5051613, 'Cody, WY': 44.5263107, 'Colorado Springs, CO': 38.8339578, 'Columbia, MO': 38.951883, 'Columbia, SC': 34.0007493, 'Columbus, GA': 40.0838862, 'Columbus, MS': 33.4956744, 'Columbus, OH': 39.9622601, 'Concord, NC': 35.4094178, 'Cordova, AK': 60.5439444, 'Corpus Christi, TX': 27.7477253, 'Dallas/Fort Worth, TX': 32.7476308, 'Dallas, TX': 32.7762719, 'Daytona Beach, FL': 29.2108147, 'Dayton, OH': 39.7589478, 'Deadhorse, AK': 70.2006973, 'Del Rio, TX': 29.3655405, 'Denver, CO': 5.3428475, 'Des Moines, IA': 41.5910323, 'Detroit, MI': 42.3315509, 'Devils Lake, ND': 48.112779, 'Dickinson, ND': 46.8791756, 'Dillingham, AK': 59.0397222, 'Dothan, AL': 31.2237434, 'Dubuque, IA': 42.5006217, 'Duluth, MN': 46.7729322, 'Durango, CO': 24.833333, 'Eagle, CO': 39.6161124, 'Eau Claire, WI': 44.811349, 'Elko, NV': 41.1958128, 'Elmira/Corning, NY': 42.1608441, 'El Paso, TX': 31.7754152, 'Erie, PA': 42.1294712, 'Escanaba, MI': 45.7455707, 'Eugene, OR': 44.0505054, 'Evansville, IN': 37.9386712, 'Everett, WA': 47.9673056, 'Fairbanks, AK': 64.837845, 'Fargo, ND': 46.877229, 'Fayetteville, AR': 36.0625843, 'Fayetteville, NC': 35.0525759, 'Flagstaff, AZ': 35.1816047, 'Flint, MI': 43.0161693, 'Florence, SC': 34.1984435, 'Fort Lauderdale, FL': 26.1223084, 'Fort Myers, FL': 26.640628, 'Fort Smith, AR': 35.3872218, 'Fort Wayne, IN': 41.0799898, 'Fresno, CA': 36.7394421, 'Gainesville, FL': 29.6519684, 'Garden City, KS': 37.9716898, 'Gillette, WY': 44.290635, 'Grand Forks, ND': 47.9078244, 'Grand Island, NE': 40.924271, 'Grand Junction, CO': 39.063956, 'Grand Rapids, MI': 42.9632405, 'Great Falls, MT': 47.5048851, 'Green Bay, WI': 44.5126379, 'Greensboro/High Point, NC': 36.0726355, 'Greenville, NC': 35.613224, 'Greer, SC': 34.9381361, 'Guam, TT': 13.486490199999999, 'Gulfport/Biloxi, MS': 30.4900534, 'Gunnison, CO': 38.6476702, 'Gustavus, AK': 58.4128377, 'Hagerstown, MD': 39.6419219, 'Hancock/Houghton, MI': 47.126871, 'Harrisburg, PA': 40.2663107, 'Hartford, CT': 41.7655582, 'Hayden, CO': 47.7725145, 'Hays, KS': 38.8791783, 'Helena, MT': 46.5927425, 'Hibbing, MN': 47.427155, 'Hilo, HI': 19.7073734, 'Hilton Head, SC': 32.3836213, 'Hobbs, NM': 32.707667, 'Honolulu, HI': 21.304547, 'Hoolehua, HI': 21.1590908, 'Houston, TX': 29.7589382, 'Huntsville, AL': 34.729847, 'Hyannis, MA': 41.651513, 'Idaho Falls, ID': 43.4935245, 'Indianapolis, IN': 39.9164009, 'International Falls, MN': 48.601033, 'Islip, NY': 40.7304311, 'Ithaca/Cortland, NY': 
42.4415242, 'Jackson/Vicksburg, MS': 32.3520532, 'Jacksonville/Camp Lejeune, NC': 34.7338577, 'Jacksonville, FL': 30.3321838, 'Jackson, WY': 32.2990384, 'Jamestown, ND': 46.910544, 'Joplin, MO': 37.08418, 'Juneau, AK': 58.3019496, 'Kahului, HI': 20.8747708, 'Kalamazoo, MI': 42.291707, 'Kalispell, MT': 48.2022563, 'Kansas City, MO': 39.100105, 'Kapalua, HI': 20.99490395, 'Kearney, NE': 40.4906216, 'Ketchikan, AK': 55.3430696, 'Key West, FL': 24.5625566, 'Killeen, TX': 31.1171441, 'King Salmon, AK': 58.7551615, 'Knoxville, TN': 35.9603948, 'Kodiak, AK': 57.79, 'Kona, HI': 19.743906, 'Kotzebue, AK': 66.8982057, 'La Crosse, WI': 43.8014053, 'Lafayette, LA': 30.2240897, 'Lake Charles, LA': 30.2265949, 'Lanai, HI': 20.830544099999997, 'Lansing, MI': 42.7337712, 'Laramie, WY': 41.311367, 'Laredo, TX': 27.5199841, 'Las Vegas, NV': 36.1672559, 'Latrobe, PA': 40.317287, 'Lawton/Fort Sill, OK': 34.6172103, 'Lewisburg, WV': 37.8017879, 'Lewiston, ID': 46.4195913, 'Lexington, KY': 38.0464066, 'Liberal, KS': 37.0430812, 'Lihue, HI': 21.9769622, 'Lincoln, NE': 40.8088861, 'Little Rock, AR': 34.7464809, 'Long Beach, CA': 33.7690164, 'Longview, TX': 32.5007031, 'Los Angeles, CA': 34.0536909, 'Louisville, KY': 38.2542376, 'Lubbock, TX': 33.5635206, 'Lynchburg, VA': 37.4137536, 'Madison, WI': 43.074761, 'Mammoth Lakes, CA': 37.6432525, 'Manchester, NH': 42.9956397, 'Manhattan/Ft. Riley, KS': 40.8576918, 'Marquette, MI': 46.4481521, "Martha's Vineyard, MA": 41.3918832, 'Medford, OR': 42.3264181, 'Melbourne, FL': 28.106471, 'Memphis, TN': 35.1490215, 'Meridian, MS': 32.3643098, 'Miami, FL': 25.7741728, 'Midland/Odessa, TX': 31.8329723, 'Milwaukee, WI': 43.0349931, 'Minneapolis, MN': 44.9772995, 'Minot, ND': 48.23251, 'Missoula, MT': 46.8701049, 'Moab, UT': 38.5738096, 'Mobile, AL': 30.6943566, 'Moline, IL': 41.5067003, 'Monroe, LA': 38.2722313, 'Monterey, CA': 36.2231079, 'Montgomery, AL': 32.379952849999995, 'Montrose/Delta, CO': 38.8777609, 'Mosinee, WI': 44.7927298, 'Muskegon, MI': 43.2341813, 'Myrtle Beach, SC': 33.6956461, 'Nantucket, MA': 41.316911450000006, 'Nashville, TN': 36.1622296, 'Newark, NJ': 40.735657, 'New Haven, CT': 41.298434349999994, 'New Orleans, LA': 29.9499323, 'New York, NY': 40.7127281, 'Niagara Falls, NY': 43.08436, 'Nome, AK': 64.4989922, 'Norfolk, VA': 52.56365215, 'North Bend/Coos Bay, OR': 43.4065089, 'North Platte, NE': 41.1238873, 'Oakland, CA': 37.8044557, 'Ogdensburg, NY': 44.694285, 'Ogden, UT': 41.2230048, 'Oklahoma City, OK': 35.4729886, 'Omaha, NE': 41.2587459, 'Ontario, CA': 50.000678, 'Orlando, FL': 28.5421109, 'Owensboro, KY': 37.7742152, 'Paducah, KY': 37.0833893, 'Pago Pago, TT': -14.2754786, 'Palm Springs, CA': 33.772179449999996, 'Panama City, FL': 30.1600827, 'Pasco/Kennewick/Richland, WA': 46.1736015, 'Pellston, MI': 45.552789, 'Pensacola, FL': 30.421309, 'Peoria, IL': 40.6938609, 'Petersburg, AK': 56.8127965, 'Philadelphia, PA': 39.9527237, 'Phoenix, AZ': 33.4484367, 'Pierre, SD': 44.3683644, 'Pittsburgh, PA': 40.4416941, 'Plattsburgh, NY': 44.69282, 'Pocatello, ID': 42.8688613, 'Ponce, PR': 18.0039949, 'Portland, ME': 43.6610277, 'Portland, OR': 45.5202471, 'Portsmouth, NH': 43.0702223, 'Prescott, AZ': 34.5399962, 'Presque Isle/Houlton, ME': 46.661867799999996, 'Providence, RI': 41.8239891, 'Provo, UT': 40.2338438, 'Pueblo, CO': 10.961033, 'Pullman, WA': 46.7304268, 'Punta Gorda, FL': 26.9297836, 'Quincy, IL': 39.9356016, 'Raleigh/Durham, NC': 35.9217839, 'Rapid City, SD': 44.0869329, 'Redding, CA': 40.5863563, 'Reno, NV': 39.5261206, 'Rhinelander, WI': 
45.636623, 'Richmond, VA': 49.1977086, 'Roanoke, VA': 37.270973, 'Rochester, MN': 44.0234387, 'Rochester, NY': 43.157285, 'Rockford, IL': 42.2713945, 'Rock Springs, WY': 41.5869225, 'Roswell, NM': 33.3943282, 'Rota, TT': 66.947975, 'Sacramento, CA': 38.5810606, 'Saipan, TT': 7.0698398, 'Salina, KS': 38.8402805, 'Salisbury, MD': 38.3662114, 'Salt Lake City, UT': 40.7596198, 'San Angelo, TX': 31.4648357, 'San Antonio, TX': 29.4246002, 'San Diego, CA': 32.7174202, 'Sanford, FL': 28.8117297, 'San Francisco, CA': 37.7790262, 'San Jose, CA': 37.3361905, 'San Juan, PR': -25.4206759, 'San Luis Obispo, CA': 35.3540209, 'Santa Ana, CA': 33.7494951, 'Santa Barbara, CA': 34.4221319, 'Santa Fe, NM': 35.6869996, 'Santa Maria, CA': 34.9531295, 'Santa Rosa, CA': 38.4404925, 'Sarasota/Bradenton, FL': 27.499764300000002, 'Sault Ste. Marie, MI': 46.490586, 'Savannah, GA': 9.7568312, 'Scottsbluff, NE': 41.862302, 'Scranton/Wilkes-Barre, PA': 41.33709205, 'Seattle, WA': 47.6038321, 'Shreveport, LA': 32.5221828, 'Sioux City, IA': 42.4966815, 'Sioux Falls, SD': 43.549973, 'Sitka, AK': 57.0524973, 'South Bend, IN': 38.622348, 'Spokane, WA': 47.6571934, 'Springfield, IL': 39.7990175, 'Springfield, MO': 37.2166779, 'State College, PA': 40.7944504, 'Staunton, VA': 38.1357949, 'St. Cloud, MN': 45.5616075, 'St. George, UT': 37.104153, 'Stillwater, OK': 36.1156306, 'St. Louis, MO': 38.6529545, 'Stockton, CA': 37.9577016, 'St. Petersburg, FL': 27.7703796, 'Syracuse, NY': 43.0481221, 'Tallahassee, FL': 30.4380832, 'Tampa, FL': 27.9477595, 'Texarkana, AR': 33.4254684, 'Toledo, OH': 41.6529143, 'Traverse City, MI': 44.7606441, 'Trenton, NJ': 40.2170575, 'Tucson, AZ': 32.2228765, 'Tulsa, OK': 36.1556805, 'Twin Falls, ID': 42.5704456, 'Tyler, TX': 32.3512601, 'Unalaska, AK': 53.8722824, 'Valdosta, GA': 30.8327022, 'Valparaiso, FL': 30.5085309, 'Vernal, UT': 40.4556825, 'Waco, TX': 31.549333, 'Walla Walla, WA': 46.0667277, 'Washington, DC': 38.8949924, 'Waterloo, IA': 42.4979693, 'Watertown, NY': 43.9747838, 'Watertown, SD': 44.899211, 'Wenatchee, WA': 47.4234599, 'West Palm Beach/Palm Beach, FL': 26.715364, 'West Yellowstone, MT': 44.664290199999996, 'White Plains, NY': 41.0339862, 'Wichita Falls, TX': 33.9137085, 'Wichita, KS': 37.6922361, 'Williamsport, PA': 41.2493292, 'Williston, ND': 48.1465457, 'Wilmington, NC': 34.2257282, 'Worcester, MA': 42.2761217, 'Wrangell, AK': 56.4706022, 'Yakima, WA': 46.601557, 'Yakutat, AK': 59.572734499999996, 'Youngstown/Warren, OH': 41.22497, 'Yuma, AZ': 32.665135, 'Bristol/Johnson City/Kingsport, TN': 36.475201, 'Mission/McAllen/Edinburg, TX': 26.203407, 'New Bern/Morehead/Beaufort, NC': 35.108494, 'Hattiesburg/Laurel, MS': 31.467, 'Iron Mountain/Kingsfd, MI': 45.8146,'Newburgh/Poughkeepsie, NY': 41.66598, 'College Station/Bryan, TX': 30.601389, 'Saginaw/Bay City/Midland, MI': 43.4195, 'Newport News/Williamsburg, VA': 37.131900, 'Harlingen/San Benito, TX': 26.1326, 'Sun Valley/Hailey/Ketchum, ID': 43.504398}}, inplace=True) flights_test.replace({'destLong': {'Aberdeen, SD': -98.487813, 'Abilene, TX': -99.7475905, 'Adak Island, AK': -176.5734916431957, 'Aguadilla, PR': -67.1541343, 'Akron, OH': -81.518485, 'Albany, GA': -73.8016558, 'Albany, NY': -73.754968, 'Albuquerque, NM': -106.6509851, 'Alexandria, LA': 29.894378, 'Allentown/Bethlehem/Easton, PA': -75.44225386838299, 'Alpena, MI': -83.6670019, 'Amarillo, TX': -101.8338246, 'Anchorage, AK': -149.894852, 'Appleton, WI': -88.4067604, 'Arcata/Eureka, CA': -124.1535049, 'Asheville, NC': -82.5540161, 'Ashland, WV': -81.3526017, 'Aspen, 
CO': -106.8235606, 'Atlanta, GA': -84.3902644, 'Atlantic City, NJ': -74.4229351, 'Augusta, GA': 10.8933327, 'Austin, TX': -97.7436995, 'Bakersfield, CA': -119.0194639, 'Baltimore, MD': -76.610759, 'Bangor, ME': -68.7778138, 'Barrow, AK': -156.4809618, 'Baton Rouge, LA': -91.18738, 'Beaumont/Port Arthur, TX': -93.985972, 'Belleville, IL': 6.0982683, 'Bellingham, WA': -122.4788361, 'Bemidji, MN': -94.8907869, 'Bend/Redmond, OR': -121.2150324, 'Bethel, AK': -161.7558333, 'Billings, MT': -108.49607, 'Binghamton, NY': -75.914341, 'Birmingham, AL': -1.8237251, 'Bismarck/Mandan, ND': -100.8363564, 'Bloomington/Normal, IL': -88.9844947, 'Boise, ID': -116.200886, 'Boston, MA': -71.0582912, 'Bozeman, MT': -111.044047, 'Brainerd, MN': -94.2008288, 'Branson, MO': -93.2175285, 'Brownsville, TX': -97.4890856, 'Brunswick, GA': 10.560215, 'Buffalo, NY': -78.8783922, 'Bullhead City, AZ': -114.5682983, 'Burbank, CA': -118.3258554, 'Burlington, VT': -73.212906, 'Butte, MT': -121.5858444, 'Cape Girardeau, MO': -89.5230357, 'Casper, WY': -106.3254928, 'Cedar City, UT': -113.0618277, 'Cedar Rapids/Iowa City, IA': -91.6704053, 'Champaign/Urbana, IL': -88.241194, 'Charleston/Dunbar, WV': -81.7207214, 'Charleston, SC': -79.9402728, 'Charlotte Amalie, VI': -64.932789, 'Charlotte, NC': -80.8430827, 'Charlottesville, VA': -78.49973472559668, 'Chattanooga, TN': -85.3094883, 'Cheyenne, WY': -104.820246, 'Chicago, IL': -87.6244212, 'Christiansted, VI': -64.7079823, 'Cincinnati, OH': -84.5124602, 'Clarksburg/Fairmont, WV': -80.3300893, 'Cleveland, OH': -81.6934446, 'Cody, WY': -109.0563923, 'Colorado Springs, CO': -104.8253485, 'Columbia, MO': -92.3337366, 'Columbia, SC': -81.0343313, 'Columbus, GA': -83.0765043, 'Columbus, MS': -88.4272627, 'Columbus, OH': -83.0007065, 'Concord, NC': -80.5800049, 'Cordova, AK': -145.7589103, 'Corpus Christi, TX': -97.4014129, 'Dallas/Fort Worth, TX': -97.3135971, 'Dallas, TX': -96.7968559, 'Daytona Beach, FL': -81.0228331, 'Dayton, OH': -84.1916069, 'Deadhorse, AK': -148.4598151, 'Del Rio, TX': -100.8946984, 'Denver, CO': -72.3959849, 'Des Moines, IA': -93.6046655, 'Detroit, MI': -83.0466403, 'Devils Lake, ND': -98.86512, 'Dickinson, ND': -102.7896242, 'Dillingham, AK': -158.4575, 'Dothan, AL': -85.3933906, 'Dubuque, IA': -90.6647967, 'Duluth, MN': -92.1251218, 'Durango, CO': -104.833333, 'Eagle, CO': -106.7172844, 'Eau Claire, WI': -91.4984941, 'Elko, NV': -115.3272864, 'Elmira/Corning, NY': -76.89199038453467, 'El Paso, TX': -106.464634, 'Erie, PA': -80.0852695, 'Escanaba, MI': -87.0647434, 'Eugene, OR': -123.0950506, 'Evansville, IN': -87.518899, 'Everett, WA': -122.2013998, 'Fairbanks, AK': -147.716675, 'Fargo, ND': -96.789821, 'Fayetteville, AR': -94.1574328, 'Fayetteville, NC': -78.878292, 'Flagstaff, AZ': -111.6165953319917, 'Flint, MI': -83.6900211, 'Florence, SC': -79.7671658, 'Fort Lauderdale, FL': -80.1433786, 'Fort Myers, FL': -81.8723084, 'Fort Smith, AR': -94.4248983, 'Fort Wayne, IN': -85.1386015, 'Fresno, CA': -119.7848307, 'Gainesville, FL': -82.3249846, 'Garden City, KS': -100.8726618, 'Gillette, WY': -105.501876, 'Grand Forks, ND': -97.0592028, 'Grand Island, NE': -98.338685, 'Grand Junction, CO': -108.5507317, 'Grand Rapids, MI': -85.6678639, 'Great Falls, MT': -111.29189, 'Green Bay, WI': -88.0125794, 'Greensboro/High Point, NC': -79.7919754, 'Greenville, NC': -77.3724593, 'Greer, SC': -82.2272119, 'Guam, TT': 144.80206025352555, 'Gulfport/Biloxi, MS': -89.0290044, 'Gunnison, CO': -107.0603126, 'Gustavus, AK': -135.7375654, 'Hagerstown, MD': -77.7202641, 
'Hancock/Houghton, MI': -88.580956, 'Harrisburg, PA': -76.8861122, 'Hartford, CT': -72.69061276146614, 'Hayden, CO': -116.82675375791398, 'Hays, KS': -99.3267702, 'Helena, MT': -112.036277, 'Hibbing, MN': -92.937689, 'Hilo, HI': -155.0815803, 'Hilton Head, SC': -99.748119, 'Hobbs, NM': -103.1311314, 'Honolulu, HI': -157.8556764, 'Hoolehua, HI': -157.09484723911947, 'Houston, TX': -95.3676974, 'Huntsville, AL': -86.5859011, 'Hyannis, MA': -70.2825918, 'Idaho Falls, ID': -112.0400919, 'Indianapolis, IN': -86.0519568269157, 'International Falls, MN': -93.4105904, 'Islip, NY': -73.2108618, 'Ithaca/Cortland, NY': -76.4580207, 'Jackson/Vicksburg, MS': -90.8730418, 'Jacksonville/Camp Lejeune, NC': -77.4457643, 'Jacksonville, FL': -81.655651, 'Jackson, WY': -90.1847691, 'Jamestown, ND': -98.708436, 'Joplin, MO': -94.51323, 'Juneau, AK': -134.419734, 'Kahului, HI': -156.4529879461996, 'Kalamazoo, MI': -85.5872286, 'Kalispell, MT': -114.316711, 'Kansas City, MO': -94.5781416, 'Kapalua, HI': -156.6562339558182, 'Kearney, NE': -98.9472344, 'Ketchikan, AK': -131.6466819, 'Key West, FL': -81.7724368, 'Killeen, TX': -97.727796, 'King Salmon, AK': -156.5192469940953, 'Knoxville, TN': -83.9210261, 'Kodiak, AK': -152.4072222, 'Kona, HI': -156.0422959812206, 'Kotzebue, AK': -162.5977621, 'La Crosse, WI': -91.2395429, 'Lafayette, LA': -92.0198427, 'Lake Charles, LA': -93.2173759, 'Lanai, HI': -156.9029492509114, 'Lansing, MI': -84.5553805, 'Laramie, WY': -105.591101, 'Laredo, TX': -99.4953764, 'Las Vegas, NV': -115.1485163, 'Latrobe, PA': -79.3840301, 'Lawton/Fort Sill, OK': -98.4037888, 'Lewisburg, WV': -80.4456303, 'Lewiston, ID': -117.0216144, 'Lexington, KY': -84.4970393, 'Liberal, KS': -100.920999, 'Lihue, HI': -159.3687721, 'Lincoln, NE': -96.7077751, 'Little Rock, AR': -92.2895948, 'Long Beach, CA': -118.191604, 'Longview, TX': -94.74049, 'Los Angeles, CA': -118.242766, 'Louisville, KY': -85.759407, 'Lubbock, TX': -101.879336, 'Lynchburg, VA': -79.1422464, 'Madison, WI': -89.3837613, 'Mammoth Lakes, CA': -118.9668509, 'Manchester, NH': -71.4547891, 'Manhattan/Ft. 
Riley, KS': -73.9222899, 'Marquette, MI': -87.6305899, "Martha's Vineyard, MA": -70.62085427857699, 'Medford, OR': -122.8718605, 'Melbourne, FL': -80.6371513, 'Memphis, TN': -90.0516285, 'Meridian, MS': -88.703656, 'Miami, FL': -80.19362, 'Midland/Odessa, TX': -102.3606957, 'Milwaukee, WI': -87.922497, 'Minneapolis, MN': -93.2654692, 'Minot, ND': -101.296273, 'Missoula, MT': -113.995267, 'Moab, UT': -109.5462146, 'Mobile, AL': -88.0430541, 'Moline, IL': -90.5151342, 'Monroe, LA': -90.1792484, 'Monterey, CA': -121.3877428, 'Montgomery, AL': -86.3107669425032, 'Montrose/Delta, CO': -108.226467, 'Mosinee, WI': -89.7035959, 'Muskegon, MI': -86.2483921, 'Myrtle Beach, SC': -78.8900409, 'Nantucket, MA': -70.14287301528347, 'Nashville, TN': -86.7743531, 'Newark, NJ': -74.1723667, 'New Haven, CT': -72.93102342707913, 'New Orleans, LA': -90.0701156, 'New York, NY': -74.0060152, 'Niagara Falls, NY': -79.0614686, 'Nome, AK': -165.39879944316317, 'Norfolk, VA': 1.2623608080231654, 'North Bend/Coos Bay, OR': -124.2242824, 'North Platte, NE': -100.7654232, 'Oakland, CA': -122.2713563, 'Ogdensburg, NY': -75.486374, 'Ogden, UT': -111.9738429, 'Oklahoma City, OK': -97.5170536, 'Omaha, NE': -95.9383758, 'Ontario, CA': -86.000977, 'Orlando, FL': -81.3790304, 'Owensboro, KY': -87.1133304, 'Paducah, KY': -88.6000478, 'Pago Pago, TT': -170.7048298, 'Palm Springs, CA': -116.49529769785079, 'Panama City, FL': -85.6545729, 'Pasco/Kennewick/Richland, WA': -119.0664001, 'Pellston, MI': -84.783936, 'Pensacola, FL': -87.2169149, 'Peoria, IL': -89.5891008, 'Petersburg, AK': -132.95547, 'Philadelphia, PA': -75.1635262, 'Phoenix, AZ': -112.0741417, 'Pierre, SD': -100.3511367, 'Pittsburgh, PA': -79.9900861, 'Plattsburgh, NY': -73.45562, 'Pocatello, ID': -112.4401098, 'Ponce, PR': -66.6169509, 'Portland, ME': -70.2548596, 'Portland, OR': -122.6741949, 'Portsmouth, NH': -70.7548621, 'Prescott, AZ': -112.4687616, 'Presque Isle/Houlton, ME': -68.01074889363161, 'Providence, RI': -71.4128343, 'Provo, UT': -111.6585337, 'Pueblo, CO': -74.84053554739253, 'Pullman, WA': -117.173895, 'Punta Gorda, FL': -82.0453664, 'Quincy, IL': -91.4098727, 'Raleigh/Durham, NC': -78.76087880585929, 'Rapid City, SD': -103.2274481, 'Redding, CA': -122.3916754, 'Reno, NV': -119.8126581, 'Rhinelander, WI': -89.412075, 'Richmond, VA': -123.1912406, 'Roanoke, VA': -79.9414313, 'Rochester, MN': -92.4630182, 'Rochester, NY': -77.615214, 'Rockford, IL': -89.093966, 'Rock Springs, WY': -109.2047867, 'Roswell, NM': -104.5229518, 'Rota, TT': 13.553736, 'Sacramento, CA': -121.4938951, 'Saipan, TT': 125.5116649, 'Salina, KS': -97.6114237, 'Salisbury, MD': -75.6008881, 'Salt Lake City, UT': -111.8867975, 'San Angelo, TX': -100.4398442, 'San Antonio, TX': -98.4951405, 'San Diego, CA': -117.1627728, 'Sanford, FL': -81.2680345, 'San Francisco, CA': -122.4199061, 'San Jose, CA': -121.890583, 'San Juan, PR': -49.2687428522959, 'San Luis Obispo, CA': -120.3757163, 'Santa Ana, CA': -117.8732213, 'Santa Barbara, CA': -119.7026673, 'Santa Fe, NM': -105.9377997, 'Santa Maria, CA': -120.4358577, 'Santa Rosa, CA': -122.7141049, 'Sarasota/Bradenton, FL': -82.56510160912002, 'Sault Ste. 
Marie, MI': -84.359269, 'Savannah, GA': -2.4962, 'Scottsbluff, NE': -103.6627088, 'Scranton/Wilkes-Barre, PA': -75.72257122928625, 'Seattle, WA': -122.3300624, 'Shreveport, LA': -93.7651944, 'Sioux City, IA': -96.4058782, 'Sioux Falls, SD': -96.7003324, 'Sitka, AK': -135.337612, 'South Bend, IN': -105.518825, 'Spokane, WA': -117.4235106, 'Springfield, IL': -89.6439575, 'Springfield, MO': -93.2920373, 'State College, PA': -77.8616386, 'Staunton, VA': -79.08927008810585, 'St. Cloud, MN': -94.1642004, 'St. George, UT': -113.5841313, 'Stillwater, OK': -97.0585717, 'St. Louis, MO': -90.24111656024635, 'Stockton, CA': -121.2907796, 'St. Petersburg, FL': -82.6695085, 'Syracuse, NY': -76.1474244, 'Tallahassee, FL': -84.2809332, 'Tampa, FL': -82.458444, 'Texarkana, AR': -94.0430977, 'Toledo, OH': -83.5378173, 'Traverse City, MI': -85.6165301, 'Trenton, NJ': -74.7429463, 'Tucson, AZ': -110.9748477, 'Tulsa, OK': -95.9929113, 'Twin Falls, ID': -114.4602554, 'Tyler, TX': -95.3010624, 'Unalaska, AK': -166.5272262, 'Valdosta, GA': -83.2784851, 'Valparaiso, FL': -86.5027282, 'Vernal, UT': -109.5284741, 'Waco, TX': -97.1466695, 'Walla Walla, WA': -118.3393456, 'Washington, DC': -77.0365581, 'Waterloo, IA': -92.3329637, 'Watertown, NY': -75.9107565, 'Watertown, SD': -97.115289, 'Wenatchee, WA': -120.3103494, 'West Palm Beach/Palm Beach, FL': -80.0532942, 'West Yellowstone, MT': -111.10513722509046, 'White Plains, NY': -73.7629097, 'Wichita Falls, TX': -98.4933873, 'Wichita, KS': -97.3375448, 'Williamsport, PA': -77.0027671, 'Williston, ND': -103.621814, 'Wilmington, NC': -77.9447107, 'Worcester, MA': -71.8058232, 'Wrangell, AK': -132.3829431, 'Yakima, WA': -120.5108421, 'Yakutat, AK': -139.57831243878087, 'Youngstown/Warren, OH': -80.789606, 'Yuma, AZ': -114.47603157249804, 'Bristol/Johnson City/Kingsport, TN': -82.407401, 'Mission/McAllen/Edinburg, TX': -98.230011, 'New Bern/Morehead/Beaufort, NC': -77.044113, 'Hattiesburg/Laurel, MS': -89.3331, 'Iron Mountain/Kingsfd, MI': -88.1186,'Newburgh/Poughkeepsie, NY': -73.884201, 'College Station/Bryan, TX': -96.314445, 'Saginaw/Bay City/Midland, MI': -83.9508, 'Newport News/Williamsburg, VA': -76.492996, 'Harlingen/San Benito, TX': -97.6311, 'Sun Valley/Hailey/Ketchum, ID': -114.2959976}}, inplace=True) #Converting the planned departure time from 24 hours to a more catagorical variable, which captures an flights_test['crs_dep_time'] = (flights_test['crs_dep_time']/100).astype(int) flights_test['crs_arr_time'] = (flights_test['crs_arr_time']/100).astype(int) #Convert fl_date to Datetime, then just month number to account for higher delays within certain months monthDummies = pd.get_dummies(pd.to_datetime(flights_test.fl_date , format="%Y-%m-%d").dt.strftime('%B')) dayDummies = pd.get_dummies(pd.to_datetime(flights_test.fl_date , format="%Y-%m-%d").dt.strftime('%A')) #Creating dummy variables for carriers to account for delays related to certain carriers then concat these dummies onto newTrain mktCarrierDummies = pd.get_dummies(flights_test['mkt_unique_carrier']) # opCarrierDummies = pd.get_dummies(newTrain['op_unique_carrier']) flights_test = pd.concat([flights_test, mktCarrierDummies, monthDummies, dayDummies], axis=1) #tes without these dummies then swap and check results #op dummies was giving better results than mkt dummies flights_test['distanceSQ'] = flights_test['distance']**2 flights_test['originLong*Lat'] = flights_test['originLong']*flights_test['originLat'] flights_test['originLongSQ'] = flights_test['originLong']**2 flights_test['originLatSQ'] = 
flights_test['originLat']**2
flights_test['Month_Avg_Arr_DelaySQ'] = flights_test['Month_Avg_Arr_Delay']**2

#Add month dummy columns not present in the test data (set to 0) so all twelve month features exist
flights_test['February'] = 0
flights_test['March'] = 0
flights_test['April'] = 0
flights_test['May'] = 0
flights_test['June'] = 0
flights_test['July'] = 0
flights_test['August'] = 0
flights_test['September'] = 0
flights_test['October'] = 0
flights_test['November'] = 0
flights_test['December'] = 0

#Keep the identifying columns needed for the submission file
submission = flights_test[['fl_date', 'mkt_carrier', 'mkt_carrier_fl_num', 'origin', 'dest']]

#Assign X
X_finaltest = flights_test.drop(columns = ['fl_date', 'mkt_unique_carrier', 'branded_code_share', 'mkt_carrier', 'mkt_carrier_fl_num', 'op_unique_carrier','tail_num', 'op_carrier_fl_num', 'origin_airport_id', 'origin', 'origin_city_name', 'dest_airport_id', 'dest','dest_city_name', 'dup', 'crs_elapsed_time', 'flights'])
X_finaltest = X_finaltest[['crs_dep_time', 'crs_arr_time','distance','Month_Avg_Arr_Delay','Month_Avg_Dep_Delay', 'Avg_Taxi_In_Carrier','Avg_Taxi_Out_Carrier','originLat','originLong','destLat','destLong', 'AA','AS','B6','DL','F9','G4','HA','NK','UA','WN','April','August','December', 'February', 'January', 'July', 'June', 'March', 'May','November', 'October', 'September', 'Friday', 'Monday','Saturday','Sunday', 'Thursday', 'Tuesday', 'Wednesday', 'distanceSQ','originLong*Lat','originLongSQ','originLatSQ','Month_Avg_Arr_DelaySQ']]
X_finaltest

#Scale the features due to differing units of measurement (note that the scaler is refit on the test features here)
X_finaltest = scaler.fit_transform(X_finaltest)

#Predict delays with the xg_reg model and write them out with the identifying columns
predicted_delay = xg_reg.predict(X_finaltest)
len(predicted_delay)
submission
submission['predicted_delay'] = predicted_delay
submission.to_csv('submission.csv')
```
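The city-to-coordinate dictionaries used above (and again in the training-data preparation below) were, per the comments later in this document, aggregated with geopy's Nominatim geocoder and partly entered by hand because of API call limits. As a rough, minimal sketch of how such a lookup table could be built programmatically — the helper name, `user_agent` string and one-second pause are illustrative assumptions, not part of the original notebook:

```
# Sketch only: build a {city: (lat, long)} lookup with geopy instead of hard-coding it.
# Assumes geopy is installed; the user_agent value and pause length are arbitrary choices.
import time
from geopy.geocoders import Nominatim

def build_coordinate_lookup(city_names, pause=1.0):
    """Return {city_name: (latitude, longitude)} for every city Nominatim can resolve."""
    geolocator = Nominatim(user_agent="flight-delay-notebook")
    lookup = {}
    for name in city_names:
        location = geolocator.geocode(name)
        if location is not None:
            lookup[name] = (location.latitude, location.longitude)
        time.sleep(pause)  # stay under Nominatim's rate limits (the "API call limits" mentioned below)
    return lookup

# Example: coords = build_coordinate_lookup(['Seattle, WA', 'Chicago, IL', 'Miami, FL'])
```

Bare city names are ambiguous to a geocoder, so a query like 'Birmingham, AL' can resolve to a foreign namesake (several of the hard-coded values above appear to have done exactly that); constraining the query, for example by appending ', USA', is worth considering if the table is regenerated.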
import pandas as pd import numpy as np from sklearn.model_selection import train_test_split, KFold, GridSearchCV, cross_val_score from sklearn import metrics from sklearn.metrics import mean_squared_error, r2_score from sklearn import preprocessing from sklearn.preprocessing import StandardScaler scaler = StandardScaler() import xgboost as xgb newTrain = pd.read_csv('Datacsv/New_trainData.csv') avg_delay = pd.read_csv('Datacsv/Avg_Arr_Dep_Delay.csv') carrier_delay = pd.read_csv('Datacsv/avg_op_unique_carrier_delay.csv') flights_test = pd.read_csv('Datacsv/flights_test_data.csv') #Here we are adding Average Arrival Delay relative to the month #Start by changing the date from object to datetime avg_delay['fl_date'] = pd.to_datetime(avg_delay['fl_date'], format='%Y-%m-%d') #Groupby to compare monthly averages in delays #NOTE: Negative values (early arrivals) ARE INCLUDED month_arr = avg_delay.groupby(avg_delay['fl_date'].dt.strftime('%m'))['avg_arr_delay'].mean() month_arr = month_arr.to_frame() month_dep = avg_delay.groupby(avg_delay['fl_date'].dt.strftime('%m'))['avg_dep_delay'].mean() month_dep = month_dep.to_frame() #Resetting the index month_arr = month_arr.reset_index() month_dep = month_dep.reset_index() #Creating 2 copies of fl_date, extracting the month in order to replace the month with its respective Average Arrival and/or Departure Delay newTrain['Month_Avg_Arr_Delay'] = pd.to_datetime(newTrain.fl_date , format="%Y-%m-%d").dt.month newTrain['Month_Avg_Dep_Delay'] = pd.to_datetime(newTrain.fl_date , format="%Y-%m-%d").dt.month #Creating a dictionary containing descriptive statistics with months as keys and their respective average arrival/departure delay as values month_arr_dict = dict(zip(month_arr.fl_date, month_arr.avg_arr_delay)) month_dep_dict = dict(zip(month_dep.fl_date, month_dep.avg_dep_delay)) #Replacing the values of the copied fl_date features with their respective average arrival/departure delays newTrain.replace({'Month_Avg_Arr_Delay': {1: 3.9281951759577782, 2: 6.670705822847316, 3: 2.854581405409215, 4: 4.177950054675787, 5: 6.416833084409337, 6: 10.393455353404956, 7: 8.910038151256863, 8: 8.847961842961464, 9: 1.5852627540712663, 10: 2.7923909776573588, 11: 2.757202900691894, 12: 4.815971225866452}}, inplace=True) newTrain.replace({'Month_Avg_Dep_Delay': {1: 9.82808600285777, 2: 11.689433403570048, 3: 8.45752678421839, 4: 9.375029826488923, 5: 11.283686509030298, 6: 14.629757423341372, 7: 13.770582924983167, 8: 13.279282347021876, 9: 6.900262796528355, 10: 7.502918821697483, 11: 8.049444482526964, 12: 10.62795705344142}}, inplace=True) #Groupby to account for taxi in/out based on carriers which appeared to have the largest cross variance Avg_Taxi_Out_Carrier = newTrain['taxi_out'].groupby(newTrain['op_unique_carrier']).mean().reset_index() Avg_Taxi_In_Carrier = newTrain['taxi_in'].groupby(newTrain['op_unique_carrier']).mean().reset_index() #Create dictionary filled with op_unique_carrier as keys and the mean taxi in and out times as values taxi_out_dict = dict(zip(Avg_Taxi_Out_Carrier.op_unique_carrier, Avg_Taxi_Out_Carrier.taxi_out)) taxi_in_dict = dict(zip(Avg_Taxi_In_Carrier.op_unique_carrier, Avg_Taxi_In_Carrier.taxi_in)) #Creating two copies of op_unique_carrier to replace the values with the carrier's respective average taxi in and out time newTrain['Avg_Taxi_In_Carrier'] = newTrain['op_unique_carrier'] newTrain['Avg_Taxi_Out_Carrier'] = newTrain['op_unique_carrier'] #Replacing the Carrier codes in copied features with their respective average taxi in and 
out times. newTrain.replace({'Avg_Taxi_In_Carrier': {'9E': 7.360715045754416, '9K': 4.714285714285714, 'AA': 9.445789265313048, 'AS': 8.082283095510885, 'AX': 7.877306903622693, 'B6': 7.36336976806185, 'C5': 8.20173646578141, 'CP': 9.47292817679558, 'DL': 7.542487551418056, 'EM': 4.005050505050505, 'EV': 8.146282587705182, 'F9': 10.15011596036264, 'G4': 6.785416666666666, 'G7': 7.6468788249694, 'HA': 7.200770960488275, 'KS': 3.617021276595745, 'MQ': 8.747318339100346, 'NK': 9.849809617825413, 'OH': 8.452057416267943, 'OO': 7.693122041031036, 'PT': 8.16294088425236, 'QX': 5.72971114167813, 'UA': 7.847001223990208, 'VX': 8.774086378737541, 'WN': 5.293501334008452, 'YV': 7.493231100994369, 'YX': 8.656821963394343, 'ZW': 8.605810234541577}}, inplace=True) newTrain.replace({'Avg_Taxi_Out_Carrier': {'9E': 21.49329644605235, '9K': 8.785714285714286, 'AA': 18.694389457609862, 'AS': 18.991042599729195, 'AX': 20.173615857826384, 'B6': 17.75419888029859, 'C5': 24.258426966292134, 'CP': 18.9292817679558, 'DL': 17.24063650140723, 'EM': 8.146464646464647, 'EV': 20.229320888316703, 'F9': 16.60278304870335, 'G4': 13.095052083333334, 'G7': 19.86689106487148, 'HA': 11.959524574365563, 'KS': 5.872340425531915, 'MQ': 18.889359861591696, 'NK': 15.177690029615006, 'OH': 17.736363636363638, 'OO': 19.763907154129406, 'PT': 20.783904619970194, 'QX': 13.661393856029344, 'UA': 19.814797619550077, 'VX': 21.036544850498338, 'WN': 12.319694638649244, 'YV': 17.57553612076195, 'YX': 21.11281198003328, 'ZW': 19.840618336886994}}, inplace=True) #Create 4 copies of origin_city_name feature to replace the current values with their respective longtitude and latitude values newTrain['originLat'] = newTrain['origin_city_name'] newTrain['originLong'] = newTrain['origin_city_name'] newTrain['destLat'] = newTrain['dest_city_name'] newTrain['destLong'] = newTrain['dest_city_name'] #Replacing the City names with their longitude and latitude values #Geopy (from geopy.geocoders import Nominatim) was used in the aggregation of these values, but some had to manually encoded due to API call limits newTrain.replace({'originLat': {'Aberdeen, SD': 45.4649805, 'Abilene, TX': 32.44645, 'Adak Island, AK': 51.7961654, 'Aguadilla, PR': 18.4274359, 'Akron, OH': 41.083064, 'Albany, GA': 42.7439143, 'Albany, NY': 42.6511674, 'Albuquerque, NM': 35.0841034, 'Alexandria, LA': 31.199004, 'Allentown/Bethlehem/Easton, PA': 40.651163100000005, 'Alpena, MI': 45.0176181, 'Amarillo, TX': 35.2072185, 'Anchorage, AK': 61.2163129, 'Appleton, WI': 44.2611337, 'Arcata/Eureka, CA': 40.8033073, 'Asheville, NC': 35.6009498, 'Ashland, WV': 37.4084488, 'Aspen, CO': 39.1911128, 'Atlanta, GA': 33.7489924, 'Atlantic City, NJ': 39.3642852, 'Augusta, GA': 48.3689438, 'Austin, TX': 30.2711286, 'Bakersfield, CA': 35.3738712, 'Baltimore, MD': 39.2908816, 'Bangor, ME': 44.8011821, 'Barrow, AK': 71.387113, 'Baton Rouge, LA': 30.4459596, 'Beaumont/Port Arthur, TX': 29.954324, 'Belleville, IL': 48.8176714, 'Bellingham, WA': 48.7544012, 'Bemidji, MN': 47.4785418, 'Bend/Redmond, OR': 44.2165084, 'Bethel, AK': 60.7922222, 'Billings, MT': 45.7874957, 'Binghamton, NY': 42.096968, 'Birmingham, AL': 52.4459629, 'Bismarck/Mandan, ND': 46.8101709, 'Bloomington/Normal, IL': 40.508752, 'Boise, ID': 43.6166163, 'Boston, MA': 42.3602534, 'Bozeman, MT': 45.6794293, 'Brainerd, MN': 46.3580221, 'Branson, MO': 36.6411357, 'Brownsville, TX': 25.9140256, 'Brunswick, GA': 52.3175903, 'Buffalo, NY': 42.8867166, 'Bullhead City, AZ': 35.1477774, 'Burbank, CA': 34.1816482, 'Burlington, VT': 44.4761601, 
'Butte, MT': 39.6519275, 'Cape Girardeau, MO': 37.3034933, 'Casper, WY': 42.849709, 'Cedar City, UT': 37.6774238, 'Cedar Rapids/Iowa City, IA': 41.9758872, 'Champaign/Urbana, IL': 40.1157948, 'Charleston/Dunbar, WV': 38.3616659, 'Charleston, SC': 32.7876012, 'Charlotte Amalie, VI': 18.341137, 'Charlotte, NC': 35.2272086, 'Charlottesville, VA': 38.0360726, 'Chattanooga, TN': 35.0457219, 'Cheyenne, WY': 41.139981, 'Chicago, IL': 41.8755616, 'Christiansted, VI': 17.7439481, 'Cincinnati, OH': 39.1014537, 'Clarksburg/Fairmont, WV': 39.2798118, 'Cleveland, OH': 41.5051613, 'Cody, WY': 44.5263107, 'Colorado Springs, CO': 38.8339578, 'Columbia, MO': 38.951883, 'Columbia, SC': 34.0007493, 'Columbus, GA': 40.0838862, 'Columbus, MS': 33.4956744, 'Columbus, OH': 39.9622601, 'Concord, NC': 35.4094178, 'Cordova, AK': 60.5439444, 'Corpus Christi, TX': 27.7477253, 'Dallas/Fort Worth, TX': 32.7476308, 'Dallas, TX': 32.7762719, 'Daytona Beach, FL': 29.2108147, 'Dayton, OH': 39.7589478, 'Deadhorse, AK': 70.2006973, 'Del Rio, TX': 29.3655405, 'Denver, CO': 5.3428475, 'Des Moines, IA': 41.5910323, 'Detroit, MI': 42.3315509, 'Devils Lake, ND': 48.112779, 'Dickinson, ND': 46.8791756, 'Dillingham, AK': 59.0397222, 'Dothan, AL': 31.2237434, 'Dubuque, IA': 42.5006217, 'Duluth, MN': 46.7729322, 'Durango, CO': 24.833333, 'Eagle, CO': 39.6161124, 'Eau Claire, WI': 44.811349, 'Elko, NV': 41.1958128, 'Elmira/Corning, NY': 42.1608441, 'El Paso, TX': 31.7754152, 'Erie, PA': 42.1294712, 'Escanaba, MI': 45.7455707, 'Eugene, OR': 44.0505054, 'Evansville, IN': 37.9386712, 'Everett, WA': 47.9673056, 'Fairbanks, AK': 64.837845, 'Fargo, ND': 46.877229, 'Fayetteville, AR': 36.0625843, 'Fayetteville, NC': 35.0525759, 'Flagstaff, AZ': 35.1816047, 'Flint, MI': 43.0161693, 'Florence, SC': 34.1984435, 'Fort Lauderdale, FL': 26.1223084, 'Fort Myers, FL': 26.640628, 'Fort Smith, AR': 35.3872218, 'Fort Wayne, IN': 41.0799898, 'Fresno, CA': 36.7394421, 'Gainesville, FL': 29.6519684, 'Garden City, KS': 37.9716898, 'Gillette, WY': 44.290635, 'Grand Forks, ND': 47.9078244, 'Grand Island, NE': 40.924271, 'Grand Junction, CO': 39.063956, 'Grand Rapids, MI': 42.9632405, 'Great Falls, MT': 47.5048851, 'Green Bay, WI': 44.5126379, 'Greensboro/High Point, NC': 36.0726355, 'Greenville, NC': 35.613224, 'Greer, SC': 34.9381361, 'Guam, TT': 13.486490199999999, 'Gulfport/Biloxi, MS': 30.4900534, 'Gunnison, CO': 38.6476702, 'Gustavus, AK': 58.4128377, 'Hagerstown, MD': 39.6419219, 'Hancock/Houghton, MI': 47.126871, 'Harrisburg, PA': 40.2663107, 'Hartford, CT': 41.7655582, 'Hayden, CO': 47.7725145, 'Hays, KS': 38.8791783, 'Helena, MT': 46.5927425, 'Hibbing, MN': 47.427155, 'Hilo, HI': 19.7073734, 'Hilton Head, SC': 32.3836213, 'Hobbs, NM': 32.707667, 'Honolulu, HI': 21.304547, 'Hoolehua, HI': 21.1590908, 'Houston, TX': 29.7589382, 'Huntsville, AL': 34.729847, 'Hyannis, MA': 41.651513, 'Idaho Falls, ID': 43.4935245, 'Indianapolis, IN': 39.9164009, 'International Falls, MN': 48.601033, 'Islip, NY': 40.7304311, 'Ithaca/Cortland, NY': 42.4415242, 'Jackson/Vicksburg, MS': 32.3520532, 'Jacksonville/Camp Lejeune, NC': 34.7338577, 'Jacksonville, FL': 30.3321838, 'Jackson, WY': 32.2990384, 'Jamestown, ND': 46.910544, 'Joplin, MO': 37.08418, 'Juneau, AK': 58.3019496, 'Kahului, HI': 20.8747708, 'Kalamazoo, MI': 42.291707, 'Kalispell, MT': 48.2022563, 'Kansas City, MO': 39.100105, 'Kapalua, HI': 20.99490395, 'Kearney, NE': 40.4906216, 'Ketchikan, AK': 55.3430696, 'Key West, FL': 24.5625566, 'Killeen, TX': 31.1171441, 'King Salmon, AK': 58.7551615, 'Knoxville, TN': 
35.9603948, 'Kodiak, AK': 57.79, 'Kona, HI': 19.743906, 'Kotzebue, AK': 66.8982057, 'La Crosse, WI': 43.8014053, 'Lafayette, LA': 30.2240897, 'Lake Charles, LA': 30.2265949, 'Lanai, HI': 20.830544099999997, 'Lansing, MI': 42.7337712, 'Laramie, WY': 41.311367, 'Laredo, TX': 27.5199841, 'Las Vegas, NV': 36.1672559, 'Latrobe, PA': 40.317287, 'Lawton/Fort Sill, OK': 34.6172103, 'Lewisburg, WV': 37.8017879, 'Lewiston, ID': 46.4195913, 'Lexington, KY': 38.0464066, 'Liberal, KS': 37.0430812, 'Lihue, HI': 21.9769622, 'Lincoln, NE': 40.8088861, 'Little Rock, AR': 34.7464809, 'Long Beach, CA': 33.7690164, 'Longview, TX': 32.5007031, 'Los Angeles, CA': 34.0536909, 'Louisville, KY': 38.2542376, 'Lubbock, TX': 33.5635206, 'Lynchburg, VA': 37.4137536, 'Madison, WI': 43.074761, 'Mammoth Lakes, CA': 37.6432525, 'Manchester, NH': 42.9956397, 'Manhattan/Ft. Riley, KS': 40.8576918, 'Marquette, MI': 46.4481521, "Martha's Vineyard, MA": 41.3918832, 'Medford, OR': 42.3264181, 'Melbourne, FL': 28.106471, 'Memphis, TN': 35.1490215, 'Meridian, MS': 32.3643098, 'Miami, FL': 25.7741728, 'Midland/Odessa, TX': 31.8329723, 'Milwaukee, WI': 43.0349931, 'Minneapolis, MN': 44.9772995, 'Minot, ND': 48.23251, 'Missoula, MT': 46.8701049, 'Moab, UT': 38.5738096, 'Mobile, AL': 30.6943566, 'Moline, IL': 41.5067003, 'Monroe, LA': 38.2722313, 'Monterey, CA': 36.2231079, 'Montgomery, AL': 32.379952849999995, 'Montrose/Delta, CO': 38.8777609, 'Mosinee, WI': 44.7927298, 'Muskegon, MI': 43.2341813, 'Myrtle Beach, SC': 33.6956461, 'Nantucket, MA': 41.316911450000006, 'Nashville, TN': 36.1622296, 'Newark, NJ': 40.735657, 'New Haven, CT': 41.298434349999994, 'New Orleans, LA': 29.9499323, 'New York, NY': 40.7127281, 'Niagara Falls, NY': 43.08436, 'Nome, AK': 64.4989922, 'Norfolk, VA': 52.56365215, 'North Bend/Coos Bay, OR': 43.4065089, 'North Platte, NE': 41.1238873, 'Oakland, CA': 37.8044557, 'Ogdensburg, NY': 44.694285, 'Ogden, UT': 41.2230048, 'Oklahoma City, OK': 35.4729886, 'Omaha, NE': 41.2587459, 'Ontario, CA': 50.000678, 'Orlando, FL': 28.5421109, 'Owensboro, KY': 37.7742152, 'Paducah, KY': 37.0833893, 'Pago Pago, TT': -14.2754786, 'Palm Springs, CA': 33.772179449999996, 'Panama City, FL': 30.1600827, 'Pasco/Kennewick/Richland, WA': 46.1736015, 'Pellston, MI': 45.552789, 'Pensacola, FL': 30.421309, 'Peoria, IL': 40.6938609, 'Petersburg, AK': 56.8127965, 'Philadelphia, PA': 39.9527237, 'Phoenix, AZ': 33.4484367, 'Pierre, SD': 44.3683644, 'Pittsburgh, PA': 40.4416941, 'Plattsburgh, NY': 44.69282, 'Pocatello, ID': 42.8688613, 'Ponce, PR': 18.0039949, 'Portland, ME': 43.6610277, 'Portland, OR': 45.5202471, 'Portsmouth, NH': 43.0702223, 'Prescott, AZ': 34.5399962, 'Presque Isle/Houlton, ME': 46.661867799999996, 'Providence, RI': 41.8239891, 'Provo, UT': 40.2338438, 'Pueblo, CO': 10.961033, 'Pullman, WA': 46.7304268, 'Punta Gorda, FL': 26.9297836, 'Quincy, IL': 39.9356016, 'Raleigh/Durham, NC': 35.9217839, 'Rapid City, SD': 44.0869329, 'Redding, CA': 40.5863563, 'Reno, NV': 39.5261206, 'Rhinelander, WI': 45.636623, 'Richmond, VA': 49.1977086, 'Roanoke, VA': 37.270973, 'Rochester, MN': 44.0234387, 'Rochester, NY': 43.157285, 'Rockford, IL': 42.2713945, 'Rock Springs, WY': 41.5869225, 'Roswell, NM': 33.3943282, 'Rota, TT': 66.947975, 'Sacramento, CA': 38.5810606, 'Saipan, TT': 7.0698398, 'Salina, KS': 38.8402805, 'Salisbury, MD': 38.3662114, 'Salt Lake City, UT': 40.7596198, 'San Angelo, TX': 31.4648357, 'San Antonio, TX': 29.4246002, 'San Diego, CA': 32.7174202, 'Sanford, FL': 28.8117297, 'San Francisco, CA': 37.7790262, 'San Jose, 
CA': 37.3361905, 'San Juan, PR': -25.4206759, 'San Luis Obispo, CA': 35.3540209, 'Santa Ana, CA': 33.7494951, 'Santa Barbara, CA': 34.4221319, 'Santa Fe, NM': 35.6869996, 'Santa Maria, CA': 34.9531295, 'Santa Rosa, CA': 38.4404925, 'Sarasota/Bradenton, FL': 27.499764300000002, 'Sault Ste. Marie, MI': 46.490586, 'Savannah, GA': 9.7568312, 'Scottsbluff, NE': 41.862302, 'Scranton/Wilkes-Barre, PA': 41.33709205, 'Seattle, WA': 47.6038321, 'Shreveport, LA': 32.5221828, 'Sioux City, IA': 42.4966815, 'Sioux Falls, SD': 43.549973, 'Sitka, AK': 57.0524973, 'South Bend, IN': 38.622348, 'Spokane, WA': 47.6571934, 'Springfield, IL': 39.7990175, 'Springfield, MO': 37.2166779, 'State College, PA': 40.7944504, 'Staunton, VA': 38.1357949, 'St. Cloud, MN': 45.5616075, 'St. George, UT': 37.104153, 'Stillwater, OK': 36.1156306, 'St. Louis, MO': 38.6529545, 'Stockton, CA': 37.9577016, 'St. Petersburg, FL': 27.7703796, 'Syracuse, NY': 43.0481221, 'Tallahassee, FL': 30.4380832, 'Tampa, FL': 27.9477595, 'Texarkana, AR': 33.4254684, 'Toledo, OH': 41.6529143, 'Traverse City, MI': 44.7606441, 'Trenton, NJ': 40.2170575, 'Tucson, AZ': 32.2228765, 'Tulsa, OK': 36.1556805, 'Twin Falls, ID': 42.5704456, 'Tyler, TX': 32.3512601, 'Unalaska, AK': 53.8722824, 'Valdosta, GA': 30.8327022, 'Valparaiso, FL': 30.5085309, 'Vernal, UT': 40.4556825, 'Waco, TX': 31.549333, 'Walla Walla, WA': 46.0667277, 'Washington, DC': 38.8949924, 'Waterloo, IA': 42.4979693, 'Watertown, NY': 43.9747838, 'Watertown, SD': 44.899211, 'Wenatchee, WA': 47.4234599, 'West Palm Beach/Palm Beach, FL': 26.715364, 'West Yellowstone, MT': 44.664290199999996, 'White Plains, NY': 41.0339862, 'Wichita Falls, TX': 33.9137085, 'Wichita, KS': 37.6922361, 'Williamsport, PA': 41.2493292, 'Williston, ND': 48.1465457, 'Wilmington, NC': 34.2257282, 'Worcester, MA': 42.2761217, 'Wrangell, AK': 56.4706022, 'Yakima, WA': 46.601557, 'Yakutat, AK': 59.572734499999996, 'Youngstown/Warren, OH': 41.22497, 'Yuma, AZ': 32.665135, 'Bristol/Johnson City/Kingsport, TN': 36.475201, 'Mission/McAllen/Edinburg, TX': 26.203407, 'New Bern/Morehead/Beaufort, NC': 35.108494, 'Hattiesburg/Laurel, MS': 31.467, 'Iron Mountain/Kingsfd, MI': 45.8146, 'Newburgh/Poughkeepsie, NY': 41.66598, 'College Station/Bryan, TX': 30.601389, 'Saginaw/Bay City/Midland, MI': 43.4195, 'Newport News/Williamsburg, VA': 37.131900, 'Harlingen/San Benito, TX': 26.1326, 'Sun Valley/Hailey/Ketchum, ID': 43.504398}}, inplace=True) newTrain.replace({'originLong': {'Aberdeen, SD': -98.487813, 'Abilene, TX': -99.7475905, 'Adak Island, AK': -176.5734916431957, 'Aguadilla, PR': -67.1541343, 'Akron, OH': -81.518485, 'Albany, GA': -73.8016558, 'Albany, NY': -73.754968, 'Albuquerque, NM': -106.6509851, 'Alexandria, LA': 29.894378, 'Allentown/Bethlehem/Easton, PA': -75.44225386838299, 'Alpena, MI': -83.6670019, 'Amarillo, TX': -101.8338246, 'Anchorage, AK': -149.894852, 'Appleton, WI': -88.4067604, 'Arcata/Eureka, CA': -124.1535049, 'Asheville, NC': -82.5540161, 'Ashland, WV': -81.3526017, 'Aspen, CO': -106.8235606, 'Atlanta, GA': -84.3902644, 'Atlantic City, NJ': -74.4229351, 'Augusta, GA': 10.8933327, 'Austin, TX': -97.7436995, 'Bakersfield, CA': -119.0194639, 'Baltimore, MD': -76.610759, 'Bangor, ME': -68.7778138, 'Barrow, AK': -156.4809618, 'Baton Rouge, LA': -91.18738, 'Beaumont/Port Arthur, TX': -93.985972, 'Belleville, IL': 6.0982683, 'Bellingham, WA': -122.4788361, 'Bemidji, MN': -94.8907869, 'Bend/Redmond, OR': -121.2150324, 'Bethel, AK': -161.7558333, 'Billings, MT': -108.49607, 'Binghamton, NY': -75.914341, 
'Birmingham, AL': -1.8237251, 'Bismarck/Mandan, ND': -100.8363564, 'Bloomington/Normal, IL': -88.9844947, 'Boise, ID': -116.200886, 'Boston, MA': -71.0582912, 'Bozeman, MT': -111.044047, 'Brainerd, MN': -94.2008288, 'Branson, MO': -93.2175285, 'Brownsville, TX': -97.4890856, 'Brunswick, GA': 10.560215, 'Buffalo, NY': -78.8783922, 'Bullhead City, AZ': -114.5682983, 'Burbank, CA': -118.3258554, 'Burlington, VT': -73.212906, 'Butte, MT': -121.5858444, 'Cape Girardeau, MO': -89.5230357, 'Casper, WY': -106.3254928, 'Cedar City, UT': -113.0618277, 'Cedar Rapids/Iowa City, IA': -91.6704053, 'Champaign/Urbana, IL': -88.241194, 'Charleston/Dunbar, WV': -81.7207214, 'Charleston, SC': -79.9402728, 'Charlotte Amalie, VI': -64.932789, 'Charlotte, NC': -80.8430827, 'Charlottesville, VA': -78.49973472559668, 'Chattanooga, TN': -85.3094883, 'Cheyenne, WY': -104.820246, 'Chicago, IL': -87.6244212, 'Christiansted, VI': -64.7079823, 'Cincinnati, OH': -84.5124602, 'Clarksburg/Fairmont, WV': -80.3300893, 'Cleveland, OH': -81.6934446, 'Cody, WY': -109.0563923, 'Colorado Springs, CO': -104.8253485, 'Columbia, MO': -92.3337366, 'Columbia, SC': -81.0343313, 'Columbus, GA': -83.0765043, 'Columbus, MS': -88.4272627, 'Columbus, OH': -83.0007065, 'Concord, NC': -80.5800049, 'Cordova, AK': -145.7589103, 'Corpus Christi, TX': -97.4014129, 'Dallas/Fort Worth, TX': -97.3135971, 'Dallas, TX': -96.7968559, 'Daytona Beach, FL': -81.0228331, 'Dayton, OH': -84.1916069, 'Deadhorse, AK': -148.4598151, 'Del Rio, TX': -100.8946984, 'Denver, CO': -72.3959849, 'Des Moines, IA': -93.6046655, 'Detroit, MI': -83.0466403, 'Devils Lake, ND': -98.86512, 'Dickinson, ND': -102.7896242, 'Dillingham, AK': -158.4575, 'Dothan, AL': -85.3933906, 'Dubuque, IA': -90.6647967, 'Duluth, MN': -92.1251218, 'Durango, CO': -104.833333, 'Eagle, CO': -106.7172844, 'Eau Claire, WI': -91.4984941, 'Elko, NV': -115.3272864, 'Elmira/Corning, NY': -76.89199038453467, 'El Paso, TX': -106.464634, 'Erie, PA': -80.0852695, 'Escanaba, MI': -87.0647434, 'Eugene, OR': -123.0950506, 'Evansville, IN': -87.518899, 'Everett, WA': -122.2013998, 'Fairbanks, AK': -147.716675, 'Fargo, ND': -96.789821, 'Fayetteville, AR': -94.1574328, 'Fayetteville, NC': -78.878292, 'Flagstaff, AZ': -111.6165953319917, 'Flint, MI': -83.6900211, 'Florence, SC': -79.7671658, 'Fort Lauderdale, FL': -80.1433786, 'Fort Myers, FL': -81.8723084, 'Fort Smith, AR': -94.4248983, 'Fort Wayne, IN': -85.1386015, 'Fresno, CA': -119.7848307, 'Gainesville, FL': -82.3249846, 'Garden City, KS': -100.8726618, 'Gillette, WY': -105.501876, 'Grand Forks, ND': -97.0592028, 'Grand Island, NE': -98.338685, 'Grand Junction, CO': -108.5507317, 'Grand Rapids, MI': -85.6678639, 'Great Falls, MT': -111.29189, 'Green Bay, WI': -88.0125794, 'Greensboro/High Point, NC': -79.7919754, 'Greenville, NC': -77.3724593, 'Greer, SC': -82.2272119, 'Guam, TT': 144.80206025352555, 'Gulfport/Biloxi, MS': -89.0290044, 'Gunnison, CO': -107.0603126, 'Gustavus, AK': -135.7375654, 'Hagerstown, MD': -77.7202641, 'Hancock/Houghton, MI': -88.580956, 'Harrisburg, PA': -76.8861122, 'Hartford, CT': -72.69061276146614, 'Hayden, CO': -116.82675375791398, 'Hays, KS': -99.3267702, 'Helena, MT': -112.036277, 'Hibbing, MN': -92.937689, 'Hilo, HI': -155.0815803, 'Hilton Head, SC': -99.748119, 'Hobbs, NM': -103.1311314, 'Honolulu, HI': -157.8556764, 'Hoolehua, HI': -157.09484723911947, 'Houston, TX': -95.3676974, 'Huntsville, AL': -86.5859011, 'Hyannis, MA': -70.2825918, 'Idaho Falls, ID': -112.0400919, 'Indianapolis, IN': -86.0519568269157, 'International 
Falls, MN': -93.4105904, 'Islip, NY': -73.2108618, 'Ithaca/Cortland, NY': -76.4580207, 'Jackson/Vicksburg, MS': -90.8730418, 'Jacksonville/Camp Lejeune, NC': -77.4457643, 'Jacksonville, FL': -81.655651, 'Jackson, WY': -90.1847691, 'Jamestown, ND': -98.708436, 'Joplin, MO': -94.51323, 'Juneau, AK': -134.419734, 'Kahului, HI': -156.4529879461996, 'Kalamazoo, MI': -85.5872286, 'Kalispell, MT': -114.316711, 'Kansas City, MO': -94.5781416, 'Kapalua, HI': -156.6562339558182, 'Kearney, NE': -98.9472344, 'Ketchikan, AK': -131.6466819, 'Key West, FL': -81.7724368, 'Killeen, TX': -97.727796, 'King Salmon, AK': -156.5192469940953, 'Knoxville, TN': -83.9210261, 'Kodiak, AK': -152.4072222, 'Kona, HI': -156.0422959812206, 'Kotzebue, AK': -162.5977621, 'La Crosse, WI': -91.2395429, 'Lafayette, LA': -92.0198427, 'Lake Charles, LA': -93.2173759, 'Lanai, HI': -156.9029492509114, 'Lansing, MI': -84.5553805, 'Laramie, WY': -105.591101, 'Laredo, TX': -99.4953764, 'Las Vegas, NV': -115.1485163, 'Latrobe, PA': -79.3840301, 'Lawton/Fort Sill, OK': -98.4037888, 'Lewisburg, WV': -80.4456303, 'Lewiston, ID': -117.0216144, 'Lexington, KY': -84.4970393, 'Liberal, KS': -100.920999, 'Lihue, HI': -159.3687721, 'Lincoln, NE': -96.7077751, 'Little Rock, AR': -92.2895948, 'Long Beach, CA': -118.191604, 'Longview, TX': -94.74049, 'Los Angeles, CA': -118.242766, 'Louisville, KY': -85.759407, 'Lubbock, TX': -101.879336, 'Lynchburg, VA': -79.1422464, 'Madison, WI': -89.3837613, 'Mammoth Lakes, CA': -118.9668509, 'Manchester, NH': -71.4547891, 'Manhattan/Ft. Riley, KS': -73.9222899, 'Marquette, MI': -87.6305899, "Martha's Vineyard, MA": -70.62085427857699, 'Medford, OR': -122.8718605, 'Melbourne, FL': -80.6371513, 'Memphis, TN': -90.0516285, 'Meridian, MS': -88.703656, 'Miami, FL': -80.19362, 'Midland/Odessa, TX': -102.3606957, 'Milwaukee, WI': -87.922497, 'Minneapolis, MN': -93.2654692, 'Minot, ND': -101.296273, 'Missoula, MT': -113.995267, 'Moab, UT': -109.5462146, 'Mobile, AL': -88.0430541, 'Moline, IL': -90.5151342, 'Monroe, LA': -90.1792484, 'Monterey, CA': -121.3877428, 'Montgomery, AL': -86.3107669425032, 'Montrose/Delta, CO': -108.226467, 'Mosinee, WI': -89.7035959, 'Muskegon, MI': -86.2483921, 'Myrtle Beach, SC': -78.8900409, 'Nantucket, MA': -70.14287301528347, 'Nashville, TN': -86.7743531, 'Newark, NJ': -74.1723667, 'New Haven, CT': -72.93102342707913, 'New Orleans, LA': -90.0701156, 'New York, NY': -74.0060152, 'Niagara Falls, NY': -79.0614686, 'Nome, AK': -165.39879944316317, 'Norfolk, VA': 1.2623608080231654, 'North Bend/Coos Bay, OR': -124.2242824, 'North Platte, NE': -100.7654232, 'Oakland, CA': -122.2713563, 'Ogdensburg, NY': -75.486374, 'Ogden, UT': -111.9738429, 'Oklahoma City, OK': -97.5170536, 'Omaha, NE': -95.9383758, 'Ontario, CA': -86.000977, 'Orlando, FL': -81.3790304, 'Owensboro, KY': -87.1133304, 'Paducah, KY': -88.6000478, 'Pago Pago, TT': -170.7048298, 'Palm Springs, CA': -116.49529769785079, 'Panama City, FL': -85.6545729, 'Pasco/Kennewick/Richland, WA': -119.0664001, 'Pellston, MI': -84.783936, 'Pensacola, FL': -87.2169149, 'Peoria, IL': -89.5891008, 'Petersburg, AK': -132.95547, 'Philadelphia, PA': -75.1635262, 'Phoenix, AZ': -112.0741417, 'Pierre, SD': -100.3511367, 'Pittsburgh, PA': -79.9900861, 'Plattsburgh, NY': -73.45562, 'Pocatello, ID': -112.4401098, 'Ponce, PR': -66.6169509, 'Portland, ME': -70.2548596, 'Portland, OR': -122.6741949, 'Portsmouth, NH': -70.7548621, 'Prescott, AZ': -112.4687616, 'Presque Isle/Houlton, ME': -68.01074889363161, 'Providence, RI': -71.4128343, 'Provo, UT': 
-111.6585337, 'Pueblo, CO': -74.84053554739253, 'Pullman, WA': -117.173895, 'Punta Gorda, FL': -82.0453664, 'Quincy, IL': -91.4098727, 'Raleigh/Durham, NC': -78.76087880585929, 'Rapid City, SD': -103.2274481, 'Redding, CA': -122.3916754, 'Reno, NV': -119.8126581, 'Rhinelander, WI': -89.412075, 'Richmond, VA': -123.1912406, 'Roanoke, VA': -79.9414313, 'Rochester, MN': -92.4630182, 'Rochester, NY': -77.615214, 'Rockford, IL': -89.093966, 'Rock Springs, WY': -109.2047867, 'Roswell, NM': -104.5229518, 'Rota, TT': 13.553736, 'Sacramento, CA': -121.4938951, 'Saipan, TT': 125.5116649, 'Salina, KS': -97.6114237, 'Salisbury, MD': -75.6008881, 'Salt Lake City, UT': -111.8867975, 'San Angelo, TX': -100.4398442, 'San Antonio, TX': -98.4951405, 'San Diego, CA': -117.1627728, 'Sanford, FL': -81.2680345, 'San Francisco, CA': -122.4199061, 'San Jose, CA': -121.890583, 'San Juan, PR': -49.2687428522959, 'San Luis Obispo, CA': -120.3757163, 'Santa Ana, CA': -117.8732213, 'Santa Barbara, CA': -119.7026673, 'Santa Fe, NM': -105.9377997, 'Santa Maria, CA': -120.4358577, 'Santa Rosa, CA': -122.7141049, 'Sarasota/Bradenton, FL': -82.56510160912002, 'Sault Ste. Marie, MI': -84.359269, 'Savannah, GA': -2.4962, 'Scottsbluff, NE': -103.6627088, 'Scranton/Wilkes-Barre, PA': -75.72257122928625, 'Seattle, WA': -122.3300624, 'Shreveport, LA': -93.7651944, 'Sioux City, IA': -96.4058782, 'Sioux Falls, SD': -96.7003324, 'Sitka, AK': -135.337612, 'South Bend, IN': -105.518825, 'Spokane, WA': -117.4235106, 'Springfield, IL': -89.6439575, 'Springfield, MO': -93.2920373, 'State College, PA': -77.8616386, 'Staunton, VA': -79.08927008810585, 'St. Cloud, MN': -94.1642004, 'St. George, UT': -113.5841313, 'Stillwater, OK': -97.0585717, 'St. Louis, MO': -90.24111656024635, 'Stockton, CA': -121.2907796, 'St. 
Petersburg, FL': -82.6695085, 'Syracuse, NY': -76.1474244, 'Tallahassee, FL': -84.2809332, 'Tampa, FL': -82.458444, 'Texarkana, AR': -94.0430977, 'Toledo, OH': -83.5378173, 'Traverse City, MI': -85.6165301, 'Trenton, NJ': -74.7429463, 'Tucson, AZ': -110.9748477, 'Tulsa, OK': -95.9929113, 'Twin Falls, ID': -114.4602554, 'Tyler, TX': -95.3010624, 'Unalaska, AK': -166.5272262, 'Valdosta, GA': -83.2784851, 'Valparaiso, FL': -86.5027282, 'Vernal, UT': -109.5284741, 'Waco, TX': -97.1466695, 'Walla Walla, WA': -118.3393456, 'Washington, DC': -77.0365581, 'Waterloo, IA': -92.3329637, 'Watertown, NY': -75.9107565, 'Watertown, SD': -97.115289, 'Wenatchee, WA': -120.3103494, 'West Palm Beach/Palm Beach, FL': -80.0532942, 'West Yellowstone, MT': -111.10513722509046, 'White Plains, NY': -73.7629097, 'Wichita Falls, TX': -98.4933873, 'Wichita, KS': -97.3375448, 'Williamsport, PA': -77.0027671, 'Williston, ND': -103.621814, 'Wilmington, NC': -77.9447107, 'Worcester, MA': -71.8058232, 'Wrangell, AK': -132.3829431, 'Yakima, WA': -120.5108421, 'Yakutat, AK': -139.57831243878087, 'Youngstown/Warren, OH': -80.789606, 'Yuma, AZ': -114.47603157249804, 'Bristol/Johnson City/Kingsport, TN': -82.407401, 'Mission/McAllen/Edinburg, TX': -98.230011, 'New Bern/Morehead/Beaufort, NC': -77.044113, 'Hattiesburg/Laurel, MS': -89.3331, 'Iron Mountain/Kingsfd, MI': -88.1186, 'Newburgh/Poughkeepsie, NY': -73.884201, 'College Station/Bryan, TX': -96.314445, 'Saginaw/Bay City/Midland, MI': -83.9508, 'Newport News/Williamsburg, VA': -76.492996, 'Harlingen/San Benito, TX': -97.6311, 'Sun Valley/Hailey/Ketchum, ID': -114.2959976}}, inplace=True) newTrain.replace({'destLat': {'Aberdeen, SD': 45.4649805, 'Abilene, TX': 32.44645, 'Adak Island, AK': 51.7961654, 'Aguadilla, PR': 18.4274359, 'Akron, OH': 41.083064, 'Albany, GA': 42.7439143, 'Albany, NY': 42.6511674, 'Albuquerque, NM': 35.0841034, 'Alexandria, LA': 31.199004, 'Allentown/Bethlehem/Easton, PA': 40.651163100000005, 'Alpena, MI': 45.0176181, 'Amarillo, TX': 35.2072185, 'Anchorage, AK': 61.2163129, 'Appleton, WI': 44.2611337, 'Arcata/Eureka, CA': 40.8033073, 'Asheville, NC': 35.6009498, 'Ashland, WV': 37.4084488, 'Aspen, CO': 39.1911128, 'Atlanta, GA': 33.7489924, 'Atlantic City, NJ': 39.3642852, 'Augusta, GA': 48.3689438, 'Austin, TX': 30.2711286, 'Bakersfield, CA': 35.3738712, 'Baltimore, MD': 39.2908816, 'Bangor, ME': 44.8011821, 'Barrow, AK': 71.387113, 'Baton Rouge, LA': 30.4459596, 'Beaumont/Port Arthur, TX': 29.954324, 'Belleville, IL': 48.8176714, 'Bellingham, WA': 48.7544012, 'Bemidji, MN': 47.4785418, 'Bend/Redmond, OR': 44.2165084, 'Bethel, AK': 60.7922222, 'Billings, MT': 45.7874957, 'Binghamton, NY': 42.096968, 'Birmingham, AL': 52.4459629, 'Bismarck/Mandan, ND': 46.8101709, 'Bloomington/Normal, IL': 40.508752, 'Boise, ID': 43.6166163, 'Boston, MA': 42.3602534, 'Bozeman, MT': 45.6794293, 'Brainerd, MN': 46.3580221, 'Branson, MO': 36.6411357, 'Brownsville, TX': 25.9140256, 'Brunswick, GA': 52.3175903, 'Buffalo, NY': 42.8867166, 'Bullhead City, AZ': 35.1477774, 'Burbank, CA': 34.1816482, 'Burlington, VT': 44.4761601, 'Butte, MT': 39.6519275, 'Cape Girardeau, MO': 37.3034933, 'Casper, WY': 42.849709, 'Cedar City, UT': 37.6774238, 'Cedar Rapids/Iowa City, IA': 41.9758872, 'Champaign/Urbana, IL': 40.1157948, 'Charleston/Dunbar, WV': 38.3616659, 'Charleston, SC': 32.7876012, 'Charlotte Amalie, VI': 18.341137, 'Charlotte, NC': 35.2272086, 'Charlottesville, VA': 38.0360726, 'Chattanooga, TN': 35.0457219, 'Cheyenne, WY': 41.139981, 'Chicago, IL': 41.8755616, 
'Christiansted, VI': 17.7439481, 'Cincinnati, OH': 39.1014537, 'Clarksburg/Fairmont, WV': 39.2798118, 'Cleveland, OH': 41.5051613, 'Cody, WY': 44.5263107, 'Colorado Springs, CO': 38.8339578, 'Columbia, MO': 38.951883, 'Columbia, SC': 34.0007493, 'Columbus, GA': 40.0838862, 'Columbus, MS': 33.4956744, 'Columbus, OH': 39.9622601, 'Concord, NC': 35.4094178, 'Cordova, AK': 60.5439444, 'Corpus Christi, TX': 27.7477253, 'Dallas/Fort Worth, TX': 32.7476308, 'Dallas, TX': 32.7762719, 'Daytona Beach, FL': 29.2108147, 'Dayton, OH': 39.7589478, 'Deadhorse, AK': 70.2006973, 'Del Rio, TX': 29.3655405, 'Denver, CO': 5.3428475, 'Des Moines, IA': 41.5910323, 'Detroit, MI': 42.3315509, 'Devils Lake, ND': 48.112779, 'Dickinson, ND': 46.8791756, 'Dillingham, AK': 59.0397222, 'Dothan, AL': 31.2237434, 'Dubuque, IA': 42.5006217, 'Duluth, MN': 46.7729322, 'Durango, CO': 24.833333, 'Eagle, CO': 39.6161124, 'Eau Claire, WI': 44.811349, 'Elko, NV': 41.1958128, 'Elmira/Corning, NY': 42.1608441, 'El Paso, TX': 31.7754152, 'Erie, PA': 42.1294712, 'Escanaba, MI': 45.7455707, 'Eugene, OR': 44.0505054, 'Evansville, IN': 37.9386712, 'Everett, WA': 47.9673056, 'Fairbanks, AK': 64.837845, 'Fargo, ND': 46.877229, 'Fayetteville, AR': 36.0625843, 'Fayetteville, NC': 35.0525759, 'Flagstaff, AZ': 35.1816047, 'Flint, MI': 43.0161693, 'Florence, SC': 34.1984435, 'Fort Lauderdale, FL': 26.1223084, 'Fort Myers, FL': 26.640628, 'Fort Smith, AR': 35.3872218, 'Fort Wayne, IN': 41.0799898, 'Fresno, CA': 36.7394421, 'Gainesville, FL': 29.6519684, 'Garden City, KS': 37.9716898, 'Gillette, WY': 44.290635, 'Grand Forks, ND': 47.9078244, 'Grand Island, NE': 40.924271, 'Grand Junction, CO': 39.063956, 'Grand Rapids, MI': 42.9632405, 'Great Falls, MT': 47.5048851, 'Green Bay, WI': 44.5126379, 'Greensboro/High Point, NC': 36.0726355, 'Greenville, NC': 35.613224, 'Greer, SC': 34.9381361, 'Guam, TT': 13.486490199999999, 'Gulfport/Biloxi, MS': 30.4900534, 'Gunnison, CO': 38.6476702, 'Gustavus, AK': 58.4128377, 'Hagerstown, MD': 39.6419219, 'Hancock/Houghton, MI': 47.126871, 'Harrisburg, PA': 40.2663107, 'Hartford, CT': 41.7655582, 'Hayden, CO': 47.7725145, 'Hays, KS': 38.8791783, 'Helena, MT': 46.5927425, 'Hibbing, MN': 47.427155, 'Hilo, HI': 19.7073734, 'Hilton Head, SC': 32.3836213, 'Hobbs, NM': 32.707667, 'Honolulu, HI': 21.304547, 'Hoolehua, HI': 21.1590908, 'Houston, TX': 29.7589382, 'Huntsville, AL': 34.729847, 'Hyannis, MA': 41.651513, 'Idaho Falls, ID': 43.4935245, 'Indianapolis, IN': 39.9164009, 'International Falls, MN': 48.601033, 'Islip, NY': 40.7304311, 'Ithaca/Cortland, NY': 42.4415242, 'Jackson/Vicksburg, MS': 32.3520532, 'Jacksonville/Camp Lejeune, NC': 34.7338577, 'Jacksonville, FL': 30.3321838, 'Jackson, WY': 32.2990384, 'Jamestown, ND': 46.910544, 'Joplin, MO': 37.08418, 'Juneau, AK': 58.3019496, 'Kahului, HI': 20.8747708, 'Kalamazoo, MI': 42.291707, 'Kalispell, MT': 48.2022563, 'Kansas City, MO': 39.100105, 'Kapalua, HI': 20.99490395, 'Kearney, NE': 40.4906216, 'Ketchikan, AK': 55.3430696, 'Key West, FL': 24.5625566, 'Killeen, TX': 31.1171441, 'King Salmon, AK': 58.7551615, 'Knoxville, TN': 35.9603948, 'Kodiak, AK': 57.79, 'Kona, HI': 19.743906, 'Kotzebue, AK': 66.8982057, 'La Crosse, WI': 43.8014053, 'Lafayette, LA': 30.2240897, 'Lake Charles, LA': 30.2265949, 'Lanai, HI': 20.830544099999997, 'Lansing, MI': 42.7337712, 'Laramie, WY': 41.311367, 'Laredo, TX': 27.5199841, 'Las Vegas, NV': 36.1672559, 'Latrobe, PA': 40.317287, 'Lawton/Fort Sill, OK': 34.6172103, 'Lewisburg, WV': 37.8017879, 'Lewiston, ID': 46.4195913, 
'Lexington, KY': 38.0464066, 'Liberal, KS': 37.0430812, 'Lihue, HI': 21.9769622, 'Lincoln, NE': 40.8088861, 'Little Rock, AR': 34.7464809, 'Long Beach, CA': 33.7690164, 'Longview, TX': 32.5007031, 'Los Angeles, CA': 34.0536909, 'Louisville, KY': 38.2542376, 'Lubbock, TX': 33.5635206, 'Lynchburg, VA': 37.4137536, 'Madison, WI': 43.074761, 'Mammoth Lakes, CA': 37.6432525, 'Manchester, NH': 42.9956397, 'Manhattan/Ft. Riley, KS': 40.8576918, 'Marquette, MI': 46.4481521, "Martha's Vineyard, MA": 41.3918832, 'Medford, OR': 42.3264181, 'Melbourne, FL': 28.106471, 'Memphis, TN': 35.1490215, 'Meridian, MS': 32.3643098, 'Miami, FL': 25.7741728, 'Midland/Odessa, TX': 31.8329723, 'Milwaukee, WI': 43.0349931, 'Minneapolis, MN': 44.9772995, 'Minot, ND': 48.23251, 'Missoula, MT': 46.8701049, 'Moab, UT': 38.5738096, 'Mobile, AL': 30.6943566, 'Moline, IL': 41.5067003, 'Monroe, LA': 38.2722313, 'Monterey, CA': 36.2231079, 'Montgomery, AL': 32.379952849999995, 'Montrose/Delta, CO': 38.8777609, 'Mosinee, WI': 44.7927298, 'Muskegon, MI': 43.2341813, 'Myrtle Beach, SC': 33.6956461, 'Nantucket, MA': 41.316911450000006, 'Nashville, TN': 36.1622296, 'Newark, NJ': 40.735657, 'New Haven, CT': 41.298434349999994, 'New Orleans, LA': 29.9499323, 'New York, NY': 40.7127281, 'Niagara Falls, NY': 43.08436, 'Nome, AK': 64.4989922, 'Norfolk, VA': 52.56365215, 'North Bend/Coos Bay, OR': 43.4065089, 'North Platte, NE': 41.1238873, 'Oakland, CA': 37.8044557, 'Ogdensburg, NY': 44.694285, 'Ogden, UT': 41.2230048, 'Oklahoma City, OK': 35.4729886, 'Omaha, NE': 41.2587459, 'Ontario, CA': 50.000678, 'Orlando, FL': 28.5421109, 'Owensboro, KY': 37.7742152, 'Paducah, KY': 37.0833893, 'Pago Pago, TT': -14.2754786, 'Palm Springs, CA': 33.772179449999996, 'Panama City, FL': 30.1600827, 'Pasco/Kennewick/Richland, WA': 46.1736015, 'Pellston, MI': 45.552789, 'Pensacola, FL': 30.421309, 'Peoria, IL': 40.6938609, 'Petersburg, AK': 56.8127965, 'Philadelphia, PA': 39.9527237, 'Phoenix, AZ': 33.4484367, 'Pierre, SD': 44.3683644, 'Pittsburgh, PA': 40.4416941, 'Plattsburgh, NY': 44.69282, 'Pocatello, ID': 42.8688613, 'Ponce, PR': 18.0039949, 'Portland, ME': 43.6610277, 'Portland, OR': 45.5202471, 'Portsmouth, NH': 43.0702223, 'Prescott, AZ': 34.5399962, 'Presque Isle/Houlton, ME': 46.661867799999996, 'Providence, RI': 41.8239891, 'Provo, UT': 40.2338438, 'Pueblo, CO': 10.961033, 'Pullman, WA': 46.7304268, 'Punta Gorda, FL': 26.9297836, 'Quincy, IL': 39.9356016, 'Raleigh/Durham, NC': 35.9217839, 'Rapid City, SD': 44.0869329, 'Redding, CA': 40.5863563, 'Reno, NV': 39.5261206, 'Rhinelander, WI': 45.636623, 'Richmond, VA': 49.1977086, 'Roanoke, VA': 37.270973, 'Rochester, MN': 44.0234387, 'Rochester, NY': 43.157285, 'Rockford, IL': 42.2713945, 'Rock Springs, WY': 41.5869225, 'Roswell, NM': 33.3943282, 'Rota, TT': 66.947975, 'Sacramento, CA': 38.5810606, 'Saipan, TT': 7.0698398, 'Salina, KS': 38.8402805, 'Salisbury, MD': 38.3662114, 'Salt Lake City, UT': 40.7596198, 'San Angelo, TX': 31.4648357, 'San Antonio, TX': 29.4246002, 'San Diego, CA': 32.7174202, 'Sanford, FL': 28.8117297, 'San Francisco, CA': 37.7790262, 'San Jose, CA': 37.3361905, 'San Juan, PR': -25.4206759, 'San Luis Obispo, CA': 35.3540209, 'Santa Ana, CA': 33.7494951, 'Santa Barbara, CA': 34.4221319, 'Santa Fe, NM': 35.6869996, 'Santa Maria, CA': 34.9531295, 'Santa Rosa, CA': 38.4404925, 'Sarasota/Bradenton, FL': 27.499764300000002, 'Sault Ste. 
Marie, MI': 46.490586, 'Savannah, GA': 9.7568312, 'Scottsbluff, NE': 41.862302, 'Scranton/Wilkes-Barre, PA': 41.33709205, 'Seattle, WA': 47.6038321, 'Shreveport, LA': 32.5221828, 'Sioux City, IA': 42.4966815, 'Sioux Falls, SD': 43.549973, 'Sitka, AK': 57.0524973, 'South Bend, IN': 38.622348, 'Spokane, WA': 47.6571934, 'Springfield, IL': 39.7990175, 'Springfield, MO': 37.2166779, 'State College, PA': 40.7944504, 'Staunton, VA': 38.1357949, 'St. Cloud, MN': 45.5616075, 'St. George, UT': 37.104153, 'Stillwater, OK': 36.1156306, 'St. Louis, MO': 38.6529545, 'Stockton, CA': 37.9577016, 'St. Petersburg, FL': 27.7703796, 'Syracuse, NY': 43.0481221, 'Tallahassee, FL': 30.4380832, 'Tampa, FL': 27.9477595, 'Texarkana, AR': 33.4254684, 'Toledo, OH': 41.6529143, 'Traverse City, MI': 44.7606441, 'Trenton, NJ': 40.2170575, 'Tucson, AZ': 32.2228765, 'Tulsa, OK': 36.1556805, 'Twin Falls, ID': 42.5704456, 'Tyler, TX': 32.3512601, 'Unalaska, AK': 53.8722824, 'Valdosta, GA': 30.8327022, 'Valparaiso, FL': 30.5085309, 'Vernal, UT': 40.4556825, 'Waco, TX': 31.549333, 'Walla Walla, WA': 46.0667277, 'Washington, DC': 38.8949924, 'Waterloo, IA': 42.4979693, 'Watertown, NY': 43.9747838, 'Watertown, SD': 44.899211, 'Wenatchee, WA': 47.4234599, 'West Palm Beach/Palm Beach, FL': 26.715364, 'West Yellowstone, MT': 44.664290199999996, 'White Plains, NY': 41.0339862, 'Wichita Falls, TX': 33.9137085, 'Wichita, KS': 37.6922361, 'Williamsport, PA': 41.2493292, 'Williston, ND': 48.1465457, 'Wilmington, NC': 34.2257282, 'Worcester, MA': 42.2761217, 'Wrangell, AK': 56.4706022, 'Yakima, WA': 46.601557, 'Yakutat, AK': 59.572734499999996, 'Youngstown/Warren, OH': 41.22497, 'Yuma, AZ': 32.665135, 'Bristol/Johnson City/Kingsport, TN': 36.475201, 'Mission/McAllen/Edinburg, TX': 26.203407, 'New Bern/Morehead/Beaufort, NC': 35.108494, 'Hattiesburg/Laurel, MS': 31.467, 'Iron Mountain/Kingsfd, MI': 45.8146,'Newburgh/Poughkeepsie, NY': 41.66598, 'College Station/Bryan, TX': 30.601389, 'Saginaw/Bay City/Midland, MI': 43.4195, 'Newport News/Williamsburg, VA': 37.131900, 'Harlingen/San Benito, TX': 26.1326, 'Sun Valley/Hailey/Ketchum, ID': 43.504398}}, inplace=True) newTrain.replace({'destLong': {'Aberdeen, SD': -98.487813, 'Abilene, TX': -99.7475905, 'Adak Island, AK': -176.5734916431957, 'Aguadilla, PR': -67.1541343, 'Akron, OH': -81.518485, 'Albany, GA': -73.8016558, 'Albany, NY': -73.754968, 'Albuquerque, NM': -106.6509851, 'Alexandria, LA': 29.894378, 'Allentown/Bethlehem/Easton, PA': -75.44225386838299, 'Alpena, MI': -83.6670019, 'Amarillo, TX': -101.8338246, 'Anchorage, AK': -149.894852, 'Appleton, WI': -88.4067604, 'Arcata/Eureka, CA': -124.1535049, 'Asheville, NC': -82.5540161, 'Ashland, WV': -81.3526017, 'Aspen, CO': -106.8235606, 'Atlanta, GA': -84.3902644, 'Atlantic City, NJ': -74.4229351, 'Augusta, GA': 10.8933327, 'Austin, TX': -97.7436995, 'Bakersfield, CA': -119.0194639, 'Baltimore, MD': -76.610759, 'Bangor, ME': -68.7778138, 'Barrow, AK': -156.4809618, 'Baton Rouge, LA': -91.18738, 'Beaumont/Port Arthur, TX': -93.985972, 'Belleville, IL': 6.0982683, 'Bellingham, WA': -122.4788361, 'Bemidji, MN': -94.8907869, 'Bend/Redmond, OR': -121.2150324, 'Bethel, AK': -161.7558333, 'Billings, MT': -108.49607, 'Binghamton, NY': -75.914341, 'Birmingham, AL': -1.8237251, 'Bismarck/Mandan, ND': -100.8363564, 'Bloomington/Normal, IL': -88.9844947, 'Boise, ID': -116.200886, 'Boston, MA': -71.0582912, 'Bozeman, MT': -111.044047, 'Brainerd, MN': -94.2008288, 'Branson, MO': -93.2175285, 'Brownsville, TX': -97.4890856, 'Brunswick, GA': 10.560215, 
'Buffalo, NY': -78.8783922, 'Bullhead City, AZ': -114.5682983, 'Burbank, CA': -118.3258554, 'Burlington, VT': -73.212906, 'Butte, MT': -121.5858444, 'Cape Girardeau, MO': -89.5230357, 'Casper, WY': -106.3254928, 'Cedar City, UT': -113.0618277, 'Cedar Rapids/Iowa City, IA': -91.6704053, 'Champaign/Urbana, IL': -88.241194, 'Charleston/Dunbar, WV': -81.7207214, 'Charleston, SC': -79.9402728, 'Charlotte Amalie, VI': -64.932789, 'Charlotte, NC': -80.8430827, 'Charlottesville, VA': -78.49973472559668, 'Chattanooga, TN': -85.3094883, 'Cheyenne, WY': -104.820246, 'Chicago, IL': -87.6244212, 'Christiansted, VI': -64.7079823, 'Cincinnati, OH': -84.5124602, 'Clarksburg/Fairmont, WV': -80.3300893, 'Cleveland, OH': -81.6934446, 'Cody, WY': -109.0563923, 'Colorado Springs, CO': -104.8253485, 'Columbia, MO': -92.3337366, 'Columbia, SC': -81.0343313, 'Columbus, GA': -83.0765043, 'Columbus, MS': -88.4272627, 'Columbus, OH': -83.0007065, 'Concord, NC': -80.5800049, 'Cordova, AK': -145.7589103, 'Corpus Christi, TX': -97.4014129, 'Dallas/Fort Worth, TX': -97.3135971, 'Dallas, TX': -96.7968559, 'Daytona Beach, FL': -81.0228331, 'Dayton, OH': -84.1916069, 'Deadhorse, AK': -148.4598151, 'Del Rio, TX': -100.8946984, 'Denver, CO': -72.3959849, 'Des Moines, IA': -93.6046655, 'Detroit, MI': -83.0466403, 'Devils Lake, ND': -98.86512, 'Dickinson, ND': -102.7896242, 'Dillingham, AK': -158.4575, 'Dothan, AL': -85.3933906, 'Dubuque, IA': -90.6647967, 'Duluth, MN': -92.1251218, 'Durango, CO': -104.833333, 'Eagle, CO': -106.7172844, 'Eau Claire, WI': -91.4984941, 'Elko, NV': -115.3272864, 'Elmira/Corning, NY': -76.89199038453467, 'El Paso, TX': -106.464634, 'Erie, PA': -80.0852695, 'Escanaba, MI': -87.0647434, 'Eugene, OR': -123.0950506, 'Evansville, IN': -87.518899, 'Everett, WA': -122.2013998, 'Fairbanks, AK': -147.716675, 'Fargo, ND': -96.789821, 'Fayetteville, AR': -94.1574328, 'Fayetteville, NC': -78.878292, 'Flagstaff, AZ': -111.6165953319917, 'Flint, MI': -83.6900211, 'Florence, SC': -79.7671658, 'Fort Lauderdale, FL': -80.1433786, 'Fort Myers, FL': -81.8723084, 'Fort Smith, AR': -94.4248983, 'Fort Wayne, IN': -85.1386015, 'Fresno, CA': -119.7848307, 'Gainesville, FL': -82.3249846, 'Garden City, KS': -100.8726618, 'Gillette, WY': -105.501876, 'Grand Forks, ND': -97.0592028, 'Grand Island, NE': -98.338685, 'Grand Junction, CO': -108.5507317, 'Grand Rapids, MI': -85.6678639, 'Great Falls, MT': -111.29189, 'Green Bay, WI': -88.0125794, 'Greensboro/High Point, NC': -79.7919754, 'Greenville, NC': -77.3724593, 'Greer, SC': -82.2272119, 'Guam, TT': 144.80206025352555, 'Gulfport/Biloxi, MS': -89.0290044, 'Gunnison, CO': -107.0603126, 'Gustavus, AK': -135.7375654, 'Hagerstown, MD': -77.7202641, 'Hancock/Houghton, MI': -88.580956, 'Harrisburg, PA': -76.8861122, 'Hartford, CT': -72.69061276146614, 'Hayden, CO': -116.82675375791398, 'Hays, KS': -99.3267702, 'Helena, MT': -112.036277, 'Hibbing, MN': -92.937689, 'Hilo, HI': -155.0815803, 'Hilton Head, SC': -99.748119, 'Hobbs, NM': -103.1311314, 'Honolulu, HI': -157.8556764, 'Hoolehua, HI': -157.09484723911947, 'Houston, TX': -95.3676974, 'Huntsville, AL': -86.5859011, 'Hyannis, MA': -70.2825918, 'Idaho Falls, ID': -112.0400919, 'Indianapolis, IN': -86.0519568269157, 'International Falls, MN': -93.4105904, 'Islip, NY': -73.2108618, 'Ithaca/Cortland, NY': -76.4580207, 'Jackson/Vicksburg, MS': -90.8730418, 'Jacksonville/Camp Lejeune, NC': -77.4457643, 'Jacksonville, FL': -81.655651, 'Jackson, WY': -90.1847691, 'Jamestown, ND': -98.708436, 'Joplin, MO': -94.51323, 'Juneau, AK': 
-134.419734, 'Kahului, HI': -156.4529879461996, 'Kalamazoo, MI': -85.5872286, 'Kalispell, MT': -114.316711, 'Kansas City, MO': -94.5781416, 'Kapalua, HI': -156.6562339558182, 'Kearney, NE': -98.9472344, 'Ketchikan, AK': -131.6466819, 'Key West, FL': -81.7724368, 'Killeen, TX': -97.727796, 'King Salmon, AK': -156.5192469940953, 'Knoxville, TN': -83.9210261, 'Kodiak, AK': -152.4072222, 'Kona, HI': -156.0422959812206, 'Kotzebue, AK': -162.5977621, 'La Crosse, WI': -91.2395429, 'Lafayette, LA': -92.0198427, 'Lake Charles, LA': -93.2173759, 'Lanai, HI': -156.9029492509114, 'Lansing, MI': -84.5553805, 'Laramie, WY': -105.591101, 'Laredo, TX': -99.4953764, 'Las Vegas, NV': -115.1485163, 'Latrobe, PA': -79.3840301, 'Lawton/Fort Sill, OK': -98.4037888, 'Lewisburg, WV': -80.4456303, 'Lewiston, ID': -117.0216144, 'Lexington, KY': -84.4970393, 'Liberal, KS': -100.920999, 'Lihue, HI': -159.3687721, 'Lincoln, NE': -96.7077751, 'Little Rock, AR': -92.2895948, 'Long Beach, CA': -118.191604, 'Longview, TX': -94.74049, 'Los Angeles, CA': -118.242766, 'Louisville, KY': -85.759407, 'Lubbock, TX': -101.879336, 'Lynchburg, VA': -79.1422464, 'Madison, WI': -89.3837613, 'Mammoth Lakes, CA': -118.9668509, 'Manchester, NH': -71.4547891, 'Manhattan/Ft. Riley, KS': -73.9222899, 'Marquette, MI': -87.6305899, "Martha's Vineyard, MA": -70.62085427857699, 'Medford, OR': -122.8718605, 'Melbourne, FL': -80.6371513, 'Memphis, TN': -90.0516285, 'Meridian, MS': -88.703656, 'Miami, FL': -80.19362, 'Midland/Odessa, TX': -102.3606957, 'Milwaukee, WI': -87.922497, 'Minneapolis, MN': -93.2654692, 'Minot, ND': -101.296273, 'Missoula, MT': -113.995267, 'Moab, UT': -109.5462146, 'Mobile, AL': -88.0430541, 'Moline, IL': -90.5151342, 'Monroe, LA': -90.1792484, 'Monterey, CA': -121.3877428, 'Montgomery, AL': -86.3107669425032, 'Montrose/Delta, CO': -108.226467, 'Mosinee, WI': -89.7035959, 'Muskegon, MI': -86.2483921, 'Myrtle Beach, SC': -78.8900409, 'Nantucket, MA': -70.14287301528347, 'Nashville, TN': -86.7743531, 'Newark, NJ': -74.1723667, 'New Haven, CT': -72.93102342707913, 'New Orleans, LA': -90.0701156, 'New York, NY': -74.0060152, 'Niagara Falls, NY': -79.0614686, 'Nome, AK': -165.39879944316317, 'Norfolk, VA': 1.2623608080231654, 'North Bend/Coos Bay, OR': -124.2242824, 'North Platte, NE': -100.7654232, 'Oakland, CA': -122.2713563, 'Ogdensburg, NY': -75.486374, 'Ogden, UT': -111.9738429, 'Oklahoma City, OK': -97.5170536, 'Omaha, NE': -95.9383758, 'Ontario, CA': -86.000977, 'Orlando, FL': -81.3790304, 'Owensboro, KY': -87.1133304, 'Paducah, KY': -88.6000478, 'Pago Pago, TT': -170.7048298, 'Palm Springs, CA': -116.49529769785079, 'Panama City, FL': -85.6545729, 'Pasco/Kennewick/Richland, WA': -119.0664001, 'Pellston, MI': -84.783936, 'Pensacola, FL': -87.2169149, 'Peoria, IL': -89.5891008, 'Petersburg, AK': -132.95547, 'Philadelphia, PA': -75.1635262, 'Phoenix, AZ': -112.0741417, 'Pierre, SD': -100.3511367, 'Pittsburgh, PA': -79.9900861, 'Plattsburgh, NY': -73.45562, 'Pocatello, ID': -112.4401098, 'Ponce, PR': -66.6169509, 'Portland, ME': -70.2548596, 'Portland, OR': -122.6741949, 'Portsmouth, NH': -70.7548621, 'Prescott, AZ': -112.4687616, 'Presque Isle/Houlton, ME': -68.01074889363161, 'Providence, RI': -71.4128343, 'Provo, UT': -111.6585337, 'Pueblo, CO': -74.84053554739253, 'Pullman, WA': -117.173895, 'Punta Gorda, FL': -82.0453664, 'Quincy, IL': -91.4098727, 'Raleigh/Durham, NC': -78.76087880585929, 'Rapid City, SD': -103.2274481, 'Redding, CA': -122.3916754, 'Reno, NV': -119.8126581, 'Rhinelander, WI': -89.412075, 'Richmond, 
VA': -123.1912406, 'Roanoke, VA': -79.9414313, 'Rochester, MN': -92.4630182, 'Rochester, NY': -77.615214, 'Rockford, IL': -89.093966, 'Rock Springs, WY': -109.2047867, 'Roswell, NM': -104.5229518, 'Rota, TT': 13.553736, 'Sacramento, CA': -121.4938951, 'Saipan, TT': 125.5116649, 'Salina, KS': -97.6114237, 'Salisbury, MD': -75.6008881, 'Salt Lake City, UT': -111.8867975, 'San Angelo, TX': -100.4398442, 'San Antonio, TX': -98.4951405, 'San Diego, CA': -117.1627728, 'Sanford, FL': -81.2680345, 'San Francisco, CA': -122.4199061, 'San Jose, CA': -121.890583, 'San Juan, PR': -49.2687428522959, 'San Luis Obispo, CA': -120.3757163, 'Santa Ana, CA': -117.8732213, 'Santa Barbara, CA': -119.7026673, 'Santa Fe, NM': -105.9377997, 'Santa Maria, CA': -120.4358577, 'Santa Rosa, CA': -122.7141049, 'Sarasota/Bradenton, FL': -82.56510160912002, 'Sault Ste. Marie, MI': -84.359269, 'Savannah, GA': -2.4962, 'Scottsbluff, NE': -103.6627088, 'Scranton/Wilkes-Barre, PA': -75.72257122928625, 'Seattle, WA': -122.3300624, 'Shreveport, LA': -93.7651944, 'Sioux City, IA': -96.4058782, 'Sioux Falls, SD': -96.7003324, 'Sitka, AK': -135.337612, 'South Bend, IN': -105.518825, 'Spokane, WA': -117.4235106, 'Springfield, IL': -89.6439575, 'Springfield, MO': -93.2920373, 'State College, PA': -77.8616386, 'Staunton, VA': -79.08927008810585, 'St. Cloud, MN': -94.1642004, 'St. George, UT': -113.5841313, 'Stillwater, OK': -97.0585717, 'St. Louis, MO': -90.24111656024635, 'Stockton, CA': -121.2907796, 'St. Petersburg, FL': -82.6695085, 'Syracuse, NY': -76.1474244, 'Tallahassee, FL': -84.2809332, 'Tampa, FL': -82.458444, 'Texarkana, AR': -94.0430977, 'Toledo, OH': -83.5378173, 'Traverse City, MI': -85.6165301, 'Trenton, NJ': -74.7429463, 'Tucson, AZ': -110.9748477, 'Tulsa, OK': -95.9929113, 'Twin Falls, ID': -114.4602554, 'Tyler, TX': -95.3010624, 'Unalaska, AK': -166.5272262, 'Valdosta, GA': -83.2784851, 'Valparaiso, FL': -86.5027282, 'Vernal, UT': -109.5284741, 'Waco, TX': -97.1466695, 'Walla Walla, WA': -118.3393456, 'Washington, DC': -77.0365581, 'Waterloo, IA': -92.3329637, 'Watertown, NY': -75.9107565, 'Watertown, SD': -97.115289, 'Wenatchee, WA': -120.3103494, 'West Palm Beach/Palm Beach, FL': -80.0532942, 'West Yellowstone, MT': -111.10513722509046, 'White Plains, NY': -73.7629097, 'Wichita Falls, TX': -98.4933873, 'Wichita, KS': -97.3375448, 'Williamsport, PA': -77.0027671, 'Williston, ND': -103.621814, 'Wilmington, NC': -77.9447107, 'Worcester, MA': -71.8058232, 'Wrangell, AK': -132.3829431, 'Yakima, WA': -120.5108421, 'Yakutat, AK': -139.57831243878087, 'Youngstown/Warren, OH': -80.789606, 'Yuma, AZ': -114.47603157249804, 'Bristol/Johnson City/Kingsport, TN': -82.407401, 'Mission/McAllen/Edinburg, TX': -98.230011, 'New Bern/Morehead/Beaufort, NC': -77.044113, 'Hattiesburg/Laurel, MS': -89.3331, 'Iron Mountain/Kingsfd, MI': -88.1186,'Newburgh/Poughkeepsie, NY': -73.884201, 'College Station/Bryan, TX': -96.314445, 'Saginaw/Bay City/Midland, MI': -83.9508, 'Newport News/Williamsburg, VA': -76.492996, 'Harlingen/San Benito, TX': -97.6311, 'Sun Valley/Hailey/Ketchum, ID': -114.2959976}}, inplace=True) #Converting the planned departure time from 24 hours to a more catagorical variable, which captures an newTrain['crs_dep_time'] = (newTrain['crs_dep_time']/100).astype(int) newTrain['crs_arr_time'] = (newTrain['crs_arr_time']/100).astype(int) #Convert fl_date to Datetime, then just month number to account for higher delays within certain months monthDummies = pd.get_dummies(pd.to_datetime(newTrain.fl_date , 
format="%Y-%m-%d").dt.strftime('%B')) dayDummies = pd.get_dummies(pd.to_datetime(newTrain.fl_date , format="%Y-%m-%d").dt.strftime('%A')) #Creating dummy variables for carriers to account for delays related to certain carriers then concat these dummies onto newTrain mktCarrierDummies = pd.get_dummies(newTrain['mkt_unique_carrier']) # opCarrierDummies = pd.get_dummies(newTrain['op_unique_carrier']) newTrain = pd.concat([newTrain, mktCarrierDummies, monthDummies, dayDummies], axis=1) #tes without these dummies then swap and check results #op dummies was giving better results than mkt dummies newTrain['distanceSQ'] = newTrain['distance']**2 newTrain['originLong*Lat'] = newTrain['originLong']*newTrain['originLat'] newTrain['originLongSQ'] = newTrain['originLong']**2 newTrain['originLatSQ'] = newTrain['originLat']**2 newTrain['Month_Avg_Arr_DelaySQ'] = newTrain['Month_Avg_Arr_Delay']**2 #Assign X & y y = newTrain['arr_delay'].values.reshape(-1,1) X = newTrain.drop(columns = ['fl_date', 'origin', 'dep_time', 'mkt_unique_carrier','op_unique_carrier','arr_delay','origin_city_name','dest','dest_city_name', 'arr_time', 'taxi_out', 'taxi_in', 'VX']) X # #Scale both X and y due to differing units of measurements between features XScaled = scaler.fit_transform(X) yScaled = scaler.fit_transform(y) #Split data into train and test data X_train, X_test, y_train, y_test = train_test_split(XScaled, yScaled, train_size = 0.75, random_state=5) xg_reg = xgb.XGBRegressor(objective ='reg:squarederror', learning_rate = 0.04, max_depth = 5, alpha = 20, n_estimators = 800) xg_reg.fit(X_train, y_train) y_pred = xg_reg.predict(X_test) print('MSE: ', mean_squared_error(y_test, y_pred)) print('R^2 Score: ', r2_score(y_test, y_pred)) print('R^2 Adj-Score: ', 1-(1-r2_score(y_test, y_pred))*((len(X_test)-1)/(len(X_test)-len(X_test[0])-1))) #Here we are adding Average Arrival Delay relative to the month #Start by changing the date from object to datetime avg_delay['fl_date'] = pd.to_datetime(avg_delay['fl_date'], format='%Y-%m-%d') #Groupby to compare monthly averages in delays #NOTE: Negative values (early arrivals) ARE INCLUDED month_arr = avg_delay.groupby(avg_delay['fl_date'].dt.strftime('%m'))['avg_arr_delay'].mean() month_arr = month_arr.to_frame() month_dep = avg_delay.groupby(avg_delay['fl_date'].dt.strftime('%m'))['avg_dep_delay'].mean() month_dep = month_dep.to_frame() #Resetting the index month_arr = month_arr.reset_index() month_dep = month_dep.reset_index() #Creating 2 copies of fl_date, extracting the month in order to replace the month with its respective Average Arrival and/or Departure Delay flights_test['Month_Avg_Arr_Delay'] = pd.to_datetime(flights_test.fl_date , format="%Y-%m-%d").dt.month flights_test['Month_Avg_Dep_Delay'] = pd.to_datetime(flights_test.fl_date , format="%Y-%m-%d").dt.month #Creating a dictionary containing descriptive statistics with months as keys and their respective average arrival/departure delay as values month_arr_dict = dict(zip(month_arr.fl_date, month_arr.avg_arr_delay)) month_dep_dict = dict(zip(month_dep.fl_date, month_dep.avg_dep_delay)) #Replacing the values of the copied fl_date features with their respective average arrival/departure delays flights_test.replace({'Month_Avg_Arr_Delay': {1: 3.9281951759577782, 2: 6.670705822847316, 3: 2.854581405409215, 4: 4.177950054675787, 5: 6.416833084409337, 6: 10.393455353404956, 7: 8.910038151256863, 8: 8.847961842961464, 9: 1.5852627540712663, 10: 2.7923909776573588, 11: 2.757202900691894, 12: 4.815971225866452}}, inplace=True) 
flights_test.replace({'Month_Avg_Dep_Delay': {1: 9.82808600285777, 2: 11.689433403570048, 3: 8.45752678421839, 4: 9.375029826488923, 5: 11.283686509030298, 6: 14.629757423341372, 7: 13.770582924983167, 8: 13.279282347021876, 9: 6.900262796528355, 10: 7.502918821697483, 11: 8.049444482526964, 12: 10.62795705344142}}, inplace=True) #Groupby to account for taxi in/out based on carriers which appeared to have the largest cross variance Avg_Taxi_Out_Carrier = newTrain['taxi_out'].groupby(newTrain['op_unique_carrier']).mean().reset_index() Avg_Taxi_In_Carrier = newTrain['taxi_in'].groupby(newTrain['op_unique_carrier']).mean().reset_index() #Create dictionary filled with op_unique_carrier as keys and the mean taxi in and out times as values taxi_out_dict = dict(zip(Avg_Taxi_Out_Carrier.op_unique_carrier, Avg_Taxi_Out_Carrier.taxi_out)) taxi_in_dict = dict(zip(Avg_Taxi_In_Carrier.op_unique_carrier, Avg_Taxi_In_Carrier.taxi_in)) #Creating two copies of op_unique_carrier to replace the values with the carrier's respective average taxi in and out time flights_test['Avg_Taxi_In_Carrier'] = flights_test['op_unique_carrier'] flights_test['Avg_Taxi_Out_Carrier'] = flights_test['op_unique_carrier'] #Replacing the Carrier codes in copied features with their respective average taxi in and out times. flights_test.replace({'Avg_Taxi_In_Carrier': {'9E': 7.360715045754416, '9K': 4.714285714285714, 'AA': 9.445789265313048, 'AS': 8.082283095510885, 'AX': 7.877306903622693, 'B6': 7.36336976806185, 'C5': 8.20173646578141, 'CP': 9.47292817679558, 'DL': 7.542487551418056, 'EM': 4.005050505050505, 'EV': 8.146282587705182, 'F9': 10.15011596036264, 'G4': 6.785416666666666, 'G7': 7.6468788249694, 'HA': 7.200770960488275, 'KS': 3.617021276595745, 'MQ': 8.747318339100346, 'NK': 9.849809617825413, 'OH': 8.452057416267943, 'OO': 7.693122041031036, 'PT': 8.16294088425236, 'QX': 5.72971114167813, 'UA': 7.847001223990208, 'VX': 8.774086378737541, 'WN': 5.293501334008452, 'YV': 7.493231100994369, 'YX': 8.656821963394343, 'ZW': 8.605810234541577}}, inplace=True) flights_test.replace({'Avg_Taxi_Out_Carrier': {'9E': 21.49329644605235, '9K': 8.785714285714286, 'AA': 18.694389457609862, 'AS': 18.991042599729195, 'AX': 20.173615857826384, 'B6': 17.75419888029859, 'C5': 24.258426966292134, 'CP': 18.9292817679558, 'DL': 17.24063650140723, 'EM': 8.146464646464647, 'EV': 20.229320888316703, 'F9': 16.60278304870335, 'G4': 13.095052083333334, 'G7': 19.86689106487148, 'HA': 11.959524574365563, 'KS': 5.872340425531915, 'MQ': 18.889359861591696, 'NK': 15.177690029615006, 'OH': 17.736363636363638, 'OO': 19.763907154129406, 'PT': 20.783904619970194, 'QX': 13.661393856029344, 'UA': 19.814797619550077, 'VX': 21.036544850498338, 'WN': 12.319694638649244, 'YV': 17.57553612076195, 'YX': 21.11281198003328, 'ZW': 19.840618336886994}}, inplace=True) #Create 4 copies of origin_city_name feature to replace the current values with their respective longtitude and latitude values flights_test['originLat'] = flights_test['origin_city_name'] flights_test['originLong'] = flights_test['origin_city_name'] flights_test['destLat'] = flights_test['dest_city_name'] flights_test['destLong'] = flights_test['dest_city_name'] #Replacing the City names with their longitude and latitude values #Geopy (from geopy.geocoders import Nominatim) was used in the aggregation of these values, but some had to manually encoded due to API call limits flights_test.replace({'originLat': {'Aberdeen, SD': 45.4649805, 'Abilene, TX': 32.44645, 'Adak Island, AK': 51.7961654, 'Aguadilla, PR': 
18.4274359, 'Akron, OH': 41.083064, 'Albany, GA': 42.7439143, 'Albany, NY': 42.6511674, 'Albuquerque, NM': 35.0841034, 'Alexandria, LA': 31.199004, 'Allentown/Bethlehem/Easton, PA': 40.651163100000005, 'Alpena, MI': 45.0176181, 'Amarillo, TX': 35.2072185, 'Anchorage, AK': 61.2163129, 'Appleton, WI': 44.2611337, 'Arcata/Eureka, CA': 40.8033073, 'Asheville, NC': 35.6009498, 'Ashland, WV': 37.4084488, 'Aspen, CO': 39.1911128, 'Atlanta, GA': 33.7489924, 'Atlantic City, NJ': 39.3642852, 'Augusta, GA': 48.3689438, 'Austin, TX': 30.2711286, 'Bakersfield, CA': 35.3738712, 'Baltimore, MD': 39.2908816, 'Bangor, ME': 44.8011821, 'Barrow, AK': 71.387113, 'Baton Rouge, LA': 30.4459596, 'Beaumont/Port Arthur, TX': 29.954324, 'Belleville, IL': 48.8176714, 'Bellingham, WA': 48.7544012, 'Bemidji, MN': 47.4785418, 'Bend/Redmond, OR': 44.2165084, 'Bethel, AK': 60.7922222, 'Billings, MT': 45.7874957, 'Binghamton, NY': 42.096968, 'Birmingham, AL': 52.4459629, 'Bismarck/Mandan, ND': 46.8101709, 'Bloomington/Normal, IL': 40.508752, 'Boise, ID': 43.6166163, 'Boston, MA': 42.3602534, 'Bozeman, MT': 45.6794293, 'Brainerd, MN': 46.3580221, 'Branson, MO': 36.6411357, 'Brownsville, TX': 25.9140256, 'Brunswick, GA': 52.3175903, 'Buffalo, NY': 42.8867166, 'Bullhead City, AZ': 35.1477774, 'Burbank, CA': 34.1816482, 'Burlington, VT': 44.4761601, 'Butte, MT': 39.6519275, 'Cape Girardeau, MO': 37.3034933, 'Casper, WY': 42.849709, 'Cedar City, UT': 37.6774238, 'Cedar Rapids/Iowa City, IA': 41.9758872, 'Champaign/Urbana, IL': 40.1157948, 'Charleston/Dunbar, WV': 38.3616659, 'Charleston, SC': 32.7876012, 'Charlotte Amalie, VI': 18.341137, 'Charlotte, NC': 35.2272086, 'Charlottesville, VA': 38.0360726, 'Chattanooga, TN': 35.0457219, 'Cheyenne, WY': 41.139981, 'Chicago, IL': 41.8755616, 'Christiansted, VI': 17.7439481, 'Cincinnati, OH': 39.1014537, 'Clarksburg/Fairmont, WV': 39.2798118, 'Cleveland, OH': 41.5051613, 'Cody, WY': 44.5263107, 'Colorado Springs, CO': 38.8339578, 'Columbia, MO': 38.951883, 'Columbia, SC': 34.0007493, 'Columbus, GA': 40.0838862, 'Columbus, MS': 33.4956744, 'Columbus, OH': 39.9622601, 'Concord, NC': 35.4094178, 'Cordova, AK': 60.5439444, 'Corpus Christi, TX': 27.7477253, 'Dallas/Fort Worth, TX': 32.7476308, 'Dallas, TX': 32.7762719, 'Daytona Beach, FL': 29.2108147, 'Dayton, OH': 39.7589478, 'Deadhorse, AK': 70.2006973, 'Del Rio, TX': 29.3655405, 'Denver, CO': 5.3428475, 'Des Moines, IA': 41.5910323, 'Detroit, MI': 42.3315509, 'Devils Lake, ND': 48.112779, 'Dickinson, ND': 46.8791756, 'Dillingham, AK': 59.0397222, 'Dothan, AL': 31.2237434, 'Dubuque, IA': 42.5006217, 'Duluth, MN': 46.7729322, 'Durango, CO': 24.833333, 'Eagle, CO': 39.6161124, 'Eau Claire, WI': 44.811349, 'Elko, NV': 41.1958128, 'Elmira/Corning, NY': 42.1608441, 'El Paso, TX': 31.7754152, 'Erie, PA': 42.1294712, 'Escanaba, MI': 45.7455707, 'Eugene, OR': 44.0505054, 'Evansville, IN': 37.9386712, 'Everett, WA': 47.9673056, 'Fairbanks, AK': 64.837845, 'Fargo, ND': 46.877229, 'Fayetteville, AR': 36.0625843, 'Fayetteville, NC': 35.0525759, 'Flagstaff, AZ': 35.1816047, 'Flint, MI': 43.0161693, 'Florence, SC': 34.1984435, 'Fort Lauderdale, FL': 26.1223084, 'Fort Myers, FL': 26.640628, 'Fort Smith, AR': 35.3872218, 'Fort Wayne, IN': 41.0799898, 'Fresno, CA': 36.7394421, 'Gainesville, FL': 29.6519684, 'Garden City, KS': 37.9716898, 'Gillette, WY': 44.290635, 'Grand Forks, ND': 47.9078244, 'Grand Island, NE': 40.924271, 'Grand Junction, CO': 39.063956, 'Grand Rapids, MI': 42.9632405, 'Great Falls, MT': 47.5048851, 'Green Bay, WI': 44.5126379, 
'Greensboro/High Point, NC': 36.0726355, 'Greenville, NC': 35.613224, 'Greer, SC': 34.9381361, 'Guam, TT': 13.486490199999999, 'Gulfport/Biloxi, MS': 30.4900534, 'Gunnison, CO': 38.6476702, 'Gustavus, AK': 58.4128377, 'Hagerstown, MD': 39.6419219, 'Hancock/Houghton, MI': 47.126871, 'Harrisburg, PA': 40.2663107, 'Hartford, CT': 41.7655582, 'Hayden, CO': 47.7725145, 'Hays, KS': 38.8791783, 'Helena, MT': 46.5927425, 'Hibbing, MN': 47.427155, 'Hilo, HI': 19.7073734, 'Hilton Head, SC': 32.3836213, 'Hobbs, NM': 32.707667, 'Honolulu, HI': 21.304547, 'Hoolehua, HI': 21.1590908, 'Houston, TX': 29.7589382, 'Huntsville, AL': 34.729847, 'Hyannis, MA': 41.651513, 'Idaho Falls, ID': 43.4935245, 'Indianapolis, IN': 39.9164009, 'International Falls, MN': 48.601033, 'Islip, NY': 40.7304311, 'Ithaca/Cortland, NY': 42.4415242, 'Jackson/Vicksburg, MS': 32.3520532, 'Jacksonville/Camp Lejeune, NC': 34.7338577, 'Jacksonville, FL': 30.3321838, 'Jackson, WY': 32.2990384, 'Jamestown, ND': 46.910544, 'Joplin, MO': 37.08418, 'Juneau, AK': 58.3019496, 'Kahului, HI': 20.8747708, 'Kalamazoo, MI': 42.291707, 'Kalispell, MT': 48.2022563, 'Kansas City, MO': 39.100105, 'Kapalua, HI': 20.99490395, 'Kearney, NE': 40.4906216, 'Ketchikan, AK': 55.3430696, 'Key West, FL': 24.5625566, 'Killeen, TX': 31.1171441, 'King Salmon, AK': 58.7551615, 'Knoxville, TN': 35.9603948, 'Kodiak, AK': 57.79, 'Kona, HI': 19.743906, 'Kotzebue, AK': 66.8982057, 'La Crosse, WI': 43.8014053, 'Lafayette, LA': 30.2240897, 'Lake Charles, LA': 30.2265949, 'Lanai, HI': 20.830544099999997, 'Lansing, MI': 42.7337712, 'Laramie, WY': 41.311367, 'Laredo, TX': 27.5199841, 'Las Vegas, NV': 36.1672559, 'Latrobe, PA': 40.317287, 'Lawton/Fort Sill, OK': 34.6172103, 'Lewisburg, WV': 37.8017879, 'Lewiston, ID': 46.4195913, 'Lexington, KY': 38.0464066, 'Liberal, KS': 37.0430812, 'Lihue, HI': 21.9769622, 'Lincoln, NE': 40.8088861, 'Little Rock, AR': 34.7464809, 'Long Beach, CA': 33.7690164, 'Longview, TX': 32.5007031, 'Los Angeles, CA': 34.0536909, 'Louisville, KY': 38.2542376, 'Lubbock, TX': 33.5635206, 'Lynchburg, VA': 37.4137536, 'Madison, WI': 43.074761, 'Mammoth Lakes, CA': 37.6432525, 'Manchester, NH': 42.9956397, 'Manhattan/Ft. 
Riley, KS': 40.8576918, 'Marquette, MI': 46.4481521, "Martha's Vineyard, MA": 41.3918832, 'Medford, OR': 42.3264181, 'Melbourne, FL': 28.106471, 'Memphis, TN': 35.1490215, 'Meridian, MS': 32.3643098, 'Miami, FL': 25.7741728, 'Midland/Odessa, TX': 31.8329723, 'Milwaukee, WI': 43.0349931, 'Minneapolis, MN': 44.9772995, 'Minot, ND': 48.23251, 'Missoula, MT': 46.8701049, 'Moab, UT': 38.5738096, 'Mobile, AL': 30.6943566, 'Moline, IL': 41.5067003, 'Monroe, LA': 38.2722313, 'Monterey, CA': 36.2231079, 'Montgomery, AL': 32.379952849999995, 'Montrose/Delta, CO': 38.8777609, 'Mosinee, WI': 44.7927298, 'Muskegon, MI': 43.2341813, 'Myrtle Beach, SC': 33.6956461, 'Nantucket, MA': 41.316911450000006, 'Nashville, TN': 36.1622296, 'Newark, NJ': 40.735657, 'New Haven, CT': 41.298434349999994, 'New Orleans, LA': 29.9499323, 'New York, NY': 40.7127281, 'Niagara Falls, NY': 43.08436, 'Nome, AK': 64.4989922, 'Norfolk, VA': 52.56365215, 'North Bend/Coos Bay, OR': 43.4065089, 'North Platte, NE': 41.1238873, 'Oakland, CA': 37.8044557, 'Ogdensburg, NY': 44.694285, 'Ogden, UT': 41.2230048, 'Oklahoma City, OK': 35.4729886, 'Omaha, NE': 41.2587459, 'Ontario, CA': 50.000678, 'Orlando, FL': 28.5421109, 'Owensboro, KY': 37.7742152, 'Paducah, KY': 37.0833893, 'Pago Pago, TT': -14.2754786, 'Palm Springs, CA': 33.772179449999996, 'Panama City, FL': 30.1600827, 'Pasco/Kennewick/Richland, WA': 46.1736015, 'Pellston, MI': 45.552789, 'Pensacola, FL': 30.421309, 'Peoria, IL': 40.6938609, 'Petersburg, AK': 56.8127965, 'Philadelphia, PA': 39.9527237, 'Phoenix, AZ': 33.4484367, 'Pierre, SD': 44.3683644, 'Pittsburgh, PA': 40.4416941, 'Plattsburgh, NY': 44.69282, 'Pocatello, ID': 42.8688613, 'Ponce, PR': 18.0039949, 'Portland, ME': 43.6610277, 'Portland, OR': 45.5202471, 'Portsmouth, NH': 43.0702223, 'Prescott, AZ': 34.5399962, 'Presque Isle/Houlton, ME': 46.661867799999996, 'Providence, RI': 41.8239891, 'Provo, UT': 40.2338438, 'Pueblo, CO': 10.961033, 'Pullman, WA': 46.7304268, 'Punta Gorda, FL': 26.9297836, 'Quincy, IL': 39.9356016, 'Raleigh/Durham, NC': 35.9217839, 'Rapid City, SD': 44.0869329, 'Redding, CA': 40.5863563, 'Reno, NV': 39.5261206, 'Rhinelander, WI': 45.636623, 'Richmond, VA': 49.1977086, 'Roanoke, VA': 37.270973, 'Rochester, MN': 44.0234387, 'Rochester, NY': 43.157285, 'Rockford, IL': 42.2713945, 'Rock Springs, WY': 41.5869225, 'Roswell, NM': 33.3943282, 'Rota, TT': 66.947975, 'Sacramento, CA': 38.5810606, 'Saipan, TT': 7.0698398, 'Salina, KS': 38.8402805, 'Salisbury, MD': 38.3662114, 'Salt Lake City, UT': 40.7596198, 'San Angelo, TX': 31.4648357, 'San Antonio, TX': 29.4246002, 'San Diego, CA': 32.7174202, 'Sanford, FL': 28.8117297, 'San Francisco, CA': 37.7790262, 'San Jose, CA': 37.3361905, 'San Juan, PR': -25.4206759, 'San Luis Obispo, CA': 35.3540209, 'Santa Ana, CA': 33.7494951, 'Santa Barbara, CA': 34.4221319, 'Santa Fe, NM': 35.6869996, 'Santa Maria, CA': 34.9531295, 'Santa Rosa, CA': 38.4404925, 'Sarasota/Bradenton, FL': 27.499764300000002, 'Sault Ste. Marie, MI': 46.490586, 'Savannah, GA': 9.7568312, 'Scottsbluff, NE': 41.862302, 'Scranton/Wilkes-Barre, PA': 41.33709205, 'Seattle, WA': 47.6038321, 'Shreveport, LA': 32.5221828, 'Sioux City, IA': 42.4966815, 'Sioux Falls, SD': 43.549973, 'Sitka, AK': 57.0524973, 'South Bend, IN': 38.622348, 'Spokane, WA': 47.6571934, 'Springfield, IL': 39.7990175, 'Springfield, MO': 37.2166779, 'State College, PA': 40.7944504, 'Staunton, VA': 38.1357949, 'St. Cloud, MN': 45.5616075, 'St. George, UT': 37.104153, 'Stillwater, OK': 36.1156306, 'St. 
Louis, MO': 38.6529545, 'Stockton, CA': 37.9577016, 'St. Petersburg, FL': 27.7703796, 'Syracuse, NY': 43.0481221, 'Tallahassee, FL': 30.4380832, 'Tampa, FL': 27.9477595, 'Texarkana, AR': 33.4254684, 'Toledo, OH': 41.6529143, 'Traverse City, MI': 44.7606441, 'Trenton, NJ': 40.2170575, 'Tucson, AZ': 32.2228765, 'Tulsa, OK': 36.1556805, 'Twin Falls, ID': 42.5704456, 'Tyler, TX': 32.3512601, 'Unalaska, AK': 53.8722824, 'Valdosta, GA': 30.8327022, 'Valparaiso, FL': 30.5085309, 'Vernal, UT': 40.4556825, 'Waco, TX': 31.549333, 'Walla Walla, WA': 46.0667277, 'Washington, DC': 38.8949924, 'Waterloo, IA': 42.4979693, 'Watertown, NY': 43.9747838, 'Watertown, SD': 44.899211, 'Wenatchee, WA': 47.4234599, 'West Palm Beach/Palm Beach, FL': 26.715364, 'West Yellowstone, MT': 44.664290199999996, 'White Plains, NY': 41.0339862, 'Wichita Falls, TX': 33.9137085, 'Wichita, KS': 37.6922361, 'Williamsport, PA': 41.2493292, 'Williston, ND': 48.1465457, 'Wilmington, NC': 34.2257282, 'Worcester, MA': 42.2761217, 'Wrangell, AK': 56.4706022, 'Yakima, WA': 46.601557, 'Yakutat, AK': 59.572734499999996, 'Youngstown/Warren, OH': 41.22497, 'Yuma, AZ': 32.665135, 'Bristol/Johnson City/Kingsport, TN': 36.475201, 'Mission/McAllen/Edinburg, TX': 26.203407, 'New Bern/Morehead/Beaufort, NC': 35.108494, 'Hattiesburg/Laurel, MS': 31.467, 'Iron Mountain/Kingsfd, MI': 45.8146, 'Newburgh/Poughkeepsie, NY': 41.66598, 'College Station/Bryan, TX': 30.601389, 'Saginaw/Bay City/Midland, MI': 43.4195, 'Newport News/Williamsburg, VA': 37.131900, 'Harlingen/San Benito, TX': 26.1326, 'Sun Valley/Hailey/Ketchum, ID': 43.504398}}, inplace=True) flights_test.replace({'originLong': {'Aberdeen, SD': -98.487813, 'Abilene, TX': -99.7475905, 'Adak Island, AK': -176.5734916431957, 'Aguadilla, PR': -67.1541343, 'Akron, OH': -81.518485, 'Albany, GA': -73.8016558, 'Albany, NY': -73.754968, 'Albuquerque, NM': -106.6509851, 'Alexandria, LA': 29.894378, 'Allentown/Bethlehem/Easton, PA': -75.44225386838299, 'Alpena, MI': -83.6670019, 'Amarillo, TX': -101.8338246, 'Anchorage, AK': -149.894852, 'Appleton, WI': -88.4067604, 'Arcata/Eureka, CA': -124.1535049, 'Asheville, NC': -82.5540161, 'Ashland, WV': -81.3526017, 'Aspen, CO': -106.8235606, 'Atlanta, GA': -84.3902644, 'Atlantic City, NJ': -74.4229351, 'Augusta, GA': 10.8933327, 'Austin, TX': -97.7436995, 'Bakersfield, CA': -119.0194639, 'Baltimore, MD': -76.610759, 'Bangor, ME': -68.7778138, 'Barrow, AK': -156.4809618, 'Baton Rouge, LA': -91.18738, 'Beaumont/Port Arthur, TX': -93.985972, 'Belleville, IL': 6.0982683, 'Bellingham, WA': -122.4788361, 'Bemidji, MN': -94.8907869, 'Bend/Redmond, OR': -121.2150324, 'Bethel, AK': -161.7558333, 'Billings, MT': -108.49607, 'Binghamton, NY': -75.914341, 'Birmingham, AL': -1.8237251, 'Bismarck/Mandan, ND': -100.8363564, 'Bloomington/Normal, IL': -88.9844947, 'Boise, ID': -116.200886, 'Boston, MA': -71.0582912, 'Bozeman, MT': -111.044047, 'Brainerd, MN': -94.2008288, 'Branson, MO': -93.2175285, 'Brownsville, TX': -97.4890856, 'Brunswick, GA': 10.560215, 'Buffalo, NY': -78.8783922, 'Bullhead City, AZ': -114.5682983, 'Burbank, CA': -118.3258554, 'Burlington, VT': -73.212906, 'Butte, MT': -121.5858444, 'Cape Girardeau, MO': -89.5230357, 'Casper, WY': -106.3254928, 'Cedar City, UT': -113.0618277, 'Cedar Rapids/Iowa City, IA': -91.6704053, 'Champaign/Urbana, IL': -88.241194, 'Charleston/Dunbar, WV': -81.7207214, 'Charleston, SC': -79.9402728, 'Charlotte Amalie, VI': -64.932789, 'Charlotte, NC': -80.8430827, 'Charlottesville, VA': -78.49973472559668, 'Chattanooga, TN': 
-85.3094883, 'Cheyenne, WY': -104.820246, 'Chicago, IL': -87.6244212, 'Christiansted, VI': -64.7079823, 'Cincinnati, OH': -84.5124602, 'Clarksburg/Fairmont, WV': -80.3300893, 'Cleveland, OH': -81.6934446, 'Cody, WY': -109.0563923, 'Colorado Springs, CO': -104.8253485, 'Columbia, MO': -92.3337366, 'Columbia, SC': -81.0343313, 'Columbus, GA': -83.0765043, 'Columbus, MS': -88.4272627, 'Columbus, OH': -83.0007065, 'Concord, NC': -80.5800049, 'Cordova, AK': -145.7589103, 'Corpus Christi, TX': -97.4014129, 'Dallas/Fort Worth, TX': -97.3135971, 'Dallas, TX': -96.7968559, 'Daytona Beach, FL': -81.0228331, 'Dayton, OH': -84.1916069, 'Deadhorse, AK': -148.4598151, 'Del Rio, TX': -100.8946984, 'Denver, CO': -72.3959849, 'Des Moines, IA': -93.6046655, 'Detroit, MI': -83.0466403, 'Devils Lake, ND': -98.86512, 'Dickinson, ND': -102.7896242, 'Dillingham, AK': -158.4575, 'Dothan, AL': -85.3933906, 'Dubuque, IA': -90.6647967, 'Duluth, MN': -92.1251218, 'Durango, CO': -104.833333, 'Eagle, CO': -106.7172844, 'Eau Claire, WI': -91.4984941, 'Elko, NV': -115.3272864, 'Elmira/Corning, NY': -76.89199038453467, 'El Paso, TX': -106.464634, 'Erie, PA': -80.0852695, 'Escanaba, MI': -87.0647434, 'Eugene, OR': -123.0950506, 'Evansville, IN': -87.518899, 'Everett, WA': -122.2013998, 'Fairbanks, AK': -147.716675, 'Fargo, ND': -96.789821, 'Fayetteville, AR': -94.1574328, 'Fayetteville, NC': -78.878292, 'Flagstaff, AZ': -111.6165953319917, 'Flint, MI': -83.6900211, 'Florence, SC': -79.7671658, 'Fort Lauderdale, FL': -80.1433786, 'Fort Myers, FL': -81.8723084, 'Fort Smith, AR': -94.4248983, 'Fort Wayne, IN': -85.1386015, 'Fresno, CA': -119.7848307, 'Gainesville, FL': -82.3249846, 'Garden City, KS': -100.8726618, 'Gillette, WY': -105.501876, 'Grand Forks, ND': -97.0592028, 'Grand Island, NE': -98.338685, 'Grand Junction, CO': -108.5507317, 'Grand Rapids, MI': -85.6678639, 'Great Falls, MT': -111.29189, 'Green Bay, WI': -88.0125794, 'Greensboro/High Point, NC': -79.7919754, 'Greenville, NC': -77.3724593, 'Greer, SC': -82.2272119, 'Guam, TT': 144.80206025352555, 'Gulfport/Biloxi, MS': -89.0290044, 'Gunnison, CO': -107.0603126, 'Gustavus, AK': -135.7375654, 'Hagerstown, MD': -77.7202641, 'Hancock/Houghton, MI': -88.580956, 'Harrisburg, PA': -76.8861122, 'Hartford, CT': -72.69061276146614, 'Hayden, CO': -116.82675375791398, 'Hays, KS': -99.3267702, 'Helena, MT': -112.036277, 'Hibbing, MN': -92.937689, 'Hilo, HI': -155.0815803, 'Hilton Head, SC': -99.748119, 'Hobbs, NM': -103.1311314, 'Honolulu, HI': -157.8556764, 'Hoolehua, HI': -157.09484723911947, 'Houston, TX': -95.3676974, 'Huntsville, AL': -86.5859011, 'Hyannis, MA': -70.2825918, 'Idaho Falls, ID': -112.0400919, 'Indianapolis, IN': -86.0519568269157, 'International Falls, MN': -93.4105904, 'Islip, NY': -73.2108618, 'Ithaca/Cortland, NY': -76.4580207, 'Jackson/Vicksburg, MS': -90.8730418, 'Jacksonville/Camp Lejeune, NC': -77.4457643, 'Jacksonville, FL': -81.655651, 'Jackson, WY': -90.1847691, 'Jamestown, ND': -98.708436, 'Joplin, MO': -94.51323, 'Juneau, AK': -134.419734, 'Kahului, HI': -156.4529879461996, 'Kalamazoo, MI': -85.5872286, 'Kalispell, MT': -114.316711, 'Kansas City, MO': -94.5781416, 'Kapalua, HI': -156.6562339558182, 'Kearney, NE': -98.9472344, 'Ketchikan, AK': -131.6466819, 'Key West, FL': -81.7724368, 'Killeen, TX': -97.727796, 'King Salmon, AK': -156.5192469940953, 'Knoxville, TN': -83.9210261, 'Kodiak, AK': -152.4072222, 'Kona, HI': -156.0422959812206, 'Kotzebue, AK': -162.5977621, 'La Crosse, WI': -91.2395429, 'Lafayette, LA': -92.0198427, 'Lake Charles, 
LA': -93.2173759, 'Lanai, HI': -156.9029492509114, 'Lansing, MI': -84.5553805, 'Laramie, WY': -105.591101, 'Laredo, TX': -99.4953764, 'Las Vegas, NV': -115.1485163, 'Latrobe, PA': -79.3840301, 'Lawton/Fort Sill, OK': -98.4037888, 'Lewisburg, WV': -80.4456303, 'Lewiston, ID': -117.0216144, 'Lexington, KY': -84.4970393, 'Liberal, KS': -100.920999, 'Lihue, HI': -159.3687721, 'Lincoln, NE': -96.7077751, 'Little Rock, AR': -92.2895948, 'Long Beach, CA': -118.191604, 'Longview, TX': -94.74049, 'Los Angeles, CA': -118.242766, 'Louisville, KY': -85.759407, 'Lubbock, TX': -101.879336, 'Lynchburg, VA': -79.1422464, 'Madison, WI': -89.3837613, 'Mammoth Lakes, CA': -118.9668509, 'Manchester, NH': -71.4547891, 'Manhattan/Ft. Riley, KS': -73.9222899, 'Marquette, MI': -87.6305899, "Martha's Vineyard, MA": -70.62085427857699, 'Medford, OR': -122.8718605, 'Melbourne, FL': -80.6371513, 'Memphis, TN': -90.0516285, 'Meridian, MS': -88.703656, 'Miami, FL': -80.19362, 'Midland/Odessa, TX': -102.3606957, 'Milwaukee, WI': -87.922497, 'Minneapolis, MN': -93.2654692, 'Minot, ND': -101.296273, 'Missoula, MT': -113.995267, 'Moab, UT': -109.5462146, 'Mobile, AL': -88.0430541, 'Moline, IL': -90.5151342, 'Monroe, LA': -90.1792484, 'Monterey, CA': -121.3877428, 'Montgomery, AL': -86.3107669425032, 'Montrose/Delta, CO': -108.226467, 'Mosinee, WI': -89.7035959, 'Muskegon, MI': -86.2483921, 'Myrtle Beach, SC': -78.8900409, 'Nantucket, MA': -70.14287301528347, 'Nashville, TN': -86.7743531, 'Newark, NJ': -74.1723667, 'New Haven, CT': -72.93102342707913, 'New Orleans, LA': -90.0701156, 'New York, NY': -74.0060152, 'Niagara Falls, NY': -79.0614686, 'Nome, AK': -165.39879944316317, 'Norfolk, VA': 1.2623608080231654, 'North Bend/Coos Bay, OR': -124.2242824, 'North Platte, NE': -100.7654232, 'Oakland, CA': -122.2713563, 'Ogdensburg, NY': -75.486374, 'Ogden, UT': -111.9738429, 'Oklahoma City, OK': -97.5170536, 'Omaha, NE': -95.9383758, 'Ontario, CA': -86.000977, 'Orlando, FL': -81.3790304, 'Owensboro, KY': -87.1133304, 'Paducah, KY': -88.6000478, 'Pago Pago, TT': -170.7048298, 'Palm Springs, CA': -116.49529769785079, 'Panama City, FL': -85.6545729, 'Pasco/Kennewick/Richland, WA': -119.0664001, 'Pellston, MI': -84.783936, 'Pensacola, FL': -87.2169149, 'Peoria, IL': -89.5891008, 'Petersburg, AK': -132.95547, 'Philadelphia, PA': -75.1635262, 'Phoenix, AZ': -112.0741417, 'Pierre, SD': -100.3511367, 'Pittsburgh, PA': -79.9900861, 'Plattsburgh, NY': -73.45562, 'Pocatello, ID': -112.4401098, 'Ponce, PR': -66.6169509, 'Portland, ME': -70.2548596, 'Portland, OR': -122.6741949, 'Portsmouth, NH': -70.7548621, 'Prescott, AZ': -112.4687616, 'Presque Isle/Houlton, ME': -68.01074889363161, 'Providence, RI': -71.4128343, 'Provo, UT': -111.6585337, 'Pueblo, CO': -74.84053554739253, 'Pullman, WA': -117.173895, 'Punta Gorda, FL': -82.0453664, 'Quincy, IL': -91.4098727, 'Raleigh/Durham, NC': -78.76087880585929, 'Rapid City, SD': -103.2274481, 'Redding, CA': -122.3916754, 'Reno, NV': -119.8126581, 'Rhinelander, WI': -89.412075, 'Richmond, VA': -123.1912406, 'Roanoke, VA': -79.9414313, 'Rochester, MN': -92.4630182, 'Rochester, NY': -77.615214, 'Rockford, IL': -89.093966, 'Rock Springs, WY': -109.2047867, 'Roswell, NM': -104.5229518, 'Rota, TT': 13.553736, 'Sacramento, CA': -121.4938951, 'Saipan, TT': 125.5116649, 'Salina, KS': -97.6114237, 'Salisbury, MD': -75.6008881, 'Salt Lake City, UT': -111.8867975, 'San Angelo, TX': -100.4398442, 'San Antonio, TX': -98.4951405, 'San Diego, CA': -117.1627728, 'Sanford, FL': -81.2680345, 'San Francisco, CA': 
-122.4199061, 'San Jose, CA': -121.890583, 'San Juan, PR': -49.2687428522959, 'San Luis Obispo, CA': -120.3757163, 'Santa Ana, CA': -117.8732213, 'Santa Barbara, CA': -119.7026673, 'Santa Fe, NM': -105.9377997, 'Santa Maria, CA': -120.4358577, 'Santa Rosa, CA': -122.7141049, 'Sarasota/Bradenton, FL': -82.56510160912002, 'Sault Ste. Marie, MI': -84.359269, 'Savannah, GA': -2.4962, 'Scottsbluff, NE': -103.6627088, 'Scranton/Wilkes-Barre, PA': -75.72257122928625, 'Seattle, WA': -122.3300624, 'Shreveport, LA': -93.7651944, 'Sioux City, IA': -96.4058782, 'Sioux Falls, SD': -96.7003324, 'Sitka, AK': -135.337612, 'South Bend, IN': -105.518825, 'Spokane, WA': -117.4235106, 'Springfield, IL': -89.6439575, 'Springfield, MO': -93.2920373, 'State College, PA': -77.8616386, 'Staunton, VA': -79.08927008810585, 'St. Cloud, MN': -94.1642004, 'St. George, UT': -113.5841313, 'Stillwater, OK': -97.0585717, 'St. Louis, MO': -90.24111656024635, 'Stockton, CA': -121.2907796, 'St. Petersburg, FL': -82.6695085, 'Syracuse, NY': -76.1474244, 'Tallahassee, FL': -84.2809332, 'Tampa, FL': -82.458444, 'Texarkana, AR': -94.0430977, 'Toledo, OH': -83.5378173, 'Traverse City, MI': -85.6165301, 'Trenton, NJ': -74.7429463, 'Tucson, AZ': -110.9748477, 'Tulsa, OK': -95.9929113, 'Twin Falls, ID': -114.4602554, 'Tyler, TX': -95.3010624, 'Unalaska, AK': -166.5272262, 'Valdosta, GA': -83.2784851, 'Valparaiso, FL': -86.5027282, 'Vernal, UT': -109.5284741, 'Waco, TX': -97.1466695, 'Walla Walla, WA': -118.3393456, 'Washington, DC': -77.0365581, 'Waterloo, IA': -92.3329637, 'Watertown, NY': -75.9107565, 'Watertown, SD': -97.115289, 'Wenatchee, WA': -120.3103494, 'West Palm Beach/Palm Beach, FL': -80.0532942, 'West Yellowstone, MT': -111.10513722509046, 'White Plains, NY': -73.7629097, 'Wichita Falls, TX': -98.4933873, 'Wichita, KS': -97.3375448, 'Williamsport, PA': -77.0027671, 'Williston, ND': -103.621814, 'Wilmington, NC': -77.9447107, 'Worcester, MA': -71.8058232, 'Wrangell, AK': -132.3829431, 'Yakima, WA': -120.5108421, 'Yakutat, AK': -139.57831243878087, 'Youngstown/Warren, OH': -80.789606, 'Yuma, AZ': -114.47603157249804, 'Bristol/Johnson City/Kingsport, TN': -82.407401, 'Mission/McAllen/Edinburg, TX': -98.230011, 'New Bern/Morehead/Beaufort, NC': -77.044113, 'Hattiesburg/Laurel, MS': -89.3331, 'Iron Mountain/Kingsfd, MI': -88.1186, 'Newburgh/Poughkeepsie, NY': -73.884201, 'College Station/Bryan, TX': -96.314445, 'Saginaw/Bay City/Midland, MI': -83.9508, 'Newport News/Williamsburg, VA': -76.492996, 'Harlingen/San Benito, TX': -97.6311, 'Sun Valley/Hailey/Ketchum, ID': -114.2959976}}, inplace=True) flights_test.replace({'destLat': {'Aberdeen, SD': 45.4649805, 'Abilene, TX': 32.44645, 'Adak Island, AK': 51.7961654, 'Aguadilla, PR': 18.4274359, 'Akron, OH': 41.083064, 'Albany, GA': 42.7439143, 'Albany, NY': 42.6511674, 'Albuquerque, NM': 35.0841034, 'Alexandria, LA': 31.199004, 'Allentown/Bethlehem/Easton, PA': 40.651163100000005, 'Alpena, MI': 45.0176181, 'Amarillo, TX': 35.2072185, 'Anchorage, AK': 61.2163129, 'Appleton, WI': 44.2611337, 'Arcata/Eureka, CA': 40.8033073, 'Asheville, NC': 35.6009498, 'Ashland, WV': 37.4084488, 'Aspen, CO': 39.1911128, 'Atlanta, GA': 33.7489924, 'Atlantic City, NJ': 39.3642852, 'Augusta, GA': 48.3689438, 'Austin, TX': 30.2711286, 'Bakersfield, CA': 35.3738712, 'Baltimore, MD': 39.2908816, 'Bangor, ME': 44.8011821, 'Barrow, AK': 71.387113, 'Baton Rouge, LA': 30.4459596, 'Beaumont/Port Arthur, TX': 29.954324, 'Belleville, IL': 48.8176714, 'Bellingham, WA': 48.7544012, 'Bemidji, MN': 47.4785418, 
'Bend/Redmond, OR': 44.2165084, 'Bethel, AK': 60.7922222, 'Billings, MT': 45.7874957, 'Binghamton, NY': 42.096968, 'Birmingham, AL': 52.4459629, 'Bismarck/Mandan, ND': 46.8101709, 'Bloomington/Normal, IL': 40.508752, 'Boise, ID': 43.6166163, 'Boston, MA': 42.3602534, 'Bozeman, MT': 45.6794293, 'Brainerd, MN': 46.3580221, 'Branson, MO': 36.6411357, 'Brownsville, TX': 25.9140256, 'Brunswick, GA': 52.3175903, 'Buffalo, NY': 42.8867166, 'Bullhead City, AZ': 35.1477774, 'Burbank, CA': 34.1816482, 'Burlington, VT': 44.4761601, 'Butte, MT': 39.6519275, 'Cape Girardeau, MO': 37.3034933, 'Casper, WY': 42.849709, 'Cedar City, UT': 37.6774238, 'Cedar Rapids/Iowa City, IA': 41.9758872, 'Champaign/Urbana, IL': 40.1157948, 'Charleston/Dunbar, WV': 38.3616659, 'Charleston, SC': 32.7876012, 'Charlotte Amalie, VI': 18.341137, 'Charlotte, NC': 35.2272086, 'Charlottesville, VA': 38.0360726, 'Chattanooga, TN': 35.0457219, 'Cheyenne, WY': 41.139981, 'Chicago, IL': 41.8755616, 'Christiansted, VI': 17.7439481, 'Cincinnati, OH': 39.1014537, 'Clarksburg/Fairmont, WV': 39.2798118, 'Cleveland, OH': 41.5051613, 'Cody, WY': 44.5263107, 'Colorado Springs, CO': 38.8339578, 'Columbia, MO': 38.951883, 'Columbia, SC': 34.0007493, 'Columbus, GA': 40.0838862, 'Columbus, MS': 33.4956744, 'Columbus, OH': 39.9622601, 'Concord, NC': 35.4094178, 'Cordova, AK': 60.5439444, 'Corpus Christi, TX': 27.7477253, 'Dallas/Fort Worth, TX': 32.7476308, 'Dallas, TX': 32.7762719, 'Daytona Beach, FL': 29.2108147, 'Dayton, OH': 39.7589478, 'Deadhorse, AK': 70.2006973, 'Del Rio, TX': 29.3655405, 'Denver, CO': 5.3428475, 'Des Moines, IA': 41.5910323, 'Detroit, MI': 42.3315509, 'Devils Lake, ND': 48.112779, 'Dickinson, ND': 46.8791756, 'Dillingham, AK': 59.0397222, 'Dothan, AL': 31.2237434, 'Dubuque, IA': 42.5006217, 'Duluth, MN': 46.7729322, 'Durango, CO': 24.833333, 'Eagle, CO': 39.6161124, 'Eau Claire, WI': 44.811349, 'Elko, NV': 41.1958128, 'Elmira/Corning, NY': 42.1608441, 'El Paso, TX': 31.7754152, 'Erie, PA': 42.1294712, 'Escanaba, MI': 45.7455707, 'Eugene, OR': 44.0505054, 'Evansville, IN': 37.9386712, 'Everett, WA': 47.9673056, 'Fairbanks, AK': 64.837845, 'Fargo, ND': 46.877229, 'Fayetteville, AR': 36.0625843, 'Fayetteville, NC': 35.0525759, 'Flagstaff, AZ': 35.1816047, 'Flint, MI': 43.0161693, 'Florence, SC': 34.1984435, 'Fort Lauderdale, FL': 26.1223084, 'Fort Myers, FL': 26.640628, 'Fort Smith, AR': 35.3872218, 'Fort Wayne, IN': 41.0799898, 'Fresno, CA': 36.7394421, 'Gainesville, FL': 29.6519684, 'Garden City, KS': 37.9716898, 'Gillette, WY': 44.290635, 'Grand Forks, ND': 47.9078244, 'Grand Island, NE': 40.924271, 'Grand Junction, CO': 39.063956, 'Grand Rapids, MI': 42.9632405, 'Great Falls, MT': 47.5048851, 'Green Bay, WI': 44.5126379, 'Greensboro/High Point, NC': 36.0726355, 'Greenville, NC': 35.613224, 'Greer, SC': 34.9381361, 'Guam, TT': 13.486490199999999, 'Gulfport/Biloxi, MS': 30.4900534, 'Gunnison, CO': 38.6476702, 'Gustavus, AK': 58.4128377, 'Hagerstown, MD': 39.6419219, 'Hancock/Houghton, MI': 47.126871, 'Harrisburg, PA': 40.2663107, 'Hartford, CT': 41.7655582, 'Hayden, CO': 47.7725145, 'Hays, KS': 38.8791783, 'Helena, MT': 46.5927425, 'Hibbing, MN': 47.427155, 'Hilo, HI': 19.7073734, 'Hilton Head, SC': 32.3836213, 'Hobbs, NM': 32.707667, 'Honolulu, HI': 21.304547, 'Hoolehua, HI': 21.1590908, 'Houston, TX': 29.7589382, 'Huntsville, AL': 34.729847, 'Hyannis, MA': 41.651513, 'Idaho Falls, ID': 43.4935245, 'Indianapolis, IN': 39.9164009, 'International Falls, MN': 48.601033, 'Islip, NY': 40.7304311, 'Ithaca/Cortland, NY': 
42.4415242, 'Jackson/Vicksburg, MS': 32.3520532, 'Jacksonville/Camp Lejeune, NC': 34.7338577, 'Jacksonville, FL': 30.3321838, 'Jackson, WY': 32.2990384, 'Jamestown, ND': 46.910544, 'Joplin, MO': 37.08418, 'Juneau, AK': 58.3019496, 'Kahului, HI': 20.8747708, 'Kalamazoo, MI': 42.291707, 'Kalispell, MT': 48.2022563, 'Kansas City, MO': 39.100105, 'Kapalua, HI': 20.99490395, 'Kearney, NE': 40.4906216, 'Ketchikan, AK': 55.3430696, 'Key West, FL': 24.5625566, 'Killeen, TX': 31.1171441, 'King Salmon, AK': 58.7551615, 'Knoxville, TN': 35.9603948, 'Kodiak, AK': 57.79, 'Kona, HI': 19.743906, 'Kotzebue, AK': 66.8982057, 'La Crosse, WI': 43.8014053, 'Lafayette, LA': 30.2240897, 'Lake Charles, LA': 30.2265949, 'Lanai, HI': 20.830544099999997, 'Lansing, MI': 42.7337712, 'Laramie, WY': 41.311367, 'Laredo, TX': 27.5199841, 'Las Vegas, NV': 36.1672559, 'Latrobe, PA': 40.317287, 'Lawton/Fort Sill, OK': 34.6172103, 'Lewisburg, WV': 37.8017879, 'Lewiston, ID': 46.4195913, 'Lexington, KY': 38.0464066, 'Liberal, KS': 37.0430812, 'Lihue, HI': 21.9769622, 'Lincoln, NE': 40.8088861, 'Little Rock, AR': 34.7464809, 'Long Beach, CA': 33.7690164, 'Longview, TX': 32.5007031, 'Los Angeles, CA': 34.0536909, 'Louisville, KY': 38.2542376, 'Lubbock, TX': 33.5635206, 'Lynchburg, VA': 37.4137536, 'Madison, WI': 43.074761, 'Mammoth Lakes, CA': 37.6432525, 'Manchester, NH': 42.9956397, 'Manhattan/Ft. Riley, KS': 40.8576918, 'Marquette, MI': 46.4481521, "Martha's Vineyard, MA": 41.3918832, 'Medford, OR': 42.3264181, 'Melbourne, FL': 28.106471, 'Memphis, TN': 35.1490215, 'Meridian, MS': 32.3643098, 'Miami, FL': 25.7741728, 'Midland/Odessa, TX': 31.8329723, 'Milwaukee, WI': 43.0349931, 'Minneapolis, MN': 44.9772995, 'Minot, ND': 48.23251, 'Missoula, MT': 46.8701049, 'Moab, UT': 38.5738096, 'Mobile, AL': 30.6943566, 'Moline, IL': 41.5067003, 'Monroe, LA': 38.2722313, 'Monterey, CA': 36.2231079, 'Montgomery, AL': 32.379952849999995, 'Montrose/Delta, CO': 38.8777609, 'Mosinee, WI': 44.7927298, 'Muskegon, MI': 43.2341813, 'Myrtle Beach, SC': 33.6956461, 'Nantucket, MA': 41.316911450000006, 'Nashville, TN': 36.1622296, 'Newark, NJ': 40.735657, 'New Haven, CT': 41.298434349999994, 'New Orleans, LA': 29.9499323, 'New York, NY': 40.7127281, 'Niagara Falls, NY': 43.08436, 'Nome, AK': 64.4989922, 'Norfolk, VA': 52.56365215, 'North Bend/Coos Bay, OR': 43.4065089, 'North Platte, NE': 41.1238873, 'Oakland, CA': 37.8044557, 'Ogdensburg, NY': 44.694285, 'Ogden, UT': 41.2230048, 'Oklahoma City, OK': 35.4729886, 'Omaha, NE': 41.2587459, 'Ontario, CA': 50.000678, 'Orlando, FL': 28.5421109, 'Owensboro, KY': 37.7742152, 'Paducah, KY': 37.0833893, 'Pago Pago, TT': -14.2754786, 'Palm Springs, CA': 33.772179449999996, 'Panama City, FL': 30.1600827, 'Pasco/Kennewick/Richland, WA': 46.1736015, 'Pellston, MI': 45.552789, 'Pensacola, FL': 30.421309, 'Peoria, IL': 40.6938609, 'Petersburg, AK': 56.8127965, 'Philadelphia, PA': 39.9527237, 'Phoenix, AZ': 33.4484367, 'Pierre, SD': 44.3683644, 'Pittsburgh, PA': 40.4416941, 'Plattsburgh, NY': 44.69282, 'Pocatello, ID': 42.8688613, 'Ponce, PR': 18.0039949, 'Portland, ME': 43.6610277, 'Portland, OR': 45.5202471, 'Portsmouth, NH': 43.0702223, 'Prescott, AZ': 34.5399962, 'Presque Isle/Houlton, ME': 46.661867799999996, 'Providence, RI': 41.8239891, 'Provo, UT': 40.2338438, 'Pueblo, CO': 10.961033, 'Pullman, WA': 46.7304268, 'Punta Gorda, FL': 26.9297836, 'Quincy, IL': 39.9356016, 'Raleigh/Durham, NC': 35.9217839, 'Rapid City, SD': 44.0869329, 'Redding, CA': 40.5863563, 'Reno, NV': 39.5261206, 'Rhinelander, WI': 
45.636623, 'Richmond, VA': 49.1977086, 'Roanoke, VA': 37.270973, 'Rochester, MN': 44.0234387, 'Rochester, NY': 43.157285, 'Rockford, IL': 42.2713945, 'Rock Springs, WY': 41.5869225, 'Roswell, NM': 33.3943282, 'Rota, TT': 66.947975, 'Sacramento, CA': 38.5810606, 'Saipan, TT': 7.0698398, 'Salina, KS': 38.8402805, 'Salisbury, MD': 38.3662114, 'Salt Lake City, UT': 40.7596198, 'San Angelo, TX': 31.4648357, 'San Antonio, TX': 29.4246002, 'San Diego, CA': 32.7174202, 'Sanford, FL': 28.8117297, 'San Francisco, CA': 37.7790262, 'San Jose, CA': 37.3361905, 'San Juan, PR': -25.4206759, 'San Luis Obispo, CA': 35.3540209, 'Santa Ana, CA': 33.7494951, 'Santa Barbara, CA': 34.4221319, 'Santa Fe, NM': 35.6869996, 'Santa Maria, CA': 34.9531295, 'Santa Rosa, CA': 38.4404925, 'Sarasota/Bradenton, FL': 27.499764300000002, 'Sault Ste. Marie, MI': 46.490586, 'Savannah, GA': 9.7568312, 'Scottsbluff, NE': 41.862302, 'Scranton/Wilkes-Barre, PA': 41.33709205, 'Seattle, WA': 47.6038321, 'Shreveport, LA': 32.5221828, 'Sioux City, IA': 42.4966815, 'Sioux Falls, SD': 43.549973, 'Sitka, AK': 57.0524973, 'South Bend, IN': 38.622348, 'Spokane, WA': 47.6571934, 'Springfield, IL': 39.7990175, 'Springfield, MO': 37.2166779, 'State College, PA': 40.7944504, 'Staunton, VA': 38.1357949, 'St. Cloud, MN': 45.5616075, 'St. George, UT': 37.104153, 'Stillwater, OK': 36.1156306, 'St. Louis, MO': 38.6529545, 'Stockton, CA': 37.9577016, 'St. Petersburg, FL': 27.7703796, 'Syracuse, NY': 43.0481221, 'Tallahassee, FL': 30.4380832, 'Tampa, FL': 27.9477595, 'Texarkana, AR': 33.4254684, 'Toledo, OH': 41.6529143, 'Traverse City, MI': 44.7606441, 'Trenton, NJ': 40.2170575, 'Tucson, AZ': 32.2228765, 'Tulsa, OK': 36.1556805, 'Twin Falls, ID': 42.5704456, 'Tyler, TX': 32.3512601, 'Unalaska, AK': 53.8722824, 'Valdosta, GA': 30.8327022, 'Valparaiso, FL': 30.5085309, 'Vernal, UT': 40.4556825, 'Waco, TX': 31.549333, 'Walla Walla, WA': 46.0667277, 'Washington, DC': 38.8949924, 'Waterloo, IA': 42.4979693, 'Watertown, NY': 43.9747838, 'Watertown, SD': 44.899211, 'Wenatchee, WA': 47.4234599, 'West Palm Beach/Palm Beach, FL': 26.715364, 'West Yellowstone, MT': 44.664290199999996, 'White Plains, NY': 41.0339862, 'Wichita Falls, TX': 33.9137085, 'Wichita, KS': 37.6922361, 'Williamsport, PA': 41.2493292, 'Williston, ND': 48.1465457, 'Wilmington, NC': 34.2257282, 'Worcester, MA': 42.2761217, 'Wrangell, AK': 56.4706022, 'Yakima, WA': 46.601557, 'Yakutat, AK': 59.572734499999996, 'Youngstown/Warren, OH': 41.22497, 'Yuma, AZ': 32.665135, 'Bristol/Johnson City/Kingsport, TN': 36.475201, 'Mission/McAllen/Edinburg, TX': 26.203407, 'New Bern/Morehead/Beaufort, NC': 35.108494, 'Hattiesburg/Laurel, MS': 31.467, 'Iron Mountain/Kingsfd, MI': 45.8146,'Newburgh/Poughkeepsie, NY': 41.66598, 'College Station/Bryan, TX': 30.601389, 'Saginaw/Bay City/Midland, MI': 43.4195, 'Newport News/Williamsburg, VA': 37.131900, 'Harlingen/San Benito, TX': 26.1326, 'Sun Valley/Hailey/Ketchum, ID': 43.504398}}, inplace=True) flights_test.replace({'destLong': {'Aberdeen, SD': -98.487813, 'Abilene, TX': -99.7475905, 'Adak Island, AK': -176.5734916431957, 'Aguadilla, PR': -67.1541343, 'Akron, OH': -81.518485, 'Albany, GA': -73.8016558, 'Albany, NY': -73.754968, 'Albuquerque, NM': -106.6509851, 'Alexandria, LA': 29.894378, 'Allentown/Bethlehem/Easton, PA': -75.44225386838299, 'Alpena, MI': -83.6670019, 'Amarillo, TX': -101.8338246, 'Anchorage, AK': -149.894852, 'Appleton, WI': -88.4067604, 'Arcata/Eureka, CA': -124.1535049, 'Asheville, NC': -82.5540161, 'Ashland, WV': -81.3526017, 'Aspen, 
CO': -106.8235606, 'Atlanta, GA': -84.3902644, 'Atlantic City, NJ': -74.4229351, 'Augusta, GA': 10.8933327, 'Austin, TX': -97.7436995, 'Bakersfield, CA': -119.0194639, 'Baltimore, MD': -76.610759, 'Bangor, ME': -68.7778138, 'Barrow, AK': -156.4809618, 'Baton Rouge, LA': -91.18738, 'Beaumont/Port Arthur, TX': -93.985972, 'Belleville, IL': 6.0982683, 'Bellingham, WA': -122.4788361, 'Bemidji, MN': -94.8907869, 'Bend/Redmond, OR': -121.2150324, 'Bethel, AK': -161.7558333, 'Billings, MT': -108.49607, 'Binghamton, NY': -75.914341, 'Birmingham, AL': -1.8237251, 'Bismarck/Mandan, ND': -100.8363564, 'Bloomington/Normal, IL': -88.9844947, 'Boise, ID': -116.200886, 'Boston, MA': -71.0582912, 'Bozeman, MT': -111.044047, 'Brainerd, MN': -94.2008288, 'Branson, MO': -93.2175285, 'Brownsville, TX': -97.4890856, 'Brunswick, GA': 10.560215, 'Buffalo, NY': -78.8783922, 'Bullhead City, AZ': -114.5682983, 'Burbank, CA': -118.3258554, 'Burlington, VT': -73.212906, 'Butte, MT': -121.5858444, 'Cape Girardeau, MO': -89.5230357, 'Casper, WY': -106.3254928, 'Cedar City, UT': -113.0618277, 'Cedar Rapids/Iowa City, IA': -91.6704053, 'Champaign/Urbana, IL': -88.241194, 'Charleston/Dunbar, WV': -81.7207214, 'Charleston, SC': -79.9402728, 'Charlotte Amalie, VI': -64.932789, 'Charlotte, NC': -80.8430827, 'Charlottesville, VA': -78.49973472559668, 'Chattanooga, TN': -85.3094883, 'Cheyenne, WY': -104.820246, 'Chicago, IL': -87.6244212, 'Christiansted, VI': -64.7079823, 'Cincinnati, OH': -84.5124602, 'Clarksburg/Fairmont, WV': -80.3300893, 'Cleveland, OH': -81.6934446, 'Cody, WY': -109.0563923, 'Colorado Springs, CO': -104.8253485, 'Columbia, MO': -92.3337366, 'Columbia, SC': -81.0343313, 'Columbus, GA': -83.0765043, 'Columbus, MS': -88.4272627, 'Columbus, OH': -83.0007065, 'Concord, NC': -80.5800049, 'Cordova, AK': -145.7589103, 'Corpus Christi, TX': -97.4014129, 'Dallas/Fort Worth, TX': -97.3135971, 'Dallas, TX': -96.7968559, 'Daytona Beach, FL': -81.0228331, 'Dayton, OH': -84.1916069, 'Deadhorse, AK': -148.4598151, 'Del Rio, TX': -100.8946984, 'Denver, CO': -72.3959849, 'Des Moines, IA': -93.6046655, 'Detroit, MI': -83.0466403, 'Devils Lake, ND': -98.86512, 'Dickinson, ND': -102.7896242, 'Dillingham, AK': -158.4575, 'Dothan, AL': -85.3933906, 'Dubuque, IA': -90.6647967, 'Duluth, MN': -92.1251218, 'Durango, CO': -104.833333, 'Eagle, CO': -106.7172844, 'Eau Claire, WI': -91.4984941, 'Elko, NV': -115.3272864, 'Elmira/Corning, NY': -76.89199038453467, 'El Paso, TX': -106.464634, 'Erie, PA': -80.0852695, 'Escanaba, MI': -87.0647434, 'Eugene, OR': -123.0950506, 'Evansville, IN': -87.518899, 'Everett, WA': -122.2013998, 'Fairbanks, AK': -147.716675, 'Fargo, ND': -96.789821, 'Fayetteville, AR': -94.1574328, 'Fayetteville, NC': -78.878292, 'Flagstaff, AZ': -111.6165953319917, 'Flint, MI': -83.6900211, 'Florence, SC': -79.7671658, 'Fort Lauderdale, FL': -80.1433786, 'Fort Myers, FL': -81.8723084, 'Fort Smith, AR': -94.4248983, 'Fort Wayne, IN': -85.1386015, 'Fresno, CA': -119.7848307, 'Gainesville, FL': -82.3249846, 'Garden City, KS': -100.8726618, 'Gillette, WY': -105.501876, 'Grand Forks, ND': -97.0592028, 'Grand Island, NE': -98.338685, 'Grand Junction, CO': -108.5507317, 'Grand Rapids, MI': -85.6678639, 'Great Falls, MT': -111.29189, 'Green Bay, WI': -88.0125794, 'Greensboro/High Point, NC': -79.7919754, 'Greenville, NC': -77.3724593, 'Greer, SC': -82.2272119, 'Guam, TT': 144.80206025352555, 'Gulfport/Biloxi, MS': -89.0290044, 'Gunnison, CO': -107.0603126, 'Gustavus, AK': -135.7375654, 'Hagerstown, MD': -77.7202641, 
'Hancock/Houghton, MI': -88.580956, 'Harrisburg, PA': -76.8861122, 'Hartford, CT': -72.69061276146614, 'Hayden, CO': -116.82675375791398, 'Hays, KS': -99.3267702, 'Helena, MT': -112.036277, 'Hibbing, MN': -92.937689, 'Hilo, HI': -155.0815803, 'Hilton Head, SC': -99.748119, 'Hobbs, NM': -103.1311314, 'Honolulu, HI': -157.8556764, 'Hoolehua, HI': -157.09484723911947, 'Houston, TX': -95.3676974, 'Huntsville, AL': -86.5859011, 'Hyannis, MA': -70.2825918, 'Idaho Falls, ID': -112.0400919, 'Indianapolis, IN': -86.0519568269157, 'International Falls, MN': -93.4105904, 'Islip, NY': -73.2108618, 'Ithaca/Cortland, NY': -76.4580207, 'Jackson/Vicksburg, MS': -90.8730418, 'Jacksonville/Camp Lejeune, NC': -77.4457643, 'Jacksonville, FL': -81.655651, 'Jackson, WY': -90.1847691, 'Jamestown, ND': -98.708436, 'Joplin, MO': -94.51323, 'Juneau, AK': -134.419734, 'Kahului, HI': -156.4529879461996, 'Kalamazoo, MI': -85.5872286, 'Kalispell, MT': -114.316711, 'Kansas City, MO': -94.5781416, 'Kapalua, HI': -156.6562339558182, 'Kearney, NE': -98.9472344, 'Ketchikan, AK': -131.6466819, 'Key West, FL': -81.7724368, 'Killeen, TX': -97.727796, 'King Salmon, AK': -156.5192469940953, 'Knoxville, TN': -83.9210261, 'Kodiak, AK': -152.4072222, 'Kona, HI': -156.0422959812206, 'Kotzebue, AK': -162.5977621, 'La Crosse, WI': -91.2395429, 'Lafayette, LA': -92.0198427, 'Lake Charles, LA': -93.2173759, 'Lanai, HI': -156.9029492509114, 'Lansing, MI': -84.5553805, 'Laramie, WY': -105.591101, 'Laredo, TX': -99.4953764, 'Las Vegas, NV': -115.1485163, 'Latrobe, PA': -79.3840301, 'Lawton/Fort Sill, OK': -98.4037888, 'Lewisburg, WV': -80.4456303, 'Lewiston, ID': -117.0216144, 'Lexington, KY': -84.4970393, 'Liberal, KS': -100.920999, 'Lihue, HI': -159.3687721, 'Lincoln, NE': -96.7077751, 'Little Rock, AR': -92.2895948, 'Long Beach, CA': -118.191604, 'Longview, TX': -94.74049, 'Los Angeles, CA': -118.242766, 'Louisville, KY': -85.759407, 'Lubbock, TX': -101.879336, 'Lynchburg, VA': -79.1422464, 'Madison, WI': -89.3837613, 'Mammoth Lakes, CA': -118.9668509, 'Manchester, NH': -71.4547891, 'Manhattan/Ft. 
Riley, KS': -73.9222899, 'Marquette, MI': -87.6305899, "Martha's Vineyard, MA": -70.62085427857699, 'Medford, OR': -122.8718605, 'Melbourne, FL': -80.6371513, 'Memphis, TN': -90.0516285, 'Meridian, MS': -88.703656, 'Miami, FL': -80.19362, 'Midland/Odessa, TX': -102.3606957, 'Milwaukee, WI': -87.922497, 'Minneapolis, MN': -93.2654692, 'Minot, ND': -101.296273, 'Missoula, MT': -113.995267, 'Moab, UT': -109.5462146, 'Mobile, AL': -88.0430541, 'Moline, IL': -90.5151342, 'Monroe, LA': -90.1792484, 'Monterey, CA': -121.3877428, 'Montgomery, AL': -86.3107669425032, 'Montrose/Delta, CO': -108.226467, 'Mosinee, WI': -89.7035959, 'Muskegon, MI': -86.2483921, 'Myrtle Beach, SC': -78.8900409, 'Nantucket, MA': -70.14287301528347, 'Nashville, TN': -86.7743531, 'Newark, NJ': -74.1723667, 'New Haven, CT': -72.93102342707913, 'New Orleans, LA': -90.0701156, 'New York, NY': -74.0060152, 'Niagara Falls, NY': -79.0614686, 'Nome, AK': -165.39879944316317, 'Norfolk, VA': 1.2623608080231654, 'North Bend/Coos Bay, OR': -124.2242824, 'North Platte, NE': -100.7654232, 'Oakland, CA': -122.2713563, 'Ogdensburg, NY': -75.486374, 'Ogden, UT': -111.9738429, 'Oklahoma City, OK': -97.5170536, 'Omaha, NE': -95.9383758, 'Ontario, CA': -86.000977, 'Orlando, FL': -81.3790304, 'Owensboro, KY': -87.1133304, 'Paducah, KY': -88.6000478, 'Pago Pago, TT': -170.7048298, 'Palm Springs, CA': -116.49529769785079, 'Panama City, FL': -85.6545729, 'Pasco/Kennewick/Richland, WA': -119.0664001, 'Pellston, MI': -84.783936, 'Pensacola, FL': -87.2169149, 'Peoria, IL': -89.5891008, 'Petersburg, AK': -132.95547, 'Philadelphia, PA': -75.1635262, 'Phoenix, AZ': -112.0741417, 'Pierre, SD': -100.3511367, 'Pittsburgh, PA': -79.9900861, 'Plattsburgh, NY': -73.45562, 'Pocatello, ID': -112.4401098, 'Ponce, PR': -66.6169509, 'Portland, ME': -70.2548596, 'Portland, OR': -122.6741949, 'Portsmouth, NH': -70.7548621, 'Prescott, AZ': -112.4687616, 'Presque Isle/Houlton, ME': -68.01074889363161, 'Providence, RI': -71.4128343, 'Provo, UT': -111.6585337, 'Pueblo, CO': -74.84053554739253, 'Pullman, WA': -117.173895, 'Punta Gorda, FL': -82.0453664, 'Quincy, IL': -91.4098727, 'Raleigh/Durham, NC': -78.76087880585929, 'Rapid City, SD': -103.2274481, 'Redding, CA': -122.3916754, 'Reno, NV': -119.8126581, 'Rhinelander, WI': -89.412075, 'Richmond, VA': -123.1912406, 'Roanoke, VA': -79.9414313, 'Rochester, MN': -92.4630182, 'Rochester, NY': -77.615214, 'Rockford, IL': -89.093966, 'Rock Springs, WY': -109.2047867, 'Roswell, NM': -104.5229518, 'Rota, TT': 13.553736, 'Sacramento, CA': -121.4938951, 'Saipan, TT': 125.5116649, 'Salina, KS': -97.6114237, 'Salisbury, MD': -75.6008881, 'Salt Lake City, UT': -111.8867975, 'San Angelo, TX': -100.4398442, 'San Antonio, TX': -98.4951405, 'San Diego, CA': -117.1627728, 'Sanford, FL': -81.2680345, 'San Francisco, CA': -122.4199061, 'San Jose, CA': -121.890583, 'San Juan, PR': -49.2687428522959, 'San Luis Obispo, CA': -120.3757163, 'Santa Ana, CA': -117.8732213, 'Santa Barbara, CA': -119.7026673, 'Santa Fe, NM': -105.9377997, 'Santa Maria, CA': -120.4358577, 'Santa Rosa, CA': -122.7141049, 'Sarasota/Bradenton, FL': -82.56510160912002, 'Sault Ste. 
Marie, MI': -84.359269, 'Savannah, GA': -2.4962, 'Scottsbluff, NE': -103.6627088, 'Scranton/Wilkes-Barre, PA': -75.72257122928625, 'Seattle, WA': -122.3300624, 'Shreveport, LA': -93.7651944, 'Sioux City, IA': -96.4058782, 'Sioux Falls, SD': -96.7003324, 'Sitka, AK': -135.337612, 'South Bend, IN': -105.518825, 'Spokane, WA': -117.4235106, 'Springfield, IL': -89.6439575, 'Springfield, MO': -93.2920373, 'State College, PA': -77.8616386, 'Staunton, VA': -79.08927008810585, 'St. Cloud, MN': -94.1642004, 'St. George, UT': -113.5841313, 'Stillwater, OK': -97.0585717, 'St. Louis, MO': -90.24111656024635, 'Stockton, CA': -121.2907796, 'St. Petersburg, FL': -82.6695085, 'Syracuse, NY': -76.1474244, 'Tallahassee, FL': -84.2809332, 'Tampa, FL': -82.458444, 'Texarkana, AR': -94.0430977, 'Toledo, OH': -83.5378173, 'Traverse City, MI': -85.6165301, 'Trenton, NJ': -74.7429463, 'Tucson, AZ': -110.9748477, 'Tulsa, OK': -95.9929113, 'Twin Falls, ID': -114.4602554, 'Tyler, TX': -95.3010624, 'Unalaska, AK': -166.5272262, 'Valdosta, GA': -83.2784851, 'Valparaiso, FL': -86.5027282, 'Vernal, UT': -109.5284741, 'Waco, TX': -97.1466695, 'Walla Walla, WA': -118.3393456, 'Washington, DC': -77.0365581, 'Waterloo, IA': -92.3329637, 'Watertown, NY': -75.9107565, 'Watertown, SD': -97.115289, 'Wenatchee, WA': -120.3103494, 'West Palm Beach/Palm Beach, FL': -80.0532942, 'West Yellowstone, MT': -111.10513722509046, 'White Plains, NY': -73.7629097, 'Wichita Falls, TX': -98.4933873, 'Wichita, KS': -97.3375448, 'Williamsport, PA': -77.0027671, 'Williston, ND': -103.621814, 'Wilmington, NC': -77.9447107, 'Worcester, MA': -71.8058232, 'Wrangell, AK': -132.3829431, 'Yakima, WA': -120.5108421, 'Yakutat, AK': -139.57831243878087, 'Youngstown/Warren, OH': -80.789606, 'Yuma, AZ': -114.47603157249804, 'Bristol/Johnson City/Kingsport, TN': -82.407401, 'Mission/McAllen/Edinburg, TX': -98.230011, 'New Bern/Morehead/Beaufort, NC': -77.044113, 'Hattiesburg/Laurel, MS': -89.3331, 'Iron Mountain/Kingsfd, MI': -88.1186,'Newburgh/Poughkeepsie, NY': -73.884201, 'College Station/Bryan, TX': -96.314445, 'Saginaw/Bay City/Midland, MI': -83.9508, 'Newport News/Williamsburg, VA': -76.492996, 'Harlingen/San Benito, TX': -97.6311, 'Sun Valley/Hailey/Ketchum, ID': -114.2959976}}, inplace=True) #Converting the planned departure time from 24 hours to a more catagorical variable, which captures an flights_test['crs_dep_time'] = (flights_test['crs_dep_time']/100).astype(int) flights_test['crs_arr_time'] = (flights_test['crs_arr_time']/100).astype(int) #Convert fl_date to Datetime, then just month number to account for higher delays within certain months monthDummies = pd.get_dummies(pd.to_datetime(flights_test.fl_date , format="%Y-%m-%d").dt.strftime('%B')) dayDummies = pd.get_dummies(pd.to_datetime(flights_test.fl_date , format="%Y-%m-%d").dt.strftime('%A')) #Creating dummy variables for carriers to account for delays related to certain carriers then concat these dummies onto newTrain mktCarrierDummies = pd.get_dummies(flights_test['mkt_unique_carrier']) # opCarrierDummies = pd.get_dummies(newTrain['op_unique_carrier']) flights_test = pd.concat([flights_test, mktCarrierDummies, monthDummies, dayDummies], axis=1) #tes without these dummies then swap and check results #op dummies was giving better results than mkt dummies flights_test['distanceSQ'] = flights_test['distance']**2 flights_test['originLong*Lat'] = flights_test['originLong']*flights_test['originLat'] flights_test['originLongSQ'] = flights_test['originLong']**2 flights_test['originLatSQ'] = 
flights_test['originLat']**2 flights_test['Month_Avg_Arr_DelaySQ'] = flights_test['Month_Avg_Arr_Delay']**2 flights_test['February'] = 0 flights_test['March'] = 0 flights_test['April'] = 0 flights_test['May'] = 0 flights_test['June'] = 0 flights_test['July'] = 0 flights_test['August'] = 0 flights_test['September'] = 0 flights_test['October'] = 0 flights_test['November'] = 0 flights_test['December'] = 0 submission = flights_test[['fl_date', 'mkt_carrier', 'mkt_carrier_fl_num', 'origin', 'dest']] #Assign X X_finaltest = flights_test.drop(columns = ['fl_date', 'mkt_unique_carrier', 'branded_code_share', 'mkt_carrier', 'mkt_carrier_fl_num', 'op_unique_carrier','tail_num', 'op_carrier_fl_num', 'origin_airport_id', 'origin', 'origin_city_name', 'dest_airport_id', 'dest','dest_city_name', 'dup', 'crs_elapsed_time', 'flights']) X_finaltest = X_finaltest[['crs_dep_time', 'crs_arr_time','distance','Month_Avg_Arr_Delay','Month_Avg_Dep_Delay', 'Avg_Taxi_In_Carrier','Avg_Taxi_Out_Carrier','originLat','originLong','destLat','destLong', 'AA','AS','B6','DL','F9','G4','HA','NK','UA','WN','April','August','December', 'February', 'January', 'July', 'June', 'March', 'May','November', 'October', 'September', 'Friday', 'Monday','Saturday','Sunday', 'Thursday', 'Tuesday', 'Wednesday', 'distanceSQ','originLong*Lat','originLongSQ','originLatSQ','Month_Avg_Arr_DelaySQ']] X_finaltest #Scale both X and y due to differing units of measurements between features X_finaltest = scaler.fit_transform(X_finaltest) predicted_delay = xg_reg.predict(X_finaltest) len(predicted_delay) submission submission['predicted_delay'] = predicted_delay submission.to_csv('submission.csv')
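The hand-built squared and interaction terms above (`distanceSQ`, `originLong*Lat`, `originLongSQ`, ...) can also be generated programmatically. Below is a minimal sketch using scikit-learn's `PolynomialFeatures`; the tiny frame and its values are illustrative stand-ins, not the project's real data:

```
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures

# Illustrative stand-in for a couple of the numeric feature columns
geo = pd.DataFrame({"originLong": [-95.37, -118.24], "originLat": [29.76, 34.05]})

# degree=2, include_bias=False yields: originLong, originLat,
# originLong^2, originLong*originLat, originLat^2
poly = PolynomialFeatures(degree=2, include_bias=False)
expanded = pd.DataFrame(poly.fit_transform(geo),
                        columns=poly.get_feature_names_out(geo.columns))
print(expanded)
```

Generating the expansion this way keeps the engineered columns consistent between the training and test frames without repeating the hand-written assignments.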
## Chapter 9. Keras2 Applications

Let's explore the applied features of Keras that efficiently handle the issues that arise when using artificial intelligence on real-world problems.

### 9.3 Making a simple new layer

This section covers the Lambda command, which is used to create new layers.

```
import keras
keras.__version__
```

### 9.3.1 The Lambda layer

```
from keras.layers import Lambda, Input, Concatenate
from keras.models import Model
import numpy as np
```

### 9.3.2 Using Python's lambda feature

```
def Lambda_with_lambda():
    x = Input((2,))
    y = Lambda(lambda x: x**2+2*x+1)(x)
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1,2],[3,4]]))
    print(yp)

Lambda_with_lambda()
```

### 9.3.3 Using dedicated functions for the Lambda layer

```
def Lambda_function():
    def kproc(x):
        return x ** 2 + 2 * x + 1

    def kshape(input_shape):
        return input_shape

    x = Input((2,))
    y = Lambda(kproc, kshape)(x)
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1,2],[3,4]]))
    print(yp)

Lambda_function()
```

### 9.3.4 Using backend functions

```
from keras import backend as K

def Backend_for_Lambda():
    def kproc_concat(x):
        m = K.mean(x, axis=1, keepdims=True)
        d1 = K.abs(x - m)
        d2 = K.square(x - m)
        return K.concatenate([x, d1, d2], axis=1)

    def kshape_concat(input_shape):
        output_shape = list(input_shape)
        output_shape[1] *= 3
        return tuple(output_shape)

    x = Input((3,))
    y = Lambda(kproc_concat, kshape_concat)(x)
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1, 2, 3], [3, 4, 8]]))
    print(yp)

Backend_for_Lambda()
```

### 9.3.5 Using engine-specific functions

```
import tensorflow as tf

def TF_for_Lamda():
    def kproc_concat(x):
        m = tf.reduce_mean(x, axis=1, keepdims=True)
        d1 = tf.abs(x - m)
        d2 = tf.square(x - m)
        return tf.concat([x, d1, d2], axis=1)

    def kshape_concat(input_shape):
        output_shape = list(input_shape)
        output_shape[1] *= 3
        return tuple(output_shape)

    x = Input((3,))
    y = Lambda(kproc_concat, kshape_concat)(x)
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1, 2, 3], [3, 4, 8]]))
    print(yp)

TF_for_Lamda()
```

### 9.3.6 Using Keras2's extended functionality

```
def No_Lambda_with_keras2():
    x = Input((2,))
    y = x**2+2*x+1
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1,2],[3,4]]))
    print(yp)

No_Lambda_with_keras2()
```

---

### 9.3.7 Full code

```
# 9.3.1 What is a Lambda layer?
from keras.layers import Lambda, Input
from keras.models import Model
import numpy as np  # needed for the np.array calls below

# 9.3.2 Using Python's lambda feature
def Lambda_with_lambda():
    x = Input((2,))
    y = Lambda(lambda x: x**2+2*x+1)(x)
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1,2],[3,4]]))
    print(yp)

# 9.3.3 Using dedicated functions for the Lambda layer
def Lambda_function():
    def kproc(x):
        return x ** 2 + 2 * x + 1

    def kshape(input_shape):
        return input_shape

    x = Input((2,))
    y = Lambda(kproc, kshape)(x)
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1,2],[3,4]]))
    print(yp)

# 9.3.4 Using backend functions
from keras import backend as K

def Backend_for_Lambda():
    def kproc_concat(x):
        m = K.mean(x, axis=1, keepdims=True)
        d1 = K.abs(x - m)
        d2 = K.square(x - m)
        return K.concatenate([x, d1, d2], axis=1)

    def kshape_concat(input_shape):
        output_shape = list(input_shape)
        output_shape[1] *= 3
        return tuple(output_shape)

    x = Input((3,))
    y = Lambda(kproc_concat, kshape_concat)(x)
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1, 2, 3], [3, 4, 8]]))
    print(yp)

# 9.3.5 Using engine-specific functions
import tensorflow as tf

def TF_for_Lamda():
    def kproc_concat(x):
        m = tf.reduce_mean(x, axis=1, keepdims=True)
        d1 = tf.abs(x - m)
        d2 = tf.square(x - m)
        return tf.concat([x, d1, d2], axis=1)

    def kshape_concat(input_shape):
        output_shape = list(input_shape)
        output_shape[1] *= 3
        return tuple(output_shape)

    x = Input((3,))
    y = Lambda(kproc_concat, kshape_concat)(x)
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1, 2, 3], [3, 4, 8]]))
    print(yp)

# 9.3.6 Using Keras2's extended functionality
def No_Lambda_with_keras2():
    x = Input((2,))
    y = x**2+2*x+1
    m = Model(x, y)
    yp = m.predict_on_batch(np.array([[1,2],[3,4]]))
    print(yp)

def main():
    print('Lambda with lambda')
    Lambda_with_lambda()
    print('Lambda function')
    Lambda_function()
    print('Backend for Lambda')
    Backend_for_Lambda()
    print('TF for Lambda')
    TF_for_Lamda()
    print('Define-by-run approach in Keras2')
    No_Lambda_with_keras2()

main()
```
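Since the Lambda layer in these examples only applies an element-wise polynomial, its output is easy to sanity-check against plain NumPy. A minimal check, independent of Keras:

```
import numpy as np

x = np.array([[1, 2], [3, 4]], dtype=float)
# Should match the Lambda layer's predict_on_batch output: [[ 4.  9.] [16. 25.]]
print(x**2 + 2*x + 1)
```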
<a href="https://colab.research.google.com/github/John-G-Thomas/Daily-Warm-Ups/blob/master/notebooks/Probabilities_and_Statistics_Warm_Up.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Probabilities and Statistics Warm-Up --- --- --- In the slack channel recruit a partner, you and your partner should work together from one of your own notebooks. When you're finished PR the completed warm up assignment into your partners repository. ## Concepts ---- Discuss, and answer, the following conceptual questions. #### What is a normal distribution? **Answer:** The normal distribution is a probability function that describes how the values of a variable are distributed. A normal distribution has some interesting properties: it has a bell shape, the mean and median are equal, and 68% of the data falls within 1 standard deviation. The Bell Curve Shape. #### What is the difference between descriptive and inferential statistics? Descriptive statistics describes sets of data. Inferential statistics draws conclusions about the sets of data based on sampling. **Answer:** <!-- Your answer in the following cell --> Descriptive:Are brief descriptive coefficients that summarize a given data set, which can be either a representation of the entire or a sample of a population. Inferential: Allows you to make predictions (“inferences”) from that data. With inferential statistics, you take data from samples and make generalizations about a population. #### What is a null hypothesis? The null hypothesis is the one to be tested and the alternative is everything else. In our example, The null hypothesis would be: The mean data scientist salary is 113,000 dollars. ``` # This is formatted as code ``` #### What is a t-test useful for? **Answer:** One sample: One sample testing against the means of two groups. two sample: The two sample t-test is useful for getting comparing data from two independent variables. Two samples. Two populations. #### When should you use a 1-tailed vs 2-tailed t-test? **Answer:** A one-tailed test is where you are only interested in one direction. >>If a mean is x, you might want to know if a set of results is more than x or less than x. A one-tailed test is more powerful than a two-tailed test, as you aren't considering an effect in the opposite direction. 2 tailed t-test shows the statistical significant difference. # This is formatted as code ``` #### Propose 3 use cases where t-tests could be implemented. **Answer:** issue that democrats support more than republicans with p < 0.01 (significant at the 99% level). issue that republicans support more than democrats with p < 0.01 (significant at the 99% level). issue where the difference between republicans and democrats has p > 0.1 (Not significant at the 90% level - i.e. 
there may not be much of a difference the two sample means) ## Code --- ``` import numpy as np # linear algebra import pandas as pd # data manipulation # pandas config if pd: pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) from scipy import stats # statistics # visualizations import matplotlib.pyplot as plt import seaborn as sns parties = ["republican", "democrat"] issues = ["handicapped-infants", "water-project-cost-sharing", "adoption-of-the-budget-resolution", "physician-fee-freeze", "el-salvador-aid", "religious-groups-in-schools", "anti-satellite-test-ban", "aid-to-nicaraguan-contras", "mx-missile", "immigration", "synfuels-corporation-cutback", "education-spending", "superfund-right-to-sue", "crime", "duty-free-exports", "export-administration-act-south-africa", # <-- While not required placing a comma here can be helpful when going back and ammending / adding to your code ] columns = ["party"] + issues columns # Loading the data uci = "https://archive.ics.uci.edu/ml/machine-learning-databases" data = "https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data" df = pd.read_csv(data, names=columns) print(df.shape) df.head() # Replace the entries in the dataframe so y = 1 , n = 0 , and ? = np.NaN df = df.replace({'y': 1, 'n': 0, '?':np.NaN}) df.head() # Create seperate dataframes for republicans and democrats by slicing the above dataframe. dem = df[df['party'] == 'democrat'] rep = df[df['party'] == 'republican'] print(len(dem), len(rep)) #stats.ttest_ind() # Define a function to compare the means between both parties def compare_means(issue): """Compares the means of both parties for each issue""" for issue in issues: mean_dems = rep[issue].mean() mean_reps =dem[issue].mean() banner_length = ""*len(issue) print(issues=issues) compare_means(issues=issues) for col in dem.columns[1:]: print(col) output = [] # Add the two sample t-test to the function for col in df.columns[1:]: output.append(stats.ttest_ind(rep[col], dem[col], nan_policy='omit')) for col, out in zip(df.columns[1:], output): print(col) print('\s'+str(out)) ```
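To complement the two-sample tests above, here is a minimal sketch of the one-sample case tied to the null hypothesis discussed earlier (mean data scientist salary = 113,000 dollars); the salary values are made up purely for illustration:

```
import numpy as np
from scipy import stats

# Hypothetical sample of data scientist salaries (illustrative numbers only)
salaries = np.array([101000, 118000, 125000, 97000, 134000, 109000, 121000, 115000])

# H0: the population mean salary is 113,000; H1: it is not (two-tailed by default)
t_stat, p_value = stats.ttest_1samp(salaries, popmean=113000)
print(t_stat, p_value)

# At a 0.05 significance level, reject H0 only if p_value < 0.05
```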
# Lesson 5: Tidy Data *Learn to prepare data for visualization and analytics.* ## Instructions This tutorial provides step-by-step training divided into numbered sections. The sections often contain embeded exectable code for demonstration. This tutorial is accompanied by a practice notebook: [L05-Tidy_Data-Practice.ipynb](./L05-Tidy_Data-Practice.ipynb). Throughout this tutorial sections labeled as "Tasks" are interspersed and indicated with the icon: ![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/16/Apps-gnome-info-icon.png). You should follow the instructions provided in these sections by performing them in the practice notebook. When the tutorial is completed you can turn in the final practice notebook. ## Introduction The purpose of this assignment is to learn and practice with preparing tidy datasets. Often data we are asked to analyze is provided to us in formats that are not easy to visualize or analyze. Many visualization tools such as Seaborn or analytical tools such as supervised machine learning libraries expect data to be tidied. It is important to know what "tidy" data is, how to reformat a data into a tidy format, and to organize our own scientific data to help ourselves and others analyze it. **What are "tidy" datasets?** > Tidy datasets are easy to manipulate, model and visualize, and have a specific structure: each variable is a column, each observation is a row, and each type of observational unit is a table. \- Wickham, Hadley. [Tidy Data](https://www.jstatsoft.org/article/view/v059i10). *Journal of Statistical Software*, 59.10 (2014): 1 - 23. Before proceeding, fully read the [Tidy Data paper](https://www.jstatsoft.org/article/view/v059i10) (quoted above) by Hadley Wickham. Once finished, return here to reinforce the techniques introduced by that paper. --- ## 1. Getting Started As before, we import any needed packages at the top of our notebook. Let's import Numpy and Pandas: ``` import numpy as np import pandas as pd ``` #### Task 1a: Setup <span style="float:right; margin-left:10px; clear:both;">![Task](./media/task-icon.png)</span> Import the following packages: + `pandas` as `pd` + `numpy` as `np` ## 2. Tidy Rules ### 2.1 Recognizing data components To understand the rules for tidy data, we should define a few terms: 'variable', 'observation' and 'observational unit'. + **variable**: > A variable is a characteristic of a unit being observed... to which a numerical measure or a category... can be assigned (e.g. income, age, weight, etc., and “occupation”, “industry”, “disease”, etc. \- [OECD Glossary of Statistical terms -- Variable](https://stats.oecd.org/glossary/detail.asp?ID=2857) + **observation**: > An observation is the value, at a particular period, of a particular variable \- [OECD Glossary of Statistical terms -- Observation](https://stats.oecd.org/glossary/detail.asp?ID=6132) + **observational unit**: > Observation units are those entities on which information is received and statistics are compiled. \- [OECD Glossary of Statistical terms -- Observation Unit](https://stats.oecd.org/glossary/detail.asp?ID=1873) With those definitions for reference, remember from the text that in order for a dataset to be considered "tidy" it must be organized into a table (i.e. Pandas DataFrame) and follow these rules: + Each variable forms a unique column in the data frame. + Each observation forms a row in the data frame. + Each **type** of observational unit needs its own table. 
To demonstrate the meaning of these rules, let's first examine a dataset described in the Tidy Data paper. Execute the following lines of code that manually creates a Pandas data frame containing the example table: ``` # Create the data rows and columns. data = [['John Smith', None, 2], ['Jane Doe', 16, 11], ['Mary Johnson', 3, 1]] # Create the list of labels for the data frame. headers = ['', 'Treatment_A', 'Treatment_B'] # Create the data frame. pd.DataFrame(data, columns=headers) ``` This data is not in tidy format. Can you see why? #### Task 2a: Understand the data <span style="float:right; margin-left:10px; clear:both;">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png) </span> Using the table above, answer the following: - What are the variables? - What are the observations? - What is the observable unit? - Are the variables columns? - Are the observations rows? ### 2.1 Spotting messy data The author provides a few useful indicators that help us spot untidied data: 1. Column headers are values, not variable names. 2. Multiple variables are stored in one column. 3. Variables are stored in both rows and columns. 4. Multiple types of observational units are stored in the same table. 5. A single observational unit is stored in multiple tables. As an example, let's look at a data set that the author borrowed from the Pew Reserach Center that provides religious affiliation and yearly income ranges for individuals surveyed. Execute the following code which manually puts that data into a Pandas data frame: ``` data = [['Agnostic',27,34,60,81,76,137], ['Atheist',12,27,37,52,35,70], ['Buddhist',27,21,30,34,33,58], ['Catholic',418,617,732,670,638,1116], ['Don\'t know/refused',15,14,15,11,10,35], ['Evangelical Prot',575,869,1064,982,881,1486], ['Hindu',1,9,7,9,11,34], ['Historically Black Prot',228,244,236,238,197,223], ['Jehovah\'s Witness',20,27,24,24,21,30], ['Jewish',19,19,25,25,30,95]] headers = ['religion','<$10k','$10-20k','$20-30k','$30-40k','$40-50k','$50-75k'] religion = pd.DataFrame(data, columns=headers) religion ``` #### Task 2b: Explain causes of untidyness <span style="float:right; margin-left:10px; clear:both;">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png) </span> Using the data set above: - Explain why the data above is untidy? - What are the variables? - What are the observations? As another example, consider the data frame also provided by the author. For this data, the demographic groups are broken down by sex (m, f) and age (0–14, 15–25, 25–34, 35–44, 45–54, 55–64, 65+, or unknown). 
Execute the following: ``` data = [['AD', 2000, 0, 0, 1, 0, 0, 0, 0, None, None], ['AE', 2000, 2, 4, 4, 6, 5, 12, 10, None, 3], ['AF', 2000, 52, 228, 183, 149, 129, 94, 80, None, 93], ['AG', 2000, 0, 0, 0, 0, 0, 0, 1, None, 1], ['AL', 2000, 2, 19, 21, 14, 24, 19, 16, None, 3], ['AM', 2000, 2, 152, 130, 131, 63, 26, 21, None, 1], ['AN', 2000, 0, 0, 1, 2, 0, 0, 0, None, 0], ['AO', 2000, 186, 999, 1003, 912, 482, 312, 194, None, 247], ['AR', 2000, 97, 278, 594, 402, 419, 368, 330, None, 121], ['AS', 2000, None, None, None, None, 1, 1, None, None, None]] headers = ['country', 'year', 'm014', 'm1524', 'm2534', 'm3544', 'm4554', 'm5564', 'm65', 'mu', 'f014'] demographics = pd.DataFrame(data, columns=headers) demographics ``` #### Task 2c: Explain causes of untidyness <span style="float:right; margin-left:10px; clear:both;">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png) </span> Using the data set above: - Explain why the data above is untidy? - What are the variables? - What are the observations? --- ## 3. Melting Data In the Tidy paper, the author indicated that many times a data set can be corrected, or tidied, by first "melting" the data. Fortunately, Pandas provides the `pd.melt` function! See the [online documenation for pd.melt](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.melt.html) for full usage instructions. The author provides five different use cases where melting (and other transformations) can be performed: 1. Column headers are values, not variable names. 2. Multiple variables are stored in one column. 3. Variables are stored in both rows and columns. 4. Multiple types of observational units are stored in the same table. 5. A single observational unit is stored in multiple tables. We will explore only a few of these use cases. However, the techniques provided by these examples will help with melting for all of them. ### 3.1 Use Case #1: column headers are values To demonsrate melting let's create a sample dataframe that provides the progress level of different groups of individuals in a process that has two stages: ``` df = pd.DataFrame({'Group': {0: 'A', 1: 'B', 2: 'C'}, 'Stage1': {0: 1, 1: 3, 2: 5}, 'Stage2': {0: 2, 1: 4, 2: 6}}) df ``` It's clear that this dataset does not follow tidy rules. This is because information about the stage is housed in the header (i.e. two different stages: stage1 and stage2). To tidy this up, we should have a separate column that indicates the stage and a corresponding column that indicates the observation for each stage. The first step to correct this is to melt the data. To melt a dataset using Pandas, you must indicate which columns in the current data frame should be kept as columns and which columns should be melted (also called **unpivoted**) to rows. This is indicated using two arguments provided to `pd.melt`: - `id_vars`: indicates the columns to use as identifier variables. These columns remain as columns in the dataframe after melting. - `value_vars`: indicates the columns to melt (unpivot). If not specified, then all columns that are not set as `id_vars` are used. - The column header becomes a value in a new column - The value within the original column is matched with the header value in an adjacent column. As an example, let's melt the example dataframe: ``` df2 = pd.melt(df, id_vars=['Group'], value_vars=['Stage1', 'Stage2'], ) df2 ``` Observe that the new column labels named 'variable' and 'value' do not indicate what the data the colomn contains. 
We can either set these manually using: ```python df2.columns = ['Group', 'Stage', 'Level'] ``` Or, we can provide the new labels when we melt the data using the `var_name` and `value_name` arguments: ``` df2 = pd.melt(df, id_vars=['Group'], value_vars=['Stage1', 'Stage2'], var_name='Stage', value_name='Level') df2 ``` #### Task 3a: Melt data, use case #1 <span style="float:right; margin-left:10px; clear:both;">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png) </span> Using the `pd.melt` function, melt the demographics data introduced in section 2. Be sure to: - Set the column headers correctly. - Order by country - Print the first 10 lines of the resulting melted dataset. ### 3.2 Use Case #2: multiple variables stored in one column Sometimes, melting the data is not enough. Consider the demographics example where the sex and the age range are combined into a single column label. In Task 3a we melted that dataset: <table> <tr><th>country</th><th>year</th><th>age</th><th>freq</th></tr> <tr><td>AD</td><td>2000</td><td>m014</td><td>0</td></tr> <tr><td>AD</td><td>2000</td><td>m5564</td><td>0</td></tr> <tr><td>AD</td><td>2000</td><td>m3544</td><td>0</td></tr> <tr><td>AD</td><td>2000</td><td>m65</td><td>0</td></tr> <tr><td>AD</td><td>2000</td><td>m2534</td><td>1</td></tr> <tr><td>AD</td><td>2000</td><td>mu</td><td>None</td></tr> <tr><td>AD</td><td>2000</td><td>m1524</td><td>0</td></tr> <tr><td>AD</td><td>2000</td><td>f014</td><td>NaN</td></tr> <tr><td>AD</td><td>2000</td><td>m4554</td><td>0</td></tr> <tr><td>AE</td><td>2000</td><td>m5564</td><td>12</td></tr> </table> We need to split that `age` column into three different columns corresponding to the sex, minimum age and maximum age. To do this, we can use the following line of code: ```Python temp_df = melted_df["age"].str.extract("(\D)(\d+)(\d{2})") ``` Remember, that Pandas provides a [pandas.Series.str.extract](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.extract.html) function for manipulating the string values of a Series, and each column in a Pandas dataframe is a series. We can use this function to break apart the value into three separate columns. Observe the argument provided to the `.str.extract` function: `(\D)(\d+)(\d{2})`. This type of string is called a regular expression (RE). We will not cover regular expressions in detail, but they are a powerful method for parsing strings to either match elements of the string or to split them. An [introduction to REs](https://docs.python.org/3.4/howto/regex.html#regex-howto) for Python and [a full syntax description](https://docs.python.org/3.4/library/re.html#regular-expression-syntax) is available online. But here is a short explanation for the elements of the RE above: + `(\D)`: Matches any single character which is not a digit. This correspondes to the sex: 'f' or 'm'. + `(\d+)`: Matches one or more digits. This correspondes to the minimum age which may be one or more digts. + `(\d{2})`: Matches exactly two digits. This requires that the last two digits are the max age. Let's try it and see how it works: ``` # Melt the demographics dataset and sort by country: melted_df = pd.melt(demographics, id_vars=["country", "year"], var_name="age", value_name="freq") melted_df = melted_df.sort_values(by=["country"]) # Split 'age' column into a new dataframe containing the three components: sex, # minimum age and maximum age. 
temp_df = melted_df["age"].str.extract("(\D)(\d+)(\d{2})") temp_df.columns = ['sex', 'min_age', 'max_age'] temp_df.head(10) ``` ### 3.3 Use Case #3: variables are in both rows and columns Consider the following dataset which contains the daily weather records for five months in 2010 for the MX17004 weather station in Mexico. Each day of the month has it's own column (e.g. d1, d2, d3, etc.). The example data only provides the first 8 days: ``` data = [['MX17004',2010,1,'tmax',None,None,None,None,None,None,None,None], ['MX17004',2010,1,'tmin',None,None,None,None,None,None,None,None], ['MX17004',2010,2,'tmax',None,27.3,24.1,None,None,None,None,None], ['MX17004',2010,2,'tmin',None,14.4,14.4,None,None,None,None,None], ['MX17004',2010,3,'tmax',None,None,None,None,32.1,None,None,None], ['MX17004',2010,3,'tmin',None,None,None,None,14.2,None,None,None], ['MX17004',2010,4,'tmax',None,None,None,None,None,None,None,None], ['MX17004',2010,4,'tmin',None,None,None,None,None,None,None,None], ['MX17004',2010,5,'tmax',None,None,None,None,None,None,None,None], ['MX17004',2010,5,'tmin',None,None,None,None,None,None,None,None]] headers = ['id','year','month','element','d1','d2','d3','d4','d5','d6','d7','d8'] weather = pd.DataFrame(data, columns=headers) weather ``` In this dataset there are two problems. First, we have a violation of use case #1 where observations are stored in the column labels for the days (e.g. d1, d2, d3, etc.). Second, we have a violation of use case #3. Observe that the 'element' column contains values that should be variables. We want the min and max temperatures for each day as columns. First, let's deal with the first problem by including `id`, `year`, `month` and `element` as `id_vars`. Observe that we will currently not try to tidy the `element` column. We want to remove the 'd' from the day so let's name the column `temp_day`: ``` melted_weather = pd.melt(weather, id_vars=['id', 'year', 'month', 'element'], var_name='temp_day', value_name='temperature') melted_weather.head(10) ``` Now, let's create an actual date for the measurement rather than storing year, month and day separately. Let's add a new column to the dataframe named 'day' that uses a regular expression to remove the letter 'd' from the beginning of the day. ``` melted_weather["day"] = melted_weather["temp_day"].str.extract("d(\d+)", expand=False) melted_weather.head(10) ``` We can now combine the year, month and day to form a proper date using the Pandas `apply` function. Execute the code below and observe the in-line comments for the meaning of each line of code: ``` # Import the datetime library. import datetime # Our year, month, and day columns must be numeric. Currently they are # strings. We can use the Pandas "apply" function to convert these columns. melted_weather[["year", "month", "day"]] = melted_weather[["year", "month", "day"]].apply(pd.to_numeric) # Convert temperature to numeric as well melted_weather[["temperature"]] = melted_weather[["temperature"]].apply(pd.to_numeric) # We want to use the Python datetime function to cobmine the year, month and day # into a proper date. In Python this is a datetime object, not a string. So, we # need to use the apply function, just like above, to convert the dates. We'll # create a simple little function that we'll use to apply the datetime change. def create_date(row): return datetime.datetime(year=row["year"], month=int(row["month"]), day=row["day"]) # Apply the create_date function to each row of our data frame for the "date" column. 
melted_weather["date"] = melted_weather.apply(lambda row: create_date(row), axis=1) # Now take a look! melted_weather.head(10) ``` Now that we have our date corrected, and properly melted, we can address the second problem: the `element` column containing variable names. To fix this we need to do the opposite of melting and we need to **pivot**. To do this we can use the [pd.pivot](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.pivot.html) function. This function takes the following arguments: - `index`: indicates the columns to use to make the new frame’s index. If None, uses existing index - `columns`: indicates the column to use whose values will become the new frame’s columns. - `values`: indicates the columns to use for populating new frame’s values. Let's use the `pivot_table` function, which is a generalization of the `pivot` function that handles duplicate values or one index/column pair. This will move the `element` column values to be new columns in our data frame. But first, we will also want to drop unwanted columns: ``` # Remove unwanted columns weather_min = melted_weather.drop(['year', 'month', 'day', 'temp_day'], axis=1) weather_min.head(10) # Unpivot and reset indexes. The pivot_table function automatically removes rows with null values. weather_tidy = weather_min.pivot_table(index=["id","date"], columns="element", values="temperature") weather_tidy.reset_index(drop=False, inplace=True) weather_tidy ``` The weather data is now tidy (although rather small). Observe, that in the code above, we called the function `reset_index` on the Tidy'ed weather data. If we do not do this, then the row indexes are not incremental within the data frame. #### Task 3b: Practice with a new dataset <span style="float:right; margin-left:10px; clear:both;">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png) </span> Download the [PI_DataSet.txt](https://hivdb.stanford.edu/download/GenoPhenoDatasets/PI_DataSet.txt) file from [HIV Drug Resistance Database](https://hivdb.stanford.edu/pages/genopheno.dataset.html). Store the file in the same directory as the practice notebook for this assignment. Here is the meaning of data columns: - SeqID: a numeric identifier for a unique HIV isolate protease sequence. Note: disruption of the protease inhibits HIV’s ability to reproduce. - The Next 8 columns are identifiers for unique protease inhibitor class drugs. - The values in these columns are the fold resistance over wild type (the HIV strain susceptible to all drugs). - Fold change is the ratio of the drug concentration needed to inhibit the isolate. - The latter columns, with P as a prefix, are the positions of the amino acids in the protease. - '-' indicates consensus. - '.' indicates no sequence. - '#' indicates an insertion. - '~' indicates a deletion;. - '*' indicates a stop codon - a letter indicates one letter Amino Acid substitution. - two and more amino acid codes indicates a mixture.  Import this dataset into your notebook, view the top few rows of the data and respond to these questions: - What are the variables? - What are the observations? - What are the values? #### Task 3c: Practice with a new dataset Part 2 <span style="float:right; margin-left:10px; clear:both;">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png) </span> Use the data retreived from task 3b, generate a data frame containing a Tidy’ed set of values for drug concentration fold change. 
Be sure to: - Set the column names as ‘SeqID’, ‘Drug’ and ‘Fold_change’. - Order the data frame first by sequence ID and then by Drug name. - Reset the row indexes. - Display the first 10 elements.
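For reference, the melt from Section 3.1 is one way to approach this. The sketch below makes two assumptions that should be checked against the actual file before relying on it: that the download is tab-separated, and that the eight drug columns immediately follow `SeqID`:

```
import pandas as pd

# Assumes a tab-separated file; adjust sep if the download differs
pi = pd.read_csv("PI_DataSet.txt", sep="\t")

# Assumes the 8 protease-inhibitor drug columns come right after SeqID
drug_cols = pi.columns[1:9]

tidy_pi = pd.melt(pi, id_vars=["SeqID"], value_vars=drug_cols,
                  var_name="Drug", value_name="Fold_change")
tidy_pi = tidy_pi.sort_values(by=["SeqID", "Drug"]).reset_index(drop=True)
tidy_pi.head(10)
```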
# Training Neural Networks The network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time. <img src="assets/function_approx.png" width=500px> At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function. To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems $$ \large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2} $$ where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels. By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. <img src='assets/gradient_descent.png' width=350px> ## Backpropagation For single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks. Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation. <img src='assets/backprop_diagram.png' width=550px> In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss. To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule. 
$$ \large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2} $$ **Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on. We update our weights using this gradient with some learning rate $\alpha$. $$ \large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1} $$ The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. ## Losses in PyTorch Let's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels. Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss), > This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class. > > The input is expected to contain scores for each class. This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities. ``` import torch from torch import nn import torch.nn.functional as F from torchvision import datasets, transforms # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) # Download and load the training data trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) ``` ### Note If you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook. ``` # Build a feed-forward network model = nn.Sequential(nn.Linear(784,128), nn.ReLU(), nn.Linear(128,64), nn.ReLU(), nn.Linear(64,10)) # Define the loss criterion = nn.CrossEntropyLoss() # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our logits logits = model(images) # Calculate the loss with the logits and the labels loss = criterion(logits, labels) print(loss) ``` In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss)). 
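As a quick check of the relationship described above, `nn.LogSoftmax` followed by `nn.NLLLoss` should produce the same value as `nn.CrossEntropyLoss` on identical scores. A minimal sketch with random data, independent of the MNIST model:

```
import torch
from torch import nn

torch.manual_seed(0)
scores = torch.randn(4, 10)           # raw network outputs (logits) for 4 samples, 10 classes
targets = torch.tensor([1, 0, 4, 9])  # true class indices

loss_ce = nn.CrossEntropyLoss()(scores, targets)
loss_nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(scores), targets)
print(loss_ce.item(), loss_nll.item())  # the two values match up to floating-point error
```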
>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately. ``` # TODO: Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) # TODO: Define the loss criterion = nn.NLLLoss() ### Run this to check your work # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our logits logits = model(images) # Calculate the loss with the logits and the labels loss = criterion(logits, labels) print(loss) ``` ## Autograd Now that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`. You can turn off gradients for a block of code with the `torch.no_grad()` content: ```python x = torch.zeros(1, requires_grad=True) >>> with torch.no_grad(): ... y = x * 2 >>> y.requires_grad False ``` Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`. The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`. ``` x = torch.randn(2,2, requires_grad=True) print(x) y = x**2 print(y) ``` Below we can see the operation that created `y`, a power operation `PowBackward0`. ``` ## grad_fn shows the function that generated this variable print(y.grad_fn) ``` The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean. ``` z = y.mean() print(z) ``` You can check the gradients for `x` and `y` but they are empty currently. ``` print(x.grad) ``` To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x` $$ \frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2} $$ ``` z.backward() print(x.grad) print(x/2) ``` These gradients calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then, go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. 
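To connect autograd to training before introducing an optimizer, here is a minimal sketch of one hand-written gradient-descent step on a toy loss; the tensors and learning rate are arbitrary illustrations:

```
import torch

w = torch.randn(3, requires_grad=True)
x = torch.tensor([1.0, 2.0, 3.0])

loss = ((w * x).sum() - 1.0) ** 2   # a toy scalar loss
loss.backward()                     # fills in w.grad

lr = 0.1
with torch.no_grad():               # don't track the update itself
    w -= lr * w.grad
w.grad.zero_()                      # clear the gradient before the next backward pass
```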
## Loss and Autograd together When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass. ``` # Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) images, labels = next(iter(trainloader)) images = images.view(images.shape[0], -1) criterion = nn.NLLLoss() logits = model(images) loss = criterion(logits, labels) print(loss) print('Before backward pass: \n', model[0].weight.grad) loss.backward() print('After backward pass: \n', model[0].weight.grad) ``` ## Training the network! There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below. ``` from torch import optim # Optimizers require the parameters to optimize and a learning rate optimizer = optim.SGD(model.parameters(), lr=0.01) ``` Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch: * Make a forward pass through the network * Use the network output to calculate the loss * Perform a backward pass through the network with `loss.backward()` to calculate the gradients * Take a step with the optimizer to update the weights Below I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches. ``` print('Initial weights - ', model[0].weight) images, labels = next(iter(trainloader)) images.resize_(64, 784) # Clear the gradients, do this because gradients are accumulated optimizer.zero_grad() # Forward pass, then backward pass, then update weights output = model(images) loss = criterion(output, labels) loss.backward() print('Gradient -', model[0].weight.grad) # Take an update step and few the new weights optimizer.step() print('Updated weights - ', model[0].weight) ``` ### Training for real Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature, one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll doing a training pass where we calculate the loss, do a backwards pass, and update the weights. >**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch. 
```
## Your solution here

# Define the model
model = nn.Sequential(nn.Linear(784, 128),
                      nn.ReLU(),
                      nn.Linear(128, 64),
                      nn.ReLU(),
                      nn.Linear(64, 10),
                      nn.LogSoftmax(dim=1))

# Define the loss and the optimizer
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr = 0.001)

epochs = 10
for epoch in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # Flatten the images
        images = images.view(images.shape[0], -1)

        # Clear the gradients to prevent accumulation
        optimizer.zero_grad()

        # Forward pass
        logits = model(images)

        # Calculate the loss
        loss = criterion(logits, labels)

        # Calculate the gradients
        loss.backward()

        # Update the weights
        optimizer.step()

        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(trainloader)}")
```

With the network trained, we can check out its predictions.

```
%matplotlib inline
import helper

images, labels = next(iter(trainloader))

img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
    logps = model(img)

# The output of the network is log-probabilities, take the exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
```

Now our network is brilliant. It can accurately predict the digits in our images. Next up you'll write the code for training a neural network on a more complex dataset.
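Before moving on, as an optional sanity check that is not part of the original notebook, one way to quantify how well the trained network does is to compare its most likely class against the labels for a single batch. The sketch below assumes the `model`, `trainloader`, and imports defined above.

```
# Rough accuracy estimate on one batch (assumes model and trainloader from above)
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)

with torch.no_grad():                     # no gradients needed for evaluation
    log_ps = model(images)

ps = torch.exp(log_ps)                    # back to probabilities
top_p, top_class = ps.topk(1, dim=1)      # most likely class per image
equals = top_class == labels.view(*top_class.shape)
accuracy = torch.mean(equals.type(torch.FloatTensor))
print(f"Batch accuracy: {accuracy.item():.3f}")
```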
Now we have reached the final phase of our project. Here, we will capture a fresh face on our camera, and if this person had their face captured and trained before, our recognizer will make a "prediction", returning its id and an index showing how confident the recognizer is about this match.

### SUDIP MITRA's AI DEVELOPMENT LAB
sudipmitraonline@gmail.com
github.com/sudipmitraonline

<img width="631" height="510" src="https://miro.medium.com/max/789/0*kkZMQyWtR5NOFr3q.">

```
import cv2
import numpy as np
import os

#@Author : Sudip Mitra
#sudipmitraonline@gmail.com
#github.com/sudipmitraonline

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

font = cv2.FONT_HERSHEY_SIMPLEX

# Initiate the id counter
id = 0

# Names related to ids: example ==> Sudip: id=1, etc.
names = ['None', 'Sudip', 'Subrata', 'Shikha', 'Shree', 'ETC']

# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height

# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)

while True:
    ret, img = cam.read()
    img = cv2.flip(img, -1)  # Flip vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor = 1.2,
        minNeighbors = 5,
        minSize = (int(minW), int(minH)),
    )

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
        id, confidence = recognizer.predict(gray[y:y+h, x:x+w])

        # If confidence is less than 100 ==> "0" would be a perfect match
        if (confidence < 100):
            id = names[id]
            confidence = " {0}%".format(round(100 - confidence))
        else:
            id = "unknown"
            confidence = " {0}%".format(round(100 - confidence))

        cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (255, 255, 0), 1)

    cv2.imshow('camera', img)

    k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
```

We are including here a new array, so we will display "names" instead of numbered ids:

names = ['None', 'Sudip', 'Subrata', 'Shikha', 'Shree', 'ETC']

So, for example, Sudip will be the user with id = 1; Subrata has id = 2, and so on.

Next, we will detect a face, the same as we did before with the Haar cascade classifier. Having a detected face, we can call the most important function in the above code:

id, confidence = recognizer.predict(gray portion of the face)

recognizer.predict() takes a captured portion of the face to be analyzed as a parameter and returns its probable owner, indicating its id and how confident the recognizer is about this match.

Finally, if the recognizer could predict a face, we put a text over the image with the probable id and the "probability" in % that the match is correct ("probability" = 100 - confidence index). If not, an "unknown" label is put on the face.
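To make the labelling rule described above easier to reason about in isolation, here is a small sketch that is not part of the original script: it reproduces the same decision, mapping a predicted id to a name when the confidence value is below the 100 threshold used above, and labelling the face as unknown otherwise. The example values at the bottom are made up.

```
def label_prediction(pred_id, confidence, names, threshold=100):
    """Mimic the display rule used in the recognition loop above."""
    if confidence < threshold:           # a lower LBPH distance means a better match
        label = names[pred_id]
    else:
        label = "unknown"
    certainty = round(100 - confidence)  # the rough "probability" shown on screen
    return label, "{0}%".format(certainty)

# Made-up example: id 1 with a confidence (distance) of 35.2
print(label_prediction(1, 35.2, ['None', 'Sudip', 'Subrata', 'Shikha', 'Shree', 'ETC']))
```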
``` import os import os.path import time import atexit import csv import requests import json import numpy import datetime import pandas as pd import numpy as np from flask import Flask def get_metadata(filename): metadata = [] parsedata = filename.split('/') metadata.append(parsedata[1]) station = parsedata[2].split('_') metadata.append(str(station[0] + ',' + station[1])) timestamp_raw = parsedata[3].split('.') timestamp_bo = timestamp_raw[0].split('_') timetext = timestamp_bo[0] + '-' + timestamp_bo[1] + '-' + timestamp_bo[2] + ':' + timestamp_bo[3] + ':' + '00:00' timestamp = time.strftime('%Y-%m-%d:%H:%M:%S', time.strptime(timetext, '%y-%m-%d:%H:%M:%S')) metadata.append(timestamp) return metadata # Change to top level of extracted data archive os.chdir("C:/Users/Wreedles/Desktop/data") new = os.getcwd().replace("\\","/") filepaths = [] for dirpath, dirnames, filenames in os.walk(new): for filename in [f for f in filenames if f.endswith(".json")]: filepaths.append("." + os.path.join(dirpath, filename)[len(new):].replace("\\","/")) dl = [] for filename in filepaths: file_metadata = get_metadata(filename) station = file_metadata[0] gridblock = file_metadata[1] timestamp = file_metadata[2] with open(filename) as data_file: sample_data = json.load(data_file) try: forecast_lonlat = sample_data["forecastData"]["geometry"]["geometries"][0]["coordinates"] forecastLat = forecast_lonlat[-1] forecastLng = forecast_lonlat[0] poly_coords = sample_data["forecastData"]["geometry"]["geometries"][1]["coordinates"] for outer_item in poly_coords: reversed_list = [] for inner_item in outer_item: reversed_list.append([inner_item[-1], inner_item[0]]) except: forecastLat = float('nan') forecastLng = float('nan') reversed_list = float('nan') try: for period in sample_data['forecastData']['properties']['periods']: sevenDayForecast = {} sevenDayForecast["station"] = station sevenDayForecast["gridblock"] = gridblock sevenDayForecast["timestamp"] = timestamp sevenDayForecast["latitude"] = forecastLat sevenDayForecast["longitude"] = forecastLng sevenDayForecast["polyCoords"] = reversed_list sevenDayForecast["timeOfDay"] = period["name"] sevenDayForecast["day"] = period["startTime"].split("T")[0] sevenDayForecast["temp"] = period["temperature"] sevenDayForecast["windSpeed"] = period["windSpeed"] sevenDayForecast["windDirection"] = period["windDirection"] sevenDayForecast["icon"] = period["icon"] sevenDayForecast["shortForecast"] = period["shortForecast"] sevenDayForecast["longForecast"] = period["detailedForecast"] dl.append(sevenDayForecast) except: sevenDayForecast = {} sevenDayForecast["latitude"] = forecastLat sevenDayForecast["longitude"] = forecastLng sevenDayForecast["timeOfDay"] = float('nan') sevenDayForecast["day"] = float('nan') sevenDayForecast["temp"] = float('nan') sevenDayForecast["windSpeed"] = float('nan') sevenDayForecast["windDirection"] = float('nan') sevenDayForecast["icon"] = float('nan') sevenDayForecast["shortForecast"] = float('nan') sevenDayForecast["longForecast"] = float('nan') dl.append(sevenDayForecast) df_sevendaydata = pd.DataFrame.from_records(dl) df_sevendaydata.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True) df_sevendaydata = df_sevendaydata[["station", "gridblock", "timestamp", "latitude", "longitude", "polyCoords", "timeOfDay", "day", "temp", "windSpeed", "windDirection", "icon", "shortForecast", "longForecast"]] df_sevendaydata.index.name = 'id' # Will export to top level of extracted data archive df_sevendaydata.to_csv("sevendaydata.csv") ```
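As a quick sanity check that is not part of the original script, the exported file can be read back with pandas to confirm the row count and column layout; the file name matches the `to_csv` call above.

```
import pandas as pd

# Re-load the exported CSV and inspect its shape and columns
check = pd.read_csv("sevendaydata.csv", index_col="id")
print(check.shape)
print(check.columns.tolist())
print(check.head(3))
```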
# Jupyter Notebook Tutorial > A tutorial of core functionality of Jupyter Notebooks to have an enjoyable coding experience. - toc: true - badges: true - comments: true - author: Isaac Flath - categories: [Jupyter,Getting Started] # Top 3 uses: 1. Exploratory analysis, model creation, data science, any kind of coding that require lots of rapid experimentation and iteration. 1. Tutorials, guides, and blogs (like this one). Because you have a great mix of text functionality with code, they work really well for tutorials and guides. Rather than having static images, or code snippets that have to get updated each iteration, the code is part of the guide and it really simplifies the process. Notebooks can be exported directly to html and be opened in any browser to give to people. With the easy conversion to html, naturally it's easy to post them on a web page. 1. Technical presentations of results. You can have the actual code analysis done, with text explanations. Excess code can be collapsed so that if someone asks really detailed questions you can expand and have every piece of detail. Changes to the analysis are in the presentation so no need to save and put static images in other documents # Cell Types A cell can be 3 different types. The most useful are code cells and markdown cells. ### Code Cells - Code cells run code The next few cells are examples of code cells - While the most common application is Python, you can set up environments easily to use R, swift, and other languages within jupyter notebooks ### Markdown Cells - This cell is a markdown cell. It is really nice for adding details and text explanations in where a code comment is not enough - They have all the normal markdown functionality, plus more. For example, I can write any technical or mathy stuff using latex, or create html tables in markdown or html. - I can also make markdown tables. ##### Latex Formulas $$\begin{bmatrix}w_1&w_2&w_3&w_4&w_5\\x_1&x_2&x_3&x_4&x_5\\y_1&y_2&y_3&y_4&y_5\\z_1&z_2&z_3&z_4&z_5\end{bmatrix}$$ $\begin{align} \frac{dy}{du} &= f'(u) = e^u = e^{\sin(x^2)}, \\ \frac{du}{dv} &= g'(v) = \cos v = \cos(x^2), \\ \frac{dv}{dx} &= h'(x) = 2x. \end{align}$ ##### Markdown Table | This | is | a | table | for | demos | |------|------|-------|-------|-------|-------| | perc | 55% | 22% | 23% | 12% | 53% | | qty | 23 | 19 | 150 | 9 | 92 | ``` #collapse-hide import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns pd.options.display.max_columns = None pd.options.display.max_rows = None %matplotlib inline ``` # Running Code Naturally you can run code cells and print to the Jupyter Notebook ``` for x in range(0,5): print(x*10) ``` # DataFrames ``` iris = sns.load_dataset('iris') iris[iris.petal_length > 6] ``` # Plotting Below we are going to make a few graphs to get the point accross. Naturally, each graph can be accompanied with a markdown cell that gives context and explains the value of that graph. ### Line Chart ``` # evenly sampled time at 200ms intervals t = np.arange(0., 5., 0.2) # red dashes, blue squares and green triangles plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^') plt.show() ``` ### Scatter Plot Sometimes we will want to display a graph, but may not want all the code and details to be immediately visable. In these examples we can create a scatter plot like below, but collapse the code cell. This is great when you want to show a graph and explain it, but the details of how the graph was created aren't that important. 
``` #collapse-hide data = {'a': np.arange(50), 'c': np.random.randint(0, 50, 50), 'd': np.random.randn(50)} data['b'] = data['a'] + 10 * np.random.randn(50) data['d'] = np.abs(data['d']) * 100 plt.scatter('a', 'b', c='c', s='d', data=data) plt.xlabel('entry a') plt.ylabel('entry b') plt.show() ``` ### Categorical Plot We can create subplots to have multiple plots show up. This can be especially helpful when showing lots of the same information, or showing how 2 different metrics are related or need to be analyzed together ``` #collapse-hide names = ['group_a', 'group_b', 'group_c'] values = [1, 10, 100] plt.figure(figsize=(9, 3)) plt.subplot(131) plt.bar(names, values) plt.subplot(132) plt.scatter(names, values) plt.subplot(133) plt.plot(names, values) plt.suptitle('Categorical Plotting') plt.show() ``` # Stack Traces When you run into an error, by default jupyter notebooks give you whatever the error message is, but also the entire stack trace. There is a debug functionality, but I find that these stack traces and jupyter cells work even better than a debugger. I can break my code into as many cells as I want and run things interactively. Here's a few examples of stack traces. ### Matrix Multiplication Good Now we are going to show an example of errors where the stack trace isn't as simple. Suppose we are trying to multiply 2 arrays together (matrix multiplication). ``` a = np.array([ [1,2,4], [3,4,5], [5,6,7] ]) b = np.array([ [11,12,14], [31,14,15], [23,32,23] ]) a@b ``` ### Matrix Multiplication Bad Now if it errors because the columns from matrix a don't match the rows from matrix b, we will get an error as matrix multiplication is impossible with those matrices. We see the same idea s the above for loop, stack trace with error and arrow pointing at the line that failed ``` # here's another a = np.array([ [1,2,4], [3,4,5], [5,6,7] ]) b = np.array([ [11,12,14], [31,14,15] ]) a@b ``` ### Second Layer of Bad But what if the line we call isn't what fails? What if what I run works, but the function underneath fails? In these example, you see the entire trace. It starts with are arrow at what you ran that errored. It then shows an arrow that your code called that caused the error, so you can track all the way back to the source. Here's how it shows a two step stack trace, but it can be as long as needed. ``` def matmul(a,b): c = a@b return c matmul(a,b) ``` # Magic Commands Magic commands are special commands for Juptyer Notebooks. They give you incredible functionality and you will likley find the experience very frustrating without them. A few that I use often are: + ? | put a question mark or 2 after a function or method to get the documentation. ?? gives more detail than ?. I can also use it to wild card search modules for functions. + shift tab | when you are writing something holding shift + tab will open a mini popup with the documentation for that thing. It may be a funciton, method, or module. + ```%who``` or ```%whos``` or ```%who_ls``` | These are all variants that list the objects and variables. I prefer %whos most of the time + ```%history``` | This allows you to look at the last pieces of code that you ran + $$ | wrapping latex code in dollar signs in a markdown cell renders latex code in markdown cells + ! | putting ! at the beginning of a line makes it run that in terminal. For example ```!ls | grep .csv``` + ```%time``` | I can use this to time the execution of things ``` np.*array*? np.array_equal?? 
a = np.array(np.random.rand(512,512))
b = np.array(np.random.rand(512,512))
%time for i in range(0,20): a@b

%whos

%history -l 5
```

# Jupyter Extensions

There are many extensions to Jupyter Notebooks. After all, a Jupyter notebook is just a JSON file, so you can read the JSON in and manipulate and transform things however you want! There are many features, such as variable explorers, auto code timers, and more - but I find that most are unnecessary. About half the people I talk to don't use any, and the other half use several.

# NBDEV

NBdev is a Jupyter extension/python library that allows you to do full development projects in Jupyter Notebooks. There have been books and libraries written entirely in Jupyter notebooks, including the testing frameworks and unit tests that go with them. A common misconception is that Jupyter notebooks cannot be used for that kind of work, though many people already have.

![](../images/export_example.png)

There are many features NBdev adds. Here are a few. Using notebooks written like this, nbdev can create and run any of the following with a single command:

+ Searchable, hyperlinked documentation; any word you surround in backticks will be automatically hyperlinked to the appropriate documentation
+ Cells in a Jupyter notebook marked with #export will be exported automatically to a python module (see the sketch below)
+ Python modules, following best practices such as automatically defining `__all__` (more details) with your exported functions, classes, and variables
+ Pip installers (uploaded to pypi for you)
+ Tests (defined directly in your notebooks, and run in parallel)
+ Navigate and edit your code in a standard text editor or IDE, and export any changes automatically back into your notebooks

I recommend checking them out for more detail https://github.com/fastai/nbdev
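To make the export idea concrete, here is a sketch of what an exportable cell might look like under the convention described in the list above. The `#export` directive at the top of the cell follows the text above and may differ between nbdev versions; the function itself is a made-up example, not part of any real library.

```
#export
def scale_to_unit(x):
    "Rescale an array to the 0-1 range (made-up function, only to illustrate an exported cell)."
    return (x - x.min()) / (x.max() - x.min())

# A quick check in the same notebook; cells like this can double as tests
import numpy as np
print(scale_to_unit(np.array([2.0, 4.0, 6.0])))  # [0.  0.5 1. ]
```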
``` from jupyter_innotater import * import numpy as np, os ``` ### Repeat - Multiple Bounding Boxes, Single Classification At a per-photo level, allow classification stating whether the photo contains 'cats' or 'dogs'. And within each photo allow up to 8 bounding boxes to be drawn around the individual animals. To obtain the repeated bounding boxes we add an extra dimension to the target matrix and use the RepeatInnotation. This accepts configuration details on how to build new rows of the BoundingBoxInnotations - you do not instantiate BoundingBoxInnotations yourself, just tell it which class, data, and keyword params to use. Stack vertically (vertical=True) so there is more space if we want to display larger images. ``` animalfns = sorted(os.listdir('./animals/')) repeats = 8 # Per-photo data classes = ['cat', 'dog'] targets_type = np.zeros((len(animalfns), 1), dtype='int') # Column vector containing class index # Repeats within each photo targets_bboxes = np.zeros((len(animalfns), repeats, 4), dtype='int') # (x,y,w,h) for each animal Innotater( ImageInnotation(animalfns, path='./animals', width=400, height=300), [ MultiClassInnotation(targets_type, name='Animal Type', classes=classes, dropdown=False), RepeatInnotation( (BoundingBoxInnotation, targets_bboxes), max_repeats=repeats, min_repeats=1 ) ], vertical=True ) print('Class Indices', targets_type[:2]) # Just display the first 2 to save space print('Bounding Boxes', targets_bboxes[:2]) ``` ### Repeat - Multiple Bounding Boxes and Multiple Classification, Single BBox and Exclude checkbox Per-photo we have an Exclude checkbox - perhaps to remove 'bad' photos from our dataset further down the line. Per-photo there is a bounding box to be drawn around the main section of the image. Also display the filename in a textbox beneath the image. Within each photo draw bounding boxes and specify breed class of each individual animal, up to a maximum of 8. ``` repeats = 8 # Per-photo data targets_exclude = np.zeros((len(animalfns), 1), dtype='int') # Binary flag to indicate want to exclude from dataset targets_mainbbox = np.zeros((len(animalfns), 4), dtype='int') # (x,y,w,h) for each data row # Repeats within each photo breeds = ['Cat - Shorthair tabby', 'Cat - Shorthair ginger', 'Dog - Labrador', 'Dog - Beagle', 'Dog - Terrier'] targets_breed = np.zeros((len(animalfns), repeats, len(breeds)), dtype='int') targets_breed[:,:,0] = 1 # Set up one-hot default to first class targets_bboxes = np.zeros((len(animalfns), repeats, 4), dtype='int') # (x,y,w,h) for each animal Innotater( [ ImageInnotation(animalfns, path='./animals', width=370, height=280), # Display the image itself TextInnotation(animalfns, multiline=False) # Display the image filename ], [ BinaryClassInnotation(targets_exclude, name='Exclude'), # Checkbox BoundingBoxInnotation(targets_mainbbox, name='Main Bounding Box'), # BBox for main image RepeatInnotation( (BoundingBoxInnotation, targets_bboxes), # Individual animal bounding box (MultiClassInnotation, targets_breed, {'name':'Breed', 'classes':breeds, 'dropdown':True}), # Per-animal breed dropdown max_repeats=repeats, min_repeats=1 ) ] ) print('Exclude Flag', targets_exclude[:2]) # Just display the first 2 to save space print('Main Bounding Boxes', targets_mainbbox[:2]) print('Animal Bounding Boxes', targets_bboxes[:2]) print('Animal Breeds', targets_breed[:2]) ``` ### Grouped Innotations The GroupedInnotation doesn't do any repeating but just allows you to group together other Innotations so they appear side-by-side. 
``` targets_singlebb = np.zeros((len(animalfns), 4), dtype='int') # (x,y,w,h) for each data row targets_cl = np.zeros((len(animalfns), 1), dtype='int') Innotater( ImageInnotation(animalfns, path='./animals', width=370, height=280), GroupedInnotation( # Just to place side-by-side MultiClassInnotation(targets_cl, name='Animal', classes=classes, dropdown=True), BoundingBoxInnotation(targets_singlebb) ) ) ```
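Once annotation is done, the target arrays are plain NumPy data, so persisting them for a later training step is straightforward. The sketch below is an addition rather than part of the Innotater API; it assumes the `targets_cl` and `targets_singlebb` arrays defined above, and the file name is arbitrary.

```
import numpy as np

# Persist the annotation targets filled in through the widget above
np.savez("animal_annotations.npz",
         animal_class=targets_cl,       # class index per photo
         main_bbox=targets_singlebb)    # (x, y, w, h) per photo

# Reload them later, e.g. in a training notebook
loaded = np.load("animal_annotations.npz")
print(loaded["animal_class"].shape, loaded["main_bbox"].shape)
```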
```
import numpy as np
import matplotlib.pyplot as plt
from qucat import GUI
```

# Designing a classical microwave filter

In this application we show how QuCAT can be used to design classical microwave components. We study here a band-pass filter made from two LC oscillators, with the inductor inline and a capacitive shunt to ground. We are interested in the loss rate of an LC resonator connected through this filter to a 50 $\Omega$ load, which could emulate a typical microwave transmission line.

![alt text](graphics/filter_circuit.png "")

We start by opening the GUI and building this circuit.

```
# Build circuit
filtered_cavity = GUI('circuits/filtered_cavity.txt', # location of the circuit file
                      edit=True,           # open the GUI to edit the circuit
                      plot=True,           # plot the circuit after having edited it
                      print_network=False) # print the network
```

Since the values of $C$ and $L$ were not specified in the construction of the circuit, their values have to be passed as keyword arguments in subsequent functions. To get an initial idea of the different modes of the circuit, let us display their resonance frequencies and dissipation rates for a 0.3 uH inductance and 1 pF capacitance.

```
f,k,A,chi = filtered_cavity.f_k_A_chi(pretty_print=True,
                                      L = 0.3e-6,
                                      C = 1e-12)
```

Since the filter capacitance and inductance are large relative to the capacitance and inductance of the resonator, the two modes associated with the filter will have a much lower frequency and very high dissipation rates. We can thus access the loss rate of the resonator by always selecting the last element of the array of loss rates returned by the function `loss_rates`.

```
# The resonance modes of the filter
# have very low quality factors, which makes
# them inadequate for quantization.
# By default QuCAT will discard these modes
# and notify the user with a warning; here
# we deactivate these warnings
filtered_cavity.warn_discarded_mode = False

# Build a list of inductances and capacitances
points = 31
C_list = np.logspace(-13.5,-11.8,points)
L_list = np.logspace(-8.5,-6.8,points)

# Initialize an array to store the resonator loss rates
k = np.zeros((points,points))

# Sweep over capacitances and inductances
for i,capacitance in enumerate(C_list):
    for j,inductance in enumerate(L_list):

        # Calculate loss rates (for all modes)
        k_all_modes = filtered_cavity.loss_rates(
            C = capacitance,
            L = inductance)

        # Store the resonator loss rate
        k[i,j] = k_all_modes[-1]
```

The obtained dissipation rates are plotted below.

```
plt.pcolor(C_list,L_list,k)
cbar = plt.colorbar()
plt.xscale('log')
plt.yscale('log')
```
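A natural follow-up, which is not part of the original notebook, is to read off which capacitance and inductance pair gives the smallest resonator loss rate in the sweep. The sketch below assumes the `k`, `C_list`, and `L_list` arrays computed above, with rows of `k` indexed by capacitance and columns by inductance, as in the loop.

```
import numpy as np

# Locate the (C, L) combination with the lowest resonator loss rate
i_min, j_min = np.unravel_index(np.argmin(k), k.shape)
print("Minimum loss rate {:.3e} at C = {:.2e} F, L = {:.2e} H".format(
    k[i_min, j_min], C_list[i_min], L_list[j_min]))
```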
# 11 CMAP enrichment - for LK cancer based egenes 2/10/2021 ``` library(tidyverse) # library(readr) library(ggplot2) library(clusterProfiler) library(org.Hs.eg.db) library(pheatmap) library(ReactomePA) library(annotate) library(seqinr) save_path = '../data/processed/drug_enrichment/' genes_file = '../data/external/GWAS/cancer_mpra_hit_egenes_02092021.tsv' genes_df = read.table(genes_file,header=T,stringsAsFactor=F) head(genes_df) diseases = sort(unique(genes_df$disease)) print(diseases) get_genes = function(dz, df=genes_df){ print(dz) egenes = df[df$disease ==dz,'egene'] print(length(egenes)) return(egenes) } get_entrez = function(genes){ entrez_ids = bitr(genes, fromType="SYMBOL", toType="ENTREZID", OrgDb="org.Hs.eg.db")$ENTREZID # print(length(entrez_ids)) return(entrez_ids) # return(bitr(genes, fromType="SYMBOL", toType="ENTREZID", OrgDb="org.Hs.eg.db")) } enrichr_res = function(entrez_genes, c_gmt){ enrichr_res <- enricher(entrez_genes, TERM2GENE=c_gmt, minGSSize=0, maxGSSize=1000, pAdjustMethod = "fdr", qvalueCutoff = 1, pvalueCutoff = 1) if (!is.null(enrichr_res)){ enrichr_res <- setReadable(enrichr_res, OrgDb=org.Hs.eg.db, keyType="ENTREZID") return(data.frame(enrichr_res))} return(data.frame()) } filt_res = function(df){ return(filter(df,p.adjust<0.05)) } get_lengths = function(list_dfs){ for (name in names(list_dfs)){ print(name) if (is.data.frame(list_dfs[[name]])){ print(dim(list_dfs[[name]])[1]) } else{ print(length(list_dfs[[name]])) } } } save_dfs = function(list_dfs, save_prefix){ for (name in names(list_dfs)){ if (dim(list_dfs[[name]])[1]>0){ save_filepath = paste0(save_prefix, '_', name,'.csv') write.csv(list_dfs[[name]], save_filepath) } } } # get list of unique genes in list of lists get_genes_unique = function(list_dfs){ genelist = sort(unique(do.call('c',lapply(do.call('rbind',list_dfs)$geneID, function(x) strsplit(x,'/')[[1]])))) return(genelist) } run_enrichment = function(entrez_list, c_gmt, save_prefix){ enrich_df_list = sapply(entrez_list, function(x) enrichr_res(x,c_gmt)) save_dfs(enrich_df_list, save_prefix) enrich_df_list_filt = sapply(enrich_df_list, filt_res) get_lengths(enrich_df_list_filt) print('getting unique genes --all') print(get_genes_unique (enrich_df_list) ) print('getting unique genes --pval filt') print(get_genes_unique (enrich_df_list_filt) ) return(enrich_df_list_filt) } dz_gene_list_sym = sapply(diseases,get_genes) dz_gene_list_entrez = sapply(dz_gene_list_sym,get_entrez) get_lengths(dz_gene_list_entrez) ``` # 1. 
enrichment of egenes drug target (from pt 1 of 7B) ``` save_repurpose_gmt = '/Users/mguo123/Google Drive/0_altman/db/cmap/repurposing_drugs_target.gmt' # save_repurpose_gmt = '/Users/mguo123/Google Drive/0_altman/db/cmap/repurposing_drugs_target_cat.gmt' c_repurpose <- read.gmt(save_repurpose_gmt) dz_repurpose_drug_filt = run_enrichment(entrez_list=dz_gene_list_entrez, c_gmt = c_repurpose, save_prefix=paste0(save_path, 'direct_drug_target')) dz_repurpose_drug_filt save_repurpose_gmt = '/Users/mguo123/Google Drive/0_altman/db/cmap/repurposing_drugs_target_cat.gmt' c_repurpose <- read.gmt(save_repurpose_gmt) dz_repurpose_drug_cat_filt = run_enrichment(entrez_list=dz_gene_list_entrez, c_gmt = c_repurpose, save_prefix=paste0(save_path, 'direct_drug_target_cat')) dz_repurpose_drug_cat_filt save_cmap_gmt = '/Users/mguo123/Google Drive/0_altman/db/cmap/gene_set_library_up_crisp_drug.gmt' c_cmap<- read.gmt(save_cmap_gmt) dz_cmap_up_drug_filt = run_enrichment(entrez_list=dz_gene_list_entrez, c_gmt = c_cmap, save_prefix=paste0(save_path, 'cmap_up_drug')) dz_cmap_up_drug_filt save_cmap_gmt = '/Users/mguo123/Google Drive/0_altman/db/cmap/gene_set_library_dn_crisp_drug.gmt' c_cmap<- read.gmt(save_cmap_gmt) dz_cmap_dn_drug_filt = run_enrichment(entrez_list=dz_gene_list_entrez, c_gmt = c_cmap, save_prefix=paste0(save_path, 'cmap_dn_drug')) dz_cmap_dn_drug_filt ``` ### drugs from cmap have no enrichment up or down ``` save_cmap_gmt_cat = '/Users/mguo123/Google Drive/0_altman/db/cmap/gene_set_library_up_crisp_drug_cat.gmt' c_cmap<- read.gmt(save_cmap_gmt_cat) dz_cmap_up_drug_cat_filt = run_enrichment(entrez_list=dz_gene_list_entrez, c_gmt = c_cmap, save_prefix=paste0(save_path, 'cmap_up_drug_cat')) dz_cmap_up_drug_cat_filt save_cmap_gmt_cat = '/Users/mguo123/Google Drive/0_altman/db/cmap/gene_set_library_dn_crisp_drug_cat.gmt' c_cmap<- read.gmt(save_cmap_gmt_cat) dz_cmap_dn_drug_cat_filt = run_enrichment(entrez_list=dz_gene_list_entrez, c_gmt = c_cmap, save_prefix=paste0(save_path, 'cmap_dn_drug_cat')) dz_cmap_dn_drug_cat_filt ``` ### drug categories from cmap have no enrichment up or down interesting drug egenes ``` repurposing_info = read.csv('/Users/mguo123/Google Drive/0_altman/db/cmap/repurposing_drugs_20200324.txt', stringsAsFactor=F,sep='\t',skip=9) repurposing_info = repurposing_info%>%separate_rows(target,sep='[|]') head(repurposing_info) drug_egenes = get_genes_unique (dz_repurpose_drug) length(drug_egenes) drug_egenes_str = paste0(drug_egenes,collapse='|') drug_egenes_info = data.frame(target=drug_egenes)%>% left_join(repurposing_info,by="target") dim(drug_egenes_info) length(unique(drug_egenes_info$target)) write.csv(drug_egenes_info, paste0(save_path,'drug_egenes_info.csv')) ```
# Insets and panels ## Panel axes ``` import proplot as plot import numpy as np state = np.random.RandomState(51423) data = (state.rand(20, 20) - 0.48).cumsum(axis=1).cumsum(axis=0) data = 10 * (data - data.min()) / (data.max() - data.min()) # Stacked panels with outer colorbars for loc_cbar, loc_panel in ('rb', 'br'): f, axs = plot.subplots( axwidth=1.6, nrows=1, ncols=2, share=0, panelpad=0.1, includepanels=True ) axs.contourf( data, cmap='glacial', extend='both', colorbar=loc_cbar, colorbar_kw={'label': 'colorbar'}, ) # Summary statistics and settings x1 = x2 = np.arange(20) y1 = data.mean(axis=int(loc_panel == 'r')) y2 = data.std(axis=int(loc_panel == 'r')) titleloc = 'upper center' if loc_panel == 'r': titleloc = 'center' x1, x2, y1, y2 = y1, y2, x1, x2 space = 0 width = '30pt' kwargs = {'xreverse': False, 'yreverse': False, 'titleloc': titleloc} # Panels for plotting the mean paxs1 = axs.panel(loc_panel, space=space, width=width) paxs1.plot(x1, y1, color='gray7') paxs1.format(title='Mean', **kwargs) # Panels for plotting the standard deviation paxs2 = axs.panel(loc_panel, space=space, width=width) paxs2.plot(x2, y2, color='gray7', ls='--') paxs2.format(title='Stdev', **kwargs) # Apply formatting *after* axs.format( xlabel='xlabel', ylabel='ylabel', title='Title', suptitle='Using panels for summary statistics', ) import proplot as plot f, axs = plot.subplots(axwidth=1.5, nrows=2, ncols=2, share=0) # Panels do not interfere with subplot layout for ax, side in zip(axs, 'tlbr'): ax.panel_axes(side, width='3em') axs.format( title='Title', suptitle='Complex arrangement of panels', collabels=['Column 1', 'Column 2'], abcloc='ul', titleloc='uc', xlabel='xlabel', ylabel='ylabel', abc=True, abovetop=False ) axs.format( xlim=(0, 1), ylim=(0, 1), ylocator=plot.arange(0.2, 0.8, 0.2), xlocator=plot.arange(0.2, 0.8, 0.2) ) ``` ## Inset axes ``` import proplot as plot import numpy as np N = 20 # Inset axes representing a "zoom" state = np.random.RandomState(51423) f, ax = plot.subplots(axwidth=3) x, y = np.arange(10), np.arange(10) data = state.rand(10, 10) m = ax.pcolormesh(data, cmap='Grays', levels=N) ax.colorbar(m, loc='b', label='label') ax.format(xlabel='xlabel', ylabel='ylabel') axi = ax.inset( [5, 5, 4, 4], transform='data', zoom=True, zoom_kw={'color': 'red3', 'lw': 2} ) axi.format( xlim=(2, 4), ylim=(2, 4), color='red7', linewidth=1.5, ticklabelweight='bold' ) axi.pcolormesh(data, cmap='Grays', levels=N) ax.format(suptitle='"Zooming in" with an inset axes') ```
``` import numpy as np import pandas as pd import matplotlib.dates as mdates import datetime import requests import matplotlib.pyplot as plt import time import json from collections import defaultdict from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer PATH_TO_SENT_DATA = '../../data/sentiment_data.csv' SLEEP_DURATION = 7 API_KEY = '&api-key=INSERT_KEY_HERE' sent_data = pd.read_csv(PATH_TO_SENT_DATA) sent_data['Date'] = pd.to_datetime(sent_data['Date']) sent_data.tail() def find_news_articles(begindate, nytimes_section, api_key): base_url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json?' facet_str = f'&facet=true&begin_date={begindate}&end_date={begindate}' page = 0 count = 0 ret_list = [] while True: url = base_url+nytimes_section+facet_str+f'&page={page}'+api_key r = requests.get(url) if r.status_code != 200: print(r.status_code) data = json.loads(r.content) time.sleep(SLEEP_DURATION) if page == 0: tot_articles = data['response']['meta']['hits'] print(begindate, nytimes_section, 'tot_articles', tot_articles) for i, doc in enumerate(data['response']['docs']): ret_list.append((doc['headline']['main'], doc['lead_paragraph'], doc['web_url'])) count += 1 if count >= tot_articles: break page += 1 #print(len(ret_list)) return ret_list begin_date = sent_data.iloc[-1]['Date'] + datetime.timedelta(days=1) today = datetime.datetime.today() print(begin_date, today) daily_sentiment = defaultdict(defaultdict) sid_obj = SentimentIntensityAnalyzer() while begin_date <= today: days_sentiment_pos, days_sentiment_neg, days_sentiment_neu, days_sentiment_comp = 0, 0, 0, 0 date_str = str(begin_date.year) + str(begin_date.month).zfill(2) + str(begin_date.day).zfill(2) news_desk_str = 'fq=news_desk:("Financial" "Business" "Business Day")' section_str = 'fq=section_name:("Your Money" "Business" "Business Day")' news_desk_list = find_news_articles(date_str, news_desk_str, API_KEY) section_list = find_news_articles(date_str, section_str, API_KEY) final_urls = set() for news in news_desk_list: # tuple of 3: headline, lead_paragraph, web_url if news[2] not in final_urls: # print('adding news desk article', news[0]) final_urls.add(news[2]) sentiment_dict = sid_obj.polarity_scores(news[0] + news[1]) days_sentiment_pos += sentiment_dict['pos'] days_sentiment_neg += sentiment_dict['neg'] days_sentiment_neu += sentiment_dict['neu'] days_sentiment_comp += sentiment_dict['compound'] for news in section_list: # tuple of 3: headline, lead_paragraph, web_url if news[2] not in final_urls: # print('adding section article', news[0]) final_urls.add(news[2]) sentiment_dict = sid_obj.polarity_scores(news[0] + news[1]) days_sentiment_pos += sentiment_dict['pos'] days_sentiment_neg += sentiment_dict['neg'] days_sentiment_neu += sentiment_dict['neu'] days_sentiment_comp += sentiment_dict['compound'] num_news_items = len(final_urls) if num_news_items > 0: daily_sentiment[date_str]['pos'] = days_sentiment_pos/num_news_items daily_sentiment[date_str]['neg'] = days_sentiment_neg/num_news_items daily_sentiment[date_str]['neu'] = days_sentiment_neu/num_news_items daily_sentiment[date_str]['compound'] = days_sentiment_comp/num_news_items else: daily_sentiment[date_str]['pos'] = 0 daily_sentiment[date_str]['neg'] = 0 daily_sentiment[date_str]['neu'] = 0 daily_sentiment[date_str]['compound'] = 0 begin_date += datetime.timedelta(days=1) new_df = pd.DataFrame(daily_sentiment).T new_df.reset_index(inplace=True) new_df.rename(columns={'index' : 'Date'}, inplace=True) new_df['Date'] = pd.to_datetime(new_df['Date'], 
format="%Y%m%d") new_df = pd.concat([sent_data, new_df], axis=0, ignore_index=True) new_df.to_csv(PATH_TO_SENT_DATA, index=False) sent_data = pd.read_csv(PATH_TO_SENT_DATA) sent_data['Date'] = pd.to_datetime(sent_data['Date']) sent_data.tail() sent_data = pd.read_csv(PATH_TO_SENT_DATA) sent_data['Date'] = pd.to_datetime(sent_data['Date']) sent_data.tail() monthly = sent_data.groupby(pd.PeriodIndex(sent_data['Date'], freq="M"))['compound'].mean() monthly.plot(figsize=(14,10)); ax = monthly.plot.line() ax.set_title('Monthly average of sentiment data from January 2000 to present') ax.set_ylabel('Monthly average sentiment data') ax.axhline(y=monthly.mean(), color='r', linestyle='--', lw=2) monthly.mean() ```
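Beyond the monthly grouping, a rolling mean of the daily compound score is another way to smooth the series. The sketch below is an addition, not part of the original notebook; it assumes the `sent_data` frame loaded above, and the 30-day window is chosen purely for illustration.

```
# 30-day rolling mean of the compound sentiment (window size is illustrative)
smoothed = (sent_data.set_index('Date')['compound']
                     .rolling(window=30, min_periods=1)
                     .mean())

ax = smoothed.plot(figsize=(14, 6), title='30-day rolling mean of compound sentiment')
ax.set_ylabel('Compound sentiment')
ax.axhline(y=smoothed.mean(), color='r', linestyle='--', lw=2)
```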
# HtrA1 Crystal Simulation

## Relevant System info:

- **Temperature**: 100 K
- **pH**: 5.6
- **X-ray wavelength**: 1.08090 A
- **X-ray Resolution Range**: (50.0, 2.750) A
- **Matthews Coefficient, $V_m$**: 3.61 (A^3 / Da)
    - Note: this is the volume of the asymmetric unit, as obtained directly from the X-ray diffraction measurements, divided by the molecular weight of protein contained in the asymmetric unit -- i.e. the crystal volume per unit of molecular weight
- **Solvent Content, $V_s$**: 65.89 %
    - $V_s = 1 - \frac{1.23}{V_m}$
- **Crystallization Conditions**:
    - 1.0 M Li2SO4
    - 0.1 M Sodium Citrate (pH 5.6)
    - 0.5 M (NH4)2SO4
    - Vapor Diffusion - Sitting Drop
    - Temperature: 292 K

## PDB Cleaning and Supercell Prep

## PDB File Preparation:

### Issues with protein connectivity

### 3NUM

Protein has three separate parts:

- Fragment 1: Resid 315 to 359
- Fragment 2: Resid 290 to 300
- Fragment 3: Resid 161 to 284

Used the program `pdbfixer` to fill in the missing residues (without the pH 7 hydrogens) and wrote the output to a new pdb file, `3num_fixed.pdb`.

3NUM -- resid 161 to 284 and resid 290 to 300 and resid 315 to 369 (3TJN -- resid 164 to 367)

3NZI -- resid 160 to 370 (4-7)

3NWU -- resid 161 to 300 and resid 314 to 364 (protein and chain A)

![HtrA1 Overlay](./misc_files/HtrA1_overlay_all_three.png)

**Fig. 1**: 3NUM (red), 3NZI (blue) and 3NWU (green) overlaid, with the loop region missing in 3NUM opaque in the other two images.

Going to try using Chimera and modeller/modloop to attach a better loop.

![3NUM Fragments](./misc_files/3NUM_fragments.png)

**Fig. 2** -- 3NUM structure, with the three fragments in three different colors.

## 3NUM

3 fragments (`pfrags`):

- resid 161 to 284
- resid 290 to 300
- resid 315 to 369

![3NZI Fragments](./misc_files/3NZI_fragments.png)

**Fig. 3** -- 3NZI structure, with ligand in orange

## 3NZI

2 fragments (`pfrags`) and one ligand ("B2V": $\text{C}_{4}\text{H}_{12}\text{BNO}$)

- resid 160 to 370
- resid 1 to 7

## PDBFixer

Used PDBFixer to reconstruct the missing loops in `3NUM` (residues 285 to 289 and 301 to 314), then kept only as many N- and C-terminal residues as needed to match exactly the N- and C-terminal ends of the original `3NUM` file. Also filled in heavy atoms but did not add hydrogens.

## Chimera Tools -- Model/Refine Loops

### Outputted 5 models (#2.1-#2.5) with the following zDOPE scores (reset relevant resids from 5 to 213 instead of 161 to 369)

- **#2.1** -- -1.29
- **#2.4** -- -1.28
- **#2.3** -- -1.24
- **#2.5** -- -1.22
- **#2.2** -- -1.03

### Commands for fitting and outputting PDB structures in VMD:

(assuming 3num is molecule 0, 3num_with_loop is molecule 1)

```
set 3NUM_Fit [atomselect 0 "name CA and (resid 5 to 128 or resid 134 to 144 or resid 159 to 213)"]
set 3NUM_Loop_Fit [atomselect 1 "name CA and (resid 5 to 128 or resid 134 to 144 or resid 159 to 213)"]
set M [measure fit $3NUM_Loop_Fit $3NUM_Fit]
set 3NUM_Loop [atomselect 1 all]
$3NUM_Loop move $M
set 3NUM_out [atomselect 1 "resid 5 to 213"]
$3NUM_out writepdb "3num_loop_1_aligned.pdb"
```

### January 26th:

- Aligned the 3num and 3num_fixed_chim_loop_1.pdb structures using the atomselection above, then outputted the coordinates to a PDB file, leaving off the first four residues, which were added by pdbfixer and aren't present in 3num (3num_loop_1_aligned.pdb), and copied the header from 3num.pdb to the top of 3num_loop_1_aligned.pdb for scale and crystal information.
Now just need to check the bond orders and add hydrogens before putting in to UnitCell and PropPDB, and do the same for two other loop starting configuration structures. #### Questions to ask later: Once we see what starting configuration best matches with the diffuse data, is there any way for that information to be used to refine the crystallographic structure? #### January 30th, 2018 Fixed `3num_loop_(#)_aligned.pdb` residue numbers with this script (practiced on a `_copy`): #### `fix_residues.py` ```with open("3num_loop_3_aligned_new.pdb", 'r') as file: lines = file.readlines() with open("3num_loop_3_aligned_new2.pdb",'w') as newfile: for line in lines: if line[0:6] == 'ATOM ': old_resid = line[23:26] new_resid = int(old_resid) + 156 new_resid_str = str(new_resid) newline = line[:20] + ' A ' + new_resid_str + line[26:] newfile.write(newline) else: newfile.write(line) newfile.write('TER 1599 ASP A 369\nMASTER 560 0 0 3 14 0 0 6 1598 1 0 26\nEND')``` Now just have to make sure the header doesn't interfere with the necessary parts of the pdb file that parmed and openeye will need to fix the pdb and UnitCell and PropPDB will need to prepare them for simulation ``` import nglview view1 = nglview.show_file("/Users/davidwych/Downloads/GP3_Dir/PDB2PQR_Files/3num_loop_1_aligned_new_clean2.pdb") view2 = nglview.show_file("/Users/davidwych/Downloads/GP3_Dir/PDB2PQR_Files/3num_loop_2_aligned_new_clean2.pdb") view3 = nglview.show_file("/Users/davidwych/Downloads/GP3_Dir/PDB2PQR_Files/3num_loop_3_aligned_new_clean2.pdb") for el in [view1, view2, view3]: el.clear_representations() el.add_representation(repr_type="cartoon", selection="300-315") el.add_representation(repr_type="cartoon", selection="161-284", opacity=0.1) el.add_representation(repr_type="cartoon", selection="285-289") el.add_representation(repr_type="cartoon", selection="290-299", opacity=0.1) el.add_representation(repr_type="cartoon", selection="315-369", opacity=0.1) from ipywidgets import VBox ``` ### Loop Configurations ``` vbox = VBox([view1, view2, view3]) vbox for view in [view1, view2, view3]: view.sync_view() ``` ### PDB2PQR Converted the aligned PDB files to PQR files using http://nbcr-222.ucsd.edu/pdb2pqr_2.0.0/, selecting AMBER Force Field and naming conventions, and choosing a pH of 7 - There was a problem with all three loop configurations, which is apparently a result of not being able to work around a clash between these two atoms: `HIS A 226 HA is too close to HIS A 226 CD2`, but it was unable to "debump" it. 
It came with this warning in the pqr file: - `WARNING: Unable to debump HIS A 226` ### Using `parmed` to convert the PQRs to PDBs ``` import parmed as pmd pqr1 = pmd.load_file("./PDB2PQR_Files/3num_loop_1_aligned_new.pqr") pqr1.save("./PDB2PQR_Files/3num_loop_1_aligned_new_clean.pdb") pqr2 = pmd.load_file("./PDB2PQR_Files/3num_loop_2_aligned_new.pqr") pqr2.save("./PDB2PQR_Files/3num_loop_2_aligned_new_clean.pdb") pqr3 = pmd.load_file("./PDB2PQR_Files/3num_loop_3_aligned_new.pqr") pqr3.save("./PDB2PQR_Files/3num_loop_3_aligned_new_clean.pdb") ``` ## Using OpenEye to Check Bond Orders/Connectivity and Add Hydrogens ``` from openeye.oechem import * from openeye.oeomega import * from openeye.oeiupac import * from openeye.oeshape import * istream = oemolistream("3num_loop_1_aligned_new_clean.pdb") mol_from_file = OEMol() OEReadMolecule(istream, mol_from_file) oechem.OEAddExplicitHydrogens(OEMol()) oechem.OEDetermineConnectivity(OEMol()) oechem.OEPerceiveBondOrders(OEMol()) ostream = oemolostream('3num_loop_1_aligned_new_clean_oe.pdb') OEWriteMolecule(ostream, mol_from_file) istream = oemolistream("3num_loop_2_aligned_new_clean.pdb") mol_from_file = OEMol() OEReadMolecule(istream, mol_from_file) oechem.OEAddExplicitHydrogens(OEMol()) oechem.OEDetermineConnectivity(OEMol()) oechem.OEPerceiveBondOrders(OEMol()) ostream = oemolostream('3num_loop_2_aligned_new_clean_oe.pdb') OEWriteMolecule(ostream, mol_from_file) istream = oemolistream("3num_loop_3_aligned_new_clean.pdb") mol_from_file = OEMol() OEReadMolecule(istream, mol_from_file) oechem.OEAddExplicitHydrogens(OEMol()) oechem.OEDetermineConnectivity(OEMol()) oechem.OEPerceiveBondOrders(OEMol()) ostream = oemolostream('3num_loop_3_aligned_new_clean_oe.pdb') OEWriteMolecule(ostream, mol_from_file) ``` # Needed to add back in header crystal and scale information ### Loops ![3NUM Loops](./images/3num_loops.png) Here is a piture of the three starting loop configurations (1: Blue; 2: Red; 3: Orange) ## Calculating the Box Volume ``` import mdtraj as md import MDAnalysis as mda SC = mda.Universe("./3num_loop_1_SC.pdb") [X, Y, Z, a, b, g ] = SC.dimensions print("Box Size (breadth, width, height): {} {} {} A".format(X,Y,Z)) import numpy as np SCVolume = X * Y * Z * np.sin((g/360.0)*2*np.pi) print(SCVolume) ``` ### Calculation of the number of solute molecules $9,462,276.7 \ \text{A}^{3}\to 9.4622767×10^{-21} \ \text{L}$ $1.0 \ \text{M} \ \text{Li}_{2}^{+} \ \text{SO}_{4}^{2-} \times 9.4622767×10^{-21} \ \text{L} \ \times \ \text{N}_{A}$: - **5,698 Sulfate molecules, 11,396 Lithium atoms** - (3,755 Sulfate molecules, 7,509 Lithum atoms at 65.89% solvent content) $0.5 \ \text{M} \ (\text{NH}_{4}^{+})_{2} \ \text{SO}_{4}^{2-} \times 9.4622767×10^{-21} \ \text{L} \ \times \ \text{N}_{A}$: - **2,849 Sulfate molecules, 5,698 Ammonium molecules** - (1,877 Sulfate molecules, 3754 Ammonium molecules at 65.89% solvent content) $0.1 \ \text{M} \ (\text{Na}^{+})_{2} \ (\text{C}_{6}\text{H}_{6}\text{O}_{7})^{2-} \times 9.4622767×10^{-21} \ \text{L} \ \times \ \text{N}_{A}$: - **570 Citrate molecules, 1,140 Sodium molecules** - (376 Sulfate molecules, 751 Sodium at 65.89% solvent content) #### Totals to add: Full supercell volume (65.89% solvent content) - **Sulfate**: 8,547 (5,632) - **Lithium**: 11,396 (7,510) - **Sodium**: 1,140 (752) - **Ammonium**: 5,698 (3754) - **Citrate**: 570 (376) ## Supercell ``` SCView = nglview.show_file("./3num_loop_1_SC.pdb") SCView.add_representation(repr_type="cartoon", selection="protein") SCView ``` ## Supercell has a net charge of 
+144 Need to balance out with negative charged ions (sulfate and citrate): Need 72 sulfate and citrate ions Sulfate:Citrate = 5632:376 = 14.9787234043:1 - (basically 15:1 -- groups of 16) - 72/16 = 4.5 - 4.5 \* 15 = 67.5 --> 68 Sulfate Ions - 4.5 \* 1 = 4.5 --> 4 Citrate Ions Add 68 Sulfate ions, 4 Citrate ions #### New Totals to add: Full supercell volume (65.89% solvent content) - **Sulfate**: 8,615 (5,700) - **Lithium**: 11,396 (7,510) - **Sodium**: 1,140 (752) - **Ammonium**: 5,698 (3754) - **Citrate**: 574 (380) **TOTAL**: 27,423 (18,096) ## Adding the Waters $55.5 \ \text{M} \times \ 9.4622767×10^{-21} \ \text{L} = 316,283 \ \text{molecules} \ \text{(208,399 molecules)}$ Subtracting ions from water to add (65.89%): 208,399-18096 = 190,303 Just going to add 190,300 ## Prepping the Solute ``` from openeye.oechem import * from openeye.oeiupac import * from openeye.oeomega import * from openeye.oeshape import * from openeye.oedepict import * mol_from_smiles = OEMol() OEParseSmiles(mol_from_smiles, "[O-]S(=O)(=O)[O-]") OEPrepareDepiction(mol_from_smiles) OERenderMolecule("sulfate.png", mol_from_smiles) ``` ![sulfate](./images/sulfate.png) For citrate, decided to go with divalent citrate, because the experiment specifies a pH of 5.6 and the pKa specifications from Sigma Aldrich is as follows -- pKa: 3.138, 4.76, 6.401 ``` mol_from_smiles = OEMol() OEParseSmiles(mol_from_smiles, "C(C(=O)[O-])C(CC(=O)[O-])(C(=O)O)O") OEPrepareDepiction(mol_from_smiles) OERenderMolecule("citrate_2minus.png", mol_from_smiles) ``` ![citrate_2minus](./images/citrate_2minus.png) ``` mol_from_smiles = OEMol() OEParseSmiles(mol_from_smiles, "[NH4+]") OEPrepareDepiction(mol_from_smiles) OERenderMolecule("ammonium.png", mol_from_smiles) ``` ![ammonium](./images/ammonium.png) #### Code used to create all of the solute pdbs (replacing all the strings in SmilesToMol with the respective SMILES strings, and the string in oemolostream): ``` from openeye.oechem import * from openeye.oeomega import * from openeye.oeiupac import * from openeye.oeshape import * from openeye.oequacpac import * mol = OEMol() OESmilesToMol(mol, "[NH4+]") #OEAddExplicitHydrogens(mol) mol.SetTitle("Ammonium") omega = OEOmega() omega.SetMaxConfs(1) omega.SetStrictStereo(False) omega.SetStrictAtomTypes(False) omega(mol) OEDetermineConnectivity(mol) OEAssignCharges(mol, OEAM1BCCCharges()) ostream = oemolostream("ammonium.mol2") OEWriteMolecule(ostream, mol) ostream = oemolostream("ammonium.pdb") OEWriteMolecule(ostream, mol) ``` ### Viewing them ``` import nglview view1 = nglview.show_file("/Users/davidwych/Downloads/GP3_Dir/ammonium.mol2") view2 = nglview.show_file("/Users/davidwych/Downloads/GP3_Dir/citrate.mol2") view3 = nglview.show_file("/Users/davidwych/Downloads/GP3_Dir/sulfate.mol2") from ipywidgets import VBox vbox = VBox([view1, view2, view3]) vbox ``` ### Using antechamber to give molecules gaff charges with commands: `antechamber -i sulfate.mol2 -fi mol2 -o sulfate.gaff.mol2 -fo mol2 -c bcc -nc -2` ``` acdoctor mode is on: check and diagnosis problems in the input file. -- Check Format for mol2 File -- Status: pass -- Check Unusual Elements -- Status: pass -- Check Open Valences -- Warning: This molecule has no hydrogens nor halogens. It is quite possible that there are unfilled valences. Warning: The number of bonds (1) for atom (ID: 1, Name: O1) does not match the connectivity (2) for atom type (O.3) defined in CORR_NAME_TYPE.DAT. 
Warning: The number of bonds (1) for atom (ID: 2, Name: O2) does not match the connectivity (2) for atom type (O.3) defined in CORR_NAME_TYPE.DAT. But, you may safely ignore the warnings if your molecule uses atom names or element names as atom types. -- Check Geometry -- for those bonded for those not bonded Status: pass -- Check Weird Bonds -- Status: pass -- Check Number of Units -- Status: pass acdoctor mode has completed checking the input file. Info: Bond types are assigned for valence state (1) with penalty (1). Info: Total number of electrons: 50; net charge: -2 Running: /Users/davidwych/anaconda3/bin/sqm -O -i sqm.in -o sqm.out ``` `antechamber -i citrate.mol2 -fi mol2 -o citrate.gaff.mol2 -fo mol2 -c bcc -nc -2` ``` acdoctor mode is on: check and diagnosis problems in the input file. -- Check Format for mol2 File -- Status: pass -- Check Unusual Elements -- Status: pass -- Check Open Valences -- Status: pass -- Check Geometry -- for those bonded for those not bonded Status: pass -- Check Weird Bonds -- Status: pass -- Check Number of Units -- Status: pass acdoctor mode has completed checking the input file. Info: Total number of electrons: 100; net charge: -2 Running: /Users/davidwych/anaconda3/bin/sqm -O -i sqm.in -o sqm.out ``` `antechamber -i ammonium.mol2 -fi mol2 -o ammonium.gaff.mol2 -fo mol2 -c bcc -nc +1` ``` acdoctor mode is on: check and diagnosis problems in the input file. -- Check Format for mol2 File -- Status: pass -- Check Unusual Elements -- Status: pass -- Check Open Valences -- Status: pass -- Check Geometry -- for those bonded for those not bonded Status: pass -- Check Weird Bonds -- Status: pass -- Check Number of Units -- Status: pass acdoctor mode has completed checking the input file. Info: Total number of electrons: 10; net charge: 1 Running: /Users/davidwych/anaconda3/bin/sqm -O -i sqm.in -o sqm.out ``` Then used antechamber to convert gaff.mol2 files to pdb files: `antechamber -i ammonium.gaff.mol2 -fi mol2 -o ammonium.pdb -fo pdb` ``` acdoctor mode is on: check and diagnosis problems in the input file. -- Check Format for mol2 File -- Status: pass -- Check Unusual Elements -- Status: pass -- Check Open Valences -- Status: pass -- Check Geometry -- for those bonded for those not bonded Status: pass -- Check Weird Bonds -- Status: pass -- Check Number of Units -- Status: pass acdoctor mode has completed checking the input file. ``` `antechamber -i citrate.gaff.mol2 -fi mol2 -o citrate.pdb -fo pdb` ``` acdoctor mode is on: check and diagnosis problems in the input file. -- Check Format for mol2 File -- Status: pass -- Check Unusual Elements -- Status: pass -- Check Open Valences -- Status: pass -- Check Geometry -- for those bonded for those not bonded Status: pass -- Check Weird Bonds -- /Users/davidwych/anaconda3/bin/to_be_dispatched/antechamber: Fatal Error! ``` `antechamber -i sulfate.gaff.mol2 -fi mol2 -o sulfate.pdb -fo pdb` ``` acdoctor mode is on: check and diagnosis problems in the input file. -- Check Format for mol2 File -- Status: pass -- Check Unusual Elements -- Status: pass -- Check Open Valences -- Warning: This molecule has no hydrogens nor halogens. It is quite possible that there are unfilled valences. -- Check Geometry -- for those bonded for those not bonded Status: pass -- Check Weird Bonds -- Status: pass -- Check Number of Units -- Status: pass acdoctor mode has completed checking the input file. ```
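As a closing cross-check, the solute counts quoted in the "Calculation of the number of solute molecules" section above can be reproduced directly from the concentrations and the supercell volume. This short sketch is not part of the original workflow; the volume and solvent fraction are simply the values quoted earlier in these notes.

```
# Cross-check of the solute counts: n = concentration * volume * Avogadro's number.
# Volume and solvent fraction are the values quoted above (assumed, not re-measured here).
N_A = 6.02214076e23                    # Avogadro's number (1/mol)
box_volume_L = 9_462_276.7 * 1e-27     # supercell volume: A^3 -> L (1 A^3 = 1e-27 L)
solvent_fraction = 0.6589              # 65.89 % solvent content

species = [("sulfate from Li2SO4", 1.0), ("sulfate from (NH4)2SO4", 0.5),
           ("citrate", 0.1), ("water", 55.5)]
for name, molarity in species:
    n_full = molarity * box_volume_L * N_A
    print(f"{name:>24s}: {n_full:11,.0f} (full box)  "
          f"{n_full * solvent_fraction:11,.0f} (65.89% solvent)")
```

This reproduces the quoted numbers (5,698 / 3,755 sulfates from Li2SO4, 2,849 / 1,877 from (NH4)2SO4, 570 / 376 citrates, and roughly 316,000 / 208,000 waters) to within rounding.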
## Roman Time Domain deep-field spec-z efficiency plot This notebook constructs a redshift efficiency figure from the .DUMP file output of a SNANA simulation of the Roman time domain survey. ``` import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Patch from matplotlib.ticker import (MultipleLocator, AutoMinorLocator) import glob import math import yaml import os from astropy.table import Table plt.rcParams['text.usetex'] = False plt.rcParams['mathtext.fontset'] = 'dejavuserif' plt.rcParams['figure.figsize'] = (10,10) plt.rcParams['legend.frameon'] = False plt.rcParams['legend.fontsize'] = 19 plt.rcParams['legend.borderpad'] = 0.1 plt.rcParams['legend.labelspacing'] = 0.1 plt.rcParams['legend.handletextpad'] = 0.1 #plt.rcParams['legend.markerscale'] = 0.1 plt.rcParams['font.family'] = 'stixgeneral' plt.rcParams['font.size'] = 20 plt.rcParams['axes.labelsize'] = 15 plt.rcParams['xtick.labelsize'] = 15 plt.rcParams['ytick.labelsize'] = 15 plt.rcParams['xtick.minor.size'] = 1.5 # major tick size in points plt.rcParams['ytick.minor.size'] = 1.5 # major tick size in points %matplotlib inline def read_dump_file(filename, maxlines=None): """Read a SNANA .DUMP file into an astropy Table object""" if maxlines is not None: tab = Table.read(filename, format='ascii.basic', header_start=0, data_start=1, data_end=maxlines) else: tab = Table.read(filename, format='ascii.basic', header_start=0, data_start=1) return tab ``` ### Read in the SNANA data And limit to only the DEEP field data set ``` roman = read_dump_file("data/PIP_WFIRST_EFFICIENCY_WFIRST_ROMAN_DEEP_G10_SEARCHEFF_0.DUMP", maxlines=None) subaru = read_dump_file("data/PIP_WFIRST_EFFICIENCY_WFIRST_SUBARU_G10_SEARCHEFF_0.DUMP", maxlines=None) #combined = read_dump_file("data/PIP_WFIRST_STARTERKIT+SETEXP_WFIRST_SIMDATA_G10.DUMP", maxlines=None) romanfieldmask = (roman['FIELD']=='DEEP') roman = roman[romanfieldmask] subarufieldmask = (subaru['FIELD']=='DEEP') subaru = subaru[subarufieldmask] ``` ### View the Ia and CC Host populations ``` fig = plt.figure(figsize=[12,8]) for sntype, axrow in zip(['Ia','CC'],[0,1]): ax1 = fig.add_subplot(2,4,axrow*4+1) ax2 = fig.add_subplot(2,4,axrow*4+2) ax3 = fig.add_subplot(2,4,axrow*4+3) ax4 = fig.add_subplot(2,4,axrow*4+4) if sntype=='Ia': explmask = roman['NON1A_INDEX']==0 detmask = (roman['NON1A_INDEX']==0) & (roman['SIM_SEARCHEFF_MASK']>0) hostzmask = (roman['NON1A_INDEX']==0) & (roman['SIM_SEARCHEFF_MASK']>4) else: explmask = roman['NON1A_INDEX']>0 detmask = (roman['NON1A_INDEX']>0) & (roman['SIM_SEARCHEFF_MASK']>0) hostzmask = (roman['NON1A_INDEX']>0) & (roman['SIM_SEARCHEFF_MASK']>4) for mask,color in zip([explmask, detmask, hostzmask], ['b','g','r']): sfr = roman[mask]['logsfr'] ztruee = roman[mask]['ZTRUE'] zcmb = roman[mask]['ZCMB'] mass = roman[mask]['logmass'] ssfr = sfr - mass ax1.plot(zcmb, sfr, color=color, ls=' ', alpha=0.2, marker='.', ms=3) ax2.plot(zcmb, mass, color=color, ls=' ', alpha=0.2, marker='.', ms=3) ax3.plot(zcmb, ssfr, color=color, ls=' ', alpha=0.2, marker='.', ms=3) ax4.plot(sfr, mass, color=color, ls=' ', alpha=0.2, marker='.', ms=3) ax1.set_xlabel('redshift') ax1.set_ylabel('log(SFR)') ax2.set_xlabel('redshift') ax2.set_ylabel('log(M)') ax3.set_xlabel('redshift') ax3.set_ylabel('log(sSFR)') ax4.set_xlabel('log(SFR)') ax4.set_ylabel('log(M)') ax1.text(0.05, 0.95, sntype, fontsize=20, ha='left', va='top', transform=ax1.transAxes) ax2.text(0.05, 0.95, sntype, fontsize=20, ha='left', va='top', transform=ax2.transAxes) ax3.text(0.05, 0.95, sntype, 
fontsize=20, ha='left', va='top', transform=ax3.transAxes) ax4.text(0.05, 0.95, sntype, fontsize=20, ha='left', va='top', transform=ax4.transAxes) ax1.set_xlim(0, 3) ax1.set_ylim(-2, 4) ax2.set_xlim(0, 3) ax2.set_ylim(6, 12) ax3.set_xlim(0, 3) ax3.set_ylim(-11.5, -6.5) ax4.set_xlim(-2.5, 3) ax4.set_ylim(6, 12) plt.tight_layout() ``` ### Some quick counts of detected SN ``` ccmask = roman['NON1A_INDEX']>0 ccdetmask = (roman['NON1A_INDEX']>0) & (roman['SIM_SEARCHEFF_MASK']>0) cchostzmask = (roman['NON1A_INDEX']>0) & (roman['SIM_SEARCHEFF_MASK']>4) iamask = roman['NON1A_INDEX']==0 iadetmask = (roman['NON1A_INDEX']==0) & (roman['SIM_SEARCHEFF_MASK']>0) iahostzmask = (roman['NON1A_INDEX']==0) & (roman['SIM_SEARCHEFF_MASK']>4) nexpIa_roman = np.sum(iamask) nexpCC_roman = np.sum(ccmask) ndetIa_roman = np.sum(iadetmask) ndetCC_roman = np.sum(ccdetmask) nhostzCC_roman = np.sum(cchostzmask) nhostzIa_roman = np.sum(iahostzmask) print(" Explosions Detections GotSpecz") print("Ia: {:10,d} {:10,d} {:10,d}".format(nexpIa_roman, ndetIa_roman, nhostzIa_roman)) print("CC: {:10,d} {:10,d} {:10,d}".format(nexpCC_roman, ndetCC_roman, nhostzCC_roman)) ``` ### exploratory summary plot Shows two ways of computing the 'specz efficiency'. The grey histogram is all the simulated SNIa that SNANA reports. Blue is the photometric detections (SIM_SEARCHEFF_MASK>0) Red is those that are photometrically detected and also get a specz (SIM_SEARCHEFF_MASK==5) The solid line in black is the efficiency computed as Red / Blue : the fraction of detected SNIa that also get a hostz. (read the efficiency values from the y axis on the right side) The dashed line is the efficiency computed as Red / gray : the fraction of all SNIa explosions that are both detected and get a host specz. We see that the dashed line efficiency is always lower, and drops faster at high z, reflecting both the fact that it gets harder to get a specz, but also that its harder to find the SNIa in the first place. I think the curve we should be showing here is the solid line, b/c that is reflecting our estimate of the fraction of the detected SNIa sample that will have a specz, which is the measurable metric, and the one that would be widely understood as the "spectroscopic redshift recovery efficiency" Note that I don't really understand what is going on with the CC plot. Why would the roman grism be so much less efficient at measuring specz for CC SN host galaxies at high redshift? 
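As a toy illustration of why the two curves differ (numbers invented for illustration, not taken from the simulation):

```
# Toy numbers only: 1000 simulated explosions, 600 photometric detections,
# 300 of those detections also get a host specz.
n_expl, n_det, n_specz = 1000, 600, 300
print(n_specz / n_det)   # 0.50 -> solid-line definition: specz fraction of the *detected* sample
print(n_specz / n_expl)  # 0.30 -> dashed-line definition: specz fraction of *all* explosions
```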
``` def make_plot_summary(sntype='Ia', ax=None): if ax is None: ax = plt.gca() if sntype=='Ia': explmask = roman['NON1A_INDEX']==0 detmask = (roman['NON1A_INDEX']==0) & (roman['SIM_SEARCHEFF_MASK']>0) specmask = (roman['NON1A_INDEX']==0) & (roman['SIM_SEARCHEFF_MASK']>1) sntypetext = 'SNIa' elif sntype=='CC': explmask = roman['NON1A_INDEX']>0 detmask = (roman['NON1A_INDEX']>0) & (roman['SIM_SEARCHEFF_MASK']>0) specmask = (roman['NON1A_INDEX']>0) & (roman['SIM_SEARCHEFF_MASK']>1) sntypetext = 'CCSN' bins = np.arange(0,3.04,0.2) nexpl = int(np.sum(explmask)) ndet = int(np.sum(detmask)) nspecz = int(np.sum(specmask)) histout0 = ax.hist(roman['ZCMB'][explmask], color='k', alpha=0.1, bins=bins, label='all explosions') histout1 = ax.hist(roman['ZCMB'][detmask], color='b', alpha=0.2, bins=bins, label='phot detections') histout2 = ax.hist(roman['ZCMB'][specmask], color='r', alpha=0.2, bins=bins, label='got host specz') ax.set_title(f'Out of {nexpl:d} {sntypetext} explosions {ndet:d} are detected,\n and {nspecz:d} get a Roman grism specz', fontsize=16) ax.legend(loc='upper left') ax2 = ax.twinx() speczeff0 = histout2[0] / histout0[0] # no.specz / no.explosions speczeff1 = histout2[0] / histout1[0] # no.specz / no.phot-detections binmidpts = (histout2[1][1:] + histout2[1][:-1])/2. ax2.plot(binmidpts, speczeff1, marker='d', ls='-', color='k', lw=3, label=' #specz/#det') ax2.plot(binmidpts, speczeff0, marker='s', ls='--', color='k', lw=3, label=' #specz/#expl') ax2.legend(loc='upper right') ax2.set_ylabel("fraction with host specz", rotation=-90, labelpad=20) ax.set_ylabel(f"Number of {sntypetext}") ax.set_xlabel('Redshift') ax.set_xlim(0, 2.95) ax2.set_ylim(0, 1.5) fig = plt.figure(figsize=[10,8]) axIa = fig.add_subplot(2,1,1) make_plot_summary(sntype='Ia', ax=axIa) axCC = fig.add_subplot(2,1,2) make_plot_summary(sntype='CC', ax=axCC) plt.tight_layout() ``` # Making the Redshift Efficiency Figure: ``` def plot_efficiency_vs_z(dat, bins=np.arange(0,3.01,0.1), ax=None, **kwargs): """Plot the redshift measurement efficiency vs redshift for the given (subset) of the SNANA sim data. dat : an astropy Table, read from a SNANA .DUMP file This function presumes the user has made all desired selection cuts to the data, such as limiting to the FIELD of interest or selecting a specific SN subclass using the NON1A_INDEX ax : the axes to plot on. Use None to use current axes or make new. bins : passed to np.histogram() kwargs : passed to plt.plot() """ if ax is None: ax = plt.gca() detmask = dat['SIM_SEARCHEFF_MASK']>0 # All photometrically detected SNe speczmask = dat['SIM_SEARCHEFF_MASK']>4 # All detected SNe that get a specz num_det = np.histogram(dat['ZCMB'][detmask], bins=bins)[0] num_specz, bin_edges = np.histogram(dat['ZCMB'][speczmask], bins=bins) midpt = (bin_edges[1:]+bin_edges[:-1])/2. speczefficiency = num_specz / num_det ax.plot(midpt, speczefficiency, **kwargs) return def make_hostz_efficiency_fig(roman, subaru, sntype='Ia', field='DEEP', scalefactor=1, showseechange=True): """Construct a figure showing the host spectroscopic redshift measuremnet efficiency curves for Type Ia (sntype='Ia') or CC SN (sntype='CC'). scalefactor : fudge factor to rescale the reported counts of SNe in the survey. For example, so that it matches the count of total SNe produced in a different survey sim. 
""" fig = plt.figure(figsize=[8,4]) ax1 = fig.add_subplot(1,1,1) ax2 = ax1.twinx() if field != 'DEEP' and field != 'SHALLOW': raise RuntimeError(f"field={field} is not known.") # Limit to only the field of interest DEEP or SHALLOW (meaning 'wide') # and the SN sub-class of interest if sntype == 'Ia': hist_tick_step = 200 xgrismtext=2.08 ygrismtext=1.05 xalltext=0.95 yalltext=0.25 mfc=None typestr = 'SNIa' romanmask = (roman['NON1A_INDEX']==0) & (roman['FIELD']==field) subarumask = (subaru['NON1A_INDEX']==0) & (subaru['FIELD']==field) elif sntype == 'CC': hist_tick_step = 800 xgrismtext=1.2 ygrismtext=0.85 xalltext=1.45 yalltext=0.55 mfc='w' typestr = 'CCSN' romanmask = (roman['NON1A_INDEX']!=0) & (roman['FIELD']==field) #romanwidemask = (roman['NON1A_INDEX']!=0) & (roman['FIELD']==field) subarumask = (subaru['NON1A_INDEX']!=0) & (subaru['FIELD']==field) else: raise RuntimeError(f"sntype={sntype} is not known.") roman = roman[romanmask] subaru = subaru[subarumask] # get approximate counts of detections and spectroscopic redshifts (to nearest 100) ndet_roman = int(np.round(scalefactor * len(roman)/1e2))*100 ndet_subaru = int(np.round(scalefactor * len(subaru)/1e2))*100 # only count roman host specz above z=0.8 and only count subaru host specz below 0.8 nhostz_roman = int(np.round(scalefactor * np.sum( (roman['ZCMB']>0.8) & (roman['SIM_SEARCHEFF_MASK']==5))/100.))*100 nhostz_subaru = int(np.round(scalefactor * np.sum( (subaru['ZCMB']<0.8) & (subaru['SIM_SEARCHEFF_MASK']==5))/100.))*100 # plot the subaru efficiency only up to z=0.85 where we lose Halpha plot_efficiency_vs_z(subaru, ax=ax1, bins=np.arange(0.1, 0.85, 0.15), color='blue', marker='d', mfc=mfc, ms=6, ls='-', label='Subaru+PFS', zorder=3) # plot the roman grism efficiency starting at z=0.3, all the way to 3.0 plot_efficiency_vs_z(roman, ax=ax1, bins=np.arange(0.3, 3.05, 0.15), color='firebrick', marker='o', mfc=mfc, ms=6, ls='-', label='Roman Grism', zorder=1) # make a "squished down" histogram showing all detections histvals, binvals, patches = ax2.hist(roman['ZCMB'], bins=np.arange(0., 3.25, 0.15), weights=scalefactor * np.ones(len(roman['ZCMB'])), color='k', alpha=0.1, zorder=0, density=False) maxhistval = int(np.max(histvals)) ax2.set_ylim(0,2.5*maxhistval) ax2.set_yticks(range(0,maxhistval,hist_tick_step)) ax2.text(1.12, 0.07, "# Detected", size=16, rotation=-90, transform=ax2.transAxes) ax1.set_xlabel(r'$z$') ax1.set_ylabel(r'fraction with attainable redshift') # Show the See Change hostz efficiency estimate if showseechange: ax1.plot([0.97,1.5], [0.75,0.75], marker=' ', color='teal', lw=4, alpha=0.3) ax1.text(0.84, 0.73, "Ground-based specz for\nSee Change low-SFR hosts\n(Williams+ 2020)", ha='left', va='top', color='teal', fontsize=14) # Add text reporting the approximate counts ax1.text(0.1, 1.1, f'~{nhostz_subaru:,d} {typestr} host redshifts from Subaru+PFS', size=16, color='blue') ax1.text(xgrismtext, ygrismtext, f'~{nhostz_roman:,d} {typestr} host-z\nfrom Roman Grism', ha='left', va='top', size=16, color='firebrick', backgroundcolor='w') hostz_pct = int(((nhostz_subaru + nhostz_roman )/ndet_roman ) * 100) ax1.text(xalltext, yalltext, f'from a total of ~{ndet_roman:,d}\n {typestr} detections\n(net efficiency ~{hostz_pct:d}%)', ha='left', va='top', size=16, color='k') ax1.set_ylim(0,1.19) ax1.set_xlim(0,2.99) ax1.xaxis.set_major_locator(MultipleLocator(1)) #ax1.xaxis.set_major_formatter('{x:.0f}') # For the minor ticks, use no labels; default NullFormatter. 
ax1.xaxis.set_minor_locator(MultipleLocator(0.2)) plt.tight_layout() return make_hostz_efficiency_fig(roman, subaru, sntype='Ia', field='DEEP', scalefactor=1.35, showseechange=True) plt.savefig('SNIa_host_z_efficiency_v2.1.pdf',bbox_inches='tight') #plt.savefig('SNIa_host_z_efficiency.png',bbox_inches='tight') ``` ### Don't trust the CC SN figure I don't understand why the specz efficiency for CCSN hosts drops so much faster than it does for the Ia host galaxies ``` make_hostz_efficiency_fig(roman, subaru, sntype='CC') plt.savefig('CCSN_host_z_efficiency.pdf',bbox_inches='tight') plt.savefig('CCSN_host_z_efficiency.png',bbox_inches='tight') ```
``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from keras import backend as K from keras.models import Sequential, Model from keras.layers import LSTM, Dense, SimpleRNN, Input, merge, Embedding, TimeDistributed from keras.objectives import sparse_categorical_crossentropy, mean_squared_error ## Code taken from http://www.rigtorp.se/2011/01/01/rolling-statistics-numpy.html def rolling_window(a, window): shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) X = np.arange(-10,10,0.1) X = np.cos(np.mean(rolling_window(X, 5), -1)) #X = X[:-5+1] print(X.shape) plt.plot(X) ``` Replicating data generation from http://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/ ``` def create_dataset(dataset, look_back=1): dataX, dataY = [], [] for i in range(len(dataset)-look_back-1): a = dataset[i:(i+look_back)] dataX.append(a) dataY.append(dataset[i + look_back]) return np.array(dataX), np.array(dataY) train_size = int(X.shape[0]*0.67) test_size = X.shape[0] - train_size train = X[:train_size] test = X[train_size:] print(train_size, train.shape, test_size, test.shape) look_back = 1 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) print(trainX.shape, trainY.shape, testX.shape, testY.shape) print(trainX[:10]) print(trainY[:10]) np.reshape(trainX[:10], (trainX[:10].shape[0], 1, trainX[:10].shape[1])).shape # reshape input to be [samples, time steps, features] trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1])) ``` ## Keras model ``` model = Sequential() model.add(LSTM(4, input_dim=look_back)) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') model.fit(trainX, trainY, nb_epoch=10, batch_size=50) pred_train = model.predict(trainX) pred_test = model.predict(testX) pred_train.shape, pred_test.shape plt.plot(X) plt.plot(np.arange(pred_train.shape[0])+1, pred_train, color='r') plt.plot(np.arange(pred_test.shape[0])+train_size, pred_test, color='g') model = Sequential() model.add(SimpleRNN(4, input_dim=look_back)) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') model.fit(trainX, trainY, nb_epoch=10, batch_size=5) pred_train = model.predict(trainX) pred_test = model.predict(testX) pred_train.shape, pred_test.shape plt.plot(X) plt.plot(np.arange(pred_train.shape[0])+1, pred_train, color='r') plt.plot(np.arange(pred_test.shape[0])+train_size, pred_test, color='g') trainX.shape ``` ## Multi task learning Keras ``` X = np.random.randn(10,3) W1 = np.random.randn(3,1) W2 = np.random.randn(2,1) y1 = np.dot(X, W1) y2 = np.dot(X[:, :2], W2) X.shape, y1.shape, y2.shape inp1 = Input(name="inp1", shape=(3,)) inp2 = Input(name="inp2", shape=(2,)) dense1 = Dense(10)(inp1) merge_dens1_inp2 = merge([dense1, inp2], mode='concat') dense2 = Dense(10)(merge_dens1_inp2) out1 = Dense(1, name="out1")(dense1) out2 = Dense(1, name="out2")(dense2) model1 = Model(input=[inp1], output=[out1]) model1.get_output_at(0) == out1 model2 = Model(input=[inp1, inp2], output=[out2]) model1.compile(optimizer='sgd', loss='mean_squared_error') model2.compile(optimizer='sgd', loss='mean_squared_error') model2.predict([X, X[:, :2]]) model1.predict([X]) model2.output_names model3 = Model(input=[inp1, inp2], output=[out1, out2]) model3.compile(optimizer='sgd', 
loss='mean_squared_error') model3.predict([X, X[:, :2]]) ``` ## Multi output LSTM example ``` data = [[1,2,3,4,5], [2,3,2,2,2] ] label_pos = [[1,2,2,2,1], None] label_ner = [None, [1,1,1,2,1]] input_seq = Input(shape=(5,), name="input_seq") shared_embedding = Embedding(6, 10)(input_seq) shared_embedding.get_shape() shared_layer = LSTM(10, return_sequences=True)(shared_embedding) pos_layer = TimeDistributed(Dense(3, activation="softmax"), name="pos_labels")(shared_layer) ner_layer = TimeDistributed(Dense(3, activation="softmax"), name="ner_labels")(shared_layer) model = Model(input=input_seq, output=[pos_layer, ner_layer]) model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', sample_weight_mode="temporal") max_len = 5 X = np.array(data) y_pos = np.expand_dims(np.array([np.zeros(max_len) if y is None else y for y in label_pos], dtype='int'), -1) y_ner = np.expand_dims(np.array([np.zeros(max_len) if y is None else y for y in label_ner], dtype='int'), -1) y_pos.shape, y_ner.shape pos_mask = np.array([np.ones(max_len)*(y is not None) for y in label_pos]) ner_mask = np.array([np.ones(max_len)*(y is not None) for y in label_ner]) pos_mask, ner_mask model.fit(X, [y_pos, y_ner], sample_weight=[pos_mask, ner_mask]) t = mean_squared_error(np.random.randn(10,2), np.random.randn(10,2)) t sess = tf.Session() with sess.as_default(): print(t.eval()) t = mean_squared_error(np.random.randn(10,2,3), np.random.randn(10,2,3)) t sess = tf.Session() with sess.as_default(): print(t.eval()) def custom_mse_loss(y_true, y_pred): loss_masks = y_true["masks"] y_true_vals = y_true["vals"] mse_loss = mean_squared_error(y_true_vals, y_pred) mse_loss_masked = loss_masks * mse_loss return mse_loss_masked y_pred = np.random.randn(10,5,3) y_true_vals = np.random.randn(10,5,3) y_true_masks = np.random.randint(2, size=(10,1)) y_true = {"masks": y_true_masks, "vals": y_true_vals} t = custom_mse_loss(y_true, y_pred) t y_true_masks sess = tf.Session() with sess.as_default(): print(t.eval()) pos_layer.get_shape() def task_based_loss(y_true, y_pred): print("Using layer: ", y_pred.get_shape(), y_pred.name) pos_labels = K.cast(y_true[:, :, 0], 'int64') ner_labels = K.cast(y_true[:, :, 1], 'int64') #pos_labels = K.cast(y_true[0][:, :, 0], 'int64') #ner_labels = K.cast(y_true[1][:, :, 0], 'int64') pos_preds = y_pred[0] ner_preds = y_pred[1] pos_loss = sparse_categorical_crossentropy(pos_labels, pos_preds) ner_loss = sparse_categorical_crossentropy(ner_labels, ner_preds) #pos_mask = y_true[0][:, :, 1] # POS mask channel #ner_mask = y_true[1][:, :, 1] # NER mask channel #pos_loss = pos_loss * pos_mask #ner_loss = ner_loss * ner_mask return (pos_loss + ner_loss)/2 model.compile(loss=task_based_loss, optimizer='sgd') X.shape y_pos.shape, pos_mask.shape, y_ner.shape, ner_mask.shape y_true = np.dstack([y_pos, y_ner, pos_mask, ner_mask]) y_true.shape model.fit(X, y_true) y_pred = model.predict(X) y_pred[0].shape, y_pred[1].shape y_pred[0].argmax(axis=-1), y_pred[1].argmax(axis=-1) ```
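To make the per-task masking above more concrete, here is a small NumPy-only sketch (not from the original notebook) of the idea behind passing `sample_weight=[pos_mask, ner_mask]` with `sample_weight_mode="temporal"`: the per-timestep loss of an example that has no labels for a task is multiplied by zero, so it contributes nothing to that task's loss.

```
import numpy as np

# Hypothetical per-timestep losses for the POS head (2 examples x 5 timesteps)
per_timestep_loss = np.array([[0.3, 0.1, 0.2, 0.4, 0.3],   # example 0: has POS labels
                              [0.5, 0.2, 0.1, 0.3, 0.2]])  # example 1: no POS labels
pos_mask = np.array([[1., 1., 1., 1., 1.],
                     [0., 0., 0., 0., 0.]])

masked = per_timestep_loss * pos_mask
# Averaging over only the labelled timesteps: example 1 has no influence at all.
print(masked.sum() / pos_mask.sum())
```

(The exact reduction Keras applies to the weighted loss may differ slightly from this plain mean, but the masking effect is the same: examples without labels for a task do not drive that task's head.)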
github_jupyter
# Web Scraping Activities

```
# Import Dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
```

# 1. NASA Mars News

```
# Create an executable path
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)

# Visit the NASA Mars news site
url = 'https://mars.nasa.gov/news/'
#print(browser)
browser.visit(url)

# Convert the browser html to a soup object and then quit the browser
html = browser.html
soup = bs(html, 'html.parser')

slide_element = soup.select_one('ul.item_list li.slide')
slide_element.find("div", class_='content_title')

news_title = slide_element.find("div", class_='content_title').get_text()
print(news_title)

news_p = slide_element.find("div", class_='article_teaser_body').get_text()
print(news_p)
```

# 2. JPL Mars Space Images - Featured Image

```
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)

url = "https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html"
browser.visit(url)

# Use splinter to click the second button for the full image
full_image_element = browser.find_by_tag('button')[1]
full_image_element.click()

html = browser.html
image_soup = bs(html, 'html.parser')

img_url_rel = image_soup.find('img', class_='fancybox-image').get('src')
img_url = f'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/{img_url_rel}'
```

# 3. Mars Facts

```
df = pd.read_html('https://space-facts.com/mars/')[0]
df.columns = ['Description', 'Mars']
df.set_index('Description', inplace=True)
df
```

# 4. Mars Hemispheres

```
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)

url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)

hemisphere_image_urls = []
links = browser.find_by_css("a.product-item h3")

for index in range(len(links)):
    hemisphere = {}
    browser.find_by_css("a.product-item h3")[index].click()
    sample_element = browser.links.find_by_text('Sample').first
    # title = browser.find_by_css("h2.title").text
    # link = sample_element["href"]
    hemisphere['title'] = browser.find_by_css("h2.title").text
    hemisphere['link'] = sample_element['href']
    hemisphere_image_urls.append(hemisphere)
    print("Retrieve the title and link")
    browser.back()

print(hemisphere_image_urls)
browser.quit()
```
``` import numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import glob import plotly.graph_objects as go import numpy as np from plotly.graph_objs import * import pandas as pd import plotly.express as px def show_cluster(embedding_name): xyz = np.load(embedding_name,allow_pickle=True) print(embedding_name,xyz.shape) x, y, z = xyz[:,0], xyz[:,1], xyz[:,2] fig = go.Figure(data=[go.Scatter3d( x=x, y=y, z=z, mode='markers', marker=dict( size=1, opacity=0.1 ) )]) # tight layout fig.update_layout(margin=dict(l=0, r=0, b=0, t=0)) fig.write_html(embedding_name+'.html', auto_open=True) ''' UMAP representation of the morphology and coloring based on a given feature ''' import plotly.graph_objects as go from plotly.graph_objs import * import pandas as pd import plotly.express as px import os id_list = [13,53,17,52,40,38,45,39,57,56,51,46,41,54] usecols = (3, 4, 5, 6, 7, 8) features = ['area','perimeter','eccentricity','circularity','mean_intensity','tot_intensity'] for idx in id_list[:1]: idx = 52 morphology_filename = '/home/garner1/wsi-data/npy/'+str(idx)+'.txt.woInf.gz' embedding_filename = '/home/garner1/wsi-data/npy/id'+str(idx)+'_clusterable_embedding.morphology.npy' morphology = np.loadtxt(morphology_filename, delimiter="\t", skiprows=True, usecols=usecols) embedding = np.load(embedding_filename, allow_pickle=True) df_embedding = pd.DataFrame(data=embedding, columns=['x','y','z']) df_morphology = pd.DataFrame(data=morphology, columns=['area','perimeter', 'eccentricity','circularity', 'mean_intensity','tot_intensity']) for feature in features: df_embedding['color'] = pd.qcut(df_morphology[feature], 10, labels=False) fig = px.scatter_3d(df_embedding.sample(n=100000,axis=0), x="x", y="y", z="z", color="color", hover_name="color") fig.update_traces(marker=dict(size=1,opacity=0.5),selector=dict(mode='markers')) fig.update_layout(title=os.path.basename(embedding_filename)+' '+feature, font=dict( family="Courier New, monospace", size=18, color="#7f7f7f" ) ) fig.write_html(embedding_filename+'.'+feature+'.100K_subsampling.html', auto_open=True) ''' UMAP representation of the morphology ''' import plotly.graph_objects as go from plotly.graph_objs import * import pandas as pd import plotly.express as px import os id_list = [13,53,17,52,40,38,45,39,57,56,51,46,41,54] usecols = (1,2,3, 4, 5, 6, 7, 8) for idx in id_list[:1]: morphology_filename = '/home/garner1/wsi-data/npy/'+str(idx)+'.txt.woInf.gz' embedding_filename = '/home/garner1/wsi-data/npy/id'+str(idx)+'_clusterable_embedding.morphology.npy' morphology = np.loadtxt(morphology_filename, delimiter="\t", skiprows=True, usecols=usecols) embedding = np.load(embedding_filename, allow_pickle=True) df_embedding = pd.DataFrame(data=embedding, columns=['x','y','z']) df_morphology = pd.DataFrame(data=morphology, columns=['cx','cy','area','perimeter', 'eccentricity','circularity', 'mean_intensity','tot_intensity']) df = pd.concat([df_morphology, df_embedding], axis=1) fig = px.scatter_3d(df.sample(n=10000,axis=0), x="x", y="y", z="z") fig.update_traces(marker=dict(size=1,opacity=0.5),selector=dict(mode='markers')) fig.update_layout(title=os.path.basename(embedding_filename), font=dict( family="Courier New, monospace", size=18, color="#7f7f7f" ) ) fig.write_html(embedding_filename+'.10K_subsampling.html', auto_open=True) ''' Density based clustering ''' import matplotlib.pyplot as plt import seaborn as sns import sklearn.datasets as data %matplotlib inline sns.set_context('poster') sns.set_style('white') 
sns.set_color_codes() plot_kwds = {'alpha' : 0.5, 's' : 80, 'linewidths':0} import hdbscan XX = df.sample(n=100000,axis=0) pos = XX[['cx','cy']] min_cluster_size = 100 #parameters min_samples = 10 #parameters clusterer = hdbscan.HDBSCAN(min_samples=min_samples, min_cluster_size=min_cluster_size, gen_min_span_tree=True) clusterer.fit(pos) #cluster positions clusterer.condensed_tree_.plot(select_clusters=True, selection_palette=sns.color_palette("Set2",len(clusterer.labels_))) from plotly.graph_objs import * import pandas as pd import plotly.express as px import plotly.graph_objects as go XX['cluster'] = clusterer.labels_ #add cluster id to dataframe XX['cluster'] = XX['cluster'].apply(str) #make clustet id a string #df['alpha'] = clusterer.probabilities_ #df['alpha'] = df['alpha'].apply(str) df_filtered = XX[XX.cluster != str(-1)] # consider only clustered points fig = px.scatter_3d(df_filtered, x="x", y="y", z="z", color="cluster", hover_name="cluster", color_discrete_sequence=px.colors.qualitative.Set2) fig.update_traces(marker=dict(size=1,opacity=0.75),selector=dict(mode='markers')) fig.update_layout(title=os.path.basename(embedding_filename)+' min_cluster='+str(min_cluster_size)+' min_samples='+str(min_samples), font=dict(family="Courier New, monospace",size=18,color="#7f7f7f") ) fig.write_html('test.html', auto_open=True) ''' Densidy based clustering of the spatial density ''' idx = 13 morphology_filename = '/home/garner1/wsi-data/npy/'+str(idx)+'.txt.woInf.gz' embedding_filename = '/home/garner1/wsi-data/npy/id'+str(idx)+'_clusterable_embedding.morphology.npy' pos = np.loadtxt(morphology_filename, delimiter="\t", skiprows=True, usecols=(1,2)) XX = pd.DataFrame(data=pos, columns=['x','y']).sample(n=100000,axis=0) print(XX.shape) import matplotlib.pyplot as plt import seaborn as sns import sklearn.datasets as data %matplotlib inline sns.set_context('poster') sns.set_style('white') sns.set_color_codes() plot_kwds = {'alpha' : 0.5, 's' : 80, 'linewidths':0} import hdbscan min_cluster_size = 100 min_samples = 100 clusterer = hdbscan.HDBSCAN(min_samples=min_samples, min_cluster_size=min_cluster_size, gen_min_span_tree=True) clusterer.fit(XX) clusterer.condensed_tree_.plot(select_clusters=True, selection_palette=sns.color_palette("Set2",len(clusterer.labels_))) from plotly.graph_objs import * import pandas as pd import plotly.express as px import plotly.graph_objects as go XX['cluster'] = clusterer.labels_ XX['cluster'] = XX['cluster'].apply(str) #df['alpha'] = clusterer.probabilities_ #df['alpha'] = df['alpha'].apply(str) df_filtered = XX[XX.cluster != str(-1)] fig = px.scatter_3d(df_filtered, x="x", y="y", z="z", color="cluster", hover_name="cluster", color_discrete_sequence=px.colors.qualitative.Set2) fig.update_traces(marker=dict(size=1,opacity=0.75),selector=dict(mode='markers')) fig.update_layout(title=os.path.basename(embedding_filename)+' min_cluster='+str(min_cluster_size)+' min_samples='+str(min_samples), font=dict(family="Courier New, monospace",size=18,color="#7f7f7f") ) fig.write_html('test.html', auto_open=True) from plotly.graph_objs import * import pandas as pd import plotly.express as px for f in glob.glob(r'../npy/clusterable_embedding.merge.primary.ilastik.npy'): print(f) data = np.load(f,allow_pickle=True) df = pd.DataFrame(data=data,columns=['x','y','z','label']) df['label'] = df['label'].apply(round).apply(str) fig = px.scatter_3d(df, x="x", y="y", z="z", color="label", hover_name="label", color_discrete_sequence=px.colors.qualitative.Set2) 
fig.update_traces(marker=dict(size=1,opacity=0.75),selector=dict(mode='markers')) fig.update_layout(margin=dict(l=0, r=0, b=0, t=0), legend=go.layout.Legend( x=0, y=1, traceorder='normal', font=dict(family='sans-serif',size=20,color='black'), itemsizing='constant' #to show larget legend markers )) fig.write_html(f+'.html', auto_open=True) from plotly.graph_objs import * import pandas as pd import plotly.express as px for f in glob.glob(r'../npy/clusterable_embedding.merge.38-52-53-57-380.ilastik+HE.-intensity.-xy.npy'): print(f) data = np.load(f,allow_pickle=True) df = pd.DataFrame(data=data,columns=['x','y','z','label']) df['label'] = df['label'].apply(round).apply(str) fig = px.scatter_3d(df, x="x", y="y", z="z", color="label", hover_name="label", color_discrete_sequence=px.colors.qualitative.Set2) fig.update_traces(marker=dict(size=1,opacity=0.75),selector=dict(mode='markers')) fig.update_layout(margin=dict(l=0, r=0, b=0, t=0)) fig.write_html(f+'.html', auto_open=True) from plotly.graph_objs import * import pandas as pd import plotly.express as px for f in glob.glob(r'../npy/clusterable_embedding.merge.38-52-53-57.HE.*.npy'): print(f) data = np.load(f,allow_pickle=True) df = pd.DataFrame(data=data,columns=['x','y','z','label']) df['label'] = df['label'].apply(round).apply(str) fig = px.scatter_3d(df, x="x", y="y", z="z", color="label", hover_name="label", color_discrete_map={"38": "red","52": "green","53": "blue","57": "goldenrod"}) # color_discrete_sequence=px.colors.qualitative.Set2) fig.update_traces(marker=dict(size=1,opacity=0.75),selector=dict(mode='markers')) fig.update_layout(margin=dict(l=0, r=0, b=0, t=0)) fig.write_html(f+'.html', auto_open=True) from plotly.graph_objs import * import pandas as pd import plotly.express as px for f in glob.glob(r'../npy/clusterable_embedding.merge.38-52-53-57.ilastik+qpHE.?intensity.?xy.npy'): print(f) data = np.load(f,allow_pickle=True) df = pd.DataFrame(data=data,columns=['x','y','z','label']) df['label'] = df['label'].apply(round).apply(str) fig = px.scatter_3d(df, x="x", y="y", z="z", color="label", hover_name="label", color_discrete_map={"38": "red","52": "green","53": "blue","57": "goldenrod"}) # color_discrete_sequence=px.colors.qualitative.Set2) fig.update_traces(marker=dict(size=2,opacity=0.75),selector=dict(mode='markers')) fig.update_layout(margin=dict(l=0, r=0, b=0, t=0)) fig.write_html(f+'.html', auto_open=True) ``` Rotate the pointcloud ``` f='../npy/clusterable_embedding.merge.38-52-53-57.ilastik.-intensity.+xy.npy' embedding = np.load(f,allow_pickle=True) df = pd.DataFrame(data=embedding,columns=['x','y','z','label']) df['label'] = df['label'].apply(round).apply(str) colorsIdx = {"38": "red","52": "green","53": "blue","57": "goldenrod"} cols = df['label'].map(colorsIdx) trace=[go.Scatter3d(x=df['x'], y=df['y'], z=df['z'], mode='markers',marker=dict(size=1,opacity=0.5,color=cols))] x_eye = -1.25 y_eye = 2 z_eye = 0.5 layout = go.Layout( title='Animation Test', width=600, height=600, scene=dict(camera=dict(eye=dict(x=x_eye, y=y_eye, z=z_eye))), updatemenus=[dict(type='buttons', showactive=False, y=1, x=0.8, xanchor='left', yanchor='bottom', pad=dict(t=45, r=10), buttons=[dict(label='Play', method='animate', args=[None, dict(frame=dict(duration=2, redraw=False), transition=dict(duration=0), fromcurrent=True, mode='immediate' )] ) ] ) ] ) def rotate_z(x, y, z, theta): w = x+1j*y return np.real(np.exp(1j*theta)*w), np.imag(np.exp(1j*theta)*w), z frames=[] for t in np.arange(0, 6.26, 0.05): xe, ye, ze = rotate_z(x_eye, y_eye, z_eye, 
-t) frames.append(dict(layout=dict(scene=dict(camera=dict(eye=dict(x=xe, y=ye, z=ze)))))) fig = go.Figure(data=trace, layout=layout, frames=frames) fig.write_html(f+'.html', auto_open=True) ``` Plot samples with a discrete color sequence (different plots will have different color order) ``` from plotly.graph_objs import * import pandas as pd import plotly.express as px for f in glob.glob(r'../npy/clusterable_embedding.merge.38-52-53-57.ilastik.*.npy'): print(f) data = np.load(f,allow_pickle=True) df = pd.DataFrame(data=data,columns=['x','y','z','label']) df['label'] = df['label'].apply(round).apply(str) fig = px.scatter_3d(df, x="x", y="y", z="z", color="label", hover_name="label", color_discrete_sequence=px.colors.qualitative.Set2) fig.update_traces(marker=dict(size=1,opacity=0.75),selector=dict(mode='markers')) fig.update_layout(margin=dict(l=0, r=0, b=0, t=0)) fig.write_html(f+'.html', auto_open=True) ``` We can try to cluster the points in the multisample cloud. ``` from plotly.graph_objs import * import pandas as pd import plotly.express as px from sklearn.cluster import OPTICS from sklearn.cluster import DBSCAN import numpy as np X = data[:,:3] #clustering = OPTICS(min_samples=50).fit(X) clustering = DBSCAN(eps=0.6, min_samples=50).fit(X) df['label_optic'] = clustering.labels_ df['label_optic'] = df['label_optic'].apply(str) fig = px.scatter_3d(df, x="x", y="y", z="z", color="label_optic", hover_name="label", color_discrete_sequence=px.colors.qualitative.Set1) fig.update_traces(marker=dict(size=2,opacity=0.25),selector=dict(mode='markers')) fig.update_layout(margin=dict(l=0, r=0, b=0, t=0)) fig.write_html('merge-samples.html', auto_open=True) ``` See how the point cloud changes with sampling size ``` from numpy.random import normal as normal import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import matplotlib.animation as animation import matplotlib #################################################################################################### # load logvec from different samples ################################################################################################### import numpy as np import glob import umap import hdbscan import sklearn.cluster as cluster from sklearn.cluster import OPTICS import pandas as pd import plotly.express as px xs = [] ys = [] zs = [] lista = [ f for f in glob.glob(r'../npy/52.txt.r1.s*.logvec.npy') ] '''Use this to see individual random clusters''' for f in lista: X = np.load(f,allow_pickle=True) # create the array of vectorized covd data clusterable_embedding = umap.UMAP(min_dist=0.0,n_components=3,random_state=42).fit_transform(X) # this is used to identify clusters xs.append(list(clusterable_embedding[:,0])) ys.append(list(clusterable_embedding[:,1])) zs.append(list(clusterable_embedding[:,2])) '''Use this to see random sampling expansion''' for idx in range(2,len(lista)): print(idx) logvec_list = [ np.load(f,allow_pickle=True) for f in lista[:idx] ] X = np.vstack(logvec_list) # create the array of vectorized covd data clusterable_embedding = umap.UMAP(min_dist=0.0,n_components=3,random_state=42).fit_transform(X) # this is used to identify clusters xs.append(list(clusterable_embedding[:,0])) ys.append(list(clusterable_embedding[:,1])) zs.append(list(clusterable_embedding[:,2])) nfr = len(xs) # Number of frames fps = 1 # Frame per sec fig = plt.figure() ax = fig.add_subplot(111, projection='3d') sct, = ax.plot([], [], [], ".", markersize=1) def update(ifrm, xa, ya, za): sct.set_data(xa[ifrm], ya[ifrm]) 
sct.set_3d_properties(za[ifrm]) ax.set_xlim(-10,10) ax.set_ylim(-10,10) ax.set_zlim(-10,10) ani = animation.FuncAnimation(fig, update, nfr, fargs=(xs,ys,zs), interval=1000) fn = 'plot_3d_scatter_pooling_samples' ani.save(fn+'.mp4',writer='ffmpeg',fps=fps) ani.save(fn+'.gif',writer='imagemagick',fps=fps) plt.rcParams['animation.html'] = 'html5' ani ```
# Supervised Machine Learning Examples

Some examples of supervised machine learning in Python. First, load up a ton of modules...

```
import pandas as pd
import itertools
import matplotlib.pyplot as plt
import numpy as np

from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
# train_test_split moved from sklearn.cross_validation to sklearn.model_selection in newer scikit-learn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn import metrics

pd.options.mode.chained_assignment = None
```

## Load the data

Next, we have to load the data into a dataframe. In order to have a balanced dataset, we will use 10000 records from Alexa which will represent the not malicious domains, and 10000 records from `gameoverdga` representing the malicious domains. You can see that at the end we have 10000 of each.

```
df = pd.read_csv( '../../data/dga-full.csv' )

# Filter to alexa and gameoverdga
df = df[df['dsrc'].isin(['alexa','gameoverdga'])]
df.dsrc.value_counts()
```

## Add a Target Column

For our datasets, we need a numeric column to represent the classes. In our case we are going to call the column `isMalicious` and assign it a value of `0` if it is not malicious and `1` if it is.

```
df['isMalicious'] = df['dsrc'].apply( lambda x: 0 if x == "alexa" else 1 )
```

## Perform the Train/Test Split

For this, let's create a rather small training data set, as it will reduce the time needed to train a model. Feel free to try a 15%, 20% or even a 30% portion for the training data (lower percentages for slower machines). In this example, we will use 30% for training and 70% for testing. Normally you would want most of the data in the training set, but more training data can considerably extend the time needed to train a model. We're also going to need a list of column names for the feature columns as well as the target column.

```
train, test = train_test_split(df, test_size = 0.7)

features = ['length', 'dicts', 'entropy','numbers', 'ngram']
target = 'isMalicious'
```

## Create the Classifiers

The next step is to create the classifiers. What you'll see is that scikit-learn maintains a consistent interface for every machine learning algorithm. For a supervised model, the steps are:

1. Create the classifier object
2. Call the `.fit()` method with the training data set and the target
3. To make a prediction, call the `.predict()` method

```
# Create the Random Forest Classifier
random_forest_clf = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0)
random_forest_clf = random_forest_clf.fit( train[features], train[target])

# Next, create the SVM classifier
svm_classifier = svm.SVC()
svm_classifier = svm_classifier.fit(train[features], train[target])
```

## Comparing the Classifiers

Now that we have two different classifiers, let's compare them and see how they perform. Fortunately, scikit-learn has a series of functions to generate metrics for you. The first is the cross validation score.

```
scores = cross_val_score(random_forest_clf, train[features], train[target])
scores.mean()
```

We'll need to get the predictions from both classifiers, so we add columns to the test and training sets for the predictions.

```
test['predictions'] = random_forest_clf.predict( test[features] )
train['predictions'] = random_forest_clf.predict( train[features] )

test['svm-predictions'] = svm_classifier.predict( test[features])
train['svm-predictions'] = svm_classifier.predict( train[features])

test.head()
```

## Confusion Matrix

These are a little confusing (yuk yuk), but are a very valuable tool in evaluating your models. Scikit-learn has a function to generate a confusion matrix as shown below.

```
confusion_matrix( test['isMalicious'], test['predictions'])
```

The code below generates a nicer presentation of the confusion matrix for the random forest classifier. From: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py

```
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

# Compute confusion matrix
cnf_matrix = confusion_matrix( test['isMalicious'], test['predictions'])
np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Not Malicious', 'Malicious'],
                      title='RF Confusion matrix, without normalization')

# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Not Malicious', 'Malicious'], normalize=True,
                      title='RF Normalized confusion matrix')

plt.show()
```

And again for the SVM classifier.

```
# Compute confusion matrix
svm_cnf_matrix = confusion_matrix( test['isMalicious'], test['svm-predictions'])
np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(svm_cnf_matrix, classes=['Not Malicious', 'Malicious'],
                      title='SVM Confusion matrix, without normalization')

# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(svm_cnf_matrix, classes=['Not Malicious', 'Malicious'], normalize=True,
                      title='SVM Normalized confusion matrix')

plt.show()
```

## Feature Importance

Random Forest can calculate an importance score for each feature it uses in building the forest. This is available through the `random_forest_clf.feature_importances_` property.

```
importances = random_forest_clf.feature_importances_
importances
```

You can also visualize this with the following code, adapted from http://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html

```
# Standard deviation of the importances across the individual trees
std = np.std([tree.feature_importances_ for tree in random_forest_clf.estimators_], axis=0)
indices = np.argsort(importances)[::-1]

# Print the feature ranking
print("Feature ranking:")
for f in range(test[features].shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))

# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(test[features].shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(test[features].shape[1]), indices)
plt.xlim([-1, test[features].shape[1]])
plt.show()
```

You can calculate the accuracy with the `metrics.accuracy_score()` method, and finally, there is `metrics.classification_report()`, which calculates all the metrics except accuracy at once. Note that `target_names` must follow the sorted label order (`0` = not malicious, `1` = malicious).

```
pscore = metrics.accuracy_score(test['isMalicious'], test['predictions'])
pscore_train = metrics.accuracy_score(train['isMalicious'], train['predictions'])
print( metrics.classification_report(test['isMalicious'], test['predictions'],
                                     target_names=['Not Malicious', 'Malicious'] ) )

svm_pscore = metrics.accuracy_score(test['isMalicious'], test['svm-predictions'])
svm_pscore_train = metrics.accuracy_score(train['isMalicious'], train['svm-predictions'])
print( metrics.classification_report(test['isMalicious'], test['svm-predictions'],
                                     target_names=['Not Malicious', 'Malicious'] ) )

print( svm_pscore, svm_pscore_train)
print( pscore, pscore_train)
```
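As a closing note, the numbers printed by `classification_report` can be traced back to the raw confusion matrix. The snippet below is only an illustration with made-up counts (not taken from this dataset) showing how precision, recall, F1 and accuracy fall out of the true/false positive and negative cells.

```
import numpy as np

# Hypothetical 2x2 confusion matrix, rows = true label, columns = predicted label,
# with label order [0 = not malicious, 1 = malicious]
cm = np.array([[9500,  500],   # true not-malicious: TN, FP
               [ 300, 9700]])  # true malicious:     FN, TP

tn, fp, fn, tp = cm.ravel()

precision = tp / (tp + fp)              # of the domains flagged malicious, how many really were
recall    = tp / (tp + fn)              # of the malicious domains, how many were caught
f1        = 2 * precision * recall / (precision + recall)
accuracy  = (tp + tn) / cm.sum()        # overall fraction of correct predictions

print(precision, recall, f1, accuracy)
```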
### Stock Market Prediction And Forecasting Using Stacked LSTM

```
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pandas_datareader as pdr
import tensorflow as tf
from sklearn.metrics import mean_squared_error
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM

KEY = "e46970bde5460c8e6caece2452083771f7320e41"
df_master = pdr.get_data_tiingo('AAPL', api_key = KEY)
df_master.to_csv('AAPL.csv')
df_master = pd.read_csv('AAPL.csv')
df_master.info()
df_master.head()
df_master.tail()

#df1 = df_master.reset_index()['close']
df = df_master[['date','close']].copy()
df.info()

df['date'] = pd.to_datetime(df['date'])
df['date'].dt.strftime('%Y-%m-%d')
df.set_index('date', inplace = True)
df.info()
df.plot(figsize=(12,8))
len(df)

test_size = int(len(df)*0.35)
test_ind = len(df) - test_size
test_size

train = df.iloc[:test_ind]
test = df.iloc[test_ind:]

from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range = (0,1))
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)

time_step = 100
n_features = 1
generator = TimeseriesGenerator(scaled_train, scaled_train, length=time_step, batch_size=1)
validation_generator = TimeseriesGenerator(scaled_test, scaled_test, length = time_step, batch_size = 1)
print(len(generator))
print(len(validation_generator))

model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(100, n_features)))
model.add(LSTM(50, return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()

model.fit(generator, epochs = 100, validation_data = validation_generator)

loss = pd.DataFrame(model.history.history)
loss.plot()

!mkdir -p saved_model
model.save('saved_model/my_model')
#my_model = tf.keras.models.load_model('saved_model/my_model')
#my_model.summary()

train_predictions = []
first_step = scaled_train[-time_step:]
current_step = first_step.reshape((1, time_step, n_features))
for i in range(len(train)):
    current_pred = model.predict(current_step)[0]
    train_predictions.append(current_pred)
    current_step = np.append(current_step[:,1:,:], [[current_pred]], axis=1)

test_predictions = []
test_first_step = scaled_test[-time_step:]
test_current_step = test_first_step.reshape((1, time_step, n_features))
for i in range(len(test)):
    test_current_pred = model.predict(test_current_step)[0]
    test_predictions.append(test_current_pred)
    test_current_step = np.append(test_current_step[:,1:,:], [[test_current_pred]], axis=1)

print(len(df))
print(test_size)
training_size = len(df) - test_size
print(training_size)
print(len(train_predictions))
print(len(test_predictions))
print(training_size + test_size)

true_train_predictions = scaler.inverse_transform(train_predictions)
true_test_predictions = scaler.inverse_transform(test_predictions)
len(true_train_predictions)
len(true_test_predictions)

train.loc[:,'predictions'] = true_train_predictions
#future_predictions = np.array(future_predictions)
#type(future_predictions)

from sklearn.metrics import mean_squared_error
np.sqrt(mean_squared_error(train['close'], train['predictions']))
train.plot()

test.loc[:,'predictions'] = true_test_predictions
np.sqrt(mean_squared_error(test['close'], test['predictions']))
test.plot()

future_predictions = []
test_first_step = scaled_test[-time_step:]
test_current_step = test_first_step.reshape((1, time_step, n_features))
no_days = 30  # predict next 30 days
for i in range(no_days):
    test_current_pred = model.predict(test_current_step)[0]
    future_predictions.append(test_current_pred)
    test_current_step = np.append(test_current_step[:,1:,:], [[test_current_pred]], axis=1)

len(future_predictions)
true_future_predictions = scaler.inverse_transform(future_predictions)
test.tail()

dates = pd.date_range('1/1/2021', periods=no_days, freq='D')
dates

df_future = pd.DataFrame(data=true_future_predictions, columns=['Future'], index=dates)
df_future = df_future.rename(columns = {'index':'DATE'})
df_future
df_future.plot()

ax = test.plot(label='test')
df_future.plot(ax=ax)

ax = df.plot(figsize=(12,10), label='df')
df_future.plot(ax=ax)
```
github_jupyter
import math import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_datareader as pdr import tensorflow as tf from sklearn.metrics import mean_squared_error from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LSTM KEY = "e46970bde5460c8e6caece2452083771f7320e41" df_master = pdr.get_data_tiingo('AAPL', api_key = KEY) df_master.to_csv('AAPL.csv') df_master=pd.read_csv('AAPL.csv') df_master.info() df_master.head() df_master.tail() #df1 = df_master.reset_index()['close'] df = df_master[['date','close']].copy() df.info() df['date']= pd.to_datetime(df['date']) df['date'].dt.strftime('%Y-%m-%d') df.set_index('date', inplace = True) df.info() df.plot(figsize=(12,8)) len(df) test_size = int(len(df)*0.35) test_ind = len(df)- test_size test_size train = df.iloc[:test_ind] test = df.iloc[test_ind:] from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range = (0,1)) scaler.fit(train) scaled_train = scaler.transform(train) scaled_test = scaler.transform(test) time_step = 100 time_step = 100 n_features = 1 generator = TimeseriesGenerator(scaled_train, scaled_train, length=time_step, batch_size=1) validation_generator = TimeseriesGenerator(scaled_test, scaled_test, length = time_step, batch_size = 1) print(len(generator)) print(len(validation_generator)) model=Sequential() model.add(LSTM(50,return_sequences=True,input_shape=(100,n_features))) model.add(LSTM(50,return_sequences=True)) model.add(LSTM(50)) model.add(Dense(1)) model.compile(loss='mean_squared_error',optimizer='adam') model.summary() model.fit(generator,epochs = 100 , validation_data = validation_generator) loss = pd.DataFrame(model.history.history) loss.plot() !mkdir -p saved_model model.save('saved_model/my_model') #my_model = tf.keras.models.load_model('saved_model/my_model') #my_model.summary() train_predictions = [] first_step = scaled_train[-time_step:] current_step = first_step.reshape((1, time_step, n_features)) for i in range(len(train)): current_pred = model.predict(current_step)[0] train_predictions.append(current_pred) current_step = np.append(current_step[:,1:,:], [[current_pred]], axis=1) test_predictions = [] test_first_step = scaled_test[-time_step:] test_current_step = test_first_step.reshape((1, time_step, n_features)) for i in range(len(test)): test_current_pred = model.predict(test_current_step)[0] test_predictions.append(test_current_pred) test_current_step = np.append(test_current_step[:,1:,:], [[test_current_pred]], axis=1) print(len(df)) print(test_size) training_size = len(df)-test_size print(training_size) print(len(train_predictions)) print(len(test_predictions)) print(training_size + test_size) true_train_predictions = scaler.inverse_transform(train_predictions) true_test_predictions = scaler.inverse_transform(test_predictions) len(true_train_predictions) len(true_test_predictions) train.loc[:,'predictions'] = true_train_predictions #future_predictions = np.array(future_predictions) #type(future_predictions) from sklearn.metrics import mean_squared_error np.sqrt(mean_squared_error(train['close'],train['predictions'])) train.plot() test.loc[:,'predictions'] = true_test_predictions np.sqrt(mean_squared_error(test['close'],test['predictions'])) test.plot() future_predictions = [] test_first_step = scaled_test[-time_step:] test_current_step = test_first_step.reshape((1, time_step, n_features)) no_days = 30 # predict next 30 
days for i in range(no_days): test_current_pred = model.predict(test_current_step)[0] future_predictions.append(test_current_pred) test_current_step = np.append(test_current_step[:,1:,:], [[test_current_pred]], axis=1) len(future_predictions) true_future_predictions = scaler.inverse_transform(future_predictions) test.tail() dates = pd.date_range('1/1/2021', periods=no_days, freq='D') dates df_future = pd.DataFrame(data=true_future_predictions, columns=['Future'], index=dates ) df_future = df_future.rename(columns = {'index':'DATE'}) df_future df_future.plot() ax = test.plot(label='test') df_future.plot(ax=ax) ax = df.plot(figsize=(12,10),label='df') df_future.plot(ax=ax)
# Data collection pre-processing

Application of data pre-processing techniques to allow a preliminary analysis of the data while it is transformed into standardized and normalized data.

## Incorrect data types

Each column is assigned its correct data type. This can be verified through the `dtypes` property.

## Homogeneous data

This aspect involves categorical and numerical data. Categorical data should share the same formatting style, such as lower case and no whitespace at the beginning or end of the string. Numerical data should have, for example, the same number of digits after the decimal point, with consistent rounding up or down.

# Part 2 - Data Types and Formatting

* Categorical columns of type object were converted to string
* Categorical columns were converted to lower case
* Whitespace at the beginning and end of each record was removed
* Different values for the same concept were unified (a sketch of what such a formatting function might look like follows the code cells below):
    * the unique() function is used to list all values of a column
    * a function is applied that searches for a specific pattern in a string and then performs a substitution in that same string if the pattern is found

> In this case, patterns identified through the unique() function were used, such as:
> * '5 - very low' in the 'urgency' column, which also helped to better identify the urgency scale;
> * some date columns were also identified and their types converted;

```
from fun_dependencies import *

# Load data
df_interacao = load_data('cleaning_dataset/pt1/Detail_Interaction_pt1.csv', sep=';')
df_incidente = load_data('cleaning_dataset/pt1/Detail_Incident_pt1.csv', sep=';')
df_atividades_incidente = load_data('cleaning_dataset/pt1/Detail_Incident_Activity_pt1.csv', sep=';')
df_mudancas = load_data('cleaning_dataset/pt1/Detail_Change_pt1.csv', sep=';')
```

### Interacao table

```
%%capture cap --no-stderr
print("---------------------------------------Table: Interacao---------------------------------------\n")
print(f'Attributes type\n{df_interacao.dtypes}\n')
df_interacao = data_formatting(df_interacao)
print(f'Attributes type\n{df_interacao.dtypes}\n')

with open('output/output_pt2.txt', 'w') as f:
    f.write(cap.stdout)
```

### Incidente table

```
%%capture cap --no-stderr
print("---------------------------------------Table: Incidente---------------------------------------\n")
print(f'Attributes type\n{df_incidente.dtypes}\n')
df_incidente = data_formatting(df_incidente)
print(f'Attributes type\n{df_incidente.dtypes}\n')

with open('output/output_pt2.txt', 'a') as f:
    f.write(cap.stdout)
```

### Atividades Incidente table

```
%%capture cap --no-stderr
print("---------------------------------------Table: Atividades Incidente---------------------------------------\n")
print(f'Attributes type\n{df_atividades_incidente.dtypes}\n')
df_atividades_incidente = data_formatting(df_atividades_incidente)
print(f'Attributes type\n{df_atividades_incidente.dtypes}\n')

with open('output/output_pt2.txt', 'a') as f:
    f.write(cap.stdout)
```

### Mudanças table

```
%%capture cap --no-stderr
print("---------------------------------------Table: Mudancas---------------------------------------\n")
print(f'Attributes type\n{df_mudancas.dtypes}\n')
df_mudancas = data_formatting(df_mudancas)
print(f'Attributes type\n{df_mudancas.dtypes}\n')

with open('output/output_pt2.txt', 'a') as f:
    f.write(cap.stdout)

df_interacao.to_csv('cleaning_dataset/pt2/Detail_Interaction_pt2.csv', sep=';', encoding='utf-8', index=False)
df_incidente.to_csv('cleaning_dataset/pt2/Detail_Incident_pt2.csv', sep=';', encoding='utf-8', index=False)
df_atividades_incidente.to_csv('cleaning_dataset/pt2/Detail_Incident_Activity_pt2.csv', sep=';', encoding='utf-8', index=False)
df_mudancas.to_csv('cleaning_dataset/pt2/Detail_Change_pt2.csv', sep=';', encoding='utf-8', index=False)
```
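The cells above import `load_data` and `data_formatting` from `fun_dependencies`, which is not included in this notebook. As a rough, hypothetical sketch of what such a formatting helper could look like (based only on the steps listed above; the function name, patterns and column choices are illustrative, not the project's actual code):

```
import pandas as pd

def data_formatting_sketch(df: pd.DataFrame) -> pd.DataFrame:
    """Hypothetical stand-in for fun_dependencies.data_formatting, following the steps above."""
    df = df.copy()
    # Categorical (object) columns: cast to string, lower-case, strip surrounding whitespace
    for col in df.select_dtypes(include="object").columns:
        df[col] = df[col].astype(str).str.lower().str.strip()
    # Unify different spellings of the same concept, e.g. the urgency scale seen via unique()
    if "urgency" in df.columns:
        df["urgency"] = df["urgency"].str.replace(r"^5\s*-\s*very\s*low$", "5 - very low", regex=True)
    # Convert columns whose names suggest dates to datetime
    for col in [c for c in df.columns if "date" in c.lower()]:
        df[col] = pd.to_datetime(df[col], errors="coerce")
    return df
```

Usage would mirror the cells above, e.g. `df_interacao = data_formatting_sketch(df_interacao)`; the real `data_formatting` in `fun_dependencies` may differ in detail.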
``` import glob import json import pandas as pd import os import gzip import re from nltk.stem import WordNetLemmatizer from nltk import pos_tag from nltk.corpus import stopwords import numpy as np import pandas as pd from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.neural_network import MLPClassifier from sklearn.ensemble import RandomForestClassifier #Calculate accuracy from sklearn.model_selection import KFold from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report def read_data(directory): dfs = [] for label in ['real', 'fake']: for file in glob.glob(directory + os.path.sep + label + os.path.sep + '*gz'): print('reading %s' % file) df = pd.DataFrame((json.loads(line) for line in gzip.open(file))) df['label'] = label dfs.append(df) df=pd.concat(dfs)[['publish_date', 'source', 'text', 'title', 'tweets', 'label']] list_text = [i for i in list(df.text) if i != ''] return df[df.text.isin(list_text)] directory = r'C:\Users\lenovo\Desktop\IIT\training_data_2' df = read_data(directory) def get_text(list): stopword=set(stopwords.words('english')) list_new=[] for l in list: l=re.sub(r"[^\w']",' ',l).lower() l1=[tokennizer(w) for w in l.split() if len(tokennizer(w))>2] l=' '.join(l1) l1=[tokennizer(w) for w in l.split() if len(tokennizer(w))>2 and tokennizer(w) not in stopword] l=' '.join(lemmatize(l1)) list_new.append(l) return list_new def tokennizer(s): s = re.sub(r'http\S+', '', s) s = re.sub(r'[0-9_\s]+', '', s) s = re.sub(r"[^'\w]+", '', s) s = re.compile(r"(?<=[a-zA-Z])'re").sub(' are', s) s = re.compile(r"(?<=[a-zA-Z])'m").sub(' am', s) s = re.compile(r"(?<=[a-zA-Z])'ve").sub(' have', s) s = re.compile(r"(it|he|she|that|this|there|here|what|where|when|who|why|which)('s)").sub(r"\1 is", s) s = re.sub(r"'s", "", s) s = re.sub(r"can't", 'can not', s) s = re.compile(r"(?<=[a-zA-Z])n't").sub(' not', s) s = re.compile(r"(?<=[a-zA-Z])'ll").sub(' will', s) s = re.compile(r"(?<=[a-zA-Z])'d").sub(' would', s) return s def lemmatize(l): wnl = WordNetLemmatizer() for word, tag in pos_tag(l): if tag.startswith('NN'): yield wnl.lemmatize(word, pos='n') elif tag.startswith('VB'): yield wnl.lemmatize(word, pos='v') elif tag.startswith('JJ'): yield wnl.lemmatize(word, pos='a') elif tag.startswith('R'): yield wnl.lemmatize(word, pos='r') else: yield word text = get_text(list(df.text)) vec1 = TfidfVectorizer(min_df=2, max_df=1., ngram_range=(1, 1),stop_words= 'english') X = vec1.fit_transform(text) y = np.array(df.label) print('MLP----hidden_layer_sizes---') accdf = pd.DataFrame(np.random.randn(3, 3), index=['1', '2', '3'],columns=['hidden_layer_sizes', 'Accuracy','std']) for i,hidden_layer_sizes in zip([0,1,2],[10,50,100,200]): MP = MLPClassifier(hidden_layer_sizes = (hidden_layer_sizes,)) Y = y kf = KFold(n_splits=5, shuffle=True, random_state=42) accuracies = [] for train, test in kf.split(X): MP.fit(X[train], Y[train]) pred = MP.predict(X[test]) accuracies.append(accuracy_score(Y[test], pred)) mean_acc = np.mean(accuracies) std = np.std(accuracies) accdf['hidden_layer_sizes'][i] = hidden_layer_sizes accdf['Accuracy'][i] = mean_acc accdf['std'][i] = std accdf print('MLP----alpha---') accdf = pd.DataFrame(np.random.randn(3, 3), index=['1', '2', '3'],columns=['alpha', 'Accuracy','std']) for i,alpha in zip([0,1,2],[.001,.0001,.00001]): MP = MLPClassifier(alpha = alpha) Y = y kf = KFold(n_splits=5, shuffle=True, random_state=42) accuracies = [] for train, test in kf.split(X): 
MP.fit(X[train], Y[train]) pred = MP.predict(X[test]) accuracies.append(accuracy_score(Y[test], pred)) mean_acc = np.mean(accuracies) std = np.std(accuracies) accdf['alpha'][i] = alpha accdf['Accuracy'][i] = mean_acc accdf['std'][i] = std accdf print('RandomForest----min_samples_leaf---') accdf = pd.DataFrame(np.random.randn(3, 3), index=['1', '2', '3'],columns=['min_samples_leaf', 'Accuracy','std']) for i,min_samples_leaf in zip([0,1,2],[1,3,5]): RFC = RandomForestClassifier(min_samples_leaf = min_samples_leaf) Y = y kf = KFold(n_splits=5, shuffle=True, random_state=42) accuracies = [] for train, test in kf.split(X): RFC.fit(X[train], Y[train]) pred = RFC.predict(X[test]) accuracies.append(accuracy_score(Y[test], pred)) mean_acc = np.mean(accuracies) std = np.std(accuracies) accdf['min_samples_leaf'][i] = min_samples_leaf accdf['Accuracy'][i] = mean_acc accdf['std'][i] = std accdf print('RandomForest----n_estimators---') accdf = pd.DataFrame(np.random.randn(3, 3), index=['1', '2', '3'],columns=['n_estimators', 'Accuracy','std']) for i,n_estimators in zip([0,1,2],[100,200,300]): # print('==================n_estimators : %d ================' %(n_estimators)) RFC = RandomForestClassifier(n_estimators = n_estimators ) Y = y kf = KFold(n_splits=5, shuffle=True, random_state=42) accuracies = [] for train, test in kf.split(X): RFC.fit(X[train], Y[train]) pred = RFC.predict(X[test]) accuracies.append(accuracy_score(Y[test], pred)) # print(classification_report(Y[test], pred)) # print('accuracy over all cross-validation folds: %s' % str(accuracies)) mean_acc = np.mean(accuracies) std = np.std(accuracies) # print('mean=%.2f std=%.2f' % (mean_acc, std)) accdf['n_estimators'][i] = n_estimators accdf['Accuracy'][i] = mean_acc accdf['std'][i] = std accdf ```
<table width="100%"> <tr style="border-bottom:solid 2pt #009EE3"> <td style="text-align:left" width="10%"> <a href="biosignalsnotebooks.dwipynb" download><img src="../../images/icons/download.png"></a> </td> <td style="text-align:left" width="10%"> <a href="https://mybinder.org/v2/gh/biosignalsnotebooks/biosignalsnotebooks/biosignalsnotebooks_binder?filepath=biosignalsnotebooks_environment%2Fcategories%2FMainFiles%2Fbiosignalsnotebooks.dwipynb" target="_blank"><img src="../../images/icons/program.png" title="Be creative and test your solutions !"></a> </td> <td></td> <td style="text-align:left" width="5%"> <a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/home.png"></a> </td> <td style="text-align:left" width="5%"> <a href="../MainFiles/contacts.ipynb"><img src="../../images/icons/contacts.png"></a> </td> <td style="text-align:left" width="5%"> <a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank"><img src="../../images/icons/github.png"></a> </td> <td style="border-left:solid 2pt #009EE3" width="15%"> <img src="../../images/ost_logo.png"> </td> </tr> </table> <link rel="stylesheet" href="../../styles/theme_style.css"> <img src="../../images/OS_logo_title_slim.png"> <div class="title"><h2 class="color11"> Available Notebooks </h2></div> <table id="notebook_list" width="100%"> <tr> <td width="20%" class="center_cell group_by_header_grey"> Category </td> <td width="60%" class="center_cell group_by_header"></td> <td width="20%" class="center_cell"></td> </tr> <tr> <td rowspan='3' class='center_cell open_cell_border_13'><span style='float:center'><img src='../../images/icons/Install.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color13'>Install</span></td> <td class='center_cell color13_cell color1_top'><span style='float:center'>Install</span></td> <td class='center_cell gradient_color13'></td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Install/prepare_anaconda.ipynb'> Download, Install and Execute Anaconda </a> </td> <td class='center_cell'> <a href='../Install/prepare_anaconda.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr class='border_cell_bottom_white'> <td class='center_cell open_cell_light'> <a href='../Install/prepare_jupyter.ipynb'> Download, Install and Execute Jypyter Notebook Environment </a> </td> <td class='center_cell'> <a href='../Install/prepare_jupyter.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr> <td rowspan='2' class='center_cell open_cell_border_14'><span style='float:center'><img src='../../images/icons/Connect.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color14'>Connect</span></td> <td class='center_cell color14_cell '><span style='float:center'>Connect</span></td> <td class='center_cell gradient_color14'></td> </tr> <tr class='border_cell_bottom_white'> <td class='center_cell open_cell_light'> <a href='../Connect/pairing_device.ipynb'> Pairing a Device at Windows 10 [biosignalsplux] </a> </td> <td class='center_cell'> <a href='../Connect/pairing_device.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr> <td rowspan='5' class='center_cell open_cell_border_2'><span style='float:center'><img src='../../images/icons/Record.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color2'>Record</span></td> <td class='center_cell color2_cell '><span style='float:center'>Record</span></td> <td class='center_cell gradient_color2'></td> </tr> <tr > 
<td class='center_cell open_cell_light'> <a href='../Record/record_data.ipynb'> Signal Acquisition [OpenSignals] </a> </td> <td class='center_cell'> <a href='../Record/record_data.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Record/resolution.ipynb'> Resolution - The difference between smooth and abrupt variations </a> </td> <td class='center_cell'> <a href='../Record/resolution.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Record/sampling_rate_and_aliasing.ipynb'>Problems of low sampling rate (aliasing)</a> </td> <td class='center_cell'> <a href='../Record/sampling_rate_and_aliasing.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr class='border_cell_bottom_white'> <td class='center_cell open_cell_light'> <a href='../Record/store_signals_after_acquisition.ipynb'> Store Files after Acquisition [OpenSignals] </a> </td> <td class='center_cell'> <a href='../Record/store_signals_after_acquisition.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr> <td rowspan='5' class='center_cell open_cell_border_1'><span style='float:center'><img src='../../images/icons/Load.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color1'>Load</span></td> <td class='center_cell color1_cell '><span style='float:center'>Load</span></td> <td class='center_cell gradient_color1'></td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Load/open_h5.ipynb'>Load acquired data from .h5 file</a> </td> <td class='center_cell'> <a href='../Load/open_h5.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Load/open_signals_after_acquisition.ipynb'> Load Signals after Acquisition [OpenSignals] </a> </td> <td class='center_cell'> <a href='../Load/open_signals_after_acquisition.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Load/open_txt.ipynb'>Load acquired data from .txt file</a> </td> <td class='center_cell'> <a href='../Load/open_txt.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr class='border_cell_bottom_white'> <td class='center_cell open_cell_light'> <a href='../Load/signal_loading_preparatory_steps.ipynb'>Signal Loading - Working with File Header </a> </td> <td class='center_cell'> <a href='../Load/signal_loading_preparatory_steps.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr> <td rowspan='8' class='center_cell open_cell_border_4'><span style='float:center'><img src='../../images/icons/Pre-Process.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color4'>Pre-Process</span></td> <td class='center_cell color4_cell '><span style='float:center'>Pre-Process</span></td> <td class='center_cell gradient_color4'></td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Pre-Process/digital_filtering.ipynb'> Digital Filtering - A Fundamental Pre-Processing Step </a> </td> <td class='center_cell'> <a href='../Pre-Process/digital_filtering.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Pre-Process/emg_fatigue_evaluation_median_freq.ipynb'>Fatigue Evaluation - Evolution of Median Power Frequency</a> </td> <td class='center_cell'> <a href='../Pre-Process/emg_fatigue_evaluation_median_freq.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell 
open_cell_light'> <a href='../Pre-Process/generation_of_time_axis.ipynb'> Generation of a time axis (conversion of samples into seconds) </a> </td> <td class='center_cell'> <a href='../Pre-Process/generation_of_time_axis.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Pre-Process/unit_conversion_ecg.ipynb'>ECG Sensor - Unit Conversion </a> </td> <td class='center_cell'> <a href='../Pre-Process/unit_conversion_ecg.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Pre-Process/unit_conversion_eeg.ipynb'>EEG Sensor - Unit Conversion </a> </td> <td class='center_cell'> <a href='../Pre-Process/unit_conversion_eeg.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Pre-Process/unit_conversion_emg.ipynb'>EMG Sensor - Unit Conversion </a> </td> <td class='center_cell'> <a href='../Pre-Process/unit_conversion_emg.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr class='border_cell_bottom_white'> <td class='center_cell open_cell_light'> <a href='../Pre-Process/unit_conversion_fNIRS.ipynb'>fNIRS Sensor - Unit Conversion </a> </td> <td class='center_cell'> <a href='../Pre-Process/unit_conversion_fNIRS.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr> <td rowspan='3' class='center_cell open_cell_border_5'><span style='float:center'><img src='../../images/icons/Detect.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color5'>Detect</span></td> <td class='center_cell color5_cell '><span style='float:center'>Detect</span></td> <td class='center_cell gradient_color5'></td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Detect/detect_bursts.ipynb'> Event Detection - Muscular Activations (EMG) </a> </td> <td class='center_cell'> <a href='../Detect/detect_bursts.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr class='border_cell_bottom_white'> <td class='center_cell open_cell_light'> <a href='../Detect/r_peaks.ipynb'> Event Detection - R Peaks (ECG) </a> </td> <td class='center_cell'> <a href='../Detect/r_peaks.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr> <td rowspan='2' class='center_cell open_cell_border_6'><span style='float:center'><img src='../../images/icons/Extract.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color6'>Extract</span></td> <td class='center_cell color6_cell '><span style='float:center'>Extract</span></td> <td class='center_cell gradient_color6'></td> </tr> <tr class='border_cell_bottom_white'> <td class='center_cell open_cell_light'> <a href='../Extract/emg_parameters.ipynb'> EMG Analysis - Time and Frequency Parameters </a> </td> <td class='center_cell'> <a href='../Extract/emg_parameters.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr> <td rowspan='5' class='center_cell open_cell_border_7'><span style='float:center'><img src='../../images/icons/Train_and_Classify.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color7'>Train_and_Classify</span></td> <td class='center_cell color7_cell '><span style='float:center'>Train_and_Classify</span></td> <td class='center_cell gradient_color7'></td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Train_and_Classify/classification_game_orange.ipynb'> Rock, Paper or Scissor Game - Train and Classify [Orange] </a> </td> <td class='center_cell'> <a 
href='../Train_and_Classify/classification_game_orange.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Train_and_Classify/classification_game_volume_1.ipynb'> Rock, Paper or Scissor Game - Train and Classify [Volume 1] </a> </td> <td class='center_cell'> <a href='../Train_and_Classify/classification_game_volume_1.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr > <td class='center_cell open_cell_light'> <a href='../Train_and_Classify/classification_game_volume_2.ipynb'> Rock, Paper or Scissor Game - Train and Classify [Volume 2] </a> </td> <td class='center_cell'> <a href='../Train_and_Classify/classification_game_volume_2.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr class='border_cell_bottom_white'> <td class='center_cell open_cell_light'> <a href='../Train_and_Classify/classification_game_volume_3.ipynb'> Rock, Paper or Scissor Game - Train and Classify [Volume 3] </a> </td> <td class='center_cell'> <a href='../Train_and_Classify/classification_game_volume_3.ipynb'><div class='file_icon'></div></a> </td> </tr> <tr> <td rowspan='2' class='center_cell open_cell_border_12'><span style='float:center'><img src='../../images/icons/Evaluate.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color12'>Evaluate</span></td> <td class='center_cell color12_cell '><span style='float:center'>Evaluate</span></td> <td class='center_cell gradient_color12'></td> </tr> <tr class='border_cell_bottom_white'> <td class='center_cell open_cell_light'> <a href='../Evaluate/classification_game_volume_4.ipynb'> Rock, Paper or Scissor Game - Train and Classify [Volume 4] </a> </td> <td class='center_cell'> <a href='../Evaluate/classification_game_volume_4.ipynb'><div class='file_icon'></div></a> </td> </tr> </table><br>**<span style="color:#009EE3">biosignalsnotebooks</span>** (<a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf">see project presentation<img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>) is a set of documents and a **<span class="color1">Python</span>** library to provide programming examples in the form of **<span class="color5">Jupyter Notebooks</span>**, as companion to the **<span style="color:#009EE3">OpenSignals</span>** biosignals acquisition tools. This collection of code samples has the purpose to help users of PLUX Wireless Biosignals systems, such as **bitalino** or **biosignalsplux**, and to the researcher or student interested on recording processing and classifying biosignals. The examples are set on a level of complexity to inspire the users and programmers on how easy some tasks are and that more complex ones can also be achieved, by reusing and recreating some of the examples presented here. A **<span class="color1">Python</span>** library (entitled **<span style="color:#009EE3">biosignalsnotebooks</span>** ) is the base toolbox to support the notebooks and to provide some useful functionalities. It can be installed through pip command, like demonstrated in a <a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">PyPI <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> dedicated page. In many cases we also point and illustrate with code the usage of other python toolboxes dedicated to biosignal processing. 
The notebooks will cover the full topics pipeline of working with biosignals, such as: **<span class="color1">Load</span>** a file; **<span class="color3">Visualise</span>** the data online and offline, **<span class="color4">Pre-Process</span>** a one channel signal or a multi-channel acquisition, **<span class="color5">Detect</span>** relevant events in the signals, **<span class="color6">Extract</span>** features from many different type of sensors and domains, **<span class="color7">Train and Classify</span>** among a set of classes with several machine learning approaches, **<span class="color8">Understand</span>** the obtained results with metrics and validations techniques. These examples are carried in a multitude of biosignals , from ECG, EDA, EMG, Accelerometer, Respiration among many others. The notebooks have a set of labels to help navigate among topics <a href="../MainFiles/by_tag.ipynb"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>, types of signals <a href="../MainFiles/by_signal_type.ipynb"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>, application area <a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> and complexity <a href="../MainFiles/by_diff.ipynb"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> level to support the search for particular solutions. We encourage you to share new example ideas, to pose questions info@plux.info, and to make improvements or suggestion to this set of notebooks. **Be inspired on how to make the most of your biosignals!** <br> <hr> <table width="100%"> <tr> <td style="border-right:solid 3px #009EE3" width="20%"> <img src="../../images/ost_logo.png"> </td> <td width="40%" style="text-align:left"> <a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf" target="_blank">&#9740; Project Presentation</a> <br> <a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank">&#9740; GitHub Repository</a> <br> <a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">&#9740; How to install biosignalsnotebooks Python package ?</a> <br> <a href="../MainFiles/signal_samples.ipynb">&#9740; Signal Library</a> </td> <td width="40%" style="text-align:left"> <a href="../MainFiles/biosignalsnotebooks.ipynb">&#9740; Notebook Categories</a> <br> <a href="../MainFiles/by_diff.ipynb">&#9740; Notebooks by Difficulty</a> <br> <a href="../MainFiles/by_signal_type.ipynb">&#9740; Notebooks by Signal Type</a> <br> <a href="../MainFiles/by_tag.ipynb">&#9740; Notebooks by Tag</a> </td> </tr> </table> <span class="color6">**Auxiliary Code Segment (should not be replicated by the user)**</span> ``` from biosignalsnotebooks.__notebook_support__ import css_style_apply css_style_apply() %%html <script> // AUTORUN ALL CELLS ON NOTEBOOK-LOAD! require( ['base/js/namespace', 'jquery'], function(jupyter, $) { $(jupyter.events).on("kernel_ready.Kernel", function () { console.log("Auto-running all cells-below..."); jupyter.actions.call('jupyter-notebook:run-all-cells-below'); jupyter.actions.call('jupyter-notebook:save-notebook'); }); } ); </script> ```
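As mentioned above, the `biosignalsnotebooks` package is published on PyPI; a minimal, notebook-style install-and-import cell (assuming a standard pip environment) might look like this:

```
# Install the biosignalsnotebooks package from PyPI (run once per environment)
!pip install biosignalsnotebooks

# Import it to confirm the installation
import biosignalsnotebooks as bsnb
print(bsnb.__name__)
```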
``` #Importing libraries import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import Model import pandas as pd import matplotlib.pyplot as plt import numpy as np from os import getcwd from tensorflow import keras from tensorflow.keras import layers, Sequential from tensorflow.keras.layers import LSTM, Dense, Bidirectional, Embedding, Dropout from sklearn import preprocessing import datetime as dt from pandas.tseries.offsets import DateOffset from google.colab import drive drive.mount('/content/drive') #Reading data url = "https://hub.mph.in.gov/dataset/bd08cdd3-9ab1-4d70-b933-41f9ef7b809d/resource/afaa225d-ac4e-4e80-9190-f6800c366b58/download/covid_report_county_date.xlsx?raw=true" cases_data = pd.read_excel(url) population_data = pd.read_csv(f"{getcwd()}/drive/My Drive/Colab Notebooks/COVID forecasting/population density of indiana county.csv") #Data preprocessing population_data["County"] = population_data["County"].str.upper() population_data["County"] = population_data["County"].str.replace('.', '') population_data["County"] = population_data["County"].str.replace(' ', '') cases_data["COUNTY_NAME"] = cases_data["COUNTY_NAME"].str.upper() cases_data["COUNTY_NAME"] = cases_data["COUNTY_NAME"].str.replace('.', '') cases_data["COUNTY_NAME"] = cases_data["COUNTY_NAME"].str.replace(' ','') data = pd.merge(cases_data, population_data, how = 'inner', left_on = 'COUNTY_NAME', right_on = 'County') data = data[["COUNTY_NAME", "DATE", "COVID_COUNT", "COVID_DEATHS", "COVID_TESTS", "Population", "Area", "Population Density"]] data data.describe() county_name = data["COUNTY_NAME"].unique() print(county_name) print(len(county_name)) print('Length of the sample: ', len(data)) # Model Parameters time_steps = 21 # No. of historical data to be used for forecasting BATCH_SIZE = 1024 # No. of observations to be used at a time for training NO_OF_DAYS_TO_PREDICT = 5 # No. 
of days to forecasted def create_dataset(X, y, time_steps=1): Xs, ys = [], [] for i in range(len(X) - time_steps): v = X.iloc[i:(i + time_steps)].values Xs.append(v) ys.append(y.iloc[i + time_steps]) return np.array(Xs), np.array(ys) #Scaling the data scaler = preprocessing.StandardScaler() scaled_data = scaler.fit_transform(data[['COVID_COUNT', 'COVID_DEATHS', 'COVID_TESTS', 'Population', 'Area', 'Population Density']]) data["COVID_COUNT"] = scaled_data[:, 0] data["COVID_DEATHS"] = scaled_data[:, 1] data["COVID_TESTS"] = scaled_data[:, 2] data["Population"] = scaled_data[:, 3] data["Area"] = scaled_data[:, 4] data["Population Density"] = scaled_data[:, 5] data # reshapes to [samples, time_steps, n_features] X_train, y_train = [], [] for county in county_name: filtered_data = data.loc[data["COUNTY_NAME"] == county] train_size = len(filtered_data) filtered_train = filtered_data.iloc[0:train_size] filtered_X_train, filtered_y_train = create_dataset(filtered_train[['COVID_COUNT', 'COVID_DEATHS', 'COVID_TESTS', 'Population', 'Area', 'Population Density']], filtered_train[['COVID_COUNT', 'COVID_DEATHS', 'COVID_TESTS']], time_steps) if(len(X_train) == 0): X_train, y_train = filtered_X_train, filtered_y_train else: X_train = np.vstack((X_train, filtered_X_train)) y_train = np.vstack((y_train, filtered_y_train)) print(X_train.shape, y_train.shape) # Loading model model = keras.models.load_model(f"{getcwd()}/drive/My Drive/Colab Notebooks/COVID forecasting/checkpoint.h5") model.summary() #Defining Model # model = Sequential() # model.add(LSTM(BATCH_SIZE, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2]))) # model.add(Dropout(0.1)) # model.add(LSTM(512, return_sequences=True)) # model.add(Dropout(0.1)) # model.add(LSTM(256, return_sequences=True)) # model.add(Dropout(0.1)) # model.add(LSTM(64, return_sequences=False)) # model.add(Dropout(0.1)) # model.add(Dense(32, activation='relu')) # model.add(Dense(3)) # model.summary() # model.compile(loss = 'mean_squared_error', optimizer = 'rmsprop', metrics = ['mse', 'accuracy']) #1500 epochs for training model history = model.fit( X_train, y_train, epochs = 50, batch_size = BATCH_SIZE ) fig, (ax1, ax2) = plt.subplots(1, 2) fig.suptitle('Evaluation Metrics') fig.set_size_inches(15, 6) ax1.plot(history.history['accuracy']) ax1.title.set_text('Model Accuracy') ax1.set_xlabel('Epochs') ax1.set_ylabel('Units') ax2.plot(history.history['loss']) ax2.title.set_text('Model Loss') ax2.set_xlabel('Epochs') ax2.set_ylabel('Units') plt.show() checkpoint_path = f"{getcwd()}/drive/My Drive/Colab Notebooks/COVID forecasting/checkpoint.h5" model.save(checkpoint_path) forecasted_data = pd.DataFrame() for county in county_name: demo_data = data[data['COUNTY_NAME'] == county] demo_data = demo_data[['COVID_COUNT', 'COVID_DEATHS', 'COVID_TESTS', 'Population', 'Area', 'Population Density']] demo_data = demo_data.tail(time_steps + 1) for day in range(NO_OF_DAYS_TO_PREDICT): demo_X_test, demo_y_test = create_dataset(demo_data[['COVID_COUNT', 'COVID_DEATHS', 'COVID_TESTS', 'Population', 'Area', 'Population Density']].tail(time_steps + 1), demo_data[['COVID_COUNT', 'COVID_DEATHS', 'COVID_TESTS']].tail(time_steps + 1), time_steps) predictions = np.concatenate((model.predict(demo_X_test), np.zeros((1, 3))), axis = 1) predictions[0,3] = np.array(demo_data['Population'])[0] predictions[0,4] = np.array(demo_data['Area'])[0] predictions[0,5] = np.array(demo_data['Population Density'])[0] demo_data = pd.DataFrame(np.concatenate((demo_data, predictions), axis = 0)) 
demo_data.columns = ['COVID_COUNT', 'COVID_DEATHS', 'COVID_TESTS', 'Population', 'Area', 'Population Density'] transformed_demo_data = np.intc(scaler.inverse_transform(demo_data)) demo_data['COVID_COUNT'] = np.ceil(np.abs(transformed_demo_data[:, 0])) demo_data['COVID_DEATHS'] = np.ceil(np.abs(transformed_demo_data[:, 1])) demo_data['COVID_TESTS'] = np.ceil(np.abs(transformed_demo_data[:, 2])) demo_data['Population'] = transformed_demo_data[:, 3] demo_data['Area'] = transformed_demo_data[:, 4] demo_data['Population Density'] = transformed_demo_data[:, 5] demo_data = demo_data[['COVID_COUNT', 'COVID_DEATHS', 'COVID_TESTS']] date_data = data[['DATE']] date_data = date_data.tail(time_steps + 6) date_data['DATE'] = pd.to_datetime(date_data['DATE']) + DateOffset(5) fig, (ax1, ax2, ax3) = plt.subplots(3, 1) fig.suptitle('COVID FORECASTING for ' + county + ' County') fig.set_size_inches(10, 26) plt.setp(ax1.xaxis.get_majorticklabels(), rotation=90) plt.setp(ax2.xaxis.get_majorticklabels(), rotation=90) plt.setp(ax3.xaxis.get_majorticklabels(), rotation=90) ax1.plot(date_data, demo_data['COVID_COUNT'], linestyle='-', marker='o', color = 'red', label = 'COVID CASES') ax1.grid(True) ax1.set_xlabel('Date') ax1.set_ylabel('Case Count') ax1.legend() ax2.plot(date_data, demo_data['COVID_DEATHS'], linestyle='-', marker='o', color = 'blue', label = 'COVID DEATHS') ax2.grid(True) ax2.set_xlabel('Date') ax2.set_ylabel('Death Count') ax2.legend() ax3.plot(date_data, demo_data['COVID_TESTS'], linestyle='-', marker='o', color = 'green', label = 'COVID TESTS') ax3.grid(True) ax3.set_xlabel('Date') ax3.set_ylabel('Test Count') ax3.legend() # plt.show() fig.savefig(f"{getcwd()}/drive/My Drive/Colab Notebooks/COVID forecasting/output_images/"+county+".png", bbox_inches='tight') plt.close() demo_data['COUNTY_NAME'] = county forecasted_data = pd.concat([forecasted_data, pd.DataFrame(demo_data.iloc[[time_steps + 1]])]) forecasted_data from urllib.request import urlopen import json with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) fips_code = pd.read_excel(f"{getcwd()}/drive/My Drive/Colab Notebooks/COVID forecasting/indiana_county_fips.xlsx") county_stats = pd.merge(fips_code, forecasted_data, how = 'inner', left_on = 'COUNTY_NAME', right_on = 'COUNTY_NAME') county_stats = county_stats[["COUNTY_NAME", "STATE", "FIPS_CODE", "COVID_COUNT", "COVID_DEATHS", "COVID_TESTS"]] county_stats.head(5) import plotly.express as px fig = px.choropleth_mapbox(county_stats, geojson=counties, locations='FIPS_CODE', color='COVID_COUNT', mapbox_style="carto-positron", zoom=6, center = {"lat": 39.8362, "lon": -86.1752}, opacity=0.5, labels={'COVID_COUNT':'FORECASTED COVID COUNT'}, hover_name = "COUNTY_NAME" ) fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0}) fig.update_traces(customdata = county_stats, hovertemplate='<b>%{customdata[0]}</b> <br>'+ 'Forecasted COVID Count: %{customdata[3]}<extra></extra>' ) fig.update_layout(hovermode="x") # fig.show() fig.write_html(f"{getcwd()}/drive/My Drive/Colab Notebooks/COVID forecasting/Forecasted_COVID_COUNT.html") fig = px.choropleth_mapbox(county_stats, geojson=counties, locations='FIPS_CODE', color='COVID_DEATHS', mapbox_style="carto-positron", zoom=6, center = {"lat": 39.8362, "lon": -86.1752}, opacity=0.5, labels={'COVID_DEATHS':'FORECASTED COVID DEATHS'}, hover_name = "COUNTY_NAME" ) fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0}) fig.update_traces(customdata = county_stats, 
hovertemplate='<b>%{customdata[0]}</b> <br>'+ 'Forecasted COVID Deaths: %{customdata[4]}<extra></extra>' ) fig.update_layout(hovermode="x") # fig.show() fig.write_html(f"{getcwd()}/drive/My Drive/Colab Notebooks/COVID forecasting/Forecasted_COVID_DEATHS.html") fig = px.choropleth_mapbox(county_stats, geojson=counties, locations='FIPS_CODE', color='COVID_TESTS', mapbox_style="carto-positron", zoom=6, center = {"lat": 39.8362, "lon": -86.1752}, opacity=0.5, labels={'COVID_TESTS':'FORECASTED COVID TEST'}, hover_name = "COUNTY_NAME" ) fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0}) fig.update_traces(customdata = county_stats, hovertemplate='<b>%{customdata[0]}</b> <br>'+ 'Forecasted COVID Test: %{customdata[5]}<extra></extra>' ) fig.update_layout(hovermode="x") # fig.show() fig.write_html(f"{getcwd()}/drive/My Drive/Colab Notebooks/COVID forecasting/Forecasted_COVID_TEST.html") ```
# Thermal Convection Regimes for an Internally Heated Mantle

This is an example of two modes of mantle convection, stagnant lid and isoviscous, both driven by internal heating.

**Relevant reading:**

- *Stagnant Lid Convection on Venus*, Solomatov and Moresi 1996 (http://onlinelibrary.wiley.com/doi/10.1029/95JE03361/full)
- *Geodynamics*, Turcotte and Schubert, 6-21 in 2nd edition
- *Mantle Convection in the Earth and Planets*, Schubert, Turcotte and Olson, Chapter 7

```
import underworld as uw
import numpy
import glucifer
import matplotlib.pyplot as plt
from IPython import display
import os

rank = uw.rank()
size = uw.nProcs()

uw.matplotlib_inline()
plt.ion()
```

Do we want to read a previous temperature field in? The answer is usually yes.

```
readTemperature = True
```

Do we want to save figures? If set to true, stagnant lid and isoviscous figures are saved separately in the current directory. Otherwise, you can look at plots in this notebook.

```
writefigures = False
```

In Underworld, the heat-source term is entered as $H_c = \frac{H}{c_p}$, where $H$ is the heat production in $W\ kg^{-1}$ and $c_p$ is the heat capacity in $J\ kg^{-1}\ K^{-1}$. $H_c$ therefore has units of $K\ s^{-1}$.

The Rayleigh number for internally heated convection can be written as:

$Ra = \frac{\rho_0 g \alpha H_c L^5}{\kappa^2 \eta}$

Temperature can be scaled as:

$T' = T \frac{\kappa}{H_c L^2}$

-----

Set the Rayleigh number

```
Ra = 1e6

# Choose Ra by varying alpha and setting other parameters to 1
H = 1.
diffusivity = 1.
rho0 = 1.
eta0 = 1.
alpha = Ra
```

The temperature difference between the surface and the interior of the convective cell ($\Delta T$), for internally heated convection, is well approximated as $\Delta T = \beta H Ra^{-\frac{1}{4}}$, where $\beta$ is found empirically (6-346, Geodynamics, Turcotte and Schubert 2007). This is used for benchmarking and for comparison.

```
predTemp = 2.45 * H * Ra**-0.25

if rank == 0:
    print("For Ra = %.2e, the temperature jump is %.2e" %(Ra,predTemp))
```

**As an example,** if we assume that $\kappa = 10^{-6}\ m^2s^{-1}$, $H = 10^{-11}\ W\ kg^{-1}$, $C_p = 1200\ J\ kg^{-1}\ K^{-1}$:

```
if rank == 0:
    print("dimensional interior temperature: %.2f K" %(predTemp / 1e-6 * 9e-12 / 1200. * (2900e3)**2.))
```

How does this compare to the expected value of $\sim 1700\ K$ for the Earth? This might give you an idea of the Earth's effective $Ra$ (though the thickness of the boundary layer is also important) and the degree to which an internally heated, isoviscous convection model approximates the Earth's heat loss.

---

**We'll decide on the style of convection here**

Setting the entire mantle to be isoviscous (`isoviscous = True`) results in a surface which can be recycled into the mantle interior. This approximates 'mobile lid' convection. Once surface heat loss and internal heat generation are in equilibrium, we would expect the internal temperature to be well approximated by the calculation of $\Delta T$ above.

Setting the mantle viscosity to be exponentially temperature dependent (`isoviscous = False`) results in 'stagnant lid' convection. In this mode, the surface and a thick 'lid' are stationary, while convection occurs below. Heat loss is much less efficient (relatively lower Nusselt number), so the interior temperature should be relatively higher for the same heat generation.
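For reference, the stagnant-lid branch below uses a Frank-Kamenetskii (exponential) viscosity law. Written out with symbols matching the code that follows (where `cEta` plays the role of $c$, `refTemp` is $T_{ref}$, and the prescribed contrast $\Delta\eta$ is set to $10^5$), one way to state it is:

$$\eta(T) = \eta_0 \exp\left[c\,(T_{ref} - T)\right], \qquad c = \frac{\ln(\Delta\eta)}{T_{ref}}$$

so that the viscosity at the cold surface ($T = 0$) is $\Delta\eta$ times the viscosity at the reference (interior) temperature.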
``` isoviscous = False ``` ---- *Set key parameters* ``` elementType = "Q1/dQ0" resX = 64 resY = 64 mesh = uw.mesh.FeMesh_Cartesian( elementType = (elementType), elementRes = (resX, resY), minCoord = (0., 0.), maxCoord = (1., 1.)) temperatureField = mesh.add_variable( nodeDofCount=1 ) temperatureDotField = mesh.add_variable( nodeDofCount=1 ) pressureField = mesh.subMesh.add_variable( nodeDofCount=1 ) velocityField = mesh.add_variable( nodeDofCount=2 ) HField = mesh.add_variable( nodeDofCount=1 ) HField.data[:] = H IWalls = mesh.specialSets["MinI_VertexSet"] + mesh.specialSets["MaxI_VertexSet"] JWalls = mesh.specialSets["MinJ_VertexSet"] + mesh.specialSets["MaxJ_VertexSet"] BottomWall = mesh.specialSets["MinJ_VertexSet"] TopWall = mesh.specialSets["MaxJ_VertexSet"] LeftWall = mesh.specialSets["MinI_VertexSet"] RightWall = mesh.specialSets["MaxI_VertexSet"] freeslipBC = uw.conditions.DirichletCondition( variable=velocityField, indexSetsPerDof=(IWalls,JWalls) ) # Top wall is set to constant temperature, the others are insulating tempBC = uw.conditions.DirichletCondition( variable=temperatureField, indexSetsPerDof=(TopWall) ) # Un-comment if you want to be really sure that the walls are insulating, though this seems to happen by default. # neumannBC = uw.conditions.NeumannCondition( fn_flux=0., variable=temperatureField, indexSetsPerDof=IWalls+BottomWall) mSwarm = uw.swarm.Swarm( mesh=mesh) nParticles = 12 layout = uw.swarm.layouts.GlobalSpaceFillerLayout( swarm=mSwarm, particlesPerCell=nParticles ) mSwarm.populate_using_layout( layout=layout ) tracerSwarm = uw.swarm.Swarm (mesh=mesh) tracerSwarm.add_particles_with_coordinates(numpy.array([(0.3,0.5)])) tracerTrackSwarm = uw.swarm.Swarm (mesh=mesh) advDiff = uw.systems.AdvectionDiffusion( temperatureField, temperatureDotField, velocityField, fn_diffusivity=diffusivity, fn_sourceTerm=HField, conditions=[tempBC])#,neumannBC]) advector = uw.systems.SwarmAdvector( swarm=mSwarm, velocityField=velocityField, order=2 ) traceradvector = uw.systems.SwarmAdvector( swarm=tracerSwarm, velocityField=velocityField, order=2 ) if isoviscous: mname = "isovisc" else: mname = "stag" if readTemperature: temperatureField.load("input/1_12_Internally_Heated_Convection/temperature_%s.h5" %mname, interpolate=True) else: CoordFn = uw.function.input() surfGradFn = 10.*(1. - (1.+0.0*uw.function.math.cos(CoordFn[0] * 3.14))* CoordFn[1]) maxTemp = 0.1 initialFn = uw.function.branching.conditional( [(surfGradFn < 0.,0.),( surfGradFn < maxTemp , surfGradFn),(True,maxTemp)]) temperatureField.data[:] = initialFn.evaluate(mesh) refTemp = uw.function.misc.constant(1.) maxTemp = numpy.max(temperatureField.data[:,0]) refTemp.value = maxTemp mIVar = mSwarm.add_variable( dataType="int", count=1) mIVar.data[:] = 0 MrhoFn = rho0 * (1. - alpha* ( temperatureField )) dicDensity = {0:MrhoFn} densityFn = uw.function.branching.map( fn_key = mIVar, mapping = dicDensity) figMaterial = glucifer.Figure( figsize=(800,400), title="Initial Temperature Field" ) figMaterial.append( glucifer.objects.Surface(mesh,temperatureField )) # figMaterial.show() ``` Set the maxmimum viscosity contrast between material at the maximum and mininum temperatures, which by default is five orders of magnitude. ``` surfEta = 1e5 cEta = numpy.log(surfEta) / refTemp if isoviscous: ArrFunction = 1. 
else: #Frank-Kamenetskii Temperature-Dependent Rheology ArrFunction = uw.function.math.exp(cEta *(refTemp-temperatureField)) dicViscosity = {0:ArrFunction} viscosityMapFn = uw.function.branching.map( fn_key = mIVar, mapping = dicViscosity) figMaterial = glucifer.Figure( figsize=(800,400), title="Initial Viscosity Distribution (Log)" ) figMaterial.append( glucifer.objects.Points(mSwarm,fn_colour = uw.function.math.log10(viscosityMapFn ))) if size == 1: figMaterial.show() figMaterial = glucifer.Figure( figsize=(800,400), title="Initial Density Distribution" ) figMaterial.append( glucifer.objects.Points(mSwarm,fn_colour = densityFn )) if size == 1: figMaterial.show() stokesPIC = uw.systems.Stokes(velocityField=velocityField, pressureField=pressureField, conditions=[freeslipBC,], fn_viscosity=1., fn_bodyforce=densityFn*(0.,-1.)) solver=uw.systems.Solver(stokesPIC) solver.solve() stokesPIC = uw.systems.Stokes(velocityField=velocityField, pressureField=pressureField, conditions=[freeslipBC,], fn_viscosity=viscosityMapFn, fn_bodyforce=densityFn*(0.,-1.)) solver=uw.systems.Solver(stokesPIC) surfaceHF = uw.utils.Integral( fn = temperatureField.fn_gradient[1], mesh = mesh, integrationType = "surface", surfaceIndexSet = TopWall) bottomHF = uw.utils.Integral( fn = temperatureField.fn_gradient[1], mesh = mesh, integrationType = "surface", surfaceIndexSet = BottomWall) leftHF = uw.utils.Integral( fn = temperatureField.fn_gradient[0], mesh = mesh, integrationType = "surface", surfaceIndexSet = LeftWall) rightHF = uw.utils.Integral( fn = temperatureField.fn_gradient[0], mesh = mesh, integrationType = "surface", surfaceIndexSet = RightWall) step = 0 maxsteps = 3 yielding = False if rank == 0: arrMeanTemp = numpy.zeros(maxsteps+1) arrSurfHF = numpy.zeros(maxsteps+1) arrOtherWallsHF = numpy.zeros(maxsteps+1) for step in range(maxsteps+1): solver.solve(nonLinearIterate=yielding) cFactor = 0.5 dt = numpy.min([cFactor * advDiff.get_max_dt(),cFactor * advector.get_max_dt()]) advDiff.integrate(dt) avTemperature = mesh.integrate(temperatureField)[0] if rank ==0: arrMeanTemp[step] = avTemperature traceradvector.integrate(dt) tracerTrackSwarm.add_particles_with_coordinates(tracerSwarm.particleCoordinates.data[:]) surfHF = -1. * surfaceHF.evaluate()[0] wallsHF = abs(bottomHF.evaluate()[0])+abs(leftHF.evaluate()[0])+abs(rightHF.evaluate()[0]) if rank == 0: arrSurfHF[step] = surfHF arrOtherWallsHF[step] = wallsHF refTemp.value = numpy.max(temperatureField.data[:]) if rank == 0: plt.plot(range(maxsteps),arrMeanTemp[:step]) plt.scatter(range(maxsteps),arrMeanTemp[:step]) plt.xlabel("Time Step") plt.ylabel("Average Temperature") if writefigures: plt.savefig("output/%s_temperaturetime.pdf" %mname, bbox_inches="tight") ``` **Now we can look at our model output** If the average temperature plotted above is still varying considerably, the system is not in a thermal steady-state and you should run more time-steps. ``` #Write to h5 file savedmesh = mesh.save("output/mesh_%s.h5" %mname) temperatureField.save("output/temperature_%s.h5" %mname, meshHandle=savedmesh) ``` We'll plot the heat-loss over time. If our system is in steady-state, the heat-loss should be equivalent to the heat-generation. We have set all walls other than the top to be insulating, so there should be negligible heat loss through these surfaces. 
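A quick numeric check of that balance can complement the plot below. This is a minimal sketch that reuses the `arrSurfHF`, `arrOtherWallsHF`, `H` and `step` variables from the time loop above, so it assumes that loop has already been run:

```
# At thermal steady-state the integrated surface heat loss should match the
# internal heat generation (ratio ~ 1), and the insulated walls should
# contribute essentially nothing.
if rank == 0:
    print("surface heat loss / heat generation: %.3f" % (arrSurfHF[step] / H))
    print("heat loss through insulated walls:   %.3e" % arrOtherWallsHF[step])
```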
``` if rank == 0: plt.clf() plt.plot(range(step),arrSurfHF[:step]/H,"--",label="Top Wall Heat Loss") plt.plot(range(step),arrOtherWallsHF[:step],"--",label="Other Walls Heat Loss") plt.plot(range(maxsteps),numpy.ones(maxsteps),label="Heat Generation") plt.ylim([0,max([1.1,max(arrSurfHF[:step]/H)])]) plt.title("Heat Generation and Loss Through Time") plt.xlabel("Time Step") plt.ylabel("Integrated Heat Loss or Gradient") plt.legend(loc='best') if writefigures: plt.savefig("output/%s_heatlossandgeneration.pdf" %mname) ``` Plot the temperature and velocity vector fields and not the contrasts between isoviscous and stagnant lid convection. Stagnant lid convection should be characterised by a thick thermal boundary layer, with negligible flow near the surface. Isoviscous convection should have a thin boundary layer and significant surface flow. ``` figMaterial = glucifer.Figure( figsize=(800,400), title="Temperature Field" ) figMaterial.append( glucifer.objects.Surface(mesh,temperatureField )) figMaterial.append( glucifer.objects.VectorArrows(mesh,1e3/Ra*velocityField)) figMaterial.append( glucifer.objects.Points(swarm=tracerTrackSwarm,colourBar=False,fn_size=5,colours="purple")) if size == 1: figMaterial.show() if writefigures: figMaterial.save_image("output/%s_TemperatureField" %mname) ``` Let's have a closer look at the surface velocity ``` n = 100 topWallX = numpy.linspace(mesh.minCoord[0],mesh.maxCoord[0],n) topWallVelocity = numpy.zeros(n) for i in range(n): topWallVelocity[i] = velocityField[0].evaluate_global((topWallX[i],mesh.maxCoord[1])) if rank == 0: plt.clf() plt.plot(topWallX,topWallVelocity) plt.title("Surface Velocity") plt.ylabel("Horizontal Velocity") plt.xlabel("Distance") if writefigures: plt.savefig("output/%s_surface_velocity.pdf" %mname) ``` Calculate the vertical temperature gradient at the surface of the model, to see where the highest heat-loss is. We're running these models to steady-state, so the integrated surface heat-loss should not depend on the convective regime. ``` n = 100 topWallX = numpy.linspace(mesh.minCoord[0],mesh.maxCoord[0],n) topWalldTdZ = numpy.zeros(n) for i in range(n): topWalldTdZ[i] = temperatureField.fn_gradient[1].evaluate_global((topWallX[i],mesh.maxCoord[1])) if rank == 0: plt.clf() plt.plot(topWallX,abs(topWalldTdZ)) plt.title("Top Wall Temperature Gradients") plt.ylabel("Temperature gradient") plt.xlabel("Distance") if writefigures: plt.savefig("output/%s_wall_gradients.pdf" %mname) ``` Let's calculate geotherms for three vertical cross-sections and compare to the predicted internal temperature for isoviscous convection. Because stagnant lid has a thick boundary layer, its heat-loss is significantly less efficient than for isoviscous convection. Running to steady-state should result in extremely high internal temperatures, which you can compare to the predicted isoviscous convection temperature (dashed). ``` if rank == 0: plt.clf() n = 100 arrY = numpy.linspace(0,1,n) for x in [0.,0.5,1.]: arrT = numpy.zeros(n) for i in range(n): arrT[i] = temperatureField.evaluate_global((x,arrY[i])) if rank == 0 : plt.plot(arrT,arrY,label="Temperature at x=%.1f" %x) if rank ==0: plt.title("Geotherm") plt.plot(numpy.ones(n)*predTemp, numpy.linspace(0,1,n),"--",c="black",label="Predicted " + r"$\Delta T$") plt.ylabel("Temperature") plt.xlabel("Depth") plt.legend(loc='best') if writefigures: plt.savefig("output/%s_geotherm.pdf" %mname) ``` **Things to do:** 1. Switch writefigures to true, to save figures for comparison. 
Run in ```isoviscous = True``` mode to generate the isoviscous convection plots, then again with ```isoviscous = False``` for the stagnant lid plots. Compare the surface velocities and geotherms.
2. Make a slight change to the Rayleigh number and see how many time-steps it takes to reach a thermal steady-state, for both stagnant lid and mobile lid (the scaling sketch below gives a first idea of how the interior temperature should respond). You might want to lower the resolution or run in parallel. What could this imply about cooling of the Earth during a stagnant lid regime? You might find "Energetics of mantle convection and the fate of fossil heat" by Korenaga (2003) interesting.
3. Lowering the maximum viscosity contrast or increasing the Rayleigh number should decrease the thickness of the stagnant lid, as described in *Stagnant Lid Convection on Venus*, cited at the start. See if you can reproduce this!
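As a first quantitative handle on exercises 2 and 3, the boundary-layer scaling quoted earlier in this notebook, $\Delta T \approx 2.45\, H\, Ra^{-1/4}$, already predicts how the interior temperature jump should shrink as the Rayleigh number grows. A minimal sketch in the same nondimensional units as above (the list of Rayleigh numbers is just an illustrative choice):

```
# Predicted interior temperature jump for internally heated convection,
# using the same empirical scaling as earlier in the notebook.
H = 1.0
for Ra in [1e5, 1e6, 1e7, 1e8]:
    predTemp = 2.45 * H * Ra ** -0.25
    print("Ra = %.0e -> predicted temperature jump = %.3e" % (Ra, predTemp))
```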
# Homework 3: Build a CNN for Image Recognition ### Name: Ravi Patel | CWID: 10432313 | Date: 3/26/2019 ## 1. Data Prep #### Load Data ``` from keras.datasets import cifar10 import numpy as np (x_train, y_train) , (x_test, y_test) = cifar10.load_data() #Examine the data shape print('Shape of x_train: ' + str(x_train.shape)) print('Shape of y_train: ' + str(y_train.shape)) print('Shape of x_test: ' + str(x_test.shape)) print('Shape of y_test: ' + str(y_test.shape)) ``` #### Create Method for One_Hot Encoding ``` def to_one_hot(y, num_class=10): """ transforms an n x 1 array to a n x 10 matrix """ result = np.zeros(shape=(y.shape[0],num_class)) for i in range(y.shape[0]): result[i][y[i]]=1 return result y_train_vec = to_one_hot(y_train) y_test_vec = to_one_hot(y_test) #print(y_train[0]) #print(y_train_vec[0]) # Randomly partition athe training set to training # and validation sets rand_indices = np.random.permutation(50000) train_indices = rand_indices[0:40000] valid_indices = rand_indices[40000:50000] x_val = x_train[valid_indices, :] y_val = y_train_vec[valid_indices, :] x_tr = x_train[train_indices, :] y_tr = y_train_vec[train_indices, :] ``` ## 2. Build a CNN and tune its hyper-params ``` from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization from keras.models import Sequential model = Sequential() model.add(Conv2D(32, (3,3), activation='relu', padding='same', input_shape=(32,32,3))) model.add(BatchNormalization()) model.add(Conv2D(32, (3,3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D((2,2))) model.add(Dropout(0.25)) model.add(Conv2D(64, (3,3), activation='relu', padding='same')) model.add(BatchNormalization()) model.add(Conv2D(64, (3,3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D((2,2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (3,3), activation='relu', padding='same')) model.add(BatchNormalization()) model.add(Conv2D(128, (3,3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D((2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512, activation='relu')) #model.add(Dropout(0.5)) model.add(Dense(10,activation='softmax')) model.summary() from keras import optimizers learning_rate = 1E-4 model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adamax(lr=learning_rate), metrics=['acc'] ) ``` ## 3. Training the model with partial training data ``` history = model.fit(x_tr, y_tr, batch_size=32, epochs=30, validation_data=(x_val, y_val)) import matplotlib.pyplot as plt %matplotlib inline acc = history.history['acc'] val_acc = history.history['val_acc'] epochs = range(len(acc)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'r', label='Validation acc') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend() plt.show() ``` ## 4. Training the model with the full training data ``` history = model.fit(x_train, y_train_vec, batch_size=32, epochs=30) ``` ## 5. Evaluating the model with the test dataset ``` scores = model.evaluate(x_test, y_test_vec) print("Loss: "+str(scores[0])) print("Accuracy: "+str(scores[1])) ```
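As a cross-check on the hand-written one-hot encoding above, Keras ships an equivalent utility, `keras.utils.to_categorical`. A minimal sketch, assuming the `y_train` and `y_train_vec` arrays created earlier in this notebook:

```
import numpy as np
from keras.utils import to_categorical

# to_categorical builds the same n x 10 indicator matrix as to_one_hot above.
y_train_keras = to_categorical(y_train.ravel(), num_classes=10)
print(np.allclose(y_train_vec, y_train_keras))  # expected: True
```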
``` import numpy as np import numpy.random as rand from numpy.testing import assert_allclose, assert_equal import pandas as pd from scipy.stats import chi2_contingency import sys import os # let us import local app packages PACKAGE_PARENT = '../..' sys.path.append(os.path.normpath(PACKAGE_PARENT)) result_dir = "/Users/gjlondon/programming/open_source/statsmodels/statsmodels/stats/tests/results/" %load_ext autoreload %autoreload 2 %load_ext rpy2.ipython from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" %config InlineBackend.figure_format = 'retina' ``` ## A basic table: ``` age = np.random.choice(['18-36','37-54','55+'], size = 2000, p = [0.3,0.4,0.3]); favorite_show = np.random.choice(['NCIS','House of Cards','Westworld'], size = 2000, p = [0.2,0.4,0.4]) survey_results = pd.DataFrame({"age": age, "favorite_show": favorite_show}) survey_results.index.name = "respondent_id" survey_results.head(10) %%R -i survey_results table(survey_results) from statsmodels.stats.contingency_tables import Table table = Table.from_data(survey_results) print(table) independence_result = table.test_nominal_association() print(independence_result) %%R -i survey_results chisq.test(table(survey_results)) ``` ## A table with a relationship: ``` age = np.random.choice(['less than 18','19-36','37+'], size = 2000, p = [0.3,0.4,0.3]); survey_results = pd.DataFrame({"age": age}) survey_results.index.name = "respondent_id" def weighted_choice(age): # weight tuples: (snapchat, instagram, facebook) weights = {'less than 18': (.5, .4, .1), '19-36': (.3, .3, .4), '37+': (.1, .2, .7)} choices = ("snapchat", "instagram", "facebook") favorite_network = np.random.choice(choices, p=weights[age]) return favorite_network favorite_social_network = survey_results.age.apply(weighted_choice) survey_results['favorite_social_network'] = favorite_social_network survey_results.head(10) from statsmodels.stats.contingency_tables import Table table = Table.from_data(survey_results) print(table) independence_result = table.test_nominal_association() print(independence_result) %%R -i survey_results chisq.test(table(survey_results)) ``` ## A table with multiple response factors ``` import statsmodels.api as sm from statsmodels.datasets import presidential2016 data = sm.datasets.presidential2016.load_pandas() presidential_data = data.data presidential_data.head() narrow_row_factor = rows_factor.cast_wide_to_narrow() srcv_presidential = pd.concat([narrow_row_factor.as_dataframe().loc[:, 'variable'], columns_factor.as_dataframe()], axis=1) srcv_presidential.head() ``` ### Single response versus multiple response ``` from statsmodels.stats.contingency_tables import Factor, MRCVTable rows_factor = Factor(data.data.iloc[:, :6], data.data.columns[:6], "expected_choice", orientation="wide") columns_factor = Factor(data.data.iloc[:, 6:11], data.data.columns[6:11], "believe_true", orientation="wide") multiple_response_table = MRCVTable([rows_factor,], [columns_factor]) multiple_response_table ``` ### MMI Item Response Table ``` srcv_item_response_table_python = multiple_response_table._build_item_response_table_for_MMI(rows_factor, columns_factor) srcv_item_response_table_python srcv_item_response_table_python.iloc[:, (0,1)].sum().sum() result_dir = "/Users/gjlondon/programming/open_source/statsmodels/statsmodels/stats/tests/results/" fpath = os.path.join(result_dir, "srcv_r_item_response_table_result.csv") %R -i fpath %%R -i srcv_presidential library('MRCV') srcv_item_response_table_r <- 
item.response.table(srcv_presidential, I=1, J=5) srcv_item_response_table_r_dataframe = as.data.frame.matrix(srcv_item_response_table_r) write.table(srcv_item_response_table_r, file = fpath, sep=",") ## R is apparently bad at writing out tables with nested headers so we just get "term", "term", "term" colnames(srcv_item_response_table_r) result_dir = "/Users/gjlondon/programming/open_source/statsmodels/statsmodels/stats/tests/results/" fpath = os.path.join(result_dir, "srcv_r_item_response_table_result.csv") srcv_item_response_table_r = pd.DataFrame.from_csv(fpath) srcv_item_response_table_r for i in range(0, len(columns_factor.labels)*2, 2): c = columns_factor.labels[i // 2] r_left_offset = i r_right_offset = i + 2 py_group = srcv_item_response_table_python.loc[:, c] r_group = srcv_item_response_table_r.iloc[:, r_left_offset:r_right_offset] assert_allclose(py_group.values, r_group) ``` ## MMI Full Chi-square tables ``` python_result = multiple_response_table._calculate_pairwise_chi2s_for_MMI_item_response_table(rows_factor, columns_factor) python_result %%R -i srcv_presidential library('MRCV') r_srcv_chis <- MI.stat(srcv_presidential, I=1, J=5) %R -o r_srcv_chis r_results = pd.Series({k:v for k, v in zip(r_srcv_chis[1].names[1], r_srcv_chis[1])}) r_result_reordered = r_results.reindex(python_result.index, ) assert_allclose(python_result, r_result_reordered) result_dir = "/Users/gjlondon/programming/open_source/statsmodels/statsmodels/stats/tests/results/" fpath = os.path.join(result_dir, "srcv_r_all_chis_result.csv") r_result_reordered.to_csv(fpath) ``` ### MMI bonferroni test ``` bonferroni_test = multiple_response_table._test_for_marginal_mutual_independence_using_bonferroni_correction results = bonferroni_test(rows_factor, columns_factor) table_p_value_bonferroni_corrected, pairwise_bonferroni_corrected_p_values = results print("Overall table p value: {}\n\n".format(table_p_value_bonferroni_corrected)) print("Pairwise p values (likelihood of independence between single select variable and specific multi-select option):") pairwise_bonferroni_corrected_p_values result_dir = "/Users/gjlondon/programming/open_source/statsmodels/statsmodels/stats/tests/results/" fpath = os.path.join(result_dir, "srcv_r_bonferroni.csv") %R -i fpath %%R res <- MI.test(srcv_presidential, I=1, J=5, type="bon") res$bon$X.sq.S.ij.p.bon write.csv(res$bon, file=fpath) r_result = pd.DataFrame.from_csv(fpath) table_p_value_r = r_result["p.value.bon"] cell_p_values_r = r_result.iloc[:, 1:] reshaped_python_values = pairwise_bonferroni_corrected_p_values.values.reshape(5,1) assert_allclose(reshaped_python_values, cell_p_values_r.T) assert_allclose(table_p_value_r, table_p_value_bonferroni_corrected) ``` ### MMI Rao Scott 2 Test ``` rao_scott_test = multiple_response_table._test_for_marginal_mutual_independence_using_rao_scott_2 table_p_value_rao_scott_corrected = rao_scott_test(rows_factor, columns_factor) print("Overall table p value: {}\n\n".format(table_p_value_rao_scott_corrected)) result_dir = "/Users/gjlondon/programming/open_source/statsmodels/statsmodels/stats/tests/results/" fpath = os.path.join(result_dir, "srcv_r_rao_scott.csv") %R -i fpath %%R res <- MI.test(srcv_presidential, I=1, J=5, type="rs2") res$rs2 write.csv(res$rs2, file=fpath) res$rs2 r_result = pd.DataFrame.from_csv(fpath) table_p_value_r = r_result["p.value.rs2"] assert_allclose(table_p_value_r, table_p_value_rao_scott_corrected) ``` ## Multiple response versus multiple response ### Item response table ``` columns_factor = 
Factor(data.data.iloc[:, 11:], presidential_data.columns[11:], "reasons_undecided", orientation="wide") rows_factor = Factor(data.data.iloc[:, 6:11], presidential_data.columns[6:11], "believe_true", orientation="wide") multiple_response_table = MRCVTable([rows_factor, ], [columns_factor]) rows_factor.data.shape mrcv_presidential = presidential_data.iloc[:, 6:] mrcv_presidential.head() result_dir = "/Users/gjlondon/programming/open_source/statsmodels/statsmodels/stats/tests/results/" fpath = os.path.join(result_dir, "spmi_r_item_response_table_result.csv") %R -i fpath %%R -i mrcv_presidential library('MRCV') spmi_item_response_table_r <- item.response.table(mrcv_presidential, I=5, J=5) spmi_item_response_table_r_dataframe = as.data.frame.matrix(spmi_item_response_table_r) write.table(spmi_item_response_table_r, file = fpath, sep=",") ## R is apparently bad at writing out tables with nested headers so we just get "term", "term", "term" colnames(spmi_item_response_table_r) spmi_item_response_table_r_dataframe spmi_item_response_table_r = pd.DataFrame.from_csv(fpath) spmi_item_response_table_r spmi_item_response_table_python = multiple_response_table._build_item_response_table_for_SPMI(rows_factor, columns_factor) spmi_item_response_table_python assert_allclose(spmi_item_response_table_r.values, spmi_item_response_table_python.values) ``` ### Pairwise Chi-squared ``` spmi_pairwise_chis_python = multiple_response_table._calculate_pairwise_chi2s_for_SPMI_item_response_table(rows_factor, columns_factor) spmi_pairwise_chis_python ``` Let's compare this result versus the p value we would get by applying a traditional chi-squared test: ``` fpath = os.path.join(result_dir, "spmi_r_pairwise_chis_result.csv") %R -i fpath %%R -i mrcv_presidential library('MRCV') spmi_pairwise_chis_r <- MI.stat(mrcv_presidential, I=5, J=5) spmi_pairwise_chis_r$X.sq.S.ij spmi_pairwise_chis_r_dataframe <- as.data.frame.matrix(spmi_pairwise_chis_r$X.sq.S.ij) write.table(spmi_pairwise_chis_r_dataframe, file = fpath, sep=",") ## R is apparently bad at writing out tables with nested headers so we just get "term", "term", "term" colnames(spmi_pairwise_chis_r_dataframe) spmi_pairwise_chis_r_dataframe spmi_pairwise_chis_r = pd.DataFrame.from_csv(fpath) spmi_pairwise_chis_r assert_allclose(spmi_pairwise_chis_r.values.astype(float), spmi_pairwise_chis_python.values.astype(float)) ``` ### SPMI bonferroni ``` bonferroni_test = multiple_response_table._test_for_single_pairwise_mutual_independence_using_bonferroni table_p_value_bonferroni_corrected, cellwise_p_bonferroni_python = bonferroni_test(rows_factor, columns_factor) print("Overall table p value: {}\n\n".format(table_p_value_bonferroni_corrected)) print("Cellwise p value: {}\n\n".format(cellwise_p_bonferroni_python)) fpath = os.path.join(result_dir, "spmi_r_bonferroni.csv") %R -i fpath %%R -i mrcv_presidential library('MRCV') res <- MI.test(mrcv_presidential, I=5, J=5, type="bon") res$bon$X.sq.S.ij.p.bon write.csv(res$bon, file=fpath) spmi_bonferroni_r = pd.DataFrame.from_csv(fpath) spmi_bonferroni_r table_p_value_r = spmi_bonferroni_r["p.value.bon"] cell_p_values_r = spmi_bonferroni_r.iloc[:, 1:] assert_allclose(cellwise_p_bonferroni_python, cell_p_values_r) assert_allclose(table_p_value_r, table_p_value_bonferroni_corrected) ``` ### SPMI Rao Scott 2 ``` rao_scott_2_test = multiple_response_table._test_for_single_pairwise_mutual_independence_using_rao_scott_2 table_p_value = rao_scott_2_test(rows_factor, columns_factor) fpath = os.path.join(result_dir, "spmi_r_rao_scott.csv") 
%R -i fpath

%%R -i mrcv_presidential
res <- MI.test(mrcv_presidential, I=5, J=5, type="rs2")
res$rs2
write.csv(res$rs2, file=fpath)
res$rs2

table_p_value

r_result = pd.DataFrame.from_csv(fpath)
table_p_value_r = r_result["p.value.rs2"]

assert_allclose(table_p_value_r, table_p_value)
```

Both the Bonferroni and the Rao-Scott methods require us to make some assumptions about the distribution of the deviations we observe in our response table. If we are not comfortable making those assumptions, we can instead use a nonparametric method: run a bootstrap simulation to approximate the distribution of deviations. The bootstrap is more computationally intensive but lets us estimate the p value under fewer assumptions (an illustrative resampling sketch is given at the end of this notebook).

## Degenerate cases

```
import contingency_tables as ctab

a = np.zeros((1000, 2))
b = np.zeros((1000, 2))
labels = ["Yes", "No"]
labels_b = ["Yes.b", "No.b"]
mrcv_1 = ctab.Factor.from_array(a, labels, "alive", orientation="wide", multiple_response=True)
mrcv_2 = ctab.Factor.from_array(b, labels_b, "cool", orientation="wide", multiple_response=True)

zero_zero = pd.concat([mrcv_1.data, mrcv_2.data], axis=1)
zero_zero.head()

%%R -i zero_zero
library('MRCV')
res <- MI.test(zero_zero, I=2, J=2, type="bon")
res$bon$X.sq.S.ij.p.bon

zero_zero.min()
```
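To make the bootstrap idea mentioned above concrete, here is an illustrative, self-contained resampling sketch for a single pair of binary items. It is not the MRCV package's bootstrap procedure, nor the one used by `MRCVTable`; it only shows the general pattern of approximating the null distribution of a chi-square statistic by resampling. The item names and sample sizes are made up.

```
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency

def resampling_p_value(x, y, n_resamples=2000, seed=0):
    """Approximate a p value for independence of two 0/1 item columns by
    permuting one column and recomputing the Pearson chi-square statistic."""
    rng = np.random.RandomState(seed)
    observed = chi2_contingency(pd.crosstab(x, y), correction=False)[0]
    null_stats = np.empty(n_resamples)
    for b in range(n_resamples):
        null_stats[b] = chi2_contingency(pd.crosstab(x, rng.permutation(y)),
                                         correction=False)[0]
    return float(np.mean(null_stats >= observed))

# Two simulated binary items (stand-ins for one multiple-response option each):
rng = np.random.RandomState(1)
item_a = rng.binomial(1, 0.4, size=500)
item_b = rng.binomial(1, 0.6, size=500)
print(resampling_p_value(item_a, item_b))
```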
```
from IPython.display import Image
from demo_utils.demo0 import Demo0
from demo_utils.demo5 import Demo5
from demo_utils.temporal_data import *
import importlib
import warnings
warnings.filterwarnings('ignore')
```

Previously I had come to the conclusion that applying PCA to a dataset gave the models better results.

```
Image('demo_previa_pca_dt.png')
```

The previous image uses the 'digits' dataset (directly as provided by the sklearn library).

It suggests that using PCA is better than not using it, and that the best option is to do the sampling first and the PCA afterwards.

Well, now I do not get the same results.

```
Demo0().non_interactive(**data_d0)
```

Note that the first image looks at the score, while the second looks at the error.

To begin with, the plain DT is now better than the DT with PCA. That is different from what the first image shows.

Also, there does not seem to be a real difference between using PCA and not using it when sampling is applied. Nystroem seems to benefit a little in some cases, but the difference is not large.

The only difference I can see between the two runs is that in the first one I use the toy dataset provided by scikit-learn and apply the normalization they used (divide by 16 and subtract the mean), while in the second I use my own subset of the digits dataset from UCI and apply standardization (subtract the mean and divide by the variance).

It remains for me to work through these differences and see whether they are what is causing these discrepancies.

I wanted to check again whether it is better to do the sampling first and then the PCA, so I made a demo trying both orderings. The results are these:

```
Demo5().non_interactive(**data_d1_1)

Demo5().non_interactive(**data_d1_2)
```

It can be seen that with RBF there is no real difference when the order of sampling and PCA is swapped, while with Nystroem there is a small difference. It seems better to do the sampling first and the PCA afterwards. (A self-contained sketch of the two orderings, independent of the demo_utils helpers, is given at the end of this notebook.)

### Using digits

```
Demo0().non_interactive(**data_d2_1)
```

What works best here is Logit RBF with a Grey Box, with results very similar to using PCA. The remaining combinations are fairly similar. Even so, the best option is still a plain classic Logit.

Is this what we were expecting?

```
Demo0().non_interactive(**data_d2_2)
```

### Using Segment

```
Demo0().non_interactive(**data_d2_3)

Demo0().non_interactive(**data_d2_4)
```
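To make the sampling-then-PCA versus PCA-then-sampling comparison reproducible outside the demo_utils helpers, here is a minimal, self-contained sketch using scikit-learn pipelines on its built-in digits dataset. The feature counts and model settings are arbitrary illustrative choices, not the ones used by Demo0/Demo5 above.

```
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = load_digits(return_X_y=True)

# Sampling (Nystroem features) first, PCA afterwards...
sampling_then_pca = make_pipeline(
    StandardScaler(),
    Nystroem(n_components=300, random_state=0),
    PCA(n_components=30),
    LogisticRegression(max_iter=1000),
)

# ...versus PCA first, sampling afterwards.
pca_then_sampling = make_pipeline(
    StandardScaler(),
    PCA(n_components=30),
    Nystroem(n_components=300, random_state=0),
    LogisticRegression(max_iter=1000),
)

for name, model in [("sampling -> PCA", sampling_then_pca),
                    ("PCA -> sampling", pca_then_sampling)]:
    score = cross_val_score(model, X, y, cv=5).mean()
    print("%s: mean CV accuracy %.3f" % (name, score))
```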
# In the Beginning was the Command Line Author: Matt Burton # Getting to the Command line ### On OSX ![OSX Terminal Location](terminal-location.png) ### On Windows (using Anaconda) ![Anaconda Terminal Location](anaconda-command-line.png) # Moving around ## The *directory tree* ![The Directory Tree](dir-tree.png) ### Except *in your head* ![terminal](terminal.png) # Four commands to move around a (unix) command line ### `pwd` - To print working directory (OSX/Linux only) pilemian:2014-summer-camp mcburton$ pwd /Users/mcburton/Dropbox/si/bigdatabootcamp/2014-summer-camp ### `ls` (OSX) or `dir` (Windows) - To list directory contents ``` pilemian:2014-summer-camp mcburton$ ls LICENSE.md _config.yml _layouts css index.html setup README.md _includes _site img js slides pilemian:2014-summer-camp mcburton$ ``` #### Use the `-l` parameter to do a long listing ``` pilemian:2014-summer-camp mcburton$ ls -l total 40 -rw-r--r-- 1 mcburton staff 2838 Apr 24 15:14 LICENSE.md -rw-r--r-- 1 mcburton staff 248 Apr 24 22:20 README.md -rw-r--r-- 1 mcburton staff 321 Apr 24 16:06 _config.yml drwxr-xr-x 7 mcburton staff 238 Apr 24 15:14 _includes drwxr-xr-x 4 mcburton staff 136 Apr 24 16:57 _layouts drwxr-xr-x 11 mcburton staff 374 May 19 09:22 _site drwxr-xr-x 6 mcburton staff 204 Apr 24 17:20 css drwxr-xr-x 9 mcburton staff 306 May 9 12:10 img -rw-r--r-- 1 mcburton staff 6717 May 18 22:00 index.html drwxr-xr-x 4 mcburton staff 136 Apr 24 15:14 js drwxr-xr-x 16 mcburton staff 544 May 18 22:17 setup drwxr-xr-x 5 mcburton staff 170 May 18 21:48 slides pilemian:2014-summer-camp mcburton$ ``` #### The `dir` command on Windows does a long listing automatically ``` C:\Users\mcburton>dir Volume in drive C has no label. Volume Serial Number is 2E3D-5F00 Directory of C:\Users\mcburton 05/15/2014 01:24 AM <DIR> . 05/15/2014 01:24 AM <DIR> .. 05/14/2014 02:17 PM <DIR> .continuum 05/14/2014 06:59 PM <DIR> .ipython 05/14/2014 06:56 PM <DIR> .matplotlib 05/14/2014 07:42 PM <DIR> Anaconda 05/14/2014 01:56 PM <DIR> Contacts 05/15/2014 12:31 AM <DIR> Desktop 05/15/2014 01:24 AM 0 dir 05/14/2014 02:17 PM <DIR> Documents 05/15/2014 12:31 AM <DIR> Downloads 05/14/2014 01:56 PM <DIR> Favorites 05/14/2014 01:56 PM <DIR> Links 05/14/2014 01:56 PM <DIR> Music 05/14/2014 01:56 PM <DIR> Pictures 05/15/2014 01:24 AM 0 Saved 05/14/2014 01:56 PM <DIR> Saved Games 05/14/2014 01:56 PM <DIR> Searches 05/14/2014 01:56 PM <DIR> Videos 2 File(s) 0 bytes 17 Dir(s) 12,957,863,936 bytes free ``` ### `cd <filepath>` - To change directory ``` pilemian:2014-summer-camp mcburton$ pwd /Users/mcburton/Dropbox/si/bigdatabootcamp/2014-summer-camp ... pilemian:2014-summer-camp mcburton$ cd setup pilemian:setup mcburton$ ... 
pilemian:setup mcburton$ pwd /Users/mcburton/Dropbox/si/bigdatabootcamp/2014-summer-camp/setup ``` ### `mv <existing filepath> <desired filepath>` - To move files (OSX) ### `move <existing filepath> <desired filepath>` - To move files (Windows) ## File Paths ### `/foo/bar` - the *bar* is a subdirectory of *foo* `/Users/mcburton/Dropbox/si/bigdatabootcamp/2014-summer-camp/setup` ### "`..`" - means up/back one directory ``` pilemian:setup mcburton$ pwd /Users/mcburton/Dropbox/si/bigdatabootcamp/2014-summer-camp/setup pilemian:setup mcburton$ ls -l total 3280 -rw-r--r--@ 1 mcburton staff 218212 May 16 12:31 OSX-anaconda-launcher.png -rw-r--r--@ 1 mcburton staff 175418 May 18 22:17 anaconda-command-line.png -rw-r--r--@ 1 mcburton staff 113444 May 16 12:37 anaconda-launcher-launch.png -rw-r--r--@ 1 mcburton staff 114809 May 16 12:38 anaconda-launcher.png -rw-r--r--@ 1 mcburton staff 77454 May 18 22:07 dir-tree.png -rw-r--r--@ 1 mcburton staff 244584 May 16 14:17 sqlite-manager-install.png -rw-r--r--@ 1 mcburton staff 79532 May 16 14:25 sqlite-manager.png -rw-r--r--@ 1 mcburton staff 253725 May 18 22:15 terminal-location.png -rw-r--r--@ 1 mcburton staff 44467 May 18 22:09 terminal.png -rw-r--r-- 1 mcburton staff 8531 May 19 09:40 the-command-line.ipynb -rw-r--r-- 1 mcburton staff 6787 May 18 13:13 the-setup.ipynb -rw-r--r--@ 1 mcburton staff 277489 May 16 12:33 windows-8-anaconda-launcher.png -rw-r--r--@ 1 mcburton staff 41325 May 19 09:33 windows-commands.png pilemian:setup mcburton$ cd .. pilemian:2014-summer-camp mcburton$ pwd /Users/mcburton/Dropbox/si/bigdatabootcamp/2014-summer-camp pilemian:2014-summer-camp mcburton$ ls -l total 40 -rw-r--r-- 1 mcburton staff 2838 Apr 24 15:14 LICENSE.md -rw-r--r-- 1 mcburton staff 248 Apr 24 22:20 README.md -rw-r--r-- 1 mcburton staff 321 Apr 24 16:06 _config.yml drwxr-xr-x 7 mcburton staff 238 Apr 24 15:14 _includes drwxr-xr-x 4 mcburton staff 136 Apr 24 16:57 _layouts drwxr-xr-x 11 mcburton staff 374 May 19 09:40 _site drwxr-xr-x 6 mcburton staff 204 Apr 24 17:20 css drwxr-xr-x 9 mcburton staff 306 May 9 12:10 img -rw-r--r-- 1 mcburton staff 6717 May 18 22:00 index.html drwxr-xr-x 4 mcburton staff 136 Apr 24 15:14 js drwxr-xr-x 17 mcburton staff 578 May 19 09:33 setup drwxr-xr-x 5 mcburton staff 170 May 18 21:48 slides ``` # Running the Jupyter Notebook mcburton$ jupyter notebook This spits out a bunch of gobbly gook into the terminal, but also launches a web browser. ![Ipython Notebook Tree](ipython-notebook-tree.png )
# CH 4. Arrays and Vectorized Computation ## 1. Import Package ``` import numpy as np import matplotlib.pyplot as plt from random import normalvariate import random my_list=list(range(1000000)) my_arr=np.arange(1000000) %time for _ in range(10) : my_list*2 %time for _ in range(10) : my_arr*2 ``` ## 2. Creating ndarray ``` arr=np.random.randn(2,3) arr print(arr*10) print(arr+arr) print("shape of data :",arr.shape) print("data type of data :",arr.dtype) print("type of data :" ,type(arr)) data1=[6,7.5,8,0,1] arr1=np.array(data1) arr1 data2=[[1,2,3,4],[5,6,7,8]] arr2=np.array(data2) arr2 print("arr1 :",arr1.ndim) print("arr2 :",arr2.ndim) print("arr1 :",arr1.dtype) print("arr2 :",arr2.dtype) ``` ## 3. dtypes for ndarray ``` arr1=np.array([1,2,3],dtype=np.float64) arr2=np.array([1,2,3],dtype=np.int32) print(arr1.dtype) print(arr2.dtype) arr=np.array([1,2,3,4,5]) arr.dtype float_arr=arr.astype(np.float64) float_arr.dtype arr=np.array([3.7,-1.2,-2.6,0.5,12.9,10.1]) arr.astype(np.int32) numeric_strings=np.array(['1.25','-9.6','42'],dtype=np.string_) numeric_strings.astype(float) ``` ## 4. Arithmetic with numpy arrays ``` arr=np.array([[1,2,3],[4,5,6]]) arr print(arr*arr) print(arr-arr) print(1/arr) print(arr**0.5) arr2=np.array([[0,4,1],[7,2,12]]) arr2 print(arr2>arr) ``` ## 5. Basic indexing and slicing ``` # 1 dimensional arr=np.arange(10) arr print(arr[5]) print(arr[5:8]) arr[5:8]=12 arr arr_slice=arr[5:8] arr_slice arr_slice[1]=12345 arr arr_slice[:]=64 arr # 2 dimensional arr2d=np.array([[1,2,3],[4,5,6],[7,8,9]]) arr2d print(arr2d[0]) print(arr2d[0][2]) print(arr2d[0,2]) # 3 dimensional arr3d=np.array([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]]) arr3d print(arr3d[0]) print(arr3d[0][1]) print(arr3d[0][1][0]) # selection with slicing arr print(arr[1:6]) arr2d print(arr2d[:2]) print(arr2d[:2,1:]) print(arr2d[1,:2]) ``` ## 6. Boolean Indexing ``` names=np.array(['Bob','Joe','Will','Bob','Will','Joe','Joe']) data=np.random.randn(7,4) print(names) print(data) print("[1]", names=='Bob') print("[2]", data[names=='Bob']) print("[3]", data[names=='Bob',2:]) print("[4]", data[names=='Bob',3]) print("[5]", names!='Bob') print("[6]", data[~(names=='Bob')]) cond = names=='Bob' data[~cond] mask = (names=='Bob') | (names=='Will') mask data[mask] data[data<0]=0 data data[names !='Joe']=7 data ``` ## 7. Fancy Indexing ``` arr = np.empty((8,4)) arr for i in range(8): arr[i]=i arr print(arr[[4,3,0,6]]) print(arr[[-3,-5,-7]]) arr = np.arange(32).reshape((8,4)) arr print(arr[[1,5,7,2],[0,3,1,2]]) print(arr[[1,5,7,2]][:,[0,3,1,2]]) arr = np.arange(15).reshape((3,5)) print(arr) print(arr.T) arr=np.random.randn(6,3) print(arr) print(np.dot(arr.T,arr)) ``` ## 8. Universal Function : Fast element-wise array function ``` arr=np.arange(10) print(arr) print(np.sqrt(arr)) print(np.exp(arr)) x=np.random.randn(8) y=np.random.randn(8) print(x) print(y) print(np.maximum(x,y)) print(np.minimum(x,y)) ``` ## 9. Array-oriented programming with arrays ``` points = np.arange(-5,5,0.01) xs,ys=np.meshgrid(points,points) print(xs) print(ys) z = np.sqrt(xs**2+ys**2) z plt.imshow(z, cmap=plt.cm.gray);plt.colorbar() plt.title("Image plot of $\sqrt{x^2+y^2}$ for a gird of values") plt.show() ``` ## 10. 
Expressing Conditional Logic as array operations ``` xarr = np.array([1.1,1.2,1.3,1.4,1.5]) yarr = np.array([2.1,2.2,2.3,2.4,2.5]) cond = np.array([True,False,True,True,False]) result=[(x if c else y) for x,y,c in zip(xarr,yarr,cond)] result result_=np.where(cond,xarr,yarr) result_ arr=np.random.randn(4,4) arr print(arr>0) print(np.where(arr>0,2,-2)) print(np.where(arr>0,2,arr)) ``` ## 11. Mathematical and Statistical Methods ``` arr=np.random.randn(5,4) print(arr) print(arr.mean()) print(np.mean(arr)) print(arr.sum()) print(arr.mean(axis=1)) print(arr.sum(axis=1)) arr = np.array([0,1,2,3,4,5,6,7]) print(arr) print(arr.cumsum()) arr = np.array([[0,1,2],[3,4,5],[6,7,8]]) print(arr) print(arr.cumprod(0)) print(arr.cumprod(axis=0)) ``` ## 12. Method for Boolean Arrays ``` arr = np.random.randn(100) print(arr) print((arr>0).sum()) bools = np.array([False,False,True,False]) print(bools.any()) print(bools.all()) ``` ## 13. Sorting ``` arr = np.random.randn(6) arr arr.sort() arr arr=np.random.randn(5,3) arr arr.sort(1) arr large_arr = np.random.randn(1000) large_arr.sort() large_arr[int(0.05*len(large_arr))] ``` ## 14. Unique and other Set logics ``` names=np.array(['Bob','Joe','Will','Bob','Will','Joe','Joe']) np.unique(names) ints = np.array([3,3,3,2,2,1,1,4,4]) np.unique(ints) sorted(set(names)) values = np.array([6,0,0,3,2,5,6]) np.in1d(values,[2,3,6]) ``` ## 15. File Input and Output with Arrays ``` arr = np.arange(10) np.save('some_array',arr) np.load('some_array.npy') np.savez('array_archive.npz',a=arr,b=arr) arch=np.load('array_archive.npz') arch['b'] np.savez_compressed('arrays_compressed.npz',a=arr,b=arr) ``` ## 16. Linear Algebra ``` x = np.array([[1,2,3],[4,5,6]]) y = np.array([[6.,23.],[-1,7],[8,9]]) print(x) print(y) x.dot(y) # np.dot(x,y) np.dot(x,np.ones(3)) #x@np.ones(3) ``` ## 17. Random ``` samples=np.random.normal(size=(4,4)) samples N=1000000 %timeit samples =[normalvariate(0,1) for _ in range(N)] %timeit np.random.normal(size=N) ``` ## 18. Example ``` position = 0 walk = [position] steps = 1000 for i in range(steps) : step = 1 if random.randint(0,1) else -1 position += step walk.append(position) plt.plot(walk[:100]) plt.show() ```
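Since this chapter is about vectorized computation, it is worth noting that the random-walk loop above can be replaced with pure array operations. Here is a short sketch of that idea (an addition for illustration, following the same coin-flip logic; the variable names are my own):

```
import numpy as np
import matplotlib.pyplot as plt

nsteps = 1000
draws = np.random.randint(0, 2, size=nsteps)   # 1000 coin flips (0 or 1)
steps = np.where(draws > 0, 1, -1)             # map {0, 1} -> {-1, +1}
walk_vec = steps.cumsum()                      # running sum = position over time

print(walk_vec.min(), walk_vec.max())          # extreme positions reached
plt.plot(walk_vec[:100])
plt.show()
```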
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize, MinMaxScaler

header = ['userid', 'itemid', 'rating', 'timestamp']
df = pd.read_csv('Data/HugeRating.data', sep='\t', names=header)
df.head(5)

df.describe()

# Rescale the ratings to the [0, 1] interval
ratings = df['rating'].values.reshape(-1, 1)
min_max_scaler = MinMaxScaler()
df['rating'] = min_max_scaler.fit_transform(ratings)
df.head(5)

n_users = df.userid.unique().shape[0]
n_items = df.itemid.unique().shape[0]
print ('Number of users = ' + str(n_users) + ' | Number of movies = ' + str(n_items))

from sklearn import model_selection as cv
train_data, test_data = cv.train_test_split(df, test_size=0.25)

#Create two user-item matrices, one for training and another for testing
train_data_matrix = np.zeros((n_users, n_items))
for line in train_data.itertuples():
    train_data_matrix[line[1] - 1, line[2] - 1] = line[3]

test_data_matrix = np.zeros((n_users, n_items))
for line in test_data.itertuples():
    test_data_matrix[line[1]-1, line[2]-1] = line[3]

train_data_matrix[0]

from sklearn.metrics.pairwise import pairwise_distances
user_similarity = pairwise_distances(train_data_matrix, metric='cosine')
item_similarity = pairwise_distances(train_data_matrix.T, metric='cosine')

def predict(ratings, similarity, type='user'):
    if type == 'user':
        mean_user_rating = ratings.mean(axis=1)
        #You use np.newaxis so that mean_user_rating has same format as ratings
        ratings_diff = (ratings - mean_user_rating[:, np.newaxis])
        pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T
    elif type == 'item':
        pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
    return pred

item_prediction = predict(train_data_matrix, item_similarity, type='item')
user_prediction = predict(train_data_matrix, user_similarity, type='user')

from sklearn.metrics import mean_squared_error
from math import sqrt

def rmse(prediction, ground_truth):
    prediction = prediction[ground_truth.nonzero()].flatten()
    ground_truth = ground_truth[ground_truth.nonzero()].flatten()
    return sqrt(mean_squared_error(prediction, ground_truth))

print ('User-based CF RMSE: ' + str(rmse(user_prediction, test_data_matrix)))
print ('Item-based CF RMSE: ' + str(rmse(item_prediction, test_data_matrix)))

def flatten(prediction, ground_truth):
    prediction = prediction[ground_truth.nonzero()].flatten()
    ground_truth = ground_truth[ground_truth.nonzero()].flatten()
    return [prediction, ground_truth]

res = flatten(user_prediction, test_data_matrix)
prediction = res[0]
ground_truth = res[1]

# Mean absolute error on the held-out (nonzero) test ratings
errors = abs(prediction - ground_truth)
print('Mean Absolute Error:', round(np.mean(errors), 2))

fig, ax = plt.subplots()
ax.scatter(ground_truth, prediction, edgecolors=(0, 0, 0))
ax.plot([ground_truth.min(), ground_truth.max()], [ground_truth.min(), ground_truth.max()], linestyle="None")
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.show()

res = flatten(item_prediction, test_data_matrix)
prediction = res[0]
ground_truth = res[1]

fig, ax = plt.subplots()
ax.scatter(ground_truth, prediction, edgecolors=(0, 0, 0))
ax.plot([ground_truth.min(), ground_truth.max()], [ground_truth.min(), ground_truth.max()], linestyle="None")
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.show()
```
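To see what `predict` is doing numerically, here is a small illustrative check on a toy ratings matrix. This is an added sketch, not part of the original analysis; it assumes the `predict` function defined above is still in scope.

```
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances

# Toy 3-user x 4-item ratings matrix (0 = unrated), just to trace the math.
toy_ratings = np.array([
    [5.0, 3.0, 0.0, 1.0],
    [4.0, 0.0, 0.0, 1.0],
    [1.0, 1.0, 0.0, 5.0],
])

toy_user_similarity = pairwise_distances(toy_ratings, metric='cosine')
toy_item_similarity = pairwise_distances(toy_ratings.T, metric='cosine')

# User-based prediction: each user's mean rating plus a weighted average of the
# other users' deviations from their own means, weighted by the similarity matrix.
print(predict(toy_ratings, toy_user_similarity, type='user'))

# Item-based prediction: a weighted average over the item axis.
print(predict(toy_ratings, toy_item_similarity, type='item'))
```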
<pre style="font-size: 50%;">
Copyright 2018 Google LLC.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
</pre>

<img src="//storage.googleapis.com/lucid-link-images/lucid_alpha-warn.png" width=648></img>

# Lucid: A Quick Tutorial

This tutorial quickly introduces [**Lucid**](https://github.com/tensorflow/lucid), a library for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.

**Note**: The easiest way to use this tutorial is [as a colab notebook](https://colab.research.google.com/drive/1XuxLjIZj9MV-lRCpXHBhLo5A41Zs0f8E), which allows you to dive in with no setup. We recommend you enable a free GPU by going:

> **Runtime** → **Change runtime type** → **Hardware Accelerator: GPU**

Thanks for trying Lucid!

## Install, Import, Load Model

```
# Install Lucid
#!pip install --quiet lucid==0.0.4
!pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git

# Imports
import numpy as np
import tensorflow as tf

import lucid.modelzoo.vision_models as models
from lucid.misc.io import show
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform

# Let's import a model from the Lucid modelzoo!
model = models.InceptionV1()
model.load_graphdef()
```

## Visualize a Neuron

```
# Visualizing a neuron is easy!
_ = render.render_vis(model, "mixed4a_pre_relu:476")
```

## Getting a bit deeper

Lucid splits visualizations into a few components which you can fiddle with completely independently:

* **objectives** -- What do you want the model to visualize?
* **parameterization** -- How do you describe the image?
* **transforms** -- What transformations do you want your visualization to be robust to?

In this section, we'll experiment with each one.

**Experimenting with objectives**

```
# Let's visualize another neuron using a more explicit objective:
obj = objectives.channel("mixed4a_pre_relu", 465)
_ = render.render_vis(model, obj)

# Or we could do something weirder:
# (Technically, objectives are a class that implements addition.)
channel = lambda n: objectives.channel("mixed4a_pre_relu", n)
obj = channel(476) + channel(465)
_ = render.render_vis(model, obj)
```

**Transformation Robustness**

Recommended reading: the Feature Visualization article's discussion of "Transformation Robustness" in the section titled [The Enemy of Feature Visualization](https://distill.pub/2017/feature-visualization/#enemy-of-feature-vis). In particular, there's an interactive diagram that lets you explore how different kinds of transformation robustness affect visualizations.
```
# No transformation robustness
transforms = []
_ = render.render_vis(model, "mixed4a_pre_relu:476", transforms=transforms)

# Jitter 2
transforms = [
    transform.jitter(2)
]
_ = render.render_vis(model, "mixed4a_pre_relu:476", transforms=transforms)

# Breaking out all the stops
# (the ranges are wrapped in list() so they can be concatenated in Python 3)
transforms = [
    transform.pad(16),
    transform.jitter(8),
    transform.random_scale([n/100. for n in range(80, 120)]),
    transform.random_rotate(list(range(-10, 10)) + list(range(-5, 5)) + 10*list(range(-2, 2))),
    transform.jitter(2)
]
_ = render.render_vis(model, "mixed4a_pre_relu:476", transforms=transforms)
```

**Experimenting with parameterization**

Recommended reading: The Feature Visualization article's section on [Preconditioning and Parameterization](https://distill.pub/2017/feature-visualization/#preconditioning)

```
# Using alternate parameterizations is one of the primary ingredients for
# effective visualization
param_f = lambda: param.image(128, fft=False, decorrelate=False)
_ = render.render_vis(model, "mixed4a_pre_relu:2", param_f)

# Using alternate parameterizations is one of the primary ingredients for
# effective visualization
param_f = lambda: param.image(128, fft=True, decorrelate=True)
_ = render.render_vis(model, "mixed4a_pre_relu:2", param_f)
```
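Finally, the three ingredients can be combined in a single call. The sketch below is an addition, not part of the original tutorial; it only reuses objects, layer names, and arguments that already appear in the cells above, and assumes `render_vis` accepts the parameterization and transform arguments together (as it does separately above).

```
# Combine a summed objective, a preconditioned parameterization, and an
# aggressive transform stack in one visualization.
channel = lambda n: objectives.channel("mixed4a_pre_relu", n)
obj = channel(476) + channel(465)                               # summed objective

param_f = lambda: param.image(128, fft=True, decorrelate=True)  # decorrelated / FFT image

transforms = [
    transform.pad(16),
    transform.jitter(8),
    transform.random_scale([n/100. for n in range(80, 120)]),
    transform.random_rotate(list(range(-10, 10)) + list(range(-5, 5)) + 10*list(range(-2, 2))),
    transform.jitter(2)
]

_ = render.render_vis(model, obj, param_f, transforms=transforms)
```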
# EEP/IAS 118 - Section 3 ## Manipulating (more) Data, Attractive Figures, and Practice Problems! ### July 11, 2019 Today's coding portion of the section will help get us familiar with a few packages that will help us improve the quality of our output tables and figures. ``` library(tidyverse) library(haven) library(xtable) sleepdata <- read_dta("sleep75.dta") ``` ## Working with Indexes We've seen how to manipulate datasets by adding in variables or removing certain observations, but what if we want to obtain one element/a set of elements from a known location? ### Vectors Let's start by working with a vector: ``` vec <- rnorm(10, mean =4, sd = 2) vec ``` We created a vector of length 10 of random draws from a N(4,4) distribution. Now if we were interested in getting just the third element of this vector, we can do that like so: ``` vec[3] ``` The `[]` lets __R__ know that you want to select on position, while the 3 is our instruction for which position to pull from. (note that since we're working with a vector and not a dataframe, we can't use `$` to call a certain column). If we were interested in elements 5 through 7, we can pull them with the use of `:` ``` vec[5:7] ``` Finally, if we wanted to pull the first, fourth, and ninth elements we can do that using `c()`: ``` vec[c(1,4,9)] ``` What `c()` is doing is combining all the elements given to it into a vector themselves. We can see that by running it on its own. ``` newvec <- c(30,34,38,42) newvec is.vector(newvec) ``` ### Matrices and Data Frames What happens when we are working multidimensional objects? Largely the same thing! Now we just need to refer to position by specifying `[row#, column#]`. It is the same process for whether we're working with a matrix or a data frame. ``` # make a matrix mat40 <- matrix(1:40, nrow = 4, ncol = 10) mat40 is.matrix(mat40) # Get the first element (1) mat40[1,1] # Get the element from the 3rd row and 6th column mat40[3,6] # Get the fifth, sixth, and seventh elements from the 2nd row mat40[2, 5:7] # Get all of column five mat40[, 5] # Get all of row four mat40[4,] # Get the fifth, sixth, and seventh elements from the first AND 2nd rows mat40[1:2, 5:7] # Get the first and fourth elements from the third row mat40[3,c(1,4)] ``` We have a bunch of flexibility here to call one element or multiple elements at the same time, the only restriction being that we follow the `[row#, col#]` syntax. The process for data frames is pretty similar, albeit with one extension. Now that we have variables, we can combine a position call with the `$` for a specific variable. ``` sleepdf <- sleepdata %>% select(age, educ, exper, hrwage) head(sleepdf) nrow(sleepdf) ncol(sleepdf) dim(sleepdf) is.data.frame(sleepdf) # Get the first row sleepdf[1,] # Get the head of the age variable head(sleepdf$age) # Get the fourth row element of column 4 (hrwage) sleepdf[4,4] # Alternatively, we can do the same thing by refering to the specific variable/column sleepdf$hrwage[4] ``` Note that when we use the `$` to call a specific variable, __R__ now treats that variable as a vector, so we can refer to its elements with `[]` in one dimension. In that case, our call `sleepdf$hrwage[4]` gives us just a number, whereas the previous call of `sleepdf[4,4]` gives us the same value but presented in a 1x1 table. ## ggplot2 One of the sad facts about (most) economic research papers is that they don't always have the most aesthetically pleasing figures. 
For many data visualization applications or our own work we might want to have more control over the visuals and step them up a notch, making sure they convey useful information and have informative labels/captions. This is where the __ggplot2__ package comes in. We started off using __R's__ built-in plot function, which let us produce scatterplots and construct histograms of all sorts of variables. However, it doesn't look the best and has some ugly naming conventions. __ggplot2__ will give us complete control over our figure and allow us to get as in depth with it as we want. ### ggplot2 Basic Syntax Let's start by getting familiar with the basic syntax of __ggplot2__. It's syntax is a little bit different than some of the functions we've used before, but once we figure it out it makes thing nice and easy as we make more and more professional-looking figures. To start a plot, we start with the function ## `ggplot()` This function initializes an empty plot and passes data to other plots that we'll add on top. We can also use this function to define our dataset or specify what our x and y variables are. ``` ggplot() ``` Okay, so not the most impressive yet. We get a little bit more if we specify our data and our x/y variables. To specify the data, we add the argument `data = "dataname"` to the function. To specify which variable is on the x axis and which is on the y, we use the `aes(x= "xvar", y= "yvar")` argument. `aes()` is short for "aesthetics" and allows us to automatically pass these variables along as our x and y variables for the plots we add. Let's say we're interested in using our `sleepdata` to see the relationship between age and hourly wage in our sample ``` ggplot(data = sleepdata, aes(x = age, y = hrwage)) ``` That is a start! Now we have labels on both of our axes corresponding to the assigned variable, and a grid corresponding to possible values of those variables. We will add geometries (sets of points, histograms, lines, etc.) by adding what we call "layers" - let's take a look at a few of the options. ### Scatterplots Now let's add some points! If we want to get a sense of how age and hourly wage vary in our data, we can do that by just plotting the points. We can add points using the `geom_point()` function. Since we already declared our two variables, all we need to add the function with `+ geom_point()` to our existing code: ``` ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point() ``` And we get a a plot of all our points (note that we were warned that there are some missing values that get dropped). #### Labels Sometimes we might want to change the labels from the variable names to a more descriptive label, and possibly add a title. We can do that! We do this by adding the `labs()` function to our plot. ``` ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point() + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample", x = "Age (years)", y = "Hourly Wage ($)") ``` Let's take a look at what we added to `labs()`. First, `title` gives us the main title at the top. Second, `subtitle` gives us another line in a smaller font below the main title. `x` and `y` correspond to our x and y labels, respectively. #### Changing Points What if we want to change the color/shape/transparency of our points? We can do that by using arguments of `geom_point()`. 
``` ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point(colour = "blue", alpha = 0.4, size = 0.8) + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample", x = "Age (years)", y = "Hourly Wage ($)") ``` By adding `colour="blue"` we changed the color to blue. There are [a toooooon](http://sape.inf.usi.ch/sites/default/files/ggplot2-colour-names.png) of named colors that we could use instead (this gets really useful when we start splitting our data by group levels). `alpha = 0.4` is changing the transparency of our points to 40%. `size = 0.8` is reducing the size of the points to 80% of their original size. #### Splitting by Groups What if we wanted to change the color of our points according to whether the individual is male or not? We can do that! ``` ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point(aes(colour = factor(male))) + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample", x = "Age (years)", y = "Hourly Wage ($)") ``` By adding an aesthestic to our `geom_point` we can set the color to be determined by the value of $male$. By default, the zero value (i.e. female) gets a red color while a 1 value (female) gets a light green. We specify the variable as a `factor()` so that ggplot knows it is a discrete variable. What if we instead wanted to change color on a continuous scale? ``` ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point(aes(colour = age)) + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample", x = "Age (years)", y = "Hourly Wage ($)") ``` Here the color is now a function of our continuous variable $age$, taking increasingly lighter values for higher ages. (note that __ggplot2__ lets you specify the color scale or color levels if you want, as well as nitpick the labels in the legend. In reality we can change anything that appears in the plot - we just have to choose the right option). One thing to note is that we can make other options conditional on variables in our data frame too. What if we wanted the shape of our points to depend on union participation, the color to vary with gender, and the size of the points to depend on the total minutes worked per week? We can do all that - even if it might look real gross. ``` ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point(aes(colour = factor(male), shape = factor(union), size = totwrk)) + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample", x = "Age (years)", y = "Hourly Wage ($)") ``` While the above example is cluttered, it shows how we can take a simple scatterplot and use it to convey additional information in just one plot. ### Lines We can add lines to our figure in a couple different ways. First, if we wanted to connect all the points in our data with a line, we would use the `geom_line()` function. ``` sleepdata %>% group_by(age) %>% filter(row_number() == 1) %>% ggplot(aes(x=age, y = hrwage)) + geom_line() ``` We can also add points just by adding another layer! ``` sleepdata %>% group_by(age) %>% filter(row_number() == 1) %>% ggplot(aes(x=age, y = hrwage)) + geom_line()+ geom_point(colour = "gray40") ``` What if instead we wanted to add a vertical, horizontal, or sloped line in our plot? We use the layers `vline()`, `hline()`, and `abline()` for that. `vline()` is simple and really only needs the `xintercept` argument. Similarly, `hline` takes the `yintercept` argument. 
`abline` requires us to specify both a `slope` and an `intercept`. Let's say we wanted to add lines to the previous set of points (not connected): ``` sleepdata %>% group_by(age) %>% filter(row_number() == 1) %>% ggplot(aes(x=age, y = hrwage)) + geom_point(colour = "gray40") + geom_vline(xintercept = 40, colour = "orchid4") + geom_hline(yintercept = 10) + geom_abline(intercept = 25, slope = -0.5, colour = "grey60", linetype = "dashed") ``` ### Histograms and Distributions Sometimes we want to get information about one variable on its own. We can use __ggplot2__ to make histograms as well as predicted distributions! We use the function `geom_histogram()` to produce histograms. To get a basic histogram of $age$, ``` ggplot(data = sleepdata, aes(x = age)) + geom_histogram() ``` Notice that __ggplot2__ chooses a bin width by default, but we can change this by adding `binwidth`. We can also add labels as before. Note that if we want to change color, we now have two different options. `colour` now changes the outline color, while `fill` changes the interior color. ``` ggplot(data = sleepdata, aes(x = age)) + geom_histogram(binwidth = 10, colour = "seagreen4") + labs(title = "Age Histogram", x = "Age (years)", y = "Count") ggplot(data = sleepdata, aes(x = age)) + geom_histogram(binwidth = 10, fill = "midnightblue") + labs(title = "Age Histogram", x = "Age (years)", y = "Count") ggplot(data = sleepdata, aes(x = age)) + geom_histogram(binwidth = 10, colour = "grey60", fill = "darkolivegreen1") + labs(title = "Age Histogram", x = "Age (years)", y = "Count") ggplot(data = sleepdata, aes(x = age)) + geom_histogram(aes(fill = factor(male)), binwidth = 10) + labs(title = "Age Histogram", x = "Age (years)", y = "Count") ``` What if we wanted to get a sense of the estimated distribution of age rather than look at the histogram? We can do that with the `geom_density()` function! ``` ggplot(data = sleepdata, aes(x = age)) + geom_density(fill = "gray60", colour= "navy") + labs(title = "Age Density", x = "Age (years)", y = "Density") ggplot(data = sleepdata, aes(x = age)) + geom_density(aes(colour = factor(male))) + labs(title = "Age Density", x = "Age (years)", y = "Density") ``` ### Regression One cool thing that we can do with __ggplot2__ is produce a simple linear regression line directly in our plot! We use the `geom_smooth(method = "lm")` layer for that. ``` wagereg <- lm(hrwage ~ age, data = sleepdata) summary(wagereg) ggplot(data = sleepdata, aes(x=age, y = hrwage)) + geom_point()+ geom_smooth(method = "lm") ``` Notice that by default it gives us the 95% confidence interval too! We can change the confidence interval using the `level` argument. ### Multiple Linear Regression in ggplot2 How would we go about plotting the results of a multiple linear regression? In this case we have to combine output from our regression with the `abline` function. ``` wagereg2 <- lm(hrwage ~ age + educ + male, data = sleepdata) summary(wagereg2) int <- wagereg2$coefficients[1] slope_age <- wagereg2$coefficients[2] ggplot(data = sleepdata, aes(x=age, y = hrwage)) + geom_point()+ geom_abline(intercept = int, slope = slope_age) + ylim(-20,40) ``` I had to add the `ylim(-20,40)` to change the y limits so that we could see the line... because it now doesn't pass through the data! Recall that our slope coefficient $\hat\beta_{age}$ is now the _partial_ effect of age on hourly wage, holding education level and gender constant. 
As a result, the plot isn't quite as informative on top of the data points in a single set of dimensions. ### Facets Sometimes we might want to produce different panels of a plot for different _values_ of another variable. For instance, instead of changing the color of our points for males vs females earlier, we could have produced separate plots for data where males = 0 and females = 0 right next to each other. We do that using the `facet_grid()` layer. ``` ggplot(data = sleepdata, aes(x=age, y = hrwage)) + geom_point()+ facet_grid(. ~ male) ``` Here we put the panels next to each other, first for female ($male=0$) on the left and then for males on the left. We can also arrange them vertically by changing how we write the argument. ``` ggplot(data = sleepdata, aes(x=age, y = hrwage)) + geom_point()+ facet_grid(male ~ .) ``` Notice that when we put `male ~ .` we get the plots stacked vertically by age, whereas `. ~ male` splits them side by side. ## xtable The package __xtable__ allows us to obtain high-quality formatted versions of our summary statistics tables, regression tables, and raw data to improve the look of our __R__ output. This is especially useful for generating professional-looking tables that can be added to a research paper... once we get into __RStudio__ on its own. Right now it's not as useful, since our Jupyter notebook already formats results in a specific way. One way we can get a sense of how it formats is by using it on our regression tables in our Jupyter notebook. ``` reg <- lm(hrwage ~ educ + age + union + exper, data = sleepdata) summary(reg) xtable(reg) ``` We'll spend more time with __xtable__ (and eventually __stargazer__ once we switch over to __RStudio__). ## Practice with ggplot! Let's try producing a couple of different plots. First, let's load in a new dataset - the _autos.dta_ file again. ``` autodata <- read_dta("autos.dta") head(autodata) ``` ### Using `autodata`, create ### 1. A scatter plot showing the relationship between weight and mpg. Put weight on the x-axis and mpg on the y-axis. Label the x-axis "Weight (lbs)" and the y-axis "Fuel Efficiency (mpg)" and give it a nice title. ### 2. A histogram of price, with fill color according to whether the vehicle is foreign-made or not ### 3. A histogram of price, faceted according to whether the vehicle is foreign-made or not. Do you think this looks better or worse than 2. ? ### 4. Run a regression of price on mpg, foreign, and weight. Use `mutate` to add the residuals as a variable in `autodata`. Then, plot the residuals (y-axis) against mpg. Do the residuals appear to vary systematically with fuel efficiency? (recall that we can access residuals from `lm` output using `$residuals`) # Practice Exercises ## 1. #### We run the following regression of log-wage on three X variables: #### lm(log(wage) ~ educ + exper + female, data = WageData) <p style="text-align: center;"> </p> <img src="images/wagereg.png" width="800" /> #### 1. Fill in the t-stat for education and calculate the 95% confidence interval #### 2. Interpret the coefficient on experience, remember to comment on sign, size, and significance (SSS) #### 3. Test the null that female salaries are 50% lower than male salaries at 1% significance. Show your work using the five steps in hypothesis testing. ## 2. #### A multinational firm focused on petroleum refining conducted a poll that showed a disapproval rate of 63% among consumers. The CEO refuses to believe this is true, and hires you as a consultant to check on the validity of the earlier poll. 
After depositing your hefty consulting fee, you collect a random sample of 100 consumers and find that 55 of them disapprove of the way the firm treats the environment. Run a hypothesis test (95% significance level) to evaluate whether the original poll is reporting the correct disapproval rate. ## 3. #### To investigate possible gender discrimination in a firm, a sample of 100 men and 64 women with similar job descriptions are selected at random and independently. A summary of the resulting monthly salaries is: | Group | Average | Standard Deviation | Observations | |------------|---------|--------------------|--------------| | Men | 3100 | 200 | 100 | | Women | 2900 | 320 | 64 | #### Do these data provide statistically significant evidence that the wages of men and women are different at the 1 percent significance level? ## 4. #### From a sample of 200 households, we estimated the following two models of gasoline consumption (t-statistics in parentheses) $$ gas = 34.2 + 10.5 suv + 0.25 inc - 0.00005 inc^2 $$ $$ ~~~~~~~~~~~~~~~~(2.3)~~~~(3.1)~~~~~~~~~(1.7)~~~~~~~~~~~~(1.8)~~~~~~~~~~~~~~$$ $$ gas = 22.2 + 15.3 suv $$ $$ ~~~~~~~~~~~~~~~~(2.3)~~~~(3.1)~~~~~~~~~~$$ #### where gas gives the number of gallons per month, suv is a dummy variable for whether the household owns an SUV, and inc is the annual household income in thousands of \$. #### 1. What is the marginal effect of income on gasoline consumption? #### 2. At what point does that relationship change sign? #### 3. What is the correlation between income and owning and SUV? Show how you came to this conclusion. (What did you have to assume - reasonably - in order to answer this question?)
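For reference while working through problems 2 and 3, the standard test statistics (general formulas only, not the worked answers) are:

$$ z = \frac{\hat{p} - p_0}{\sqrt{\dfrac{p_0(1-p_0)}{n}}} \qquad \text{(one-sample test of a proportion)} $$

$$ t = \frac{\bar{x}_m - \bar{x}_w}{\sqrt{\dfrac{s_m^2}{n_m} + \dfrac{s_w^2}{n_w}}} \qquad \text{(difference in means, independent samples)} $$

where $\hat{p}$ is the sample proportion, $p_0$ the hypothesized proportion, $n$ the sample size, and the subscripts $m$ and $w$ index the two groups.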
# Investment Valuations In this activity, you’ll use the Alpaca API to get the pricing information for two stocks. Instructions: 1. Create your environment file (`.env`) in your project folder. Make sure that this file holds your Alpaca API and secret keys. 2. Import the Alpaca API and secret keys into the `investment_valuations.ipynb` notebook. 3. Create the Alpaca API `REST` object by calling the Alpaca `tradeapi.REST` function and then setting the `alpaca_api_key`, `alpaca_secret_key`, and `api_version`. 4. Review the two-stock `portfolio_df` DataFrame that we created for you in the starter notebook. Run this cell as you work through the remaining steps in this activity. 5. Get the closing prices of the prior business day for the two stocks in question, Apple and Microsoft, by using the Alpaca `get_barset` function. Note that this requires values for `tickers`, `timeframe`, and the `start` and `end` dates. Add the `df` property to the end of this API call to automatically convert the response to a DataFrame. > **Note** The solution notebook uses `"2020-06-30"` for both the `start` and the `end` date. 6. Get the closing prices for both stocks. Convert the values to floating point numbers so that you can use them in a future calculation. > **Hint** A floating point number is a numerical value that has decimal places. To convert a number to a `float`, call the [float function](https://docs.python.org/3/library/functions.html#float) and pass the closing price as a parameter. 7. Calculate the current value, in dollars, of the portfolio. To do so, multiply the closing price of each stock by the shares that the `portfolio_df` DataFrame supplies for you. Print the current value of each stock, and then add the values to get the total value of the portfolio. 8. Create a Pandas DataFrame named `portfolio_value_df` that includes the current value, in dollars, of each stock. Plot a bar chart that visualizes the DataFrame based on the calculated values of each stock. 9. Review the code in the cell provided in the starter notebook to learn how a pie chart is created using the current valuations of Apple and Microsoft. Run the cell so that you can visualize the information. > **Challenge Connection** An terrific way to visualize the value of each stock in a portfolio is by using a [Pandas pie chart](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.pie.html) You’ll need to create a pie chart in this week’s Challenge. References: [Alpaca API Docs](https://alpaca.markets/docs/api-documentation/) [Pandas pie plot](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.pie.html) ## Import the required libraries and dependencies ``` # Import the required libraries and dependencies import os import requests import pandas as pd from dotenv import load_dotenv import alpaca_trade_api as tradeapi %matplotlib inline ``` ## Step 1: Create your environment file (`.env`) in your project folder. Make sure that this file holds your Alpaca API and secret keys. ## Step 2: Import the Alpaca API and secret keys into the `investment_valuations.ipynb` notebook. * Load the environment variable by calling the `load_dotenv()` function. * Set the value of the variables `alpaca_api_key` and `alpaca_secret_key` equal to their respective environment variables. * Confirm the variables are available by checking the `type` of each. 
``` # Load the environment variables by calling the load_dotenv function load_dotenv() # Set Alpaca API key and secret by calling the os.getenv function and referencing the environment variable names # Set each environment variable to a notebook variable of the same name alpaca_api_key = os.getenv("ALPACA_API_KEY") alpaca_secret_key = os.getenv("ALPACA_SECRET_KEY") # Check the values were imported correctly by evaluating the type of each display(type(alpaca_api_key)) display(type(alpaca_secret_key)) ``` ## Step 3: Create the Alpaca API `REST` object by calling the Alpaca `tradeapi.REST` function and then setting the `alpaca_api_key`, `alpaca_secret_key`, and `api_version`. ``` # Create your Alpaca API REST object by calling Alpaca's tradeapi.REST function # Set the parameters to your alpaca_api_key, alpaca_secret_key and api_version="v2" alpaca = tradeapi.REST( alpaca_api_key, alpaca_secret_key, api_version="v2") ``` ## Step 4: Review the two-stock `portfolio_df` DataFrame that we created for you in the starter notebook. Run this cell as you work through the remaining steps in this activity. ``` # Set current amount of shares data shares_data = { "shares": [200, 320] } # Set the tickers tickers = ["MSFT", "AAPL"] # Create the shares DataFrame portfolio_df = pd.DataFrame(shares_data, index=tickers) # Display shares data portfolio_df ``` ## Step 5: Get the closing prices of the prior business day for the two stocks in question, Apple and Microsoft, by using the Alpaca `get_barset` function. Note that this requires values for `tickers`, `timeframe`, and the `start` and `end` dates. Add the `df` property to the end of this API call to automatically convert the response to a DataFrame. * Confirm the value for `tickers` from a the prior step * Set the values for `start_date` and `end_date` using the `pd.Timestamp` function. * Set the `timeframe` value to 1 day. * Create the `portfolio_prices_df` DataFrame by setting it equal to the `alpaca.get_barset` function. ``` # Confirm the values of the `tickers` variable created in the prior step tickers # Set the values for start_date and end_date using the pd.Timestamp function # Inside the function set the date parameter to the prior business day # Both the start_date and end_date should contain the same date value, as we looking for the closing price # of the prior business day. # Set the parameter tz to "America/New_York", # Set this all to the ISO format by calling the isoformat function start_date = pd.Timestamp("2020-06-30", tz="America/New_York").isoformat() end_date = pd.Timestamp("2020-06-30", tz="America/New_York").isoformat() # Set timeframe to one day (1D) for the Alpaca API timeframe = "1D" # Use the Alpaca get_barset function to gather the price information for each ticker # Include the function parameters: tickers, timeframe, start, and end # Be sure to call the df property to ensure that the returned information is set as a DataFrame portfolio_prices_df = alpaca.get_barset( tickers, timeframe, start = start_date, end = end_date ).df # Review the resulting `portfolio_prices_df` DataFrame. portfolio_prices_df ``` ## Step 6: Get the closing prices for both stocks. Convert the values to floating point numbers so that you can use them in a future calculation. 
```
# Fetch the current closing prices for Apple and Microsoft from the portfolio_prices_df DataFrame
# Remember that the DataFrame generated from the Alpaca call incorporates multi-indexing
# Be sure to set the values from the DataFrame to a float by calling the `float` function
aapl_price = float(portfolio_prices_df["AAPL"]["close"])
msft_price = float(portfolio_prices_df["MSFT"]["close"])

print(aapl_price)
print(type(msft_price))
```

## Step 7: Calculate the current value, in dollars, of the portfolio. To do so, multiply the closing price of each stock by the shares that the `portfolio_df` DataFrame supplies for you. Print the current value of each stock, and then add the values to get the total value of the portfolio.

1. Multiply the current price of each stock by the shares indicated in the `portfolio_df` DataFrame.
2. Print the current value of each stock.
3. Add the values together and print the current total value of the portfolio.

```
# Compute the current value in dollars of each of the stocks in the portfolio
# This is done by multiplying the price from the portfolio_prices_df DataFrame
# and the shares from the portfolio_df DataFrame.
msft_value = msft_price * portfolio_df.loc["MSFT"]["shares"]
aapl_value = aapl_price * portfolio_df.loc["AAPL"]["shares"]

# Print the current value of each stock in the stocks portfolio
print(f"The current value of the {portfolio_df.loc['MSFT']['shares']} MSFT shares is ${msft_value:,.2f}")
print(f"The current value of the {portfolio_df.loc['AAPL']['shares']} AAPL shares is ${aapl_value:,.2f}")

# Print the total value of the current portfolio.
print(f"The current value of the entire portfolio is ${(aapl_value + msft_value):,.2f}")
```

## Step 8: Create a Pandas DataFrame named `portfolio_value_df` that includes the current value, in dollars, of each stock. Plot a bar chart that visualizes the DataFrame based on the calculated values of each stock.

1. Create a portfolio_value_df DataFrame that reflects the current value of shares.
2. Create a bar chart visualizing the values of the portfolio_value_df DataFrame.

```
# Create a Pandas DataFrame that includes the current value of both MSFT and AAPL.
portfolio_value_df = pd.DataFrame(
    {"MSFT": [msft_value],
     "AAPL": [aapl_value]}
)

# Display portfolio_value_df DataFrame
portfolio_value_df

# Create a bar chart to show the value of shares
# Give the plot a title and adjust the figure size
portfolio_value_df.plot(kind="bar", title="Current Value in Dollars of Apple & Microsoft")
```

## Step 9: Review the code in the cell provided in the starter notebook to learn how a pie chart is created using the current valuations of Apple and Microsoft. Run the cell so that you can visualize the information.

1. Create the DataFrame to use in the pie chart.
2. Use Pandas `plot.pie` to visualize the current value of each of the two stocks relative to the total portfolio.

```
# Using the DataFrame created below:
pie_values_df = pd.DataFrame(
    {'Value': [aapl_value, msft_value]},
    index=['Apple', 'MSFT']
)
pie_values_df

# Create a pie chart to visualize the proportion each stock is of the portfolio as a whole
# Give the plot a title
pie_values_df.plot.pie(y='Value', title='Portfolio Composition - 2020-07-14 ')
```
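Steps 7–9 repeat the same price-times-shares arithmetic once per ticker. A small helper can generalize this to any number of positions; the sketch below is illustrative only (the function name, and the use of a closing-price Series, are my own additions rather than part of the activity) and relies purely on pandas.

```
# Illustrative sketch: generalizing the per-ticker valuation to any portfolio
import pandas as pd

def value_positions(shares: pd.Series, close_prices: pd.Series) -> pd.DataFrame:
    """Return per-ticker dollar value and portfolio weight."""
    value = shares * close_prices          # aligned on the ticker index
    weight = value / value.sum()
    return pd.DataFrame({"value": value, "weight": weight})

# Example with made-up closing prices
shares = pd.Series({"MSFT": 200, "AAPL": 320})
closes = pd.Series({"MSFT": 203.51, "AAPL": 364.80})  # hypothetical values
valuation = value_positions(shares, closes)
print(valuation)
print(f"Total: ${valuation['value'].sum():,.2f}")
```

Calling `valuation['value'].plot.pie()` on the resulting column should then reproduce the Step 9 chart for any number of tickers.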
``` import numpy as np from ipycanvas import Canvas, hold_canvas from ipywidgets import FloatSlider ``` This module is local to the Notebook ``` from py3d_engine import OrbitCamera, project_vector class Plot3d(Canvas): def __init__(self): super(Plot3d, self).__init__(width=500, height=500) self.dragging = False self.n = 200 self.x = np.random.rand(self.n) - 0.5 self.y = np.random.rand(self.n) - 0.5 self.z = np.random.rand(self.n) - 0.5 self.dx = 0 self.dy = 0 self.radius = 10 self.camera = OrbitCamera(self.radius, [0, 0, 0], self.width/self.height) self.x2, self.y2, self.z2 = project_vector(self.x, self.y, self.z, self.camera.matrix) self.draw() self.on_mouse_down(self.mouse_down_handler) self.on_mouse_move(self.mouse_move_handler) self.on_mouse_up(self.mouse_up_handler) self.on_mouse_out(self.mouse_out_handler) def update_matrix(self, dx=None, dy=None, radius=None): dx = dx if dx is not None else self.dx dy = dy if dy is not None else self.dy self.radius = radius if radius is not None else self.radius self.camera.radius = self.radius self.camera.update_position(dy, dx) self.x2, self.y2, self.z2 = project_vector(self.x, self.y, self.z, self.camera.matrix) self.draw() def draw(self): x = self.x2 * self.width + self.width / 2 y = self.y2 * self.height + self.height / 2 with hold_canvas(self): self.clear() self.fill_circles(x, y, 2) def mouse_down_handler(self, pixel_x, pixel_y): self.dragging = True self.x_mouse = pixel_x self.y_mouse = pixel_y def mouse_move_handler(self, pixel_x, pixel_y): if self.dragging: self.dx_new = self.dx + pixel_x - self.x_mouse self.dy_new = self.dy + pixel_y - self.y_mouse self.update_matrix(self.dx_new, self.dy_new) def mouse_up_handler(self, pixel_x, pixel_y): if self.dragging: self.dragging = False self.dx = self.dx_new self.dy = self.dy_new def mouse_out_handler(self, pixel_x, pixel_y): if self.dragging: self.dragging = False self.dx = self.dx_new self.dy = self.dy_new p = Plot3d() p # Link Camera position to a slider widget slider = FloatSlider(description='Radius:', min=1., max=7., value=p.radius) def on_slider_move(change): slider_value = change['new'] p.update_matrix(radius=slider_value) slider.observe(on_slider_move, 'value') slider ```
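The `py3d_engine` module used above is local to the original notebook and is not shown here, so the exact behavior of `OrbitCamera` and `project_vector` is unknown. As a rough illustration of the kind of math such a projection helper typically performs, here is a minimal sketch; the function name and the 4×4 view-projection matrix convention are assumptions, not the actual module.

```
# Illustrative sketch of a point-projection helper (assumed behavior, not py3d_engine itself)
import numpy as np

def project_points(x, y, z, matrix):
    """Project 3D points through a 4x4 view-projection matrix with a perspective divide."""
    points = np.stack([x, y, z, np.ones_like(x)])  # homogeneous coordinates, shape (4, n)
    projected = matrix @ points
    w = projected[3]                               # perspective divide
    return projected[0] / w, projected[1] / w, projected[2] / w

# Example with an identity "camera" (no transformation applied)
x = np.random.rand(5) - 0.5
y = np.random.rand(5) - 0.5
z = np.random.rand(5) - 0.5
x2, y2, z2 = project_points(x, y, z, np.eye(4))
print(x2, y2)
```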
``` %matplotlib inline import pandas as pd import numpy as np from statsmodels.tsa import ar_model, arima_model, tsatools from sklearn.linear_model import LinearRegression def SR(x): return (np.mean(x)/np.std(x)) * np.sqrt(252.0) def WN(size): return pd.DataFrame({"WN": np.random.normal(size=size)}) def WN_Returns(size, scale): return pd.DataFrame({"WN": np.random.normal(size=size, scale=scale)}) def TSCV(index, window_size, warmup_size, expanding=True): n_windows = int(np.floor((len(index) - warmup_size)/window_size)) tscv_folds = {"insample": {}, "outsample": {}} for i in range(n_windows): if expanding: tscv_folds["insample"][str(i)] = index[:warmup_size+i*window_size] tscv_folds["outsample"][str(i)] = index[warmup_size+i*window_size:warmup_size+(i+1)*window_size] # last bit if expanding: tscv_folds["insample"][str(i+1)] = index[:warmup_size+(i+1)*window_size] tscv_folds["outsample"][str(i+1)] = index[warmup_size+(i+1)*window_size:] return tscv_folds def df_ewma(ts, spans): df = ts.ewm(span=spans[0]).mean() df.columns = ["span_" + str(spans[0])] for span in spans[1:]: data = ts.ewm(span=span).mean() data.columns = ["span_" + str(span)] df = pd.concat([df, data], axis=1) return df def df_lags(ts, lags): df = ts.shift(lags[0]) df.columns = ["lag_" + str(lags[0])] for lag in lags[1:]: data = ts.shift(lag) data.columns = ["lag_" + str(lag)] df = pd.concat([df, data], axis=1) return df ``` # Mult Hyp Test vs Perf Eval Diff ``` # evaluating well performance, or estimating better generalization error can be gamed, # mainly if it is a fixed approached (window based) ``` ## Data and Params ``` # data params data_size = 12000 wn_scale = 0.01 ts_shift = 1 # cv params holdout_size = 2000 warmup_size = 5000 window_size = 1000 expanding = True # model params pred_model = LinearRegression() ewmas_span = np.arange(3, 100).tolist() lags_values = np.arange(0, 100).tolist() # data ts = WN_Returns(data_size, scale=wn_scale) ts_target = ts[ts_shift:].copy() ts_input = ts.shift(ts_shift).dropna() # ts_input = df_ewma(ts_input, ewmas_span) ts_input = df_lags(ts_input, lags_values) ts = pd.concat([ts_target, ts_input], axis=1).dropna() avail_featlist = list(ts_input.columns) ts.columns = ["target"] + avail_featlist # cv folds ts_folds = TSCV(list(ts.index)[:-holdout_size], window_size, warmup_size, expanding) ts_folds["holdout"] = list(ts.index)[-holdout_size:] ``` ## Feature Selection using Lags - Fixed TSCV ``` av_featlist = np.copy(avail_featlist).tolist() tentative_list = [] fixed_list = [] max_fslist = 20 k = 0 df_results = pd.DataFrame(index=[0], columns=["Iter", "InCV SR", "OutCV SR", "Hold SR", "Max InCV SR", "Max Hold SR", "Long-only SR", "FS"]) while len(av_featlist) != 0 and len(fixed_list) <= max_fslist: insr_fs, outsr_fs, holdsr_fs = [], [], [] for fs in av_featlist: # feature list feat_list = fixed_list + [fs] outsample_pred, outsample_obs = np.array([]), np.array([]) # model and prediction for fold_num in range(len(ts_folds["insample"].keys())): # fit model ml = pred_model.fit(ts.loc[ts_folds["insample"][str(fold_num)], feat_list], ts.loc[ts_folds["insample"][str(fold_num)], "target"]) # prediction and obs outsample_pred = np.concatenate([outsample_pred, ml.predict(ts.loc[ts_folds["outsample"][str(fold_num)], feat_list])]) outsample_obs = np.concatenate([outsample_obs, ts.loc[ts_folds["outsample"][str(fold_num)], "target"]]) # compute performance insr_fs.append(SR(ml.predict(ts.loc[ts_folds["insample"][str(fold_num)], feat_list]) * ts.loc[ts_folds["insample"][str(fold_num)], "target"])) 
outsr_fs.append(SR(outsample_pred * outsample_obs)) holdsr_fs.append(SR(ml.predict(ts.loc[ts_folds["holdout"], feat_list]) * ts.loc[ts_folds["holdout"], "target"])) # get best feature and remove from available list get_fs = av_featlist[np.argmax(outsr_fs)] fixed_list += [get_fs] av_featlist.pop(np.argmax(outsr_fs)) # store results df_results.loc[k, "Iter"] = k df_results.loc[k, "InCV SR"] = insr_fs[np.argmax(outsr_fs)] df_results.loc[k, "OutCV SR"] = np.max(outsr_fs) df_results.loc[k, "Hold SR"] = holdsr_fs[np.argmax(outsr_fs)] df_results.loc[k, "FS"] = np.copy(fixed_list) df_results.loc[k, "Max InCV SR"] = np.max(insr_fs) df_results.loc[k, "Max Hold SR"] = np.max(holdsr_fs) df_results.loc[k, "Long-only SR"] = SR(ts["target"].values) k += 1 print(k, insr_fs[np.argmax(outsr_fs)], np.max(outsr_fs), holdsr_fs[np.argmax(outsr_fs)]) print(fixed_list) df_results.loc[:20, ["InCV SR", "OutCV SR", "Hold SR"]].plot() ``` ## Feature Selection with Lags - Random Subsets for Every Turn ``` av_featlist = np.copy(avail_featlist).tolist() tentative_list = [] fixed_list = [] rsubsets = 4 max_fslist = 20 k = 0 df_results = pd.DataFrame(index=[0], columns=["Iter", "InCV SR", "OutCV SR", "Hold SR", "Max InCV SR", "Max Hold SR", "Long-only SR", "FS"]) while len(av_featlist) != 0 and len(fixed_list) <= max_fslist: insr_fs, outsr_fs, holdsr_fs = [], [], [] random_tscv = np.random.permutation(range(len(ts_folds["insample"].keys())))[:rsubsets].tolist() for fs in av_featlist: # feature list feat_list = fixed_list + [fs] outsample_pred, outsample_obs = np.array([]), np.array([]) # model and prediction for fold_num in np.sort(random_tscv): # fit model ml = pred_model.fit(ts.loc[ts_folds["insample"][str(fold_num)], feat_list], ts.loc[ts_folds["insample"][str(fold_num)], "target"]) # prediction and obs outsample_pred = np.concatenate([outsample_pred, ml.predict(ts.loc[ts_folds["outsample"][str(fold_num)], feat_list])]) outsample_obs = np.concatenate([outsample_obs, ts.loc[ts_folds["outsample"][str(fold_num)], "target"]]) # compute performance insr_fs.append(SR(ml.predict(ts.loc[ts_folds["insample"][str(fold_num)], feat_list]) * ts.loc[ts_folds["insample"][str(fold_num)], "target"])) outsr_fs.append(SR(outsample_pred * outsample_obs)) holdsr_fs.append(SR(ml.predict(ts.loc[ts_folds["holdout"], feat_list]) * ts.loc[ts_folds["holdout"], "target"])) # get best feature and remove from available list get_fs = av_featlist[np.argmax(outsr_fs)] fixed_list += [get_fs] av_featlist.pop(np.argmax(outsr_fs)) # store results df_results.loc[k, "Iter"] = k df_results.loc[k, "InCV SR"] = insr_fs[np.argmax(outsr_fs)] df_results.loc[k, "OutCV SR"] = np.max(outsr_fs) df_results.loc[k, "Hold SR"] = holdsr_fs[np.argmax(outsr_fs)] df_results.loc[k, "FS"] = np.copy(fixed_list) df_results.loc[k, "Max InCV SR"] = np.max(insr_fs) df_results.loc[k, "Max Hold SR"] = np.max(holdsr_fs) df_results.loc[k, "Long-only SR"] = SR(ts["target"].values) k += 1 print(k, insr_fs[np.argmax(outsr_fs)], np.max(outsr_fs), holdsr_fs[np.argmax(outsr_fs)]) print(fixed_list) df_results.loc[:20, ["InCV SR", "OutCV SR", "Hold SR"]].plot() ``` ## Feature Selection using Lags - Random subsets different for every feature attempted ``` av_featlist = np.copy(avail_featlist).tolist() tentative_list = [] fixed_list = [] rsubsets = 4 max_fslist = 20 k = 0 df_results = pd.DataFrame(index=[0], columns=["Iter", "InCV SR", "OutCV SR", "Hold SR", "Max InCV SR", "Max Hold SR", "Long-only SR", "FS"]) while len(av_featlist) != 0 and len(fixed_list) <= max_fslist: insr_fs, outsr_fs, 
holdsr_fs = [], [], [] for fs in av_featlist: # feature list feat_list = fixed_list + [fs] outsample_pred, outsample_obs = np.array([]), np.array([]) # model and prediction random_tscv = np.random.permutation(range(len(ts_folds["insample"].keys())))[:rsubsets].tolist() for fold_num in np.sort(random_tscv): # fit model ml = pred_model.fit(ts.loc[ts_folds["insample"][str(fold_num)], feat_list], ts.loc[ts_folds["insample"][str(fold_num)], "target"]) # prediction and obs outsample_pred = np.concatenate([outsample_pred, ml.predict(ts.loc[ts_folds["outsample"][str(fold_num)], feat_list])]) outsample_obs = np.concatenate([outsample_obs, ts.loc[ts_folds["outsample"][str(fold_num)], "target"]]) # compute performance insr_fs.append(SR(ml.predict(ts.loc[ts_folds["insample"][str(fold_num)], feat_list]) * ts.loc[ts_folds["insample"][str(fold_num)], "target"])) outsr_fs.append(SR(outsample_pred * outsample_obs)) holdsr_fs.append(SR(ml.predict(ts.loc[ts_folds["holdout"], feat_list]) * ts.loc[ts_folds["holdout"], "target"])) # get best feature and remove from available list get_fs = av_featlist[np.argmax(outsr_fs)] fixed_list += [get_fs] av_featlist.pop(np.argmax(outsr_fs)) # store results df_results.loc[k, "Iter"] = k df_results.loc[k, "InCV SR"] = insr_fs[np.argmax(outsr_fs)] df_results.loc[k, "OutCV SR"] = np.max(outsr_fs) df_results.loc[k, "Hold SR"] = holdsr_fs[np.argmax(outsr_fs)] df_results.loc[k, "FS"] = np.copy(fixed_list) df_results.loc[k, "Max InCV SR"] = np.max(insr_fs) df_results.loc[k, "Max Hold SR"] = np.max(holdsr_fs) df_results.loc[k, "Long-only SR"] = SR(ts["target"].values) k += 1 print(k, insr_fs[np.argmax(outsr_fs)], np.max(outsr_fs), holdsr_fs[np.argmax(outsr_fs)]) print(fixed_list) df_results.loc[:20, ["InCV SR", "OutCV SR", "Hold SR"]].plot() ```
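The gap between the selected out-of-CV Sharpe ratio and the holdout Sharpe ratio in the experiments above is essentially a multiple-testing effect: picking the best of roughly one hundred lag features on the same folds inflates the apparent performance even when every feature is pure noise. The toy simulation below (my own addition, not part of the original notebook) makes the point directly by comparing the average and the maximum Sharpe ratio across many white-noise "strategies", using the same annualization as the `SR` function defined earlier.

```
# Selection bias in a nutshell: max of many zero-skill Sharpe ratios looks like skill
import numpy as np

def sharpe(x):
    return (np.mean(x) / np.std(x)) * np.sqrt(252.0)

rng = np.random.default_rng(0)
n_strategies, n_days = 100, 1000

# Daily "returns" of pure white-noise strategies: the true Sharpe ratio of each is zero
returns = rng.normal(0.0, 0.01, size=(n_strategies, n_days))
srs = np.array([sharpe(r) for r in returns])

print(f"mean SR across strategies: {srs.mean():.2f}")  # close to 0
print(f"max SR across strategies:  {srs.max():.2f}")   # well above 0 purely by chance
```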
<img src="images/dask_horizontal.svg" align="right" width="30%"> # Arrays <img src="images/array.png" width="25%" align="right"> Dask array provides a parallel, larger-than-memory, n-dimensional array using blocked algorithms. Simply put: distributed Numpy. * **Parallel**: Uses all of the cores on your computer * **Larger-than-memory**: Lets you work on datasets that are larger than your available memory by breaking up your array into many small pieces, operating on those pieces in an order that minimizes the memory footprint of your computation, and effectively streaming data from disk. * **Blocked Algorithms**: Perform large computations by performing many smaller computations In this notebook, we'll build some understanding by implementing some blocked algorithms from scratch. We'll then use Dask Array to analyze large datasets, in parallel, using a familiar NumPy-like API. **Related Documentation** * [Array documentation](https://docs.dask.org/en/latest/array.html) * [Array screencast](https://youtu.be/9h_61hXCDuI) * [Array API](https://docs.dask.org/en/latest/array-api.html) * [Array examples](https://examples.dask.org/array.html) ## Create data ``` %run prep.py -d random ``` ## Setup ``` from dask.distributed import Client client = Client(n_workers=4) ``` ## Blocked Algorithms A *blocked algorithm* executes on a large dataset by breaking it up into many small blocks. For example, consider taking the sum of a billion numbers. We might instead break up the array into 1,000 chunks, each of size 1,000,000, take the sum of each chunk, and then take the sum of the intermediate sums. We achieve the intended result (one sum on one billion numbers) by performing many smaller results (one thousand sums on one million numbers each, followed by another sum of a thousand numbers.) We do exactly this with Python and NumPy in the following example: ``` # Load data with h5py # this creates a pointer to the data, but does not actually load import h5py import os f = h5py.File(os.path.join('data', 'random.hdf5'), mode='r') dset = f['/x'] ``` **Compute sum using blocked algorithm** Before using dask, let's consider the concept of blocked algorithms. We can compute the sum of a large number of elements by loading them chunk-by-chunk, and keeping a running total. Here we compute the sum of this large array on disk by 1. Computing the sum of each 1,000,000 sized chunk of the array 2. Computing the sum of the 1,000 intermediate sums Note that this is a sequential process in the notebook kernel, both the loading and summing. ``` # Compute sum of large array, one million numbers at a time sums = [] for i in range(0, 1_000_000_000, 1_000_000): chunk = dset[i: i + 1_000_000] # pull out numpy array sums.append(chunk.sum()) total = sum(sums) print(total) ``` ### Exercise: Compute the mean using a blocked algorithm Now that we've seen the simple example above, try doing a slightly more complicated problem. Compute the mean of the array, assuming for a moment that we don't happen to already know how many elements are in the data. You can do this by changing the code above with the following alterations: 1. Compute the sum of each block 2. Compute the length of each block 3. Compute the sum of the 1,000 intermediate sums and the sum of the 1,000 intermediate lengths and divide one by the other This approach is overkill for our case but does nicely generalize if we don't know the size of the array or individual blocks beforehand. 
``` # Compute the mean of the array sums = [] lengths = [] for i in range(0, 1_000_000_000, 1_000_000): chunk = dset[i: i + 1_000_000] # pull out numpy array sums.append(chunk.sum()) lengths.append(len(chunk)) total = sum(sums) length = sum(lengths) print(total / length) ``` `dask.array` contains these algorithms -------------------------------------------- Dask.array is a NumPy-like library that does these kinds of tricks to operate on large datasets that don't fit into memory. It extends beyond the linear problems discussed above to full N-Dimensional algorithms and a decent subset of the NumPy interface. **Create `dask.array` object** You can create a `dask.array` `Array` object with the `da.from_array` function. This function accepts 1. `data`: Any object that supports NumPy slicing, like `dset` 2. `chunks`: A chunk size to tell us how to block up our array, like `(1_000_000,)` ``` import dask.array as da x = da.from_array(dset, chunks=(1_000_000,)) x ``` **Manipulate `dask.array` object as you would a numpy array** Now that we have an `Array` we perform standard numpy-style computations like arithmetic, mathematics, slicing, reductions, etc.. The interface is familiar, but the actual work is different. `dask_array.sum()` does not do the same thing as `numpy_array.sum()`. **What's the difference?** `dask_array.sum()` builds an expression of the computation. It does not do the computation yet. `numpy_array.sum()` computes the sum immediately. *Why the difference?* Dask arrays are split into chunks. Each chunk must have computations run on that chunk explicitly. If the desired answer comes from a small slice of the entire dataset, running the computation over all data would be wasteful of CPU and memory. ``` result = x.sum() result ``` **Compute result** Dask.array objects are lazily evaluated. Operations like `.sum` build up a graph of blocked tasks to execute. We ask for the final result with a call to `.compute()`. This triggers the actual computation. ``` result.compute() ``` ### Exercise: Compute the mean And the variance, std, etc.. This should be a small change to the example above. Look at what other operations you can do with the Jupyter notebook's tab-completion. Does this match your result from before? Performance and Parallelism ------------------------------- <img src="images/fail-case.gif" width="40%" align="right"> In our first examples we used `for` loops to walk through the array one block at a time. For simple operations like `sum` this is optimal. However for complex operations we may want to traverse through the array differently. In particular we may want the following: 1. Use multiple cores in parallel 2. Chain operations on a single blocks before moving on to the next one `Dask.array` translates your array operations into a graph of inter-related tasks with data dependencies between them. Dask then executes this graph in parallel with multiple threads. We'll discuss more about this in the next section. ### Example 1. Construct a 20000x20000 array of normally distributed random values broken up into 1000x1000 sized chunks 2. Take the mean along one axis 3. 
Take every 100th element ``` import numpy as np import dask.array as da x = da.random.normal(10, 0.1, size=(20000, 20000), # 400 million element array chunks=(1000, 1000)) # Cut into 1000x1000 sized chunks y = x.mean(axis=0)[::100] # Perform NumPy-style operations x.nbytes / 1e9 # Gigabytes of the input processed lazily %%time y.compute() # Time to compute the result ``` Performance comparison --------------------------- The following experiment was performed on a heavy personal laptop. Your performance may vary. If you attempt the NumPy version then please ensure that you have more than 4GB of main memory. **NumPy: 19s, Needs gigabytes of memory** ```python import numpy as np %%time x = np.random.normal(10, 0.1, size=(20000, 20000)) y = x.mean(axis=0)[::100] y CPU times: user 19.6 s, sys: 160 ms, total: 19.8 s Wall time: 19.7 s ``` **Dask Array: 4s, Needs megabytes of memory** ```python import dask.array as da %%time x = da.random.normal(10, 0.1, size=(20000, 20000), chunks=(1000, 1000)) y = x.mean(axis=0)[::100] y.compute() CPU times: user 29.4 s, sys: 1.07 s, total: 30.5 s Wall time: 4.01 s ``` **Discussion** Notice that the Dask array computation ran in 4 seconds, but used 29.4 seconds of user CPU time. The numpy computation ran in 19.7 seconds and used 19.6 seconds of user CPU time. Dask finished faster, but used more total CPU time because Dask was able to transparently parallelize the computation because of the chunk size. *Questions* * What happens if the dask chunks=(20000,20000)? * Will the computation run in 4 seconds? * How much memory will be used? * What happens if the dask chunks=(25,25)? * What happens to CPU and memory? ### Exercise: Meteorological data There is 2GB of somewhat artifical weather data in HDF5 files in `data/weather-big/*.hdf5`. We'll use the `h5py` library to interact with this data and `dask.array` to compute on it. Our goal is to visualize the average temperature on the surface of the Earth for this month. This will require a mean over all of this data. We'll do this in the following steps 1. Create `h5py.Dataset` objects for each of the days of data on disk (`dsets`) 2. Wrap these with `da.from_array` calls 3. Stack these datasets along time with a call to `da.stack` 4. Compute the mean along the newly stacked time axis with the `.mean()` method 5. Visualize the result with `matplotlib.pyplot.imshow` ``` %run prep.py -d weather import h5py from glob import glob import os filenames = sorted(glob(os.path.join('data', 'weather-big', '*.hdf5'))) dsets = [h5py.File(filename, mode='r')['/t2m'] for filename in filenames] dsets[0] dsets[0][:5, :5] # Slicing into h5py.Dataset object gives a numpy array %matplotlib inline import matplotlib.pyplot as plt fig = plt.figure(figsize=(16, 8)) plt.imshow(dsets[0][::4, ::4], cmap='RdBu_r'); ``` **Integrate with `dask.array`** Make a list of `dask.array` objects out of your list of `h5py.Dataset` objects using the `da.from_array` function with a chunk size of `(500, 500)`. ``` arrays = [da.from_array(dset, chunks=(500, 500)) for dset in dsets] arrays ``` **Stack this list of `dask.array` objects into a single `dask.array` object with `da.stack`** Stack these along the first axis so that the shape of the resulting array is `(31, 5760, 11520)`. 
``` x = da.stack(arrays, axis=0) x ``` **Plot the mean of this array along the time (`0th`) axis** ``` # complete the following: fig = plt.figure(figsize=(16, 8)) plt.imshow(..., cmap='RdBu_r') result = x.mean(axis=0) fig = plt.figure(figsize=(16, 8)) plt.imshow(result, cmap='RdBu_r'); ``` **Plot the difference of the first day from the mean** ``` result = x[0] - x.mean(axis=0) fig = plt.figure(figsize=(16, 8)) plt.imshow(result, cmap='RdBu_r'); ``` ### Exercise: Subsample and store In the above exercise the result of our computation is small, so we can call `compute` safely. Sometimes our result is still too large to fit into memory and we want to save it to disk. In these cases you can use one of the following two functions 1. `da.store`: Store dask.array into any object that supports numpy setitem syntax, e.g. f = h5py.File('myfile.hdf5') output = f.create_dataset(shape=..., dtype=...) da.store(my_dask_array, output) 2. `da.to_hdf5`: A specialized function that creates and stores a `dask.array` object into an `HDF5` file. da.to_hdf5('data/myfile.hdf5', '/output', my_dask_array) The task in this exercise is to **use numpy step slicing to subsample the full dataset by a factor of two in both the latitude and longitude direction and then store this result to disk** using one of the functions listed above. As a reminder, Python slicing takes three elements start:stop:step >>> L = [1, 2, 3, 4, 5, 6, 7] >>> L[::3] [1, 4, 7] ``` # ... import h5py from glob import glob import os import dask.array as da filenames = sorted(glob(os.path.join('data', 'weather-big', '*.hdf5'))) dsets = [h5py.File(filename, mode='r')['/t2m'] for filename in filenames] arrays = [da.from_array(dset, chunks=(500, 500)) for dset in dsets] x = da.stack(arrays, axis=0) result = x[:, ::2, ::2] da.to_zarr(result, os.path.join('data', 'myfile.zarr'), overwrite=True) ``` ## Example: Lennard-Jones potential The [Lennard-Jones potential](https://en.wikipedia.org/wiki/Lennard-Jones_potential) is used in partical simuluations in physics, chemistry and engineering. It is highly parallelizable. First, we'll run and profile the Numpy version on 7,000 particles. ``` import numpy as np # make a random collection of particles def make_cluster(natoms, radius=40, seed=1981): np.random.seed(seed) cluster = np.random.normal(0, radius, (natoms,3))-0.5 return cluster def lj(r2): sr6 = (1./r2)**3 pot = 4.*(sr6*sr6 - sr6) return pot # build the matrix of distances def distances(cluster): diff = cluster[:, np.newaxis, :] - cluster[np.newaxis, :, :] mat = (diff*diff).sum(-1) return mat # the lj function is evaluated over the upper traingle # after removing distances near zero def potential(cluster): d2 = distances(cluster) dtri = np.triu(d2) energy = lj(dtri[dtri > 1e-6]).sum() return energy cluster = make_cluster(int(7e3), radius=500) %time potential(cluster) ``` Notice that the most time consuming function is `distances`: ``` # this would open in another browser tab # %load_ext snakeviz # %snakeviz potential(cluster) # alternative simple version given text results in this tab %prun -s tottime potential(cluster) ``` ### Dask version Here's the Dask version. Only the `potential` function needs to be rewritten to best utilize Dask. Note that `da.nansum` has been used over the full $NxN$ distance matrix to improve parallel efficiency. ``` import dask.array as da # compute the potential on the entire # matrix of distances and ignore division by zero def potential_dask(cluster): d2 = distances(cluster) energy = da.nansum(lj(d2))/2. 
return energy ``` Let's convert the NumPy array to a Dask array. Since the entire NumPy array fits in memory it is more computationally efficient to chunk the array by number of CPU cores. ``` from os import cpu_count dcluster = da.from_array(cluster, chunks=cluster.shape[0]//cpu_count()) ``` This step should scale quite well with number of cores. The warnings are complaining about dividing by zero, which is why we used `da.nansum` in `potential_dask`. ``` e = potential_dask(dcluster) %time e.compute() ``` Limitations ----------- Dask Array does not implement the entire numpy interface. Users expecting this will be disappointed. Notably Dask Array has the following failings: 1. Dask does not implement all of ``np.linalg``. This has been done by a number of excellent BLAS/LAPACK implementations and is the focus of numerous ongoing academic research projects. 2. Dask Array does not support some operations where the resulting shape depends on the values of the array. For those that it does support (for example, masking one Dask Array with another boolean mask), the chunk sizes will be unknown, which may cause issues with other operations that need to know the chunk sizes. 3. Dask Array does not attempt operations like ``sort`` which are notoriously difficult to do in parallel and are of somewhat diminished value on very large data (you rarely actually need a full sort). Often we include parallel-friendly alternatives like ``topk``. 4. Dask development is driven by immediate need, and so many lesser used functions, like ``np.sometrue`` have not been implemented purely out of laziness. These would make excellent community contributions. * [Array documentation](https://docs.dask.org/en/latest/array.html) * [Array screencast](https://youtu.be/9h_61hXCDuI) * [Array API](https://docs.dask.org/en/latest/array-api.html) * [Array examples](https://examples.dask.org/array.html) ``` client.shutdown() ```
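For completeness, one possible answer to the "compute the mean (and the variance, std, etc.)" exercise earlier in this notebook is sketched below. It assumes the same 20000×20000 random array used in the timing example and simply builds the lazy reductions before a single `compute` call, so the random chunks are only generated once.

```
# Illustrative solution sketch for the mean/std exercise
import dask
import dask.array as da

x = da.random.normal(10, 0.1, size=(20000, 20000), chunks=(1000, 1000))

mean = x.mean()   # lazy task graph, nothing computed yet
std = x.std()     # also lazy

# Evaluate both graphs in one pass so shared intermediate chunks are reused
mean_value, std_value = dask.compute(mean, std)
print(mean_value, std_value)
```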
``` import os as os import requests as req import zipfile as zipfile from datetime import datetime from dateutil.relativedelta import relativedelta def data_directories(): """ Funcao de criacao das pastas para armazenamento dos dados, caso nao existam. """ # Diretorios para armazenamento dos dados coletados directories = ['..\\data\\01-collected' ,'..\\data\\02-cleaned' ,'..\\data\\03-organized' ,'..\\data\\04-standardized'] # Log: Mensagem de inicio da criacao dos diretorios print(str(datetime.now()) + ': Criacao dos diretorios para armazenamento dos dados iniciada.') # Para cada diretorio na lista de diretorios for directory in directories: # Verifica se o diretorio existe if not os.path.exists(directory): # Cria o diretorio os.makedirs(directory) # Log: Mensagem de criacao do diretorio print(str(datetime.now()) + ': Diretorio ' + directory + ' criado.') else: # Log: Mensagem de diretorio existente print(str(datetime.now()) + ': Diretorio ' + directory + ' ja existe.') # Log: Mensagem de fim da criacao dos diretorios print(str(datetime.now()) + ': Criacao dos diretorios para armazenamento dos dados finalizada.') def download_file(**kwargs): """ Funcao que realiza o download do arquivo. Ao final do download, e verificado se o arquivo solicitado esta vazio. Caso esteja vazio, o arquivo e apagado. Caso contrario, o arquivo e descompactado. Args: url_query (str): URL parametrizada para download do arquivo. download_path (str): Caminho do diretorio onde sera salvo o arquivo. download_directory (str): Caminho do diretorio para decompactacao do arquivo """ # Log: Mensagem de inicio do download do arquivo print(str(datetime.now()) + ': Inicio do download do arquivo ' + kwargs['download_path']) # Prepara a requisicao de download do arquivo res = req.get(kwargs['url_query'], stream = True) # Se for encontrada a URL para download if res.status_code == 200: # Faz o download do arquivo with open(kwargs['download_path'], 'wb') as f: # Log: Mensagem de URL encontrada e de inicio do download print(str(datetime.now()) + ': URL encontrada, fazendo o download do arquivo: ' + kwargs['download_path']) # Download do arquivo em pedacos for chunk in res.iter_content(chunk_size = 1024): f.write(chunk) # Verifica o tamanho do arquivo recebido file_size = os.stat(kwargs['download_path']).st_size # Se o tamanho do arquivo for muito # pequeno remove o arquivo da pasta if file_size <= 100: # Log: Mensagem de remocao por arquivo por nao conter dados print(str(datetime.now()) + ': Arquivo ' + kwargs['download_path'] + ' removido por conter poucos dados.') # Remove o arquivo sem dados os.remove(kwargs['download_path']) # Caso contrario, descompacta o arquivo csv # na pasta e remove o arquivo compactado else: # Log: Mensagem de inicio da descompactacao do arquivo zip print(str(datetime.now()) + ': Descompactando o arquivo ' + kwargs['download_path']) # Descompacta o arquivo zip mantem apenas o csv zip_ref = zipfile.ZipFile(kwargs['download_path'], 'r') zip_ref.extractall(path=kwargs['download_directory']) zip_ref.close() os.remove(kwargs['download_path']) # log: Mensagem de finalizacao do download do arquivo print(str(datetime.now()) + ': Fim do download do arquivo ' + kwargs['download_path']) else: print(str(datetime.now()) + ': Nao foram encontrados arquivos referentes ao ano de ' + kwargs['file_date_ref']) def collect_data(**kwargs): """ Coleta os dados para cada ano de referencia dentro das datas de inicio e fim informadas. 
Args: dt_ini_ref (date): Data inicial para coleta dos dados dt_fim_ref (date): Data final para coleta dos dados """ # Cria os diretorios para armazenamento dos dados data_directories() # Define o diretorio para download dos arquivos kwargs['download_directory'] = '..\\data\\01-collected\\' # Define a URL base de busca dos arquivos para download kwargs['download_url'] = 'http://arquivos.prf.gov.br/arquivos/index.php/s/' # Define o nome e extensao do arquivo para download kwargs['file_names'] = ['jdDLrQIf33xXSCe' # Referente a 2020 ,'kRBUylqz6DyQznN' # Referente a 2019 ,'MaC6cieXSFACNWT' # Referente a 2018 ,'nqvFu7xEF6HhbAq' # Referente a 2017 ] kwargs['file_ext'] = '.zip' # Log: Mensagem de inicio do processo print(str(datetime.now()) + ': Coleta dos arquivos iniciada.') # Para cada mes de referencia dentro das datas de inicio e fim for file in kwargs['file_names']: # Prepara as variaveis do mes de referencia kwargs['download_file'] = file + kwargs['file_ext'] # Define a URL completa para download do arquivo kwargs['url_query'] = kwargs['download_url'] + file + '/download' # Define o caminho completo para armazenamento do arquivo de download kwargs['download_path'] = kwargs['download_directory'] + kwargs['download_file'] # Inicia o download do arquivo download_file(**kwargs) # Log: Mensagem de inicio da remocao de arquivos nao relacionados com os dados print(str(datetime.now()) + ': Verificando a existencia de arquivos nao relacionados com os dados.') # Remove arquivos nao relacionados com os dados (Ex.: leiame.pdf) downloaded_files = os.listdir(kwargs['download_directory']) unrelated_files = [file for file in downloaded_files if file.endswith('.pdf')] for file in unrelated_files: file_to_remove = os.path.join(kwargs['download_directory'], file) os.remove(file_to_remove) # Log: Mensagem de remocao de arquivo nao relacionado com os dados print(str(datetime.now()) + ': Arquivo ' + file_to_remove + ' removido.') # Log: Mensagem de fim da remocao de arquivos nao relacionados com os dados print(str(datetime.now()) + ': Remocao dos arquivos nao relacionados com os dados concluida.') # Log: Mensagem de finalizacao do processo print(str(datetime.now()) + ': Coleta dos arquivos finalizada.') def main(): """ Coleta os dados de acidentes por ocorrencia nas rodovias federais. Origem: Policia Rodoviaria Federal (PRF) """ # Prepara o dicionario de variaveis (kwargs = keyworded arguments) kwargs = {} # Coleta os arquivos collect_data(**kwargs) if __name__ == '__main__': main() ```
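As a design note on the `data_directories` function above: `os.makedirs` accepts `exist_ok=True`, which removes the need for the explicit `os.path.exists` check, and `os.path.join` avoids the hard-coded Windows backslashes. A compact, behavior-equivalent sketch (minus the logging) might look like the following; it is only an alternative formulation, not the script's actual code.

```
import os

def data_directories(directories=None):
    """Create the data folders if they do not already exist."""
    if directories is None:
        directories = [
            os.path.join('..', 'data', '01-collected'),
            os.path.join('..', 'data', '02-cleaned'),
            os.path.join('..', 'data', '03-organized'),
            os.path.join('..', 'data', '04-standardized'),
        ]
    for directory in directories:
        os.makedirs(directory, exist_ok=True)  # no-op if the folder already exists
```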
github_jupyter
import os as os import requests as req import zipfile as zipfile from datetime import datetime from dateutil.relativedelta import relativedelta def data_directories(): """ Funcao de criacao das pastas para armazenamento dos dados, caso nao existam. """ # Diretorios para armazenamento dos dados coletados directories = ['..\\data\\01-collected' ,'..\\data\\02-cleaned' ,'..\\data\\03-organized' ,'..\\data\\04-standardized'] # Log: Mensagem de inicio da criacao dos diretorios print(str(datetime.now()) + ': Criacao dos diretorios para armazenamento dos dados iniciada.') # Para cada diretorio na lista de diretorios for directory in directories: # Verifica se o diretorio existe if not os.path.exists(directory): # Cria o diretorio os.makedirs(directory) # Log: Mensagem de criacao do diretorio print(str(datetime.now()) + ': Diretorio ' + directory + ' criado.') else: # Log: Mensagem de diretorio existente print(str(datetime.now()) + ': Diretorio ' + directory + ' ja existe.') # Log: Mensagem de fim da criacao dos diretorios print(str(datetime.now()) + ': Criacao dos diretorios para armazenamento dos dados finalizada.') def download_file(**kwargs): """ Funcao que realiza o download do arquivo. Ao final do download, e verificado se o arquivo solicitado esta vazio. Caso esteja vazio, o arquivo e apagado. Caso contrario, o arquivo e descompactado. Args: url_query (str): URL parametrizada para download do arquivo. download_path (str): Caminho do diretorio onde sera salvo o arquivo. download_directory (str): Caminho do diretorio para decompactacao do arquivo """ # Log: Mensagem de inicio do download do arquivo print(str(datetime.now()) + ': Inicio do download do arquivo ' + kwargs['download_path']) # Prepara a requisicao de download do arquivo res = req.get(kwargs['url_query'], stream = True) # Se for encontrada a URL para download if res.status_code == 200: # Faz o download do arquivo with open(kwargs['download_path'], 'wb') as f: # Log: Mensagem de URL encontrada e de inicio do download print(str(datetime.now()) + ': URL encontrada, fazendo o download do arquivo: ' + kwargs['download_path']) # Download do arquivo em pedacos for chunk in res.iter_content(chunk_size = 1024): f.write(chunk) # Verifica o tamanho do arquivo recebido file_size = os.stat(kwargs['download_path']).st_size # Se o tamanho do arquivo for muito # pequeno remove o arquivo da pasta if file_size <= 100: # Log: Mensagem de remocao por arquivo por nao conter dados print(str(datetime.now()) + ': Arquivo ' + kwargs['download_path'] + ' removido por conter poucos dados.') # Remove o arquivo sem dados os.remove(kwargs['download_path']) # Caso contrario, descompacta o arquivo csv # na pasta e remove o arquivo compactado else: # Log: Mensagem de inicio da descompactacao do arquivo zip print(str(datetime.now()) + ': Descompactando o arquivo ' + kwargs['download_path']) # Descompacta o arquivo zip mantem apenas o csv zip_ref = zipfile.ZipFile(kwargs['download_path'], 'r') zip_ref.extractall(path=kwargs['download_directory']) zip_ref.close() os.remove(kwargs['download_path']) # log: Mensagem de finalizacao do download do arquivo print(str(datetime.now()) + ': Fim do download do arquivo ' + kwargs['download_path']) else: print(str(datetime.now()) + ': Nao foram encontrados arquivos referentes ao ano de ' + kwargs['file_date_ref']) def collect_data(**kwargs): """ Coleta os dados para cada ano de referencia dentro das datas de inicio e fim informadas. 
Args: dt_ini_ref (date): Data inicial para coleta dos dados dt_fim_ref (date): Data final para coleta dos dados """ # Cria os diretorios para armazenamento dos dados data_directories() # Define o diretorio para download dos arquivos kwargs['download_directory'] = '..\\data\\01-collected\\' # Define a URL base de busca dos arquivos para download kwargs['download_url'] = 'http://arquivos.prf.gov.br/arquivos/index.php/s/' # Define o nome e extensao do arquivo para download kwargs['file_names'] = ['jdDLrQIf33xXSCe' # Referente a 2020 ,'kRBUylqz6DyQznN' # Referente a 2019 ,'MaC6cieXSFACNWT' # Referente a 2018 ,'nqvFu7xEF6HhbAq' # Referente a 2017 ] kwargs['file_ext'] = '.zip' # Log: Mensagem de inicio do processo print(str(datetime.now()) + ': Coleta dos arquivos iniciada.') # Para cada mes de referencia dentro das datas de inicio e fim for file in kwargs['file_names']: # Prepara as variaveis do mes de referencia kwargs['download_file'] = file + kwargs['file_ext'] # Define a URL completa para download do arquivo kwargs['url_query'] = kwargs['download_url'] + file + '/download' # Define o caminho completo para armazenamento do arquivo de download kwargs['download_path'] = kwargs['download_directory'] + kwargs['download_file'] # Inicia o download do arquivo download_file(**kwargs) # Log: Mensagem de inicio da remocao de arquivos nao relacionados com os dados print(str(datetime.now()) + ': Verificando a existencia de arquivos nao relacionados com os dados.') # Remove arquivos nao relacionados com os dados (Ex.: leiame.pdf) downloaded_files = os.listdir(kwargs['download_directory']) unrelated_files = [file for file in downloaded_files if file.endswith('.pdf')] for file in unrelated_files: file_to_remove = os.path.join(kwargs['download_directory'], file) os.remove(file_to_remove) # Log: Mensagem de remocao de arquivo nao relacionado com os dados print(str(datetime.now()) + ': Arquivo ' + file_to_remove + ' removido.') # Log: Mensagem de fim da remocao de arquivos nao relacionados com os dados print(str(datetime.now()) + ': Remocao dos arquivos nao relacionados com os dados concluida.') # Log: Mensagem de finalizacao do processo print(str(datetime.now()) + ': Coleta dos arquivos finalizada.') def main(): """ Coleta os dados de acidentes por ocorrencia nas rodovias federais. Origem: Policia Rodoviaria Federal (PRF) """ # Prepara o dicionario de variaveis (kwargs = keyworded arguments) kwargs = {} # Coleta os arquivos collect_data(**kwargs) if __name__ == '__main__': main()
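A side note on the hard-coded `'..\\data\\...'` strings above: they tie the script to Windows path separators. Below is a small, purely illustrative sketch of an OS-independent alternative using `os.path.join`; it is not the author's code, just a design note.

```python
import os

# OS-independent equivalents of the hard-coded Windows-style paths
DATA_ROOT = os.path.join('..', 'data')
DOWNLOAD_DIR = os.path.join(DATA_ROOT, '01-collected')

# e.g. building one download path from a PRF file id used above
download_path = os.path.join(DOWNLOAD_DIR, 'jdDLrQIf33xXSCe' + '.zip')
print(download_path)
```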
0.182426
0.255948
# Trabajo de Fin de Grado ## Predicción de eventos cardiovasculares y hemorrágicos en pacientes con doble antiagregación con modelos machine learning. ##### Pablo Pérez Sánchez --- # 1. Preprocess Database --- **Import libarys** ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt %pylab inline ``` --- **Import data** ``` df = pd.read_excel('../../data/PACS_DAPT_completo.xlsx') # df.head() ``` --- **Preprocess Database** 1. Pasamos las variables categoricas a una clasificación binaria. 2. Creamos una nueva variable "IMC". 3. Crearemos dos nuevas variable a partir de LVEFgroups. 4. Establaceremos bien los tiempos de sangrado y reinfarto en el registro "Vigo_Arritxaca". 5. Creamos nuevas variables que clasificaran el infarto/sangrado a los antes de los 9 meses, entre los 9 y 15 meses y despues de los 15 meses. --- **1. Pasamos las variables categoricas a una clasificación binaria.** "Female" = 1 / "Male" = 2 "No" = 0 / "Yes" = 1 ``` #variables que es necesario realizar el cambio replace_vars = ['Sex','Diabetes', 'Hypertension', 'Dyslipemia', 'PeripheralArteryDisease', 'PriorMyocardialInfarction', 'PriorPCI', 'PriorCABG', 'PriorStroke', 'Cancer', 'Prior_Bleeding', 'STEMI', 'UnstableAngina', 'Killip2orMore', 'Femoral_Access', 'Multivessel', 'DrugElutingStent', 'Revascularization_Complete', 'ReAMI_inhospital', 'Bleeding_inhospital', 'Transfusion_IH', 'Clopidogrel', 'Ticagrelor', 'Prasugrel', 'OralAnticoagulation', 'Betablockers', 'ACEI_ARB', 'Statin', 'Death', 'ReInfarction','Bleeding', 'Transfusion'] #creamos un diccionario con los cambios dic = {'Female': 1, 'Male': 2, 'Yes': 1, 'No': 0, np.nan:np.nan,'NaN':np.nan, 1:1, 0:0} #Realizamos el cambio for i in replace_vars: #print(i) df[i] = df[i].apply(lambda x:dic[x]) ``` --- **2. Creamos una nueva variable "IMC".** ![image.png](attachment:image.png "IMC = Peso(kg)/Talla(m)^2") - Bleemacs: Ni talla ni peso. - Renami: Talla = IMC. - Vigo_Arritxaca: Peso y Talla, calculamos IMC Antes de realizar los cambios, tenemos varios pacientes con datos incoherentes en la base de datos. ``` df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']<100), ['Id','Talla']].shape ``` Tenemos 25 pacientes en los que "Talla" es menor a 100 cm: - 20 pacientes con Talla: -1 - 3 pacientes con Talla: 1 - 1 pacientes con Talla: 0 - 1 paciente con Talla: 19 ¿Los sutituimos por la media? ``` # Calculamos el IMC: # - BLEEMACS no tenemos ni talla ni peso # - RENAMI la talla es el IMC y df['IMC'] = df.loc[df['Registro']=='RENAMI', 'Talla'] # - Vigo_Arritxaca la calculamos sin meter los valores menores a 100cm df['IMC'] = df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']>100), 'Peso'] / ((df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']>100), 'Talla']/100) * (df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']>100), 'Talla']/100)) # Los 24 pacientes con Talla = (-1, 0, 1), son sustituidos por la media. IMC_media_Vigo = df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']>100), 'IMC'].mean() df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']==-1) ,'IMC'] = IMC_media_Vigo df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']==0) ,'IMC'] = IMC_media_Vigo df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']==1) ,'IMC'] = IMC_media_Vigo df.loc[df['Id']==24445,'IMC'] = IMC_media_Vigo # # El paciente con Id: 24445 tiene Talla = 19, la sustituimos por Talla = 190 # df.loc[df['Id']==24445,'Talla'] = 190 ``` --- **3. 
Crearemos dos nuevas variable a partir de "LVEFgroups".** A partir de "LVEFgroups" obtenemos las variables: "LVEFless40", "LVEFless50" y "LVEFafter50" ``` # Creamos dos nuevas columnas equivalente a LVEFless40 df['LVEFless50'] = 0 df['LVEFafter50'] = 0 df.loc[~(df['LVEFgroups']=='LVEF 40-49'),'LVEFless50'] = 1 # df.loc[~(df['LVEFgroups']=='LVEF > 50'),'LVEFless50'] = 1 # Si queremos hacerlo de manera escalonada df.loc[~(df['LVEFgroups']=='LVEF > 50'),'LVEFafter50'] = 1 ``` --- **4. Establaceremos bien los tiempos de sangrado y reinfarto en el registro "Vigo_Arritxaca".** Usaré "evento2 para referirme a reinfarto o sangrado **"LengthDapt_months"**: tiempo de medicación, tiempo desde que se da el paciente de alta hasta el siguiente evento. **"Months_reAMI"**:tiempo de reinfarto, tiempo desde el primer infarto hasta el segundo. **"Months_Bleeding"**:tiempo de sangrado, tiempo desde el primer infarto hasta el sangrado. Despues de cualquier evento se cambia la medicación. Ejemplo: ![image.png](attachment:image.png) Despues del reinfarto, se cambia la medicación, entonces el sangrado ya no es valido. Debemos establecer el tiempo de medidacación igual al tiempo de reinfarto. **Crearemos dos nuevas variables:"ReInfarctionDapt", "ReInfarctionAfterDapt", "BleedingDapt" y "BleedinfAfterDapt" para asi poder realiar un estudio durante la medicación y depsues de la medicación** Sera necesario cambiar las funciones creadas abajo y realizar unas nuevas. ``` # Creamos una funcion para establecer bien los tiempos de medicación en Vigo_Arritxaca. def tiempoDapt(x): if x['Registro']=='Vigo_Arritxaca': x['LengthDapt_months'] = 0.5 + x['LengthDapt_months'] return x['LengthDapt_months'] df['LengthDapt_months'] = df.apply(tiempoDapt, axis = 1) # Creamos diferentes funciones para luego limpiar la base de que se produzcan más de un evento. 
# Con esta función cambiamos el estado de si ha sangrado o no el paciente def sangradoFalso(x): if x['Bleeding']==1 and x['ReInfarction'] == 1 and x['Months_reAMI'] < x['Months_Bleeding']: x['Bleeding'] = 0 return x['Bleeding'] # Con esta función cambiamos el estado de si ha sufrido o no otro infarto el paciente def reamiFalso(x): if x['Bleeding']==1 and x['ReInfarction'] == 1 and x['Months_Bleeding'] < x['Months_reAMI']: x['ReInfarction'] = 0 return x['ReInfarction'] %%time # Ejecutamos las funciones enteriores en nuestra base de datos df['Bleeding'] = df.apply(sangradoFalso, axis = 1) df['ReInfarction']= df.apply(reamiFalso, axis = 1) # Creamos diferentes funciones para la creación de las nuevas variables de los eventos durante y depues de la medicacion # Con esta función vemos si ha sangrado el paciente durante la medicacion def sangradoDapt(x): if x['Bleeding']==1 and x['LengthDapt_months'] >= x['Months_Bleeding']: x['BleedingDapt'] = 1 return x['BleedingDapt'] # Con esta función vemos si ha sangrado el paciente despues de la medicacion def sangradoAfterDapt(x): if x['Bleeding']==1 and x['LengthDapt_months'] < x['Months_Bleeding']: x['BleedinfAfterDapt'] = 1 return x['BleedinfAfterDapt'] # Con esta función vemos si ha sufrido otro infarto el paciente durante la medicacion def reamiDapt(x): if x['ReInfarction'] == 1 and x['LengthDapt_months'] >= x['Months_reAMI']: x['ReInfarctionDapt'] = 1 return x['ReInfarctionDapt'] # Con esta función vemos si ha sufrido otro infarto el paciente despues de la medicacion def reamiAfterDapt(x): if x['ReInfarction'] == 1 and x['LengthDapt_months'] < x['Months_reAMI']: x['ReInfarctionAfterDapt'] = 1 return x['ReInfarctionAfterDapt'] %%time df['BleedingDapt'] = 0 df['BleedinfAfterDapt'] = 0 df['ReInfarctionDapt'] = 0 df['ReInfarctionAfterDapt'] = 0 # Ejecutamos las funciones enteriores en nuestra base de datos df['BleedingDapt'] = df.apply(sangradoDapt, axis = 1) df['BleedinfAfterDapt'] = df.apply(sangradoAfterDapt, axis = 1) df['ReInfarctionDapt']= df.apply(reamiDapt, axis = 1) df['ReInfarctionAfterDapt']= df.apply(reamiAfterDapt, axis = 1) ``` --- **5. Creamos nuevas variables que clasificaran el infarto/sangrado a los 6 meses, 12 meses y 24 meses.** ``` # DE MIENTRAS LA MEDICACIÓN # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 6 meses. df['reAMIless6Dapt'] = 0 df.loc[(df['Months_reAMI'] <= 6) & (df['ReInfarctionDapt']==1), 'reAMIless6Dapt'] = 1 df['bleedingless6Dapt'] = 0 df.loc[(df['Months_Bleeding'] <= 6) & (df['BleedingDapt']==1), 'bleedingless6Dapt'] = 1 # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 12 meses. df['reAMIless12Dapt'] = 0 df.loc[(df['Months_reAMI'] <= 12) & (df['ReInfarctionDapt']==1), 'reAMIless12Dapt'] = 1 df['bleedingless12Dapt'] = 0 df.loc[(df['Months_Bleeding'] <= 12) & (df['BleedingDapt']==1), 'bleedingless12Dapt'] = 1 # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 24 meses. df['reAMIless24Dapt'] = 0 df.loc[(df['Months_reAMI'] <= 24) & (df['ReInfarctionDapt']==1), 'reAMIless24Dapt'] = 1 df['bleedingless24Dapt'] = 0 df.loc[(df['Months_Bleeding'] <= 24) & (df['BleedingDapt']==1), 'bleedingless24Dapt'] = 1 ``` Como el estudio se realizará durante la medicación, no es necesario ejecutar el siguiente bloque de comandos ``` # # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 6 meses. 
# df['reAMIless6'] = 0 # df.loc[(df['Months_reAMI'] <= 6) & (df['ReInfarction']==1), 'reAMIless6'] = 1 # df['bleedingless6'] = 0 # df.loc[(df['Months_Bleeding'] <= 6) & (df['Bleeding']==1), 'bleedingless6'] = 1 # # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 12 meses. # df['reAMIless12'] = 0 # df.loc[(df['Months_reAMI'] <= 12) & (df['ReInfarction']==1), 'reAMIless12'] = 1 # df['bleedingless12'] = 0 # df.loc[(df['Months_Bleeding'] <= 12) & (df['Bleeding']==1), 'bleedingless12'] = 1 # # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 24 meses. # df['reAMIless24'] = 0 # df.loc[(df['Months_reAMI'] <= 24) & (df['ReInfarction']==1), 'reAMIless24'] = 1 # df['bleedingless24'] = 0 # df.loc[(df['Months_Bleeding'] <= 24) & (df['Bleeding']==1), 'bleedingless24'] = 1 ``` --- **Export Preprocess Database** ``` # df.to_excel('../../data/PACS_DAPT_preprocess.xlsx') df.to_csv('../../data/PACS_DAPT_preprocess.csv') ```
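An implementation note on step 4 above: the event-cleaning and DAPT-window columns are built with row-wise `df.apply` calls (hence the `%%time` cells). The sketch below shows an equivalent vectorized formulation with boolean masks; it is illustrative only, reuses the column names from the notebook (including the original `BleedinfAfterDapt` spelling), and assumes it runs in the same order as the cells above.

```python
# Keep only the earlier of the two events when both occurred
# (vectorized version of sangradoFalso / reamiFalso)
both = (df['Bleeding'] == 1) & (df['ReInfarction'] == 1)
df.loc[both & (df['Months_reAMI'] < df['Months_Bleeding']), 'Bleeding'] = 0
df.loc[both & (df['Months_Bleeding'] < df['Months_reAMI']), 'ReInfarction'] = 0

# Events during vs. after DAPT (vectorized version of sangradoDapt & co.)
df['BleedingDapt'] = ((df['Bleeding'] == 1) &
                      (df['LengthDapt_months'] >= df['Months_Bleeding'])).astype(int)
df['BleedinfAfterDapt'] = ((df['Bleeding'] == 1) &
                           (df['LengthDapt_months'] < df['Months_Bleeding'])).astype(int)
df['ReInfarctionDapt'] = ((df['ReInfarction'] == 1) &
                          (df['LengthDapt_months'] >= df['Months_reAMI'])).astype(int)
df['ReInfarctionAfterDapt'] = ((df['ReInfarction'] == 1) &
                               (df['LengthDapt_months'] < df['Months_reAMI'])).astype(int)
```

On a few hundred thousand rows this avoids the per-row Python overhead that the `%%time` cells are measuring.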
github_jupyter
import pandas as pd import numpy as np import matplotlib.pyplot as plt %pylab inline df = pd.read_excel('../../data/PACS_DAPT_completo.xlsx') # df.head() #variables que es necesario realizar el cambio replace_vars = ['Sex','Diabetes', 'Hypertension', 'Dyslipemia', 'PeripheralArteryDisease', 'PriorMyocardialInfarction', 'PriorPCI', 'PriorCABG', 'PriorStroke', 'Cancer', 'Prior_Bleeding', 'STEMI', 'UnstableAngina', 'Killip2orMore', 'Femoral_Access', 'Multivessel', 'DrugElutingStent', 'Revascularization_Complete', 'ReAMI_inhospital', 'Bleeding_inhospital', 'Transfusion_IH', 'Clopidogrel', 'Ticagrelor', 'Prasugrel', 'OralAnticoagulation', 'Betablockers', 'ACEI_ARB', 'Statin', 'Death', 'ReInfarction','Bleeding', 'Transfusion'] #creamos un diccionario con los cambios dic = {'Female': 1, 'Male': 2, 'Yes': 1, 'No': 0, np.nan:np.nan,'NaN':np.nan, 1:1, 0:0} #Realizamos el cambio for i in replace_vars: #print(i) df[i] = df[i].apply(lambda x:dic[x]) df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']<100), ['Id','Talla']].shape # Calculamos el IMC: # - BLEEMACS no tenemos ni talla ni peso # - RENAMI la talla es el IMC y df['IMC'] = df.loc[df['Registro']=='RENAMI', 'Talla'] # - Vigo_Arritxaca la calculamos sin meter los valores menores a 100cm df['IMC'] = df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']>100), 'Peso'] / ((df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']>100), 'Talla']/100) * (df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']>100), 'Talla']/100)) # Los 24 pacientes con Talla = (-1, 0, 1), son sustituidos por la media. IMC_media_Vigo = df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']>100), 'IMC'].mean() df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']==-1) ,'IMC'] = IMC_media_Vigo df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']==0) ,'IMC'] = IMC_media_Vigo df.loc[(df['Registro']=='Vigo_Arritxaca') & (df['Talla']==1) ,'IMC'] = IMC_media_Vigo df.loc[df['Id']==24445,'IMC'] = IMC_media_Vigo # # El paciente con Id: 24445 tiene Talla = 19, la sustituimos por Talla = 190 # df.loc[df['Id']==24445,'Talla'] = 190 # Creamos dos nuevas columnas equivalente a LVEFless40 df['LVEFless50'] = 0 df['LVEFafter50'] = 0 df.loc[~(df['LVEFgroups']=='LVEF 40-49'),'LVEFless50'] = 1 # df.loc[~(df['LVEFgroups']=='LVEF > 50'),'LVEFless50'] = 1 # Si queremos hacerlo de manera escalonada df.loc[~(df['LVEFgroups']=='LVEF > 50'),'LVEFafter50'] = 1 # Creamos una funcion para establecer bien los tiempos de medicación en Vigo_Arritxaca. def tiempoDapt(x): if x['Registro']=='Vigo_Arritxaca': x['LengthDapt_months'] = 0.5 + x['LengthDapt_months'] return x['LengthDapt_months'] df['LengthDapt_months'] = df.apply(tiempoDapt, axis = 1) # Creamos diferentes funciones para luego limpiar la base de que se produzcan más de un evento. 
# Con esta función cambiamos el estado de si ha sangrado o no el paciente def sangradoFalso(x): if x['Bleeding']==1 and x['ReInfarction'] == 1 and x['Months_reAMI'] < x['Months_Bleeding']: x['Bleeding'] = 0 return x['Bleeding'] # Con esta función cambiamos el estado de si ha sufrido o no otro infarto el paciente def reamiFalso(x): if x['Bleeding']==1 and x['ReInfarction'] == 1 and x['Months_Bleeding'] < x['Months_reAMI']: x['ReInfarction'] = 0 return x['ReInfarction'] %%time # Ejecutamos las funciones enteriores en nuestra base de datos df['Bleeding'] = df.apply(sangradoFalso, axis = 1) df['ReInfarction']= df.apply(reamiFalso, axis = 1) # Creamos diferentes funciones para la creación de las nuevas variables de los eventos durante y depues de la medicacion # Con esta función vemos si ha sangrado el paciente durante la medicacion def sangradoDapt(x): if x['Bleeding']==1 and x['LengthDapt_months'] >= x['Months_Bleeding']: x['BleedingDapt'] = 1 return x['BleedingDapt'] # Con esta función vemos si ha sangrado el paciente despues de la medicacion def sangradoAfterDapt(x): if x['Bleeding']==1 and x['LengthDapt_months'] < x['Months_Bleeding']: x['BleedinfAfterDapt'] = 1 return x['BleedinfAfterDapt'] # Con esta función vemos si ha sufrido otro infarto el paciente durante la medicacion def reamiDapt(x): if x['ReInfarction'] == 1 and x['LengthDapt_months'] >= x['Months_reAMI']: x['ReInfarctionDapt'] = 1 return x['ReInfarctionDapt'] # Con esta función vemos si ha sufrido otro infarto el paciente despues de la medicacion def reamiAfterDapt(x): if x['ReInfarction'] == 1 and x['LengthDapt_months'] < x['Months_reAMI']: x['ReInfarctionAfterDapt'] = 1 return x['ReInfarctionAfterDapt'] %%time df['BleedingDapt'] = 0 df['BleedinfAfterDapt'] = 0 df['ReInfarctionDapt'] = 0 df['ReInfarctionAfterDapt'] = 0 # Ejecutamos las funciones enteriores en nuestra base de datos df['BleedingDapt'] = df.apply(sangradoDapt, axis = 1) df['BleedinfAfterDapt'] = df.apply(sangradoAfterDapt, axis = 1) df['ReInfarctionDapt']= df.apply(reamiDapt, axis = 1) df['ReInfarctionAfterDapt']= df.apply(reamiAfterDapt, axis = 1) # DE MIENTRAS LA MEDICACIÓN # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 6 meses. df['reAMIless6Dapt'] = 0 df.loc[(df['Months_reAMI'] <= 6) & (df['ReInfarctionDapt']==1), 'reAMIless6Dapt'] = 1 df['bleedingless6Dapt'] = 0 df.loc[(df['Months_Bleeding'] <= 6) & (df['BleedingDapt']==1), 'bleedingless6Dapt'] = 1 # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 12 meses. df['reAMIless12Dapt'] = 0 df.loc[(df['Months_reAMI'] <= 12) & (df['ReInfarctionDapt']==1), 'reAMIless12Dapt'] = 1 df['bleedingless12Dapt'] = 0 df.loc[(df['Months_Bleeding'] <= 12) & (df['BleedingDapt']==1), 'bleedingless12Dapt'] = 1 # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 24 meses. df['reAMIless24Dapt'] = 0 df.loc[(df['Months_reAMI'] <= 24) & (df['ReInfarctionDapt']==1), 'reAMIless24Dapt'] = 1 df['bleedingless24Dapt'] = 0 df.loc[(df['Months_Bleeding'] <= 24) & (df['BleedingDapt']==1), 'bleedingless24Dapt'] = 1 # # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 6 meses. # df['reAMIless6'] = 0 # df.loc[(df['Months_reAMI'] <= 6) & (df['ReInfarction']==1), 'reAMIless6'] = 1 # df['bleedingless6'] = 0 # df.loc[(df['Months_Bleeding'] <= 6) & (df['Bleeding']==1), 'bleedingless6'] = 1 # # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 12 meses. 
# df['reAMIless12'] = 0 # df.loc[(df['Months_reAMI'] <= 12) & (df['ReInfarction']==1), 'reAMIless12'] = 1 # df['bleedingless12'] = 0 # df.loc[(df['Months_Bleeding'] <= 12) & (df['Bleeding']==1), 'bleedingless12'] = 1 # # Creamos una nueva variable, para estududiar si tuvo infarto o sangrado antes de los 24 meses. # df['reAMIless24'] = 0 # df.loc[(df['Months_reAMI'] <= 24) & (df['ReInfarction']==1), 'reAMIless24'] = 1 # df['bleedingless24'] = 0 # df.loc[(df['Months_Bleeding'] <= 24) & (df['Bleeding']==1), 'bleedingless24'] = 1 # df.to_excel('../../data/PACS_DAPT_preprocess.xlsx') df.to_csv('../../data/PACS_DAPT_preprocess.csv')
0.112515
0.884389
# Example 2: Lower crustal anisotropy In this example we generate P receiver functions for a model that includes a lower-crustal anisotropic layer. This example follows that of Figure 2 in [Porter et al. (2011)](#references), which uses the Raysum software developed by [Frederiksen and Bostock (2000)](#references]) Start by importing the necessary modules ``` import numpy as np from obspy.core import Stream from telewavesim import utils as ut from telewavesim import wiggle as wg ``` Select the model file: ``` modfile = '../models/model_Porter2011.txt' ``` Select the type of incident wave - options are `'P'`, `'SV'`, `'SH'`, or `'Si'`, which is an isotropic S-wave source <p class="alert alert-danger"> <strong>Danger!</strong> Using 'SH' will not work properly for modeling receiver functions as the code will think you want plane-wave displacements (see below). Do not use 'SH' if you want S-wave receiver functions. </p> ``` wvtype = 'P' ``` Next we use variables to define the desired time series. <div class="alert alert-warning"> <strong>Warning!</strong> Be careful to use a total length of time large enough to avoid wrap around effects. Sometimes if you see signals arriving at aberrant (early) times, try with either (or both) a greater number of samples or higher sample distance. </div> ``` npts = 3000 # Number of samples dt = 0.01 # Sample distance in seconds ``` Now specify the parameters of the incident wavefield in terms of a horizontal slowness and back-azimuth. In this example the slowness won't change, so we can pass it as a global variable now. The back-azimuth will range from 0 to 360 degrees with a 10-degree increment, so we define a `np.ndarray` for this variable and do not yet pass it as a global variable. ``` slow = 0.06 # Horizontal slowness (or ray parameter) in s/km baz = np.arange(0., 360., 10.) ``` Read the model parameters and return a Model object. Up to here, the steps could have been performed in no particular order, except the name of the file that needs to be defined before the call to `read_model()` ``` model = ut.read_model(modfile) ``` As we need to loop through back-azimuth values, we will initialize empty `Stream` objects to store the traces from the output of the main routine. ``` trR = Stream(); trT = Stream() ``` Now the main loop over back-azimuths where all calculations are done - self explanatory Remember that the `obs` boolean variable defaults to `False`, so if you want to change to `True`, either explicitely set them prior to this step or use the following call to `ut.run_plane()` with argument `obs=True`. Here we are not simulating OBS seismograms, so we don't need to specify anything. ``` # Loop over range of data for bb in baz: # Calculate the plane waves seismograms trxyz = ut.run_plane(model, slow, npts, dt, bb, wvtype=wvtype, obs=False) # Then the transfer functions in Z-R-T coordinate system tfs = ut.tf_from_xyz(trxyz, pvh=False) # Append to streams trR.append(tfs[0]); trT.append(tfs[1]) ``` The result of the previous loop is a set of transfer functions. To get receiver functions, we simply filter the `Stream` objects using some frequency corners ``` # Set frequency corners in Hz f1 = 0.01 f2 = 1.0 # Filter to get wave-like traces trR.filter('bandpass',freqmin=f1, freqmax=f2, corners=2, zerophase=True) trT.filter('bandpass',freqmin=f1, freqmax=f2, corners=2, zerophase=True) ``` Now plot the result as wiggles, using the format displayed in the paper by Porter et al. (2011). 
We also need to define 'stacked traces' that represent the average of all receiver functions to be displayed.

```
# Stack over all traces
trR_stack, trT_stack = ut.stack_all(trR, trT, pws=True)

# Plot as wiggles
wg.rf_wiggles_baz(trR, trT, trR_stack, trT_stack, 'test', btyp='baz',
                  scale=1.e3, tmin=-5., tmax=8., save=False,
                  ftitle='porter2011', wvtype='P')
```

And Voilà! Now try the same example but setting `wvtype = 'SV'` to get S receiver functions for the same model. Such an example would correspond to a core-refracted shear wave (such as SKS) with no incident transverse component. In this case the receiver functions are unstable (spectral division by a zero-valued SH component) and are not computed for the transverse component. Setting `wvtype = 'Si'` will produce a transverse-component receiver function, which is more realistic for incident S waves propagating through the mantle only. Be careful with the slowness values!

Did you notice the boolean `pvh=False` in the code above? It sets whether or not the seismograms are rotated to the P-SV-SH wave modes. Setting it to `True` will essentially make the zero-lag signal on the radial component disappear, since we know the exact value of the seismic velocities at the surface. This may not always be true, so use with caution when comparing with real data!

## References

* Frederiksen, A.W., & Bostock, M.G. (2000). Modelling teleseismic waves in dipping anisotropic structures. Geophysical Journal International, 141, 401-412. https://doi.org/10.1046/j.1365-246x.2000.00090.x
* Porter, R., Zandt, G., & McQuarrie, N. (2011). Pervasive lower-crustal seismic anisotropy in Southern California: Evidence for underplated schists and active tectonics. Lithosphere, 3(3), 201-220. https://doi.org/10.1130/L126.1
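Following up on the exercise suggested above (before the references), here is a minimal sketch of the `wvtype = 'Si'` variant. It reuses `model`, `baz`, `npts`, `dt`, `f1`, `f2` and the telewavesim calls from the cells above; the slowness value is only an assumed placeholder and should be chosen with care, as the warning above notes.

```
# Minimal sketch of the suggested exercise: isotropic S-wave source ('Si').
# Reuses objects defined earlier in this notebook; slow_S is an assumed value.
slow_S = 0.04

trR_S = Stream(); trT_S = Stream()
for bb in baz:
    trxyz = ut.run_plane(model, slow_S, npts, dt, bb, wvtype='Si', obs=False)
    tfs = ut.tf_from_xyz(trxyz, pvh=False)
    trR_S.append(tfs[0]); trT_S.append(tfs[1])

trR_S.filter('bandpass', freqmin=f1, freqmax=f2, corners=2, zerophase=True)
trT_S.filter('bandpass', freqmin=f1, freqmax=f2, corners=2, zerophase=True)
```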
github_jupyter
import numpy as np from obspy.core import Stream from telewavesim import utils as ut from telewavesim import wiggle as wg modfile = '../models/model_Porter2011.txt' wvtype = 'P' npts = 3000 # Number of samples dt = 0.01 # Sample distance in seconds slow = 0.06 # Horizontal slowness (or ray parameter) in s/km baz = np.arange(0., 360., 10.) model = ut.read_model(modfile) trR = Stream(); trT = Stream() # Loop over range of data for bb in baz: # Calculate the plane waves seismograms trxyz = ut.run_plane(model, slow, npts, dt, bb, wvtype=wvtype, obs=False) # Then the transfer functions in Z-R-T coordinate system tfs = ut.tf_from_xyz(trxyz, pvh=False) # Append to streams trR.append(tfs[0]); trT.append(tfs[1]) # Set frequency corners in Hz f1 = 0.01 f2 = 1.0 # Filter to get wave-like traces trR.filter('bandpass',freqmin=f1, freqmax=f2, corners=2, zerophase=True) trT.filter('bandpass',freqmin=f1, freqmax=f2, corners=2, zerophase=True) # Stack over all traces trR_stack, trT_stack = ut.stack_all(trR, trT, pws=True) # Plot as wiggles wg.rf_wiggles_baz(trR, trT, trR_stack, trT_stack, 'test', btyp='baz', scale=1.e3, tmin=-5., tmax=8., save=False, ftitle='porter2011', wvtype='P')
0.652906
0.982889
# Introduction to BigQuery ML - Predict Birth Weight **Learning Objectives** 1. Use BigQuery to explore the natality dataset 1. Create a regression (linear regression) model in BQML 1. Evaluate the performance of your machine learning model 1. Make predictions with a trained BQML model ## Introduction In this lab, you will be using the US Centers for Disease Control and Prevention's (CDC) natality data to build a model to predict baby birth weights based on a handful of features known at pregnancy. Because we're predicting a continuous value, this is a regression problem, and for that, we'll use the linear regression model built into BQML. ``` import matplotlib.pyplot as plt ``` ### Set up the notebook environment __VERY IMPORTANT__: In the cell below you must replace the text `<YOUR PROJECT>` with your GCP project id as provided during the setup of your environment. Please leave any surrounding single quotes in place. ``` PROJECT = '<YOUR PROJECT>' #TODO Replace with your GCP PROJECT ``` ## Exploring the Data This lab will use natality data and training on features to predict the birth weight. The CDC's Natality data has details on US births from 1969 to 2008 and is available in BigQuery as a public data set. More details: https://bigquery.cloud.google.com/table/publicdata:samples.natality?tab=details Start by looking at the data since 2000 with useful values, those greater than 0. Note: "__%%bigquery__" is a magic which allows quick access to BigQuery from within a notebook. ``` %%bigquery SELECT * FROM publicdata.samples.natality WHERE year > 2000 AND gestation_weeks > 0 AND mother_age > 0 AND plurality > 0 AND weight_pounds > 0 LIMIT 10 ``` ## Define Features Looking over the data set, there are a few columns of interest that could be leveraged into features for a reasonable prediction of approximate birth weight. Further, some feature engineering may be accomplished with the BigQuery `CAST` function -- in BQML, all strings are considered categorical features and all numeric types are considered continuous ones. The hashmonth is added so that we can repeatably split the data without leakage -- the goal is to have all babies that share a birthday to be either in training set or in test set and not spread between them (otherwise, there would be information leakage when it comes to triplets, etc.) ``` %%bigquery SELECT weight_pounds, -- this is the label; because it is continuous, we need to use regression CAST(is_male AS STRING) AS is_male, mother_age, CAST(plurality AS STRING) AS plurality, gestation_weeks, ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 AND gestation_weeks > 0 AND mother_age > 0 AND plurality > 0 AND weight_pounds > 0 LIMIT 10 ``` ## Train Model With the relevant columns chosen to accomplish predictions, it is then possible to create and train the model in BigQuery. First, a dataset will be needed store the model. ``` %%bash bq --location=US mk -d demo ``` With the demo dataset ready, it is possible to create a linear regression model to train the model. This will take approximately **5 to 7 minutes** to run. Feedback from BigQuery will cease in output cell and the notebook will leave the "busy" state when complete. 
``` %%bigquery CREATE or REPLACE MODEL demo.babyweight_model_asis OPTIONS (model_type='linear_reg', labels=['weight_pounds'], optimize_strategy='batch_gradient_descent') AS WITH natality_data AS ( SELECT weight_pounds,-- this is the label; because it is continuous, we need to use regression CAST(is_male AS STRING) AS is_male, mother_age, CAST(plurality AS STRING) AS plurality, gestation_weeks, ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 AND gestation_weeks > 0 AND mother_age > 0 AND plurality > 0 AND weight_pounds > 0 ) SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks FROM natality_data WHERE MOD(hashmonth, 4) < 3 -- select 75% of the data as training ``` ## Training Statistics For all training runs, statistics are captured in the "TRAINING_INFO" table. This table has basic performance statistics for each iteration. The query below returns the training details. ``` %%bigquery SELECT * FROM ML.TRAINING_INFO(MODEL demo.babyweight_model_asis); ``` Some of these columns are obvious although what do the non-specific ML columns mean (specific to BQML)? **training_run** - Will be zero for a newly created model. If the model is re-trained using warm_start, this will increment for each re-training. **iteration** - Number of the associated `training_run`, starting with zero for the first iteration. **duration_ms** - Indicates how long the iteration took (in ms). Next plot the training and evaluation loss to see if the model has an overfit. ``` %%bigquery history SELECT * FROM ML.TRAINING_INFO(MODEL demo.babyweight_model_asis) history plt.plot('iteration', 'loss', data=history, marker='o', color='orange', linewidth=2) plt.plot('iteration', 'eval_loss', data=history, marker='', color='green', linewidth=2, linestyle='dashed') plt.xlabel('iteration') plt.ylabel('loss') plt.legend(); ``` As you can see, the training loss and evaluation loss are essentially identical. There does not appear to be any overfitting. ## Make a Prediction with BQML using the Model With a trained model, it is now possible to make a prediction on the values. The only difference from the second query above is the reference to the model. The data has been limited (`LIMIT 100`) to reduce amount of data returned. When the `ml.predict` function is leveraged, output prediction column name for the model is `predicted_<label_column_name>`. ``` %%bigquery SELECT * FROM ml.PREDICT(MODEL demo.babyweight_model_asis, (SELECT weight_pounds, CAST(is_male AS STRING) AS is_male, mother_age, CAST(plurality AS STRING) AS plurality, gestation_weeks FROM publicdata.samples.natality WHERE year > 2000 AND gestation_weeks > 0 AND mother_age > 0 AND plurality > 0 AND weight_pounds > 0 )) LIMIT 100 ``` <br> <br> <br> <br> Copyright 2018 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
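The learning objectives above include evaluating the model, but the cells only inspect `ML.TRAINING_INFO`. As a hedged sketch (not part of the original lab), the held-out 25% of the data — the rows with `MOD(hashmonth, 4) = 3` — could be scored with BigQuery ML's `ML.EVALUATE`, reusing the same feature definitions as the training query:

```
%%bigquery
SELECT
  *
FROM
  ML.EVALUATE(MODEL demo.babyweight_model_asis,
    (
    SELECT
      weight_pounds,
      CAST(is_male AS STRING) AS is_male,
      mother_age,
      CAST(plurality AS STRING) AS plurality,
      gestation_weeks
    FROM
      publicdata.samples.natality
    WHERE
      year > 2000
      AND gestation_weeks > 0
      AND mother_age > 0
      AND plurality > 0
      AND weight_pounds > 0
      AND MOD(ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))), 4) = 3
    ))
```

For a `linear_reg` model this returns regression metrics such as `mean_absolute_error`, `mean_squared_error` and `r2_score` on the evaluation rows.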
github_jupyter
import matplotlib.pyplot as plt PROJECT = '<YOUR PROJECT>' #TODO Replace with your GCP PROJECT %%bigquery SELECT * FROM publicdata.samples.natality WHERE year > 2000 AND gestation_weeks > 0 AND mother_age > 0 AND plurality > 0 AND weight_pounds > 0 LIMIT 10 %%bigquery SELECT weight_pounds, -- this is the label; because it is continuous, we need to use regression CAST(is_male AS STRING) AS is_male, mother_age, CAST(plurality AS STRING) AS plurality, gestation_weeks, ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 AND gestation_weeks > 0 AND mother_age > 0 AND plurality > 0 AND weight_pounds > 0 LIMIT 10 %%bash bq --location=US mk -d demo %%bigquery CREATE or REPLACE MODEL demo.babyweight_model_asis OPTIONS (model_type='linear_reg', labels=['weight_pounds'], optimize_strategy='batch_gradient_descent') AS WITH natality_data AS ( SELECT weight_pounds,-- this is the label; because it is continuous, we need to use regression CAST(is_male AS STRING) AS is_male, mother_age, CAST(plurality AS STRING) AS plurality, gestation_weeks, ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 AND gestation_weeks > 0 AND mother_age > 0 AND plurality > 0 AND weight_pounds > 0 ) SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks FROM natality_data WHERE MOD(hashmonth, 4) < 3 -- select 75% of the data as training %%bigquery SELECT * FROM ML.TRAINING_INFO(MODEL demo.babyweight_model_asis); %%bigquery history SELECT * FROM ML.TRAINING_INFO(MODEL demo.babyweight_model_asis) history plt.plot('iteration', 'loss', data=history, marker='o', color='orange', linewidth=2) plt.plot('iteration', 'eval_loss', data=history, marker='', color='green', linewidth=2, linestyle='dashed') plt.xlabel('iteration') plt.ylabel('loss') plt.legend(); %%bigquery SELECT * FROM ml.PREDICT(MODEL demo.babyweight_model_asis, (SELECT weight_pounds, CAST(is_male AS STRING) AS is_male, mother_age, CAST(plurality AS STRING) AS plurality, gestation_weeks FROM publicdata.samples.natality WHERE year > 2000 AND gestation_weeks > 0 AND mother_age > 0 AND plurality > 0 AND weight_pounds > 0 )) LIMIT 100
0.238905
0.987289
# TensorFlow Datasets TFDS provides a collection of ready-to-use datasets for use with TensorFlow, Jax, and other Machine Learning frameworks. It handles downloading and preparing the data deterministically and constructing a `tf.data.Dataset` (or `np.array`). Note: Do not confuse [TFDS](https://www.tensorflow.org/datasets) (this library) with `tf.data` (TensorFlow API to build efficient data pipelines). TFDS is a high level wrapper around `tf.data`. If you're not familiar with this API, we encourage you to read [the official tf.data guide](https://www.tensorflow.org/guide/data) first. Copyright 2018 The TensorFlow Datasets Authors, Licensed under the Apache License, Version 2.0 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/datasets/overview"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/datasets/blob/master/docs/overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/datasets/blob/master/docs/overview.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/datasets/docs/overview.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ## Installation TFDS exists in two packages: * `pip install tensorflow-datasets`: The stable version, released every few months. * `pip install tfds-nightly`: Released every day, contains the last versions of the datasets. This colab uses `tfds-nightly`: ``` !pip install -q tfds-nightly tensorflow matplotlib import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import tensorflow_datasets as tfds ``` ## Find available datasets All dataset builders are subclass of `tfds.core.DatasetBuilder`. To get the list of available builders, use `tfds.list_builders()` or look at our [catalog](https://www.tensorflow.org/datasets/catalog/overview). ``` tfds.list_builders() ``` ## Load a dataset ### tfds.load The easiest way of loading a dataset is `tfds.load`. It will: 1. Download the data and save it as [`tfrecord`](https://www.tensorflow.org/tutorials/load_data/tfrecord) files. 2. Load the `tfrecord` and create the `tf.data.Dataset`. ``` ds = tfds.load('mnist', split='train', shuffle_files=True) assert isinstance(ds, tf.data.Dataset) print(ds) ``` Some common arguments: * `split=`: Which split to read (e.g. `'train'`, `['train', 'test']`, `'train[80%:]'`,...). See our [split API guide](https://www.tensorflow.org/datasets/splits). * `shuffle_files=`: Control whether to shuffle the files between each epoch (TFDS store big datasets in multiple smaller files). * `data_dir=`: Location where the dataset is saved ( defaults to `~/tensorflow_datasets/`) * `with_info=True`: Returns the `tfds.core.DatasetInfo` containing dataset metadata * `download=False`: Disable download ### tfds.builder `tfds.load` is a thin wrapper around `tfds.core.DatasetBuilder`. You can get the same output using the `tfds.core.DatasetBuilder` API: ``` builder = tfds.builder('mnist') # 1. Create the tfrecord files (no-op if already exists) builder.download_and_prepare() # 2. 
Load the `tf.data.Dataset` ds = builder.as_dataset(split='train', shuffle_files=True) print(ds) ``` ### `tfds build` CLI If you want to generate a specific dataset, you can use the [`tfds` command line](https://www.tensorflow.org/datasets/cli). For example: ```sh tfds build mnist ``` See [the doc](https://www.tensorflow.org/datasets/cli) for available flags. ## Iterate over a dataset ### As dict By default, the `tf.data.Dataset` object contains a `dict` of `tf.Tensor`s: ``` ds = tfds.load('mnist', split='train') ds = ds.take(1) # Only take a single example for example in ds: # example is `{'image': tf.Tensor, 'label': tf.Tensor}` print(list(example.keys())) image = example["image"] label = example["label"] print(image.shape, label) ``` To find out the `dict` key names and structure, look at the dataset documentation in [our catalog](https://www.tensorflow.org/datasets/catalog/overview#all_datasets). For example: [mnist documentation](https://www.tensorflow.org/datasets/catalog/mnist). ### As tuple (`as_supervised=True`) By using `as_supervised=True`, you can get a tuple `(features, label)` instead for supervised datasets. ``` ds = tfds.load('mnist', split='train', as_supervised=True) ds = ds.take(1) for image, label in ds: # example is (image, label) print(image.shape, label) ``` ### As numpy (`tfds.as_numpy`) Uses `tfds.as_numpy` to convert: * `tf.Tensor` -> `np.array` * `tf.data.Dataset` -> `Iterator[Tree[np.array]]` (`Tree` can be arbitrary nested `Dict`, `Tuple`) ``` ds = tfds.load('mnist', split='train', as_supervised=True) ds = ds.take(1) for image, label in tfds.as_numpy(ds): print(type(image), type(label), label) ``` ### As batched tf.Tensor (`batch_size=-1`) By using `batch_size=-1`, you can load the full dataset in a single batch. This can be combined with `as_supervised=True` and `tfds.as_numpy` to get the the data as `(np.array, np.array)`: ``` image, label = tfds.as_numpy(tfds.load( 'mnist', split='test', batch_size=-1, as_supervised=True, )) print(type(image), image.shape) ``` Be careful that your dataset can fit in memory, and that all examples have the same shape. ## Benchmark your datasets Benchmarking a dataset is a simple `tfds.benchmark` call on any iterable (e.g. `tf.data.Dataset`, `tfds.as_numpy`,...). ``` ds = tfds.load('mnist', split='train') ds = ds.batch(32).prefetch(1) tfds.benchmark(ds, batch_size=32) tfds.benchmark(ds, batch_size=32) # Second epoch much faster due to auto-caching ``` * Do not forget to normalize the results per batch size with the `batch_size=` kwarg. * In the summary, the first warmup batch is separated from the other ones to capture `tf.data.Dataset` extra setup time (e.g. buffers initialization,...). * Notice how the second iteration is much faster due to [TFDS auto-caching](https://www.tensorflow.org/datasets/performances#auto-caching). * `tfds.benchmark` returns a `tfds.core.BenchmarkResult` which can be inspected for further analysis. ### Build end-to-end pipeline To go further, you can look: * Our [end-to-end Keras example](https://www.tensorflow.org/datasets/keras_example) to see a full training pipeline (with batching, shuffling,...). * Our [performance guide](https://www.tensorflow.org/datasets/performances) to improve the speed of your pipelines (tip: use `tfds.benchmark(ds)` to benchmark your datasets). 
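For instance, a minimal version of such a pipeline might look like the sketch below (the `[0, 1]` normalization and the buffer/batch sizes are illustrative choices, not prescribed by TFDS):

```
import tensorflow as tf
import tensorflow_datasets as tfds

ds = tfds.load('mnist', split='train', as_supervised=True, shuffle_files=True)

def normalize_img(image, label):
  # Cast uint8 pixels to float32 in [0, 1]
  return tf.cast(image, tf.float32) / 255.0, label

ds = (ds
      .map(normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
      .cache()                     # cache after the cheap map
      .shuffle(10_000)             # illustrative buffer size
      .batch(128)
      .prefetch(tf.data.AUTOTUNE))

for images, labels in ds.take(1):
  print(images.shape, labels.shape)  # (128, 28, 28, 1) (128,)
```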
## Visualization ### tfds.as_dataframe `tf.data.Dataset` objects can be converted to [`pandas.DataFrame`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) with `tfds.as_dataframe` to be visualized on [Colab](https://colab.research.google.com). * Add the `tfds.core.DatasetInfo` as second argument of `tfds.as_dataframe` to visualize images, audio, texts, videos,... * Use `ds.take(x)` to only display the first `x` examples. `pandas.DataFrame` will load the full dataset in-memory, and can be very expensive to display. ``` ds, info = tfds.load('mnist', split='train', with_info=True) tfds.as_dataframe(ds.take(4), info) ``` ### tfds.show_examples `tfds.show_examples` returns a `matplotlib.figure.Figure` (only image datasets supported now): ``` ds, info = tfds.load('mnist', split='train', with_info=True) fig = tfds.show_examples(ds, info) ``` ## Access the dataset metadata All builders include a `tfds.core.DatasetInfo` object containing the dataset metadata. It can be accessed through: * The `tfds.load` API: ``` ds, info = tfds.load('mnist', with_info=True) ``` * The `tfds.core.DatasetBuilder` API: ``` builder = tfds.builder('mnist') info = builder.info ``` The dataset info contains additional informations about the dataset (version, citation, homepage, description,...). ``` print(info) ``` ### Features metadata (label names, image shape,...) Access the `tfds.features.FeatureDict`: ``` info.features ``` Number of classes, label names: ``` print(info.features["label"].num_classes) print(info.features["label"].names) print(info.features["label"].int2str(7)) # Human readable version (8 -> 'cat') print(info.features["label"].str2int('7')) ``` Shapes, dtypes: ``` print(info.features.shape) print(info.features.dtype) print(info.features['image'].shape) print(info.features['image'].dtype) ``` ### Split metadata (e.g. split names, number of examples,...) Access the `tfds.core.SplitDict`: ``` print(info.splits) ``` Available splits: ``` print(list(info.splits.keys())) ``` Get info on individual split: ``` print(info.splits['train'].num_examples) print(info.splits['train'].filenames) print(info.splits['train'].num_shards) ``` It also works with the subsplit API: ``` print(info.splits['train[15%:75%]'].num_examples) print(info.splits['train[15%:75%]'].file_instructions) ``` ## Troubleshooting ### Manual download (if download fails) If download fails for some reason (e.g. offline,...). You can always manually download the data yourself and place it in the `manual_dir` (defaults to `~/tensorflow_datasets/download/manual/`. To find out which urls to download, look into: * For new datasets (implemented as folder): [`tensorflow_datasets/`](https://github.com/tensorflow/datasets/tree/master/tensorflow_datasets/)`<type>/<dataset_name>/checksums.tsv`. For example: [`tensorflow_datasets/text/bool_q/checksums.tsv`](https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/text/bool_q/checksums.tsv). You can find the dataset source location in [our catalog](https://www.tensorflow.org/datasets/catalog/overview). * For old datasets: [`tensorflow_datasets/url_checksums/<dataset_name>.txt`](https://github.com/tensorflow/datasets/tree/master/tensorflow_datasets/url_checksums) ### Fixing `NonMatchingChecksumError` TFDS ensure determinism by validating the checksums of downloaded urls. If `NonMatchingChecksumError` is raised, might indicate: * The website may be down (e.g. `503 status code`). Please check the url. 
* For Google Drive URLs, try again later as Drive sometimes rejects downloads when too many people access the same URL. See [bug](https://github.com/tensorflow/datasets/issues/1482) * The original datasets files may have been updated. In this case the TFDS dataset builder should be updated. Please open a new Github issue or PR: * Register the new checksums with `tfds build --register_checksums` * Eventually update the dataset generation code. * Update the dataset `VERSION` * Update the dataset `RELEASE_NOTES`: What caused the checksums to change ? Did some examples changed ? * Make sure the dataset can still be built. * Send us a PR Note: You can also inspect the downloaded file in `~/tensorflow_datasets/download/`. ## Citation If you're using `tensorflow-datasets` for a paper, please include the following citation, in addition to any citation specific to the used datasets (which can be found in the [dataset catalog](https://www.tensorflow.org/datasets/catalog)). ``` @misc{TFDS, title = { {TensorFlow Datasets}, A collection of ready-to-use datasets}, howpublished = {\url{https://www.tensorflow.org/datasets}}, } ```
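As a small addendum to the split-metadata section above: the same slicing syntax can be passed directly to `tfds.load`, which is a common way to carve out a validation set.

```
import tensorflow_datasets as tfds

# Slice the canonical 'train' split into train/validation at load time
ds_train, ds_val = tfds.load('mnist', split=['train[:80%]', 'train[80%:]'])

# Splits can also be combined or sliced by absolute example count
ds_small = tfds.load('mnist', split='train[:1000]+test[:1000]')
```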
github_jupyter
!pip install -q tfds-nightly tensorflow matplotlib import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import tensorflow_datasets as tfds tfds.list_builders() ds = tfds.load('mnist', split='train', shuffle_files=True) assert isinstance(ds, tf.data.Dataset) print(ds) builder = tfds.builder('mnist') # 1. Create the tfrecord files (no-op if already exists) builder.download_and_prepare() # 2. Load the `tf.data.Dataset` ds = builder.as_dataset(split='train', shuffle_files=True) print(ds) tfds build mnist ds = tfds.load('mnist', split='train') ds = ds.take(1) # Only take a single example for example in ds: # example is `{'image': tf.Tensor, 'label': tf.Tensor}` print(list(example.keys())) image = example["image"] label = example["label"] print(image.shape, label) ds = tfds.load('mnist', split='train', as_supervised=True) ds = ds.take(1) for image, label in ds: # example is (image, label) print(image.shape, label) ds = tfds.load('mnist', split='train', as_supervised=True) ds = ds.take(1) for image, label in tfds.as_numpy(ds): print(type(image), type(label), label) image, label = tfds.as_numpy(tfds.load( 'mnist', split='test', batch_size=-1, as_supervised=True, )) print(type(image), image.shape) ds = tfds.load('mnist', split='train') ds = ds.batch(32).prefetch(1) tfds.benchmark(ds, batch_size=32) tfds.benchmark(ds, batch_size=32) # Second epoch much faster due to auto-caching ds, info = tfds.load('mnist', split='train', with_info=True) tfds.as_dataframe(ds.take(4), info) ds, info = tfds.load('mnist', split='train', with_info=True) fig = tfds.show_examples(ds, info) ds, info = tfds.load('mnist', with_info=True) builder = tfds.builder('mnist') info = builder.info print(info) info.features print(info.features["label"].num_classes) print(info.features["label"].names) print(info.features["label"].int2str(7)) # Human readable version (8 -> 'cat') print(info.features["label"].str2int('7')) print(info.features.shape) print(info.features.dtype) print(info.features['image'].shape) print(info.features['image'].dtype) print(info.splits) print(list(info.splits.keys())) print(info.splits['train'].num_examples) print(info.splits['train'].filenames) print(info.splits['train'].num_shards) print(info.splits['train[15%:75%]'].num_examples) print(info.splits['train[15%:75%]'].file_instructions) @misc{TFDS, title = { {TensorFlow Datasets}, A collection of ready-to-use datasets}, howpublished = {\url{https://www.tensorflow.org/datasets}}, }
0.864325
0.995898
__Objetivos__: - entender os conceitos de derivada e gradiente - entender a diferença entre gradiente analítico e numérico - aprender a calcular a backpropagação de qualquer rede neural. # Sumário [0. Imports and Configurações](#0.-Imports-and-Configurações) [1. Introdução](#1.-Introdução) - [O Objetivo](#O-Objetivo) - [Estratégia 1: Busca Aleatória](#Estratégia-1:-Busca-Aleatória) - [Estratégia 2: Busca Aleatória Local](#Estratégia-2:-Busca-Aleatória-Local) - [Estratégia 3: Gradiente Numérico](#Estratégia-3:-Gradiente-Numérico) - [Estratégia 4: Gradiente Analítico](#Estratégia-4:-Gradiente-Anal%C3%ADtico) - [Caso Recursivo: Múltiplas Portas](#Caso-Recursivo:-Múltiplas-Portas) - [Checagem do gradiente numérico](#Checagem-do-gradiente-numérico) - [Neurônio Sigmóide](#Neurônio-Sigmóide) [2. Backpropagation](#2.-Backpropagation) - [Se tornando um Ninja em Backpropagation!](#Se-tornando-um-Ninja-em-Backpropagation!) - [Resumo dos Padrões na Backpropagation](#Resumo-dos-Padrões-na-Backpropagation) - [Exemplo 1](#Exemplo-1) - [Exemplo 2](#Exemplo-2) # 0. Imports and Configurações ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline ``` # 1. Introdução A melhor maneira de pensar em redes neurais é como circuitos de valores reais. Mas, ao invés de valores booleanos, valores reais e, ao invés de portas lógicas como **and** ou **or**, portas binárias (dois operandos) como $*$ (multiplicação), + (adição), max, exp, etc. Além disso, também teremos **gradientes** fluindo pelo circuito, mas na direção oposta. <img src='images/porta_multiplicacao.png' width="250"> De forma matemática, a gente pode considerar que essa porta implementa a seguinte função: $$f(x,y)=x*y$$ ## O Objetivo Vamos imaginar que temos o seguinte problema: 1. Nós vamos providenciar a um circuito valores específicos como entrada (x=-2, y=3) 2. O circuito vai calcular o valor de saída (-6) 3. A questão é: *Quanto mudar a entrada para levemente **aumentar** a saída?* No nosso caso, em que direção devemos mudar x,y para conseguir um número maior que -6? Note que, pro nosso exemplo, se x = -1.99 e y = 2.99, x$*$y = -5.95 que é maior que -6. **-5.95 é melhor (maior) que 6**, e obtivemos uma melhora de 0.05. ## Estratégia 1: Busca Aleatória Ok. Isso não é trivial? A gente pode simplesmente gerar valores aleatórios, calcular a saída e guardar o melhor resultado. ``` x,y = -2,3 melhor_saida = forwardMultiplyGate(x,y) melhor_x, melhor_y = 0,0 for k in range(0,100): x_try = 5*np.random.random() - 5 y_try = 5*np.random.random() - 5 out = forwardMultiplyGate(x_try, y_try) if out > melhor_saida: melhor_saida = out melhor_x, melhor_y = x_try, y_try print(melhor_x, melhor_y, forwardMultiplyGate(melhor_x, melhor_y)) ``` Ok, foi bem melhor. Mas, e se tivermos milhões de entradas? É claro que essa estratégia não funcionará. Vamos tentar algo mais aprimorado. ## Estratégia 2: Busca Aleatória Local ``` x,y = -2,3 passo = 0.01 melhor_saida = forwardMultiplyGate(x,y) melhor_x, melhor_y = 0,0 for k in range(0,100): x_try = x + passo * (2*np.random.random() - 1) y_try = y + passo * (2*np.random.random() - 1) out = forwardMultiplyGate(x_try, y_try) if out > melhor_saida: melhor_saida = out melhor_x, melhor_y = x_try, y_try print(melhor_x, melhor_y, forwardMultiplyGate(melhor_x, melhor_y)) ``` ## Estratégia 3: Gradiente Numérico Imagine agora que a gente pega as entradas de um circuito e puxa-as para uma direção positiva. Essa força puxando $x$ e $y$ vai nos dizer como $x$ e $y$ devem mudar para aumentar a saída. Não entendeu? 
Vamos explicar: Se olharmos para as entradas, a gente pode intuitivamente ver que a força em $x$ deveria sempre ser positiva, porque tornando $x$ um pouquinho maior de $x=-2$ para $x=-1$ aumenta a saída do circuito para $-3$, o que é bem maior que $-6$. Por outro lado, se a força em $y$ for negativa, tornando-o menor, como de $y=3$ para $y=2$, também aumenta a saída: $-2\times2 = -4$, de novo maior que $-6$. E como calcular essa força? Usando **derivadas**. > *A derivada pode ser pensada como a força que a gente aplica em cada entrada para aumentar a saída* <img src='images/derivada.gif'> E como exatamente a gente vai fazer isso? Em vez de olhar para o valor de saída, como fizemos anteriormente, nós vamos iterar sobre as cada entrada individualmente, aumentando-as bem devagar e vendo o que acontece com a saída. **A quantidade que a saída muda é a resposta da derivada**. Vamos para definição matemática. A derivada em relação a $x$ pode ser definida como: $$\frac{\partial f(x,y)}{\partial x} = \frac{f(x+h,y) - f(x,y)}{h}$$ Onde $h$ é pequeno. Nós vamos, então, calcular a saída inicial $f(x,y)$ e aumentar $x$ por um valor pequeno $h$ e calcular a nova saída $f(x+h,y)$. Então, nós subtraimos esse valores para ver a diferença e dividimos por $f(x+h,y)$ para normalizar essa mudança pelo valor (arbitrário) que nós usamos. Em termos de código, teremos: ``` x,y = -2,3 out = forwardMultiplyGate(x,y) h = 0.0001 # derivada em relação a x # derivada em relação a y ``` Como a gente pode ver, a derivada em relação a $x$ é igual a $+3$. O sinal positivo indica que alterando o valor de $x$ pelo passo $h$, a saída se torna maior. O valor $3$ pode ser considerado como o valor da força que puxa $x$. O inverso acontece com $y$. > *A derivada em relação a alguma entrada pode ser calculada ajustando levemente aquela entrada e observando a mudança no valor da saída* A derivada é calculada sobre cada entrada, enquanto o **gradiente** representa todas as derivadas sobre as entradas concatenadas em um vetor. ``` passo = 0.01 out = forwardMultiplyGate(x,y) x = x + passo * derivada_x y = y + passo * derivada_y out_new = forwardMultiplyGate(x,y) print(out_new) ``` Como a gente pode perceber $-5.87 > -6$. Apenas 3 avaliações foram necessárias para aumentar o valor da saída (ao invés de centenas) e conseguimos um melhor resultado. **Passo maior nem sempre é melhor**: É importante destacar que qualquer valor de passo maior que 0.01 ia sempre funcionar melhor (por exemplo, passo = 1 gera a saída = 1). No entanto, à medida que os circuitos vão ficando mais complexos (como em redes neurais completas), a função vai ser tornando mais caótica e complexa. O gradiente garante que se você tem um passo muito pequeno (o ideal seria infinitesimal), então você definitivamente aumenta a saída seguindo aquela direção. O passo que estamos utilizando (0.01) ainda é muito grande, mas como nosso circuito é simples, podemos esperar pelo melhor resultado. Lembre-se da analogia do **escalador cego**. ## Estratégia 4: Gradiente Analítico A estratégia que utilizamos até agora de ajustar levemente a entrada e ver o que acontece com a saída pode não ser muito cômoda na prática quando temos milhares de entradas para ajustar. Então, a gente precisa de algo melhor. Felizmente, existe uma estratégia mais fácil e muito mais rápida para calcular o gradiente: podemos usar cálculo para derivar diretamente a nossa função. Chamamos isso de **gradiente analítico** e dessa forma não precisamos ajustar levemente nada. 
> *O gradiente analítico evita o leve ajustamento das entradas. O circuito pode ser derivado usando cálculo.* É muito fácil calcular derivadas parciais para funções simples como $x*y$. Se você não lembra da definição, aqui está o cálculo da derivada parcial em relação a $x$ da nossa função $f(x,y)$: $$\frac{\partial f(x,y)}{\partial x} = \frac{f(x+h,y) - f(x,y)}{h} = \frac{(x+h)y - xy}{h} = \frac{xy + hy - xy}{h} = \frac{hy}{h} = y$$ A derivada parcial em relação em $x$ da nossa $f(x,y)$ é igual $y$. Você reparou na coincidência de $\partial x = 3.0$, que é exatamente o valor de $y$? E que o mesmo aconteceu para $x$? **Então, a gente não precisa ajustar nada!** E nosso código fica assim: ``` x,y = -2,3 out = forwardMultiplyGate(x,y) # insira seu código aqui! ``` É importante destacar que a Estratégia #3 reduziu a #2 para uma única vez. Porém, a #3 nos dá somente uma aproximação do gradiente, enquanto a Estratégia #4 nos dá o valor exato. Sem aproximações. O único lado negativo é que temos de saber derivar a nossa funcão. Recapitulando o que vimos até aqui: - __Estratégia 1__: definimos valores aleatórios em todas as iterações. Não funciona para muitas entradas. - __Estratégia 2__: pequenos ajustes aleatórios nas entradas e vemos qual funciona melhor. Tão ruim quando a #1. - __Estratégia 3__: muito melhor através do cálculo do gradiente. Independentemente de quão complicado é o circuito, o **gradiente numérico** é muito simples de se calcular (mas um pouco caro). - __Estratégia 4__: no final, vimos que a forma melhor, mais inteligente e mais rápida é calcular o **gradiente analítico**. O resultado é idêntico ao gradiente numérico, porém mais rápido e não precisa de ajustes. ## Caso Recursivo: Múltiplas Portas Calcular o gradiente para o nosso circuito foi trivial. Mas, e em circuitos mais complexos? Como a gente vai ver agora, cada porta pode ser tratada individualmente e a gente pode calcular derivadas locais como a gente fez anteriormente. Vamos considerar nossa função agora como a seguinte: $$f(x,y,z) = (x+y)*z$$ <img src='images/circuito_2.png' width='300'> ``` def forwardAddGate(a, b): return a+b def forwardCircuit(x,y,z): q = forwardAddGate(x,y) f = forwardMultiplyGate(q, z) return f print(forwardCircuit(-2, 5, -4)) ``` Como vamos calcular agora a nossa derivada? Primeiramente, vamos esquecer da porta de soma e fingir que temos apenas duas entradas no nosso circuito: **q** e **z**. Como já vimos, as nossas derivadas parciais podem ser definidas da seguinte maneira: $$f(q,z) = q z \hspace{0.5in} \implies \hspace{0.5in} \frac{\partial f(q,z)}{\partial q} = z, \hspace{1in} \frac{\partial f(q,z)}{\partial z} = q$$ Ok, mas e em relação a $x$ e $y$? Como $q$ é calculado em função de $x$ e $y$ (pela adição em nosso exemplo), nós também podemos calcular suas derivadas parciais: $$q(x,y) = x + y \hspace{0.5in} \implies \hspace{0.5in} \frac{\partial q(x,y)}{\partial x} = 1, \hspace{1in} \frac{\partial q(x,y)}{\partial y} = 1$$ Correto! As derivadas parciais são 1, independentemente dos valores de $x$ e $y$. Isso faz sentido se pensarmos que para aumentar A saída de uma porta de adição, a gente espera uma força positiva tanto em $x$ quanto em $y$, independente dos seus valores. Com as fórmulas acima, nós sabemos calcular o gradiente da saída em relação a $q$ e $z$, e o gradiente de $q$ em relação a $x$ e $y$. Para calcular o gradiente do nosso circuito em relação a $x$, $y$ e $z$, nós vamos utilizar a **Regra da Cadeia**, que vai nos dizer como combinar esses gradientes. 
The final derivative with respect to $x$ is given by:

$$\frac{\partial f(q,z)}{\partial x} = \frac{\partial q(x,y)}{\partial x} \frac{\partial f(q,z)}{\partial q}$$

It may look complicated at first glance, but in practice it boils down to just two multiplications:

```
x, y, z = -2, 5, -4
q = forwardAddGate(x,y)
f = forwardMultiplyGate(q,z)

# Derivative of the multiply gate
der_f_rel_z = q
der_f_rel_q = z

# Derivative of the add gate
der_q_rel_x = 1
der_q_rel_y = 1

# Chain rule
der_f_rel_x = der_q_rel_x * der_f_rel_q
der_f_rel_y = der_q_rel_y * der_f_rel_q
```

<img src="images/circuito_2_back.png">

That's it! Let's now make the inputs respond to the gradient. Remember that we want a value larger than -12.

```
grad_f_rel_xyz = [der_f_rel_x, der_f_rel_y, der_f_rel_z]
passo = 0.01
x = x + passo * der_f_rel_x
y = y + passo * der_f_rel_y
z = z + passo * der_f_rel_z

print(forwardCircuit(x,y,z))
```

Let's analyse the results separately. Looking first at $q$ and $z$, we see that the circuit wants $z$ to increase (der_f_rel_z = +3) and $q$ to decrease (der_f_rel_q = -4), with a stronger pull on $q$ (4 versus 3).

As for the add gate, as we saw, its default is that increasing its inputs increases its output. But the circuit wants $q$ to decrease (der_f_rel_q = -4). This is the **crucial point**: instead of applying a force of +1 to the add gate's inputs as we normally would (the local derivative), the circuit wants the gradients on $x$ and $y$ to become 1 x -4 = -4. This makes sense: the circuit wants $x$ and $y$ to be small so that $q$ is small too, which in turn increases $f$.

> *If this made sense, you understand backpropagation.*

**Recapping:**

- We saw that, for a single gate (or single expression), we can derive the analytic gradient using basic calculus. We interpret the gradient as a force that pulls the inputs in the direction needed to make the output increase.
- With multiple gates, each gate is treated individually until the circuit is handled as a whole. The *only* difference is that now the circuit tells each gate how its output should behave (as with the add gate), which is the final gradient of the circuit with respect to that gate's output. It is as if the circuit asked that gate for a larger or smaller output value, with some force. The gate simply takes this force and multiplies it onto all the forces it computed for its own inputs (chain rule) - notice how the force on q (-4) is multiplied onto the forces on x and y. This can have two desired effects:
    - If a gate receives a positive force on its output, that force is also multiplied onto its inputs, scaled by the local forces computed for those inputs.
    - If a gate receives a negative force on its output, it means the circuit wants its output to decrease, so that force is multiplied onto its inputs in order to decrease the output value.

> *Keep in mind that the force on the circuit's output keeps pulling the other forces in the desired direction all the way through the circuit down to the inputs.*

## Numerical gradient check

Let's verify that the analytic gradients we computed with backpropagation are correct. Remember that we can do this with the numerical gradient, and we expect the result to be [-4, -4, 3] for $x,y,z$.

```
x,y,z = -2,5,-4
h = 0.0001

# insert your code here
```

## Sigmoid Neuron

Any differentiable function can act as a gate; we can also group multiple gates into a single gate, or decompose a function into multiple gates whenever that is convenient.
To illustrate, let's use the *sigmoid* activation function with inputs **x** and weights **w**:

$$f(w,x) = \frac{1}{1+e^{-(w_0x_0 + w_1x_1 + w_2)}}$$

As stated, the function above is nothing more than the sigmoid $\sigma(\cdot)$ applied to the weighted sum. Knowing the sigmoid and its derivative,

$$\sigma(x)=\frac{1}{1+e^{-x}}, \hspace{0.5in} \frac{d\sigma(x)}{dx}=\sigma(x)(1-\sigma(x))$$

let's compute the gradient with respect to the inputs:

```
w0, w1, w2 = 2, -3, -3
x0, x1 = -1, -2

# forward pass

# backward pass

# new output
```

Now suppose we did not know the derivative of $\sigma(x)$, let alone of $f(w,x)$. What could we do? **Decompose the function into a circuit with multiple gates!** Like this:

<img src='images/circuito_3.png' width='800'>

Computing the output of each gate, we get:

<img src='images/circuito_3_forward.png' width='800'>

where we know the following derivatives:

$$f(x) = \frac{1}{x} \rightarrow \frac{df}{dx} = -1/x^2 \\\\ f_c(x) = c + x \rightarrow \frac{df}{dx} = 1 \\\\ f(x) = e^x \rightarrow \frac{df}{dx} = e^x \\\\ f_a(x) = ax \rightarrow \frac{df}{dx} = a$$

Here the functions $f_c(x)$ and $f_a(x)$ translate the input by a constant $c$ and scale it by a constant $a$, respectively. They are really just special cases of addition and multiplication, introduced as unary gates. How do we compute the derivative with respect to the inputs now? **Using backpropagation!!**

# 2. Backpropagation

## Becoming a Backpropagation Ninja!

Before solving the circuit above, let's practice backpropagation with a few examples. Let's forget about functions for a moment and work with just 4 variables: $a$, $b$, $c$, and $x$. We will refer to their gradients as $da$, $db$, $dc$, and $dx$. We also assume that $dx$ is given (or is +1 as in the cases above). Our first example is the $*$ gate, which we already know:

$$x = a * b$$

$$da = b * dx$$
$$db = a * dx$$

If you look closely, you will notice that the $*$ gate acts as a *switcher* during backpropagation: the gradient of each input is the value of the other input, multiplied by the gradient coming from above (chain rule). On the other hand, consider the + gate:

$$x = a + b$$

$$da = 1.0 * dx$$
$$db = 1.0 * dx$$

In this case, 1.0 is the local gradient, and the multiplication is our chain rule. **What if we added 3 numbers?**:

$$q = a + b$$
$$x = q + c$$

$$dc = 1.0 * dx$$
$$dq = 1.0 * dx$$
$$da = 1.0 * dq$$
$$db = 1.0 * dq$$

Do you see what is happening? If you look back at the diagrams of the circuits we already solved, you will notice that the + gate simply takes the current gradient and routes it equally to all of its inputs (because the local gradients are always 1.0 for all inputs, regardless of their actual values). So we can go much faster:

$$x = a + b + c$$

$$da = 1.0 * dx$$
$$db = 1.0 * dx$$
$$dc = 1.0 * dx$$

Okay. But what if we combine gates?

$$x = a*b + c$$

$$da = b * dx$$
$$db = a * dx$$
$$dc = 1.0 * dx$$

If you did not see how that happened, introduce a temporary variable $q = a * b$ and then compute $x = q + c$ to convince yourself. And what about this example:

$$x = a * a$$

$$da = 2 * a * dx$$

Another example:

$$x = a*a + b*b + c*c$$

$$da = 2 * a * dx$$
$$db = 2 * b * dx$$
$$dc = 2 * c * dx$$

Ok.
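As a quick sanity check on these hand-derived rules, here is a small sketch (my own addition, not from the original guide) that compares the analytic gradients for $x = a*b + c$ with finite-difference estimates; all names and values are made up for the example.

```
# Sketch: verify da = b*dx, db = a*dx, dc = 1*dx for x = a*b + c numerically
a, b, c = 3.0, -4.0, 5.0
dx = 1.0            # gradient flowing in from above (assumed +1)
h = 1e-5

f = lambda a, b, c: a*b + c

# analytic gradients from the rules above
da, db, dc = b*dx, a*dx, 1.0*dx

# finite-difference estimates
da_num = (f(a+h, b, c) - f(a, b, c)) / h
db_num = (f(a, b+h, c) - f(a, b, c)) / h
dc_num = (f(a, b, c+h) - f(a, b, c)) / h

print(da, da_num)   # -4.0  ~-4.0
print(db, db_num)   #  3.0  ~3.0
print(dc, dc_num)   #  1.0  ~1.0
```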
Now something more complex:

$$x = ((a * b + c) * d)^2$$

When more complex cases like this come up, I like to break the expression into manageable pieces, which are almost always simple expressions where I can apply the chain rule:

$$x1 = a * b + c$$
$$x2 = x1 * d$$
$$x = x2 * x2$$

$$dx2 = 2 * x2 * dx$$
$$dx1 = d * dx2$$
$$dd = x1 * dx2$$
$$da = b * dx1$$
$$db = a * dx1$$
$$dc = 1 * dx1$$

That was not so hard! These are the equations for the whole expression, and we derived them piece by piece, applying backpropagation to every variable along the way. Note that **every variable in the forward pass has an equivalent variable in the backward pass, holding its gradient with respect to the circuit's output.**

One more useful function and its local gradient:

$$x = 1.0/a$$
$$da = -1.0/(a*a) * dx$$

And how it can be used in practice:

$$x = (a+b)/(c+d)$$

$$x1 = a + b$$
$$x2 = c + d$$
$$x3 = 1.0 / x2$$
$$x = x1 * x3$$

$$dx1 = x3 * dx$$
$$dx3 = x1 * dx$$
$$dx2 = (-1.0/(x2 * x2)) * dx3$$
$$dc = 1 * dx2$$
$$dd = 1 * dx2$$
$$da = 1 * dx1$$
$$db = 1 * dx1$$

And one more:

$$x = math.max(a, b)$$
$$da = x == a\ ?\ 1.0 * dx\ :\ 0.0$$
$$db = x == b\ ?\ 1.0 * dx\ :\ 0.0$$

This one is a bit harder to read. The **max** function passes the value of the larger input through and ignores the others. In the backward pass, the __max__ gate simply takes the current gradient and routes it to the input that had the larger value during the forward pass. The gate acts as a simple switch based on which input had the highest value in the forward pass; the other inputs receive a gradient of zero.

Now let's look at the **ReLU (*Rectified Linear Unit*)** gate, widely used in neural networks in place of the sigmoid. It is simply a threshold at zero:

$$x = max(a, 0)$$
$$da = a > 0\ ?\ 1.0 * dx\ :\ 0.0$$

In other words, this gate passes the value through if it is greater than zero, otherwise it stops the flow and sets the value to zero. In the backward pass, the gate passes the current gradient through if it was activated during the forward pass; if the original input was below zero, it stops the gradient flow.

Finally, let's see how to compute the gradient of vectorized operations, which we will use all the time in neural networks:

$$W = np.random.randn(5,10)$$
$$X = np.random.randn(3,10)$$
$$Y = X.dot(W^T)$$

Assuming the gradient of Y is given as:

$$dY = np.random.randn(*Y.shape)$$
$$dW = dY^T.dot(X)$$
$$dX = dY.dot(W)$$

Hopefully you now see how to break whole expressions (made of many gates) apart and how to backpropagate through each of them.

## Summary of Backpropagation Patterns

To summarize the patterns in the backward flow, consider this circuit:

<img src='images/backpropagation_padroes.png' width='450'>

The **add gate** simply takes the gradient at its output and distributes it equally to its inputs, regardless of their values during the forward pass. This follows from the fact that the local gradient of addition is simply +1.0, so the gradients on all inputs are exactly equal to the output gradient, since they get multiplied by 1.0 (and stay unchanged). In the circuit above, notice how the + gate routed the gradient of 2.0 to both of its inputs, equally and without alteration.

The **max gate** routes the gradient. Unlike the add gate, which distributes the gradient to all of its inputs, the max gate distributes the gradient (unchanged) to exactly one of its inputs (the one that had the largest value during the forward pass).
This happens because the local gradient is 1.0 for the largest value and 0.0 for the other values. In the circuit above, the max operation routed the gradient of 2.0 to the variable $z$, which had a larger value than $w$, while the gradient on $w$ stays at zero.

The **multiply gate** is a little harder to interpret. Its local gradients are the input values (swapped), which get multiplied by the gradient on its output during the chain rule. In the example above, the gradient on $x$ is -8.00, since it equals -4.00 x 2.00.

*Unintuitive effects and their consequences*. Note that if one of the inputs to the multiply gate is very small and the other is very large, the multiply gate will do something slightly unintuitive: it will assign a relatively large gradient to the small input and a tiny gradient to the large input. Note that for linear classifiers, where the weights are multiplied with the inputs $w^Tx_i$, this implies that the scale of the data affects the magnitude of the gradient on the weights. For example, if you multiplied all input data **$x_i$** by 1000 during preprocessing, the gradient on the weights would be 1000 times larger, and you would have to use a lower learning rate to compensate. That is why preprocessing matters so much, and an intuitive understanding of the gradients can help you debug some of these cases.

## Example 1

Implementing our neuron:

<img src='images/circuito_3_back.png' width='800'>

```
w0, w1, w2 = 2, -3, -3
x0, x1 = -1, -2

# forward pass

# backward pass
```

## Example 2

Let's look at another example. Suppose we have the following function:

$$f(x,y) = \frac{x + \sigma(y)}{\sigma(x) + (x+y)^2}$$

To be clear, this function is completely useless, but it is a good example of backpropagation in practice. It is also worth noting that it is quite painful to differentiate with respect to $x$ and $y$ directly. However, as we have seen, that is completely unnecessary: we do not need the analytic derivative of the whole function to compute the gradients, we only need to know how to compute the local gradients. Here is the outline of the solution:

```
x, y = 3, -4

# forward pass

# backward pass
```

Note a few important things:

**Cache the forward pass variables**. To compute the backward pass, it is very useful to have some (if not all) of the variables computed in the forward pass. In practice, you should structure your code so that these values are cached for backprop. If that is not possible, you can recompute them.

**Gradients add up at forks**. The forward pass uses the variables $x$ and $y$ multiple times, so when we do backprop we have to be careful to accumulate the gradient on these variables (using +=). This follows the **multivariable chain rule** in calculus.

# References

1. [CS231n - Optimization: Stochastic Gradient Descent](http://cs231n.github.io/optimization-1/)
2. [CS231n - Backpropagation, Intuitions](http://cs231n.github.io/optimization-2/)
3. [Hacker's guide to Neural Networks](http://karpathy.github.io/neuralnets/)
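As an appendix to the examples above, here is a minimal sketch of one possible forward and backward pass for the sigmoid neuron of Example 1. It is my own completion of the exercise, not the notebook's official solution; the variable names are made up, and the derivative used is the sigmoid rule stated earlier.

```
# Sketch: one possible solution to Example 1 (sigmoid neuron), not the official one
import math

w0, w1, w2 = 2, -3, -3
x0, x1 = -1, -2

# forward pass
dot = w0*x0 + w1*x1 + w2          # weighted sum = 1.0
f = 1.0 / (1 + math.exp(-dot))    # sigmoid output ~0.73

# backward pass, using dsigma/ddot = sigma * (1 - sigma)
ddot = (1 - f) * f                # gradient on the weighted sum ~0.20
dw0, dw1, dw2 = x0*ddot, x1*ddot, 1.0*ddot
dx0, dx1 = w0*ddot, w1*ddot

print([dw0, dw1, dw2])            # ~[-0.20, -0.39, 0.20]
print([dx0, dx1])                 # ~[0.39, -0.59]
```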
<a href="https://colab.research.google.com/github/AndrewDrury/FakeNewsML/blob/main/MSCI446_FakeNews.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # MSCI Project - Fake News Written by Andrew Drury & Michael Cheng ### Imports All imports required for the project. ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import re import nltk nltk.download('punkt') nltk.download('stopwords') nltk.download('wordnet') from nltk.tokenize import word_tokenize from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.model_selection import StratifiedKFold from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import LogisticRegression from sklearn import metrics from mlxtend.frequent_patterns import apriori, association_rules from scipy.sparse import hstack from mlxtend.preprocessing import TransactionEncoder from wordcloud import WordCloud ``` ## Part 1: Data Transformation ### Load data from tsv file The tsv file contains 12,386 rows each containing a statement that has been fact checked with additional descriptive columns. A github repository was created in order to store the tsv file. The data is imported and assigned column names. ``` # Link to the raw tsv file stored in a Github repository datasetUrl = 'https://raw.githubusercontent.com/AndrewDrury/FakeNewsML/main/factCheck.tsv' # Load tsv file, assign column names to tsv df = pd.read_csv(datasetUrl, delimiter='\t', engine='python', header=None, names=['id', 'label', 'statement', 'subjects', 'speaker', 'job-title', 'state', 'party-affiliation', 'barely-true', 'false', 'half-true', 'mostly-true', 'pants-fire', 'context']) ``` ### Apply Text Mining Methods to Prep Inputs To predict the truthfulness of a statement, the words in the statement and context need to be extracted and transformed into a more useful form of input. The following steps are taken to transform the statements and contexts into tokens. The subjects do not need to be lemmatized as they are already cleaned with only 144 possible values. 1. All characters in the statements and contexts will be transformed to lowercase 2. The strings will be split into tokens, each token representing a continuous sequence of letters 3. All characters not present in the alphabet will be removed 4. Remove common stopwords that do not provide any useful information (words like 'the', 'a') 5. The WordNetLemmatizer package will be used to transform tokens into lemmas. WordNetLemmatizer is chosen over other word processing tools such as a Stemmer since WordNetLemmatizer transforms words into their contextual root form. A stemmer often times neglects context when removing characters from a word leading to alternate meanings. After applying lemmatization, similar tokens will be reduced to their singular and common root. For example, the words 'likes', 'liked', 'liking', and 'likely' will all be reduced to their lemma 'like'. 
```
def getTokens(column, removeUnique=False):
    # Transform all characters to lowercase
    lowerCase = column.str.lower()

    # Tokenize strings
    tokens = lowerCase.apply(word_tokenize)

    # Remove all characters not present in the alphabet
    tokens = tokens.apply(lambda words: [re.sub(r'[^a-z]+', '', token) for token in words])

    # Remove common stopwords that do not provide any useful information (words like 'the', 'a')
    stopList = stopwords.words('english')
    tokens = tokens.apply(lambda statement: [token for token in statement if token not in (stopList)])

    # Lemmatize tokens in strings using WordNetLemmatizer
    wnLemmatizer = nltk.WordNetLemmatizer()
    tokens = tokens.apply(lambda x: [wnLemmatizer.lemmatize(y) for y in x])

    tokens = pd.DataFrame(val for val in tokens)
    return tokens

# Create token matrix for all statements
statementMatrix = getTokens(df['statement'])

# The contexts contained non-string NaN values, all NaN values are replaced by an empty string
contextsNoNan = df['context'].replace(np.nan,'')

# Create token matrix for all contexts
contextMatrix = getTokens(contextsNoNan)

# Store subjects, subjects do not need to be lemmatized as they are already processed
subjects = df['subjects'].str.split(',', expand=True)

# Store the truth result labels
truthLabels = df['label']
```

### Create TF-IDF Matrix for Supervised Classification

To predict the truthfulness of a statement given a context and some subjects, a TF-IDF (term frequency-inverse document frequency) matrix of tokens will be created from the statements, contexts, and subjects. The TF-IDF 2D matrix will be created from the inputs prepared above using SKLearn's TfidfVectorizer. Each row in the matrix created will represent a statement with a given context and subjects.

The TF-IDF score is computed by multiplying the term frequency TF in the statement by the inverse document frequency IDF. The inverse document frequency is calculated by dividing the total number of statements by the number of documents that contain the given token (scikit-learn's implementation additionally applies smoothing and a logarithm to the IDF term). The TF-IDF score is commonly used with textual data as it up-weights distinctive and relevant terms in a body of text such as a statement.
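As a toy illustration of the idea (my own example, not part of the original notebook): with 4 documents and a term that appears twice in a document and in 2 documents overall, the raw score would be tf x idf = 2 x (4/2) = 4. The sketch below runs TfidfVectorizer on a tiny made-up corpus; its values differ from the hand formula because of the smoothed, logarithmic IDF and row normalization mentioned above.

```
# Sketch: TF-IDF on a tiny toy corpus (illustrative only)
from sklearn.feature_extraction.text import TfidfVectorizer

toy_corpus = ['wine tax increase', 'tax cut promise', 'wine quality claim', 'budget promise']
vec = TfidfVectorizer()
tfidf = vec.fit_transform(toy_corpus)

print(vec.get_feature_names_out())
print(tfidf.toarray().round(2))   # one row per document, one column per token
```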
```
def tfidfMatrix(input):
    # Tokens need to reform into sentences split with a space ' ' to use the TF-IDF vectorizer
    inputList = input.tolist()

    # Remove all empty strings
    inputList = [[val for val in row if val != ''] for row in inputList]

    # Create a list of strings, each string representing an entire row (needed for tfidf)
    inputList = [' '.join(val) for val in inputList]

    # Create TF-IDF Matrix of tokens
    tfidfVectorizer = TfidfVectorizer()
    tfidfFinal = tfidfVectorizer.fit_transform(inputList)
    return tfidfFinal, tfidfVectorizer

# Group together statement tokens, subjects, and context tokens as the input for supervised learning
xSupervised = pd.concat([statementMatrix, subjects, contextMatrix], axis=1)

# Replace all 'None' values with empty string ''
xSupervised = np.asarray(xSupervised.fillna(value=''))

# Create tfidf matrix
xSupervised, xVectorizer = tfidfMatrix(xSupervised)

# Store truth labels
ySupervised = truthLabels

# Set all truth labels that are 'true' and 'mostly-true' to true, set all other labels to 'false'
ySupervised = ySupervised.replace(['mostly-true'],'true')
ySupervised = ySupervised.replace(['half-true','barely-true','pants-fire'],'false')

print('There are %s rows and %s columns in the supervised tfidf matrix.'% (xSupervised.shape[0], len(xVectorizer.get_feature_names_out())))
```

### Encode Input Data for Unsupervised Learning with the Apriori Algorithm

To apply the apriori algorithm, the dataset of tokens and subjects needs to be encoded with values of 0 or 1. Additionally, the tokens 'true' and 'false' in the statements and contexts need to be modified to avoid collisions with the fact check label. All other fact check labels contain a hyphen, and hyphens have already been stripped from the tokens, so no collision will occur and only 'true' and 'false' need to be modified. The label 'true' will become 'true_' and 'false' will become 'false_'.
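For intuition, here is a tiny sketch (my own toy data, not from the notebook) of what mlxtend's TransactionEncoder produces: each row becomes a boolean vector over the union of all items.

```
# Sketch: one-hot encoding of transactions with TransactionEncoder (toy data)
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder

transactions = [['tax', 'wine', 'true_'],
                ['tax', 'false_'],
                ['wine', 'half-true']]

enc = TransactionEncoder()
onehot = enc.fit(transactions).transform(transactions)
print(pd.DataFrame(onehot, columns=enc.columns_))
```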
```
# Group together statement tokens, subjects, context tokens for unsupervised learning input
aprioriInput = pd.concat([statementMatrix, subjects, contextMatrix, truthLabels], axis=1)

# Replace all instances of 'true' and 'false' with 'true_' and 'false_' to distinguish
# naturally occurring strings from the fact check labels, then add fact check labels to dataset
# Note: all other fact check labels (ex: mostly-true) contain a hyphen which has
# been removed from the rest of the dataset, thus no collision will occur
aprioriInput = aprioriInput.replace(['true'],'true_')
aprioriInput = aprioriInput.replace(['false'],'false_')

# Add truth labels to the dataset now that existing 'true' and 'false' have been modified
aprioriInput = pd.concat([aprioriInput, truthLabels], axis=1)

# Replace all none values with an empty string
aprioriInput = aprioriInput.fillna(value='')
aprioriInput = aprioriInput.values.tolist()

# Transform data into encoded matrix for the apriori algorithm
encoder = TransactionEncoder()
aprioriInput = encoder.fit(aprioriInput).transform(aprioriInput)

# Convert input data into dataframe
aprioriInput = pd.DataFrame(aprioriInput, columns=encoder.columns_)

# Drop the column containing empty strings
aprioriInput.drop('', axis=1, inplace=True)

print('There are %s rows and %s columns in the unsupervised encoded matrix.'% (aprioriInput.shape[0], aprioriInput.shape[1]))
```

## Part 2: Data Description

```
# Create bar chart for fact check label counts used for unsupervised learning
labelCounts = truthLabels.value_counts()
labelCounts = labelCounts.reindex(['true','mostly-true','half-true','barely-true','false','pants-fire'])

labelCountPlot = labelCounts.plot(kind='bar', rot=0, title='Unsupervised Fact Check Rating Count', label='_nolegend_', figsize=(8,6))
labelCountPlot.set_xlabel('Fact Check Rating')
labelCountPlot.set_ylabel('Count')

mean = labelCounts.mean()
median = labelCounts.median()
labelCountPlot.axhline(mean, color='r', ls='--', label="Mean")
labelCountPlot.axhline(median, color='g', ls='--', label="Median")
labelCountPlot.legend()

print('Mean: ', mean)
print('Median: ', median)
print('-'*20+'\n', labelCounts)

# Create bar chart for fact check label counts used for supervised learning
labelCounts = ySupervised.value_counts()

labelCountPlot = labelCounts.plot(kind='bar', rot=0, title='Supervised Fact Check Rating Count', label='_nolegend_', figsize=(8,6))
labelCountPlot.set_xlabel('Fact Check Rating')
labelCountPlot.set_ylabel('Count')

mean = labelCounts.mean()
median = labelCounts.median()
labelCountPlot.axhline(mean, color='r', ls='--', label="Mean")
labelCountPlot.axhline(median, color='g', ls='-.', label="Median")
labelCountPlot.legend()

print('Mean: ', mean)
print('Median: ', median)
print('-'*20+'\n', labelCounts)

# Generate plot showing the count of the top 10 contexts
counts = contextsNoNan.value_counts()
print(counts.shape)

otherCounts = counts[10:].sum()
countsMatrix = counts[:10]
countsMatrix = countsMatrix.append(pd.Series([otherCounts], index=['other']))

countsPlot = countsMatrix.plot(kind='bar', rot=30, title='Top 10 Contexts', label='_nolegend_', figsize=(10,6))
countsPlot.set_xlabel('Contexts')
countsPlot.set_ylabel('Count')

# Generate plot showing the count of the top 10 subjects
splitSubjects = []
for row in df['subjects']:
    for sub in row.split(','):
        splitSubjects.append(sub)

subDf = pd.DataFrame(splitSubjects)
counts = subDf.value_counts()
countsMatrix = counts[:10]

# Clean indexes (each subject is stored in a tuple, change from the tuple to the subject itself)
indexes = countsMatrix.index.tolist()
newIndexes = [index[0] for index in indexes]
countsMatrix.index = newIndexes

otherCounts = counts[10:].sum()
countsMatrix = countsMatrix.append(pd.Series([otherCounts], index=['other']))

countsPlot = countsMatrix.plot(kind='bar', rot=30, title='Top 10 Subjects', label='_nolegend_', figsize=(10,6))
countsPlot.set_xlabel('Subjects')
countsPlot.set_ylabel('Count')
```

## Part 3: Supervised Learning - Classification

The TF-IDF matrix is split into 5 folds, each used as an 80-20 train/test split. Metric lists are created for the 4 metrics: accuracy, precision, recall, and F1. The naive Bayes and logistic regression classifiers are run separately using the TF-IDF matrix. Initially both algorithms were run with the 6 y-variable classes. To try to improve performance, we grouped the class labels into 2 groups instead of the original 6 ('true','mostly-true','half-true','barely-true','false','pants-fire'): 'true' and 'mostly-true' become 'true' and everything else becomes 'false'.

```
folds = 5
metrics_supervised_a = []
metrics_supervised_p = []
metrics_supervised_r = []
metrics_supervised_f = []

# SPLIT DATASET 80-20 TRAIN/TEST with Stratified kfold validation using 5 folds
# Stratified kfold preserves the same proportions of each class in the split
kIndices = StratifiedKFold(n_splits=folds)

for trainIndice, testIndice in kIndices.split(xSupervised, ySupervised):
    train_x, test_x = xSupervised[trainIndice], xSupervised[testIndice]
    train_y, test_y = ySupervised[trainIndice], ySupervised[testIndice]

    # Train classification model with train_x and train_y
    # naive bayes
    naive_bayes_classifier = MultinomialNB()
    naive_bayes_classifier.fit(train_x, train_y)

    # logistic regression
    #logistic_regression_classifier = LogisticRegression()
    #logistic_regression_classifier.fit(train_x, train_y)

    # Test model with test_x and test_y
    # naive bayes
    y_pred = naive_bayes_classifier.predict(test_x)

    # logistic regression
    #y_pred = logistic_regression_classifier.predict(test_x)

    # Compute metrics for model for current kfold
    accuracy = metrics.accuracy_score(test_y, y_pred)
    precision = metrics.precision_score(test_y, y_pred, average='weighted', zero_division=1)
    recall = metrics.recall_score(test_y, y_pred, average='weighted', zero_division=1)
    f1 = metrics.f1_score(test_y, y_pred, average='weighted', zero_division=1)

    metrics_supervised_a.append(accuracy)
    metrics_supervised_p.append(precision)
    metrics_supervised_r.append(recall)
    metrics_supervised_f.append(f1)

# Calculate metric averages across the 5 kfolds
print(sum(metrics_supervised_a)/folds)
print(sum(metrics_supervised_p)/folds)
print(sum(metrics_supervised_r)/folds)
print(sum(metrics_supervised_f)/folds)
```

## Part 4: Unsupervised Learning - Association Rule Mining

The encoded matrix input assembled in Part 2 will be fed into the Apriori algorithm to generate a list of frequent itemsets. Then, the itemsets will be pruned, removing all sets that do not contain a truth label. Itemsets with truth labels are exclusively relevant, as we are seeking to identify trends associated with truthfulness. The pruned sets will be passed into the association rules method in order to generate rules.
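Before running this on the full encoded matrix, here is a tiny sketch (my own toy data, not from the notebook) of how `apriori` and `association_rules` behave, to make the support and confidence thresholds used below concrete.

```
# Sketch: apriori + association_rules on a toy one-hot dataframe
import pandas as pd
from mlxtend.frequent_patterns import apriori, association_rules

toy = pd.DataFrame({'tax':   [1, 1, 0, 1],
                    'wine':  [1, 0, 1, 1],
                    'true_': [1, 0, 1, 1]}).astype(bool)

itemsets = apriori(toy, min_support=0.5, use_colnames=True)            # itemsets present in >= 50% of rows
rules = association_rules(itemsets, metric='confidence', min_threshold=0.6)
print(rules[['antecedents', 'consequents', 'support', 'confidence']])
```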
```
# Generate frequent itemsets with minimum support
def getFrequentItemset(input, supportThreshold):
    frequentItemsets = apriori(input, min_support=supportThreshold, use_colnames=True)
    return frequentItemsets

# Generate association rules for frequent itemsets
def getRules(itemsets, confidenceThreshold):
    rules = association_rules(itemsets, metric='confidence', min_threshold=confidenceThreshold)

    # Keep only rules that contain truth labels
    truthLabels = {'true','mostly-true','half-true','barely-true','false','pants-fire'}
    removeIndexes = []
    for index, values in rules.iterrows():
        if not any(label in values['antecedents'] or label in values['consequents'] for label in truthLabels):
            removeIndexes.append(index)

    # Drop all rules that do not have truth labels
    rules.drop(rules.index[removeIndexes], inplace=True)
    return rules

# Setup threshold parameters for experiments
highSupport = 0.02
lowSupport = 0.01
highConfidence = 0.99
lowConfidence = 0.1

# Define experiments to run
experiments = [
    {
        'title':'Low Support and Low Confidence',
        'support':lowSupport,
        'confidence':lowConfidence,
    },
    {
        'title':'Low Support and High Confidence',
        'support':lowSupport,
        'confidence':highConfidence,
    },
    {
        'title':'High Support and Low Confidence',
        'support':highSupport,
        'confidence':lowConfidence,
    },
    {
        'title':'High Support and High Confidence',
        'support':highSupport,
        'confidence':highConfidence,
    },
]

rulesets = []

# Run experiments for each support and confidence threshold
for index, experiment in enumerate(experiments):
    frequentItemsets = getFrequentItemset(aprioriInput, experiment['support'])
    rules = getRules(frequentItemsets, experiment['confidence'])

    # Sort rules by confidence, then by support for all ties
    rules.sort_values(['confidence','support'], ascending=[False, False], inplace=True)

    # Reindex rules
    rules.index = range(len(rules))
    rulesets.append(rules)

    # Print results of experiment, including top 10 and bottom 10 association rules
    print('EXPERIMENT #%s'% (index+1))
    print('Title: %s'% experiment['title'])
    print('Support Threshold: %s'% experiment['support'])
    print('Confidence Threshold: %s'% experiment['confidence'])
    print('# Rules: %s'% len(rules))
    print('-'*60, '\nTop 10 Rules:')
    print(rules[['antecedents','consequents','support','confidence']][:10])
    print('-'*60, '\nBottom 10 Rules:')
    print(rules[['antecedents','consequents','support','confidence']][-10:])
    print('\n' + '='*80)
```

## Visualizing Trends with WordCloud Plots

```
# Generate new rules for WordCloud plot given a very low support threshold and at least 0.5 confidence value
# THIS WILL TAKE A LONG TIME TO RUN given the low support threshold
supportThreshold = 0.002
confidenceThreshold = 0.5

frequentItemsets = getFrequentItemset(aprioriInput, supportThreshold)
rules = getRules(frequentItemsets, confidenceThreshold)

# Create dictionary of words and their calculated sizes for the WordCloud plot for the given label
def getWordSizeDict(label, rules):
    wordSizeDict = {}

    # Accumulate a size for every word that appears in a rule together with the given label
    for index, values in rules.iterrows():
        # print(values)
        if label in values['antecedents'] or label in values['consequents']:
            words = list(values['antecedents']) + list(values['consequents'])
            words.remove(label)

            # Calculate size for given word
            # The size is a summation of support x confidence over all occurrences of the word in rules
            sizeMeasure = values['support'] * values['confidence']
            for word in words:
                if word in wordSizeDict:
                    wordSizeDict[word] += sizeMeasure
                else:
                    wordSizeDict[word] = sizeMeasure

    return wordSizeDict

# Create word cloud for given word size dictionary
def createWordCloud(wordSizeDict):
    wordCloud = WordCloud(width=1000, height=600, background_color='white').generate_from_frequencies(wordSizeDict)
    return wordCloud

truthLabelsList = ['true','mostly-true','half-true','barely-true','false','pants-fire']

# Create a WordCloud plot for each degree of truthfulness based on rules with very low support threshold and at least 0.5 confidence
for index in range(len(truthLabelsList)):
    label = truthLabelsList[index]

    # Generate word size dictionary for current fact-check label
    wordSizesDict = getWordSizeDict(label, rules)

    # Create wordCloud
    wordCloud = createWordCloud(wordSizesDict)

    print(label + ' WordCloud Plot')
    plt.figure(index)
    plt.imshow(wordCloud, interpolation='bilinear')
    plt.axis('off')
    plt.show()
```
# Ungraded Lab Part 2 - Consuming a Machine Learning Model

Welcome to the second part of this ungraded lab! **Before going forward, check that the server from part 1 is still running.**

In this notebook you will code a minimal client that uses Python's `requests` library to interact with your running server.

```
import os
import io
import cv2
import requests
import numpy as np
from IPython.display import Image, display
```

## Understanding the URL

### Breaking down the URL

After experimenting with FastAPI's client you may have noticed that we made all requests by pointing to a specific URL and appending some parameters to it.

More concretely:

1. The server is hosted at the URL [http://localhost:8000/](http://localhost:8000/).
2. The endpoint that serves your model is the `/predict` endpoint. You can also specify the model to use: `yolov3` or `yolov3-tiny`. Let's stick to the tiny version for computational efficiency.

Let's get started by putting all of this information in place.

```
base_url = 'http://localhost:8000'
endpoint = '/predict'
model = 'yolov3-tiny'
confidence = '0.2'
```

To consume your model, you append the endpoint to the base URL to get the full URL. Notice that the parameters are absent for now.

```
url_with_endpoint_no_params = base_url + endpoint
url_with_endpoint_no_params
```

To set any of the expected parameters, the syntax is to add a "?" character followed by the name of the parameter and its value; additional parameters are chained with a "&" character. Let's do it and check what the final URL looks like:

```
full_url = url_with_endpoint_no_params + "?model=" + model + "&confidence=" + confidence
full_url
```

This endpoint expects both a model's name and an image. But since the image is more complex, it is not passed within the URL. Instead we leverage the `requests` library to handle this process.

# Sending a request to your server

### Coding the response_from_server function

As a reminder, this endpoint expects a POST HTTP request. The `post` function is part of the requests library.

To pass the file along with the request, you need to create a dictionary indicating the name of the file ('file' in this case) and the actual file.

`status_code` is a handy attribute for checking the status of the response the request triggered. **A status code of 200 means that everything went well.**

```
def response_from_server(url, image_file, verbose=True):
    """Makes a POST request to the server and returns the response.

    Args:
        url (str): URL that the request is sent to.
        image_file (_io.BufferedReader): File to upload, should be an image.
        verbose (bool): True if the status of the response should be printed. False otherwise.

    Returns:
        requests.models.Response: Response from the server.
    """
    files = {'file': image_file}
    response = requests.post(url, files=files)
    status_code = response.status_code
    if verbose:
        msg = "Everything went well!" if status_code == 200 else "There was an error when handling the request."
        print(msg)
    return response
```

To test this function, open a file in your filesystem and pass it as a parameter alongside the URL:

```
with open("images/clock2.jpg", "rb") as image_file:
    prediction = response_from_server(full_url, image_file)
```

Great news! The request was successful. However, you are not getting any information about the objects in the image.

To get the image with the bounding boxes and labels, you need to parse the content of the response into an appropriate format. This process looks very similar to how you read raw images into a cv2 image on the server.

To handle this step, let's create a directory called `images_predicted` to save the image to:

```
dir_name = "images_predicted"
if not os.path.exists(dir_name):
    os.mkdir(dir_name)
```

### Creating the display_image_from_response function

```
def display_image_from_response(response):
    """Display image within server's response.

    Args:
        response (requests.models.Response): The response from the server after object detection.
    """
    image_stream = io.BytesIO(response.content)
    image_stream.seek(0)
    file_bytes = np.asarray(bytearray(image_stream.read()), dtype=np.uint8)
    image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
    filename = "image_with_objects.jpeg"
    cv2.imwrite(f'images_predicted/{filename}', image)
    display(Image(f'images_predicted/{filename}'))

display_image_from_response(prediction)
```

Now you are ready to consume your object detection model through your own client! Let's test it out on some other images:

```
image_files = [
    'car2.jpg',
    'clock3.jpg',
    'apples.jpg'
]

for image_file in image_files:
    with open(f"images/{image_file}", "rb") as image_file:
        prediction = response_from_server(full_url, image_file, verbose=False)
    display_image_from_response(prediction)
```

**Congratulations on finishing this ungraded lab!** Real life clients and servers have a lot more going on in terms of security and performance. However, the code you just experienced is close to what you see in real production environments.
Hopefully, this lab served the purpose of increasing your familiarity with the process of deploying a Deep Learning model, and consuming from it.

**Keep it up!**

## Optional Challenge - Adding the confidence level to the request

Let's expand on what you have learned so far. The next logical step is to extend the server and the client so that they can accommodate an additional parameter: the level of confidence of the prediction.

**To test your extended implementation you must perform the following steps:**

- Stop the server by interrupting the Kernel.
- Extend the `prediction` function in the server.
- Re-run the cell containing your server code.
- Re-launch the server.
- Extend your client.
- Test it with some images (either with your client or FastAPI's built-in one).

Here are some hints that can help you out throughout the process:

#### Server side:
- The `prediction` function that handles the `/predict` endpoint needs an additional parameter to accept the confidence level. Add this new parameter before the `File` parameter. This is necessary because `File` has a default value and must be specified last.
- `cv.detect_common_objects` accepts the `confidence` parameter, which is a floating point number (type `float` in Python).

#### Client side:
- You can add a new parameter to the URL by extending it with an `&` followed by the name of the parameter and its value. The name of this new parameter must be equal to the name used within the `prediction` function in the server. An example would look like this: `myawesomemodel.com/predict?model=yolov3-tiny&newParam=value`

**You can do it!**
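For reference, here is one possible shape of the server-side change the challenge asks for. This is only a sketch based on the hints above: it assumes the Part 1 server uses FastAPI together with cvlib's `detect_common_objects` and `draw_bbox`, and the names and response handling shown here are my own, not the official solution.

```
# Sketch of an extended /predict endpoint accepting a confidence level (one possible solution)
from fastapi import FastAPI, UploadFile, File
import numpy as np
import cv2
import cvlib as cv
from cvlib.object_detection import draw_bbox

app = FastAPI(title="Object detection with confidence")

@app.post("/predict")
def prediction(model: str, confidence: float = 0.5, file: UploadFile = File(...)):
    # The new `confidence` parameter comes before `file`, since File(...) carries a default value
    image_stream = file.file.read()
    file_bytes = np.frombuffer(image_stream, np.uint8)
    image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

    # cvlib's detect_common_objects accepts both `model` and `confidence`
    bbox, label, conf = cv.detect_common_objects(image, model=model, confidence=confidence)
    output_image = draw_bbox(image, bbox, label, conf)
    cv2.imwrite("image_with_objects.jpeg", output_image)
    # ...then return the annotated image to the client, as done in the Part 1 server
```

On the client side, the notebook above already appends `&confidence=...` when building `full_url`, so no further change is needed there.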
# Python and Pandas - Basic concepts and operations

**this notebook is useful as a *toolbox* reference for the training activities of the DataLifeLab and for the in-class exercises of the Open Notebook Science course at the Università di Firenze**

## Python - basics

* Variable assignment, printing and identifying a variable's data type
* Data types
* Numbers and operators
* Strings
* Lists
* Dictionaries
* Boolean values
* Tuples
* Sets
* Comparison operators
* Conditions (if, elif, else, try-except)
* For loops
* While loops
* range()
* list comprehension
* functions
* lambda expressions
* methods

## Python - file operations

* opening a file for reading
* opening a file for writing

## Pandas - basics

* Importing the library correctly
* Creating a DataFrame
* DataFrames and Series
* Filtering DataFrames and Series
* Concatenation and merge
* Pivot tables
* Wide and long format (melt)
* Applying a function to cells
* Opening and creating a file
    * excel

## Pandas - basic statistical functions

* Describe
* mean
* standard deviation
* correlation

## Pandas - basic plots

# Python

## Variable assignment, printing and identifying a variable's data type

```
variabile = "qualsiasi cosa in python è un oggetto"
variabile

print(variabile)

type(variabile)
```

## Data types

### Numbers

```
# integers
1

# floating point numbers
1.4

# operations
1 + 1
1 * 3
1 / 2
2 ** 4

# modulo (remainder)
4 % 2
5 % 2

(2 + 3) * (5 + 5)
```

## Strings

```
'testo compreso fra apostrofi'
"virgolette"
" testo che contiene apostrofi ' "
```

## Lists

```
[1,2,3]
['ciao',1,[1,2]]

la_mia_lista = ['a','b','c']
la_mia_lista.append('d')
la_mia_lista

la_mia_lista[0]
la_mia_lista[1]
la_mia_lista[1:]
la_mia_lista[:1]

la_mia_lista[0] = "modifico l'elemento zero"
la_mia_lista

lista_nested = [1,2,3,[4,5,['secondo livello del terzo elemento']]]
lista_nested
lista_nested[3]
lista_nested[3][2]
```

<a id="Dizionari"></a>
## Dictionaries

```
d = {'chiave1':'elemento1','chiave2':'elemento2'}
d
d['chiave1']
d.keys()
```

## Boolean values

```
True
False

## Tuples
t = (1,2,3)
t[0]
# t[0] = 'NEW'
# TypeError: 'tuple' object does not support item assignment

## Sets
{1,2,3}
{1,2,3,1,2,1,2,3,3,3,3,2,2,2,1,1,2}
```

## Comparison operators

```
1 > 2
1 < 2
1 >= 1
1 <= 4
1 == 1
'hi' == 'bye'
```

## Logical operators

```
(1 > 2) and (2 < 3)
(1 > 2) or (2 < 3)
(1 == 2) or (2 == 3) or (4 == 4)

# Conditionals and exception handling
if 1 < 2:
    print('bene!')

if 1 < 2:
    print('bene')
else:
    print('altrimenti')

if 1 > 2:
    print('bene')
else:
    print('altrimenti')

if 1 == 2:
    print('primo')
elif 3 == 3:
    print('intermedio')
else:
    print('ultimo')

try:
    print("10")
except:
    print("fai qualcosa in caso di errore")

#print(int("mamma"))
#ValueError: invalid literal for int() with base 10: 'mamma'

try:
    print(int("mamma"))
except:
    print("fai qualcosa in caso di errore")
```

## Loops

```
seq = [1,2,3,4,5]

for item in seq:
    print(item)

for item in seq:
    print('Bene!')

for nome_a_caso in seq:
    print(nome_a_caso+nome_a_caso)

## while loop
i = 1
while i < 10:
    print('i is: {}'.format(i))
    i = i+1

## range()
range(10)

for i in range(5):
    print(i)

list(range(5))
```

## Methods

```
st = 'Marco saluta il Mondo'

# list of methods and attributes
dir(st)

st.lower()
st.upper()
st.split()

post = 'si parla di scienza! #DataLifeLab'
post.split('#')
post.split('#')[1]

d
d.keys()
d.items()

'x' in [1,2,3]
'x' in ['x','y','z']
```

# File operations

```
# open a text file in read-only mode
# the file is in the Esercizio_1 folder inside the esercizi folder and its name is albert.txt
with open("./esercizi/Esercizio_1/albert.txt" , "r") as miofile:
    contenuto_mio_file = miofile.read()
print(contenuto_mio_file)

# open a text file for writing
# the file is in the Esercizio_2 folder inside the esercizi folder and its name is Luigino.txt
contenuto = '''
Luigino è stato uno dei più grandi artisti lirici della storia di San Marino
Nascita: 14 marzo 1945, San Marino, San Marino.
Decesso: 18 aprile 2015, San Marino, San Marino.
Altezza: 1,75 m
Peso: 205 KG
'''

with open("./esercizi/Esercizio_2/Luigino.txt", "w") as miofile:
    contenuto_mio_file = miofile.write(contenuto)
```

## Functions

```
def mia_funzione(parametro='default'):
    """
    The function's documentation goes here.
    """
    return(parametro)

mia_funzione
mia_funzione()
mia_funzione("marco")

def area_triangolo(base, altezza):
    return base * altezza / 2

area_triangolo(10,15)

## lambda expressions
def la_variabile_per_2(var):
    return var*2

la_variabile_per_2(10)

a = lambda var: var*2
a(10)
```

# Pandas - basics

```
import numpy as np
import pandas as pd

# building a DataFrame
df = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,800,1200,444],'col3':['abc','abc','ghi','xyz']})
df
```

!["DataFrame"](https://www.kdnuggets.com/wp-content/uploads/pandas-02.png "DataFrame")

```
## filtering data with **loc**

# filter by passing index and column values
df.loc[3,"col3"]
df.loc[2,["col2", "col3"]]
df.loc[[2,1],["col2", "col3"]]

(df['col2']==444)

# filter by passing a boolean vector
df.loc[(df['col1']>3) & (df['col2']==444), :]

# or
filtro1 = df['col1']>2
filtro2 = df['col2']==444
df.loc[filtro1 & filtro2, :]

df.loc[(df['col1']>2) | (df['col2']==444), :]

# assigning values
# assignment to an entire row
df.loc[(df['col1']>2) & (df['col2']==444), :] = 10
df

# filtering with isin
# pass a list of values to filter on
df[df["col3"].isin(["ghi", "abc"])]

# sometimes it is useful to filter "battleship style", by row and column number
# this can be done with iloc
df.iloc[0:1,1:3]  # take the second and third column and the first row (regardless of the index value)

## operations between columns and rows

# creating a new column
nuova_colonna = df["col1"] + df["col2"]
df["nuova_colonna"] = nuova_colonna
df

# adding integers and strings raises an error
#df["col1"] + df["col3"]
#TypeError: unsupported operand type(s) for +: 'int' and 'str'

df[["col1", "col2", "nuova_colonna"]].div(df["nuova_colonna"], axis = 0) * 100
# for more details https://pandas.pydata.org/pandas-docs/stable/api.html#id4

altezze = pd.DataFrame({'Nome':["Marco","Luigi","Nino","Ugo", "Marino" ],'Altezza':[175,173,178,182,160]})
pesi = pd.DataFrame({'Nome':["Marco","Luigi","Vittorio","Ugo", "Marino" ],'Peso':[75,70,100,97,50]})

# vertical concatenation
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.concat.html
pd.concat([altezze, pesi])

# horizontal concatenation
pd.concat([altezze, pesi], axis = 1)  ## the data are misaligned

# joining the data
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.merge.html
pd.merge(altezze, pesi, how = "outer", left_on = "Nome", right_on = "Nome")

# add Gabriele and his height
altezze = altezze.append([{"Nome": "Gabriele", "Altezza": 213}], )

pd.merge(altezze, pesi, how = "right", left_on = "Nome", right_on = "Nome")
pd.merge(altezze, pesi, how = "left", left_on = "Nome", right_on = "Nome")

# from wide to long format
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.melt.html
wide = pd.merge(altezze, pesi, how = "left", left_on = "Nome", right_on = "Nome")

# compute a body mass index
wide["Imc"] = wide["Peso"] / (wide["Altezza"] / 100) ** 2
wide

long = pd.melt(wide, id_vars = "Nome", value_vars=['Altezza', 'Peso', "Imc"], var_name='Variabile', value_name= "Valori")
long

# pivot table
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.pivot_table.html
pd.pivot_table(long, values = "Valori", index = "Variabile", aggfunc = np.mean)

# row and cell operations
wide.loc[:, ["Altezza", "Peso"]].apply(np.mean, axis = 0)
# axis 0 means the operation is applied per column, axis 1 means per row

# lambda-style operations
def recode_altezza(altezza):
    if altezza >= 185:
        return "Alto"
    elif altezza < 185 and altezza > 165:
        return "Medio"
    else:
        return "Basso"

wide["Altezza"].apply(recode_altezza)

wide.loc[:, ["Altezza", "Peso"]].applymap(lambda cella: str(cella) + "_ciao")

# from long to wide
long.pivot(index='Nome', columns='Variabile', values='Valori')
```

# Creating or opening an Excel file

```
# Create an Excel file
long.to_excel("excel_di_exempio.xlsx")

# Read an Excel file
pd.read_excel("excel_di_exempio.xlsx")

# Create an Excel file with multiple tabs
writer = pd.ExcelWriter('excel_di_esempio2.xlsx')
long.loc[long["Variabile"] == "Altezza"].to_excel(writer,'Altezza')
long.loc[long["Variabile"] == "Peso"].to_excel(writer,'Peso')
long.loc[long["Variabile"] == "Imc"].to_excel(writer,'Imc')
writer.save()

# Read an Excel file with multiple tabs
tab_multiple = pd.read_excel('excel_di_esempio2.xlsx', sheetname = None)
tab_multiple

# access a single tab
tab_multiple["Altezza"]
```

## Pandas - statistical functions

```
wide.describe()

# mean
wide["Altezza"].mean()

# standard deviation
# a measure of dispersion
wide["Altezza"].std()

# correlation
wide.corr()
```

![](https://www.kdnuggets.com/wp-content/uploads/pandas-08.png "some stats")

```
# iteration with lambda to perform complex operations between columns
wide.apply(lambda x: (x['Altezza'] / x["Peso"]) , axis=1)

# classic iteration over the data of a pandas DataFrame
# It can be very useful for computations that are hard to express with a lambda
# (main drawback: it takes longer to run)
for i, v in wide.items():  # take the label and the values here
    print(i, v)

# classic iteration over the data of a pandas Series
for i, v in wide["Altezza"].items():  # take the index and the values here
    print(i, v)
```

## Pandas - basic plots

```
serie_dati = {"Italia": {2010: 35, 2011: 30, 2012: 25}, "Spagna": {2010: 25, 2011: 27, 2012: 34}}
df_serie_dati = pd.DataFrame(serie_dati)
df_serie_dati

%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt

df_serie_dati.plot(kind = "line", grid = True, rot=45, figsize = (10,5))

df_serie_dati.index = pd.to_datetime(df_serie_dati.index, format="%Y")
df_serie_dati

df_serie_dati.plot(kind = "line", grid = True, rot=45, figsize = (10,5))

Altezza = tab_multiple["Altezza"]
Altezza

Altezza.plot(y= "Valori", kind = "box", grid = True, rot=45, figsize = (10,5))
Altezza.plot(x="Nome", y= "Valori", kind = "bar", grid = True, rot=45, figsize = (10,5))

ax = Altezza.plot(x="Nome", y= "Valori", kind = "bar", grid = True, rot=45, figsize = (10,5))
ax.set_ylabel('Infant
mort. rate') ax.set_xlabel('Country') for riga in range(Altezza.shape[0]): ax.annotate(Altezza["Valori"][riga], (Altezza.index[riga], Altezza["Valori"][riga]), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='-|>')) plt.show() ``` ## Sezione monovariata In questa sezione sono contenute alcune procedure liberamente ispirate al volume di Alberto Marradi l'Analisi Monovariata. Per utilizzarlo utilizzeremo la libreria seaborn e la libreria marradi.py ``` #aggiorno la libreria Seaborn all'ultima versione !pip install git+https://github.com/mwaskom/seaborn.git #importo la libreria seaborn import seaborn as sns #importo la libreria marradi from marradi import * #creo alcuni dati di prova dati = pd.DataFrame({"Nome":["Marco", "Piero", "Luigi", "Massimo", "Alberto"], "Età": [34,34,22,32,13], "Titolo di studio": ["Terziaria", "Terziaria", "Secondaria", "Terziaria", "Secondaria"], "Residenza" : ["Firenze", "Roma", "Brescia", "Roma", "Bari"]}) # le opzioni di dist_frequenza help(dist_frequenza) #eseguo una distribuzione di frequenza sul titolo di studio. Variabile ordinata tit_stud = dist_frequenza(matrice = dati, colonna = "Titolo di studio", tipo = "ordinale", lista_ordinale = ["Primaria", "Secondaria", "Terziaria"]) tit_stud # le opzioni di plot_dist_frequenza help(plot_dist_frequenza) #restituisto sotto forma di grafico plot_dist_frequenza(tit_stud, tipo = "ordinale") resid= dist_frequenza(matrice = dati, colonna = "Residenza", tipo = "categoriale", ) resid plot_dist_frequenza(resid, tipo = "categoriale") # per le variabili cardinali è meglio partire direttamente dalla matrice fig, ax = plt.subplots(figsize=(12,8)) sns.distplot(dati["Età"], bins = 10, ax = ax) ```
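One piece the univariate section relies on but does not show is the `dist_frequenza` helper from the `marradi` module. As a rough illustration only (this is not the real implementation), a frequency distribution of the kind used above can be sketched in plain pandas, assuming the helper returns absolute, relative and cumulative frequencies and can respect a fixed ordering for ordinal variables:

```
import pandas as pd

def frequency_table(df, column, ordered_levels=None):
    """Minimal frequency-distribution sketch in plain pandas.

    Not the marradi.dist_frequenza implementation (which is not shown in the
    notebook); it only assumes the helper reports absolute, relative and
    cumulative frequencies, optionally in a fixed order for ordinal variables.
    """
    counts = df[column].value_counts()
    if ordered_levels is not None:
        # Force the ordinal order and keep empty categories with count 0.
        counts = counts.reindex(ordered_levels, fill_value=0)
    table = pd.DataFrame({
        "frequency": counts,
        "relative": counts / counts.sum(),
    })
    table["cumulative"] = table["relative"].cumsum()
    return table

# Example with the toy data from the notebook
dati = pd.DataFrame({
    "Nome": ["Marco", "Piero", "Luigi", "Massimo", "Alberto"],
    "Titolo di studio": ["Terziaria", "Terziaria", "Secondaria", "Terziaria", "Secondaria"],
})
print(frequency_table(dati, "Titolo di studio",
                      ordered_levels=["Primaria", "Secondaria", "Terziaria"]))
```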
``` # -*- coding: utf-8 -*- """ EVCのためのEV-GMMを構築します. そして, 適応学習する. 詳細 : https://pdfs.semanticscholar.org/cbfe/71798ded05fb8bf8674580aabf534c4dbb8bc.pdf This program make EV-GMM for EVC. Then, it make adaptation learning. Check detail : https://pdfs.semanticscholar.org/cbfe/71798ded05fb8bf8674580abf534c4dbb8bc.pdf """ from __future__ import division, print_function import os from shutil import rmtree import argparse import glob import pickle import time import numpy as np from numpy.linalg import norm from sklearn.decomposition import PCA from sklearn.mixture import GMM # sklearn 0.20.0から使えない from sklearn.preprocessing import StandardScaler import scipy.signal import scipy.sparse %matplotlib inline import matplotlib.pyplot as plt import IPython from IPython.display import Audio import soundfile as sf import wave import pyworld as pw import librosa.display from dtw import dtw import warnings warnings.filterwarnings('ignore') """ Parameters __Mixtured : GMM混合数 __versions : 実験セット __convert_source : 変換元話者のパス __convert_target : 変換先話者のパス """ # parameters __Mixtured = 40 __versions = 'pre-stored0.1.3' __convert_source = 'input/EJM10/V01/T01/TIMIT/000/*.wav' __convert_target = 'adaptation/EJF01/V01/T01/ATR503/A/*.wav' # settings __same_path = './utterance/' + __versions + '/' __output_path = __same_path + 'output/EJF01/' # EJF01, EJF07, EJM04, EJM05 Mixtured = __Mixtured pre_stored_pickle = __same_path + __versions + '.pickle' pre_stored_source_list = __same_path + 'pre-source/**/V01/T01/**/*.wav' pre_stored_list = __same_path + "pre/**/V01/T01/**/*.wav" #pre_stored_target_list = "" (not yet) pre_stored_gmm_init_pickle = __same_path + __versions + '_init-gmm.pickle' pre_stored_sv_npy = __same_path + __versions + '_sv.npy' save_for_evgmm_covarXX = __output_path + __versions + '_covarXX.npy' save_for_evgmm_covarYX = __output_path + __versions + '_covarYX.npy' save_for_evgmm_fitted_source = __output_path + __versions + '_fitted_source.npy' save_for_evgmm_fitted_target = __output_path + __versions + '_fitted_target.npy' save_for_evgmm_weights = __output_path + __versions + '_weights.npy' save_for_evgmm_source_means = __output_path + __versions + '_source_means.npy' for_convert_source = __same_path + __convert_source for_convert_target = __same_path + __convert_target converted_voice_npy = __output_path + 'sp_converted_' + __versions converted_voice_wav = __output_path + 'sp_converted_' + __versions mfcc_save_fig_png = __output_path + 'mfcc3dim_' + __versions f0_save_fig_png = __output_path + 'f0_converted' + __versions converted_voice_with_f0_wav = __output_path + 'sp_f0_converted' + __versions EPSILON = 1e-8 class MFCC: """ MFCC() : メル周波数ケプストラム係数(MFCC)を求めたり、MFCCからスペクトルに変換したりするクラス. 動的特徴量(delta)が実装途中. ref : http://aidiary.hatenablog.com/entry/20120225/1330179868 """ def __init__(self, frequency, nfft=1026, dimension=24, channels=24): """ 各種パラメータのセット nfft : FFTのサンプル点数 frequency : サンプリング周波数 dimension : MFCC次元数 channles : メルフィルタバンクのチャンネル数(dimensionに依存) fscale : 周波数スケール軸 filterbankl, fcenters : フィルタバンク行列, フィルタバンクの頂点(?) 
""" self.nfft = nfft self.frequency = frequency self.dimension = dimension self.channels = channels self.fscale = np.fft.fftfreq(self.nfft, d = 1.0 / self.frequency)[: int(self.nfft / 2)] self.filterbank, self.fcenters = self.melFilterBank() def hz2mel(self, f): """ 周波数からメル周波数に変換 """ return 1127.01048 * np.log(f / 700.0 + 1.0) def mel2hz(self, m): """ メル周波数から周波数に変換 """ return 700.0 * (np.exp(m / 1127.01048) - 1.0) def melFilterBank(self): """ メルフィルタバンクを生成する """ fmax = self.frequency / 2 melmax = self.hz2mel(fmax) nmax = int(self.nfft / 2) df = self.frequency / self.nfft dmel = melmax / (self.channels + 1) melcenters = np.arange(1, self.channels + 1) * dmel fcenters = self.mel2hz(melcenters) indexcenter = np.round(fcenters / df) indexstart = np.hstack(([0], indexcenter[0:self.channels - 1])) indexstop = np.hstack((indexcenter[1:self.channels], [nmax])) filterbank = np.zeros((self.channels, nmax)) for c in np.arange(0, self.channels): increment = 1.0 / (indexcenter[c] - indexstart[c]) # np,int_ は np.arangeが[0. 1. 2. ..]となるのをintにする for i in np.int_(np.arange(indexstart[c], indexcenter[c])): filterbank[c, i] = (i - indexstart[c]) * increment decrement = 1.0 / (indexstop[c] - indexcenter[c]) # np,int_ は np.arangeが[0. 1. 2. ..]となるのをintにする for i in np.int_(np.arange(indexcenter[c], indexstop[c])): filterbank[c, i] = 1.0 - ((i - indexcenter[c]) * decrement) return filterbank, fcenters def mfcc(self, spectrum): """ スペクトルからMFCCを求める. """ mspec = [] mspec = np.log10(np.dot(spectrum, self.filterbank.T)) mspec = np.array(mspec) return scipy.fftpack.realtransforms.dct(mspec, type=2, norm="ortho", axis=-1) def delta(self, mfcc): """ MFCCから動的特徴量を求める. 現在は,求める特徴量フレームtをt-1とt+1の平均としている. """ mfcc = np.concatenate([ [mfcc[0]], mfcc, [mfcc[-1]] ]) # 最初のフレームを最初に、最後のフレームを最後に付け足す delta = None for i in range(1, mfcc.shape[0] - 1): slope = (mfcc[i+1] - mfcc[i-1]) / 2 if delta is None: delta = slope else: delta = np.vstack([delta, slope]) return delta def imfcc(self, mfcc, spectrogram): """ MFCCからスペクトルを求める. """ im_sp = np.array([]) for i in range(mfcc.shape[0]): mfcc_s = np.hstack([mfcc[i], [0] * (self.channels - self.dimension)]) mspectrum = scipy.fftpack.idct(mfcc_s, norm='ortho') # splrep はスプライン補間のための補間関数を求める tck = scipy.interpolate.splrep(self.fcenters, np.power(10, mspectrum)) # splev は指定座標での補間値を求める im_spectrogram = scipy.interpolate.splev(self.fscale, tck) im_sp = np.concatenate((im_sp, im_spectrogram), axis=0) return im_sp.reshape(spectrogram.shape) def trim_zeros_frames(x, eps=1e-7): """ 無音区間を取り除く. """ T, D = x.shape s = np.sum(np.abs(x), axis=1) s[s < 1e-7] = 0. return x[s > eps] def analyse_by_world_with_harverst(x, fs): """ WORLD音声分析合成器で基本周波数F0,スペクトル包絡,非周期成分を求める. 基本周波数F0についてはharvest法により,より精度良く求める. """ # 4 Harvest with F0 refinement (using Stonemask) frame_period = 5 _f0_h, t_h = pw.harvest(x, fs, frame_period=frame_period) f0_h = pw.stonemask(x, _f0_h, t_h, fs) sp_h = pw.cheaptrick(x, f0_h, t_h, fs) ap_h = pw.d4c(x, f0_h, t_h, fs) return f0_h, sp_h, ap_h def wavread(file): """ wavファイルから音声トラックとサンプリング周波数を抽出する. """ wf = wave.open(file, "r") fs = wf.getframerate() x = wf.readframes(wf.getnframes()) x = np.frombuffer(x, dtype= "int16") / 32768.0 wf.close() return x, float(fs) def preEmphasis(signal, p=0.97): """ MFCC抽出のための高域強調フィルタ. 波形を通すことで,高域成分が強調される. """ return scipy.signal.lfilter([1.0, -p], 1, signal) def alignment(source, target, path): """ タイムアライメントを取る. target音声をsource音声の長さに合うように調整する. 
""" # ここでは814に合わせよう(targetに合わせる) # p_p = 0 if source.shape[0] > target.shape[0] else 1 #shapes = source.shape if source.shape[0] > target.shape[0] else target.shape shapes = source.shape align = np.array([]) for (i, p) in enumerate(path[0]): if i != 0: if j != p: temp = np.array(target[path[1][i]]) align = np.concatenate((align, temp), axis=0) else: temp = np.array(target[path[1][i]]) align = np.concatenate((align, temp), axis=0) j = p return align.reshape(shapes) """ pre-stored学習のためのパラレル学習データを作る。 時間がかかるため、利用できるlearn-data.pickleがある場合はそれを利用する。 それがない場合は一から作り直す。 """ timer_start = time.time() if os.path.exists(pre_stored_pickle): print("exist, ", pre_stored_pickle) with open(pre_stored_pickle, mode='rb') as f: total_data = pickle.load(f) print("open, ", pre_stored_pickle) print("Load pre-stored time = ", time.time() - timer_start , "[sec]") else: source_mfcc = [] #source_data_sets = [] for name in sorted(glob.iglob(pre_stored_source_list, recursive=True)): print(name) x, fs = sf.read(name) f0, sp, ap = analyse_by_world_with_harverst(x, fs) mfcc = MFCC(fs) source_mfcc_temp = mfcc.mfcc(sp) #source_data = np.hstack([source_mfcc_temp, mfcc.delta(source_mfcc_temp)]) # static & dynamic featuers source_mfcc.append(source_mfcc_temp) #source_data_sets.append(source_data) total_data = [] i = 0 _s_len = len(source_mfcc) for name in sorted(glob.iglob(pre_stored_list, recursive=True)): print(name, len(total_data)) x, fs = sf.read(name) f0, sp, ap = analyse_by_world_with_harverst(x, fs) mfcc = MFCC(fs) target_mfcc = mfcc.mfcc(sp) dist, cost, acc, path = dtw(source_mfcc[i%_s_len], target_mfcc, dist=lambda x, y: norm(x - y, ord=1)) #print('Normalized distance between the two sounds:' + str(dist)) #print("target_mfcc = {0}".format(target_mfcc.shape)) aligned = alignment(source_mfcc[i%_s_len], target_mfcc, path) #target_data_sets = np.hstack([aligned, mfcc.delta(aligned)]) # static & dynamic features #learn_data = np.hstack((source_data_sets[i], target_data_sets)) learn_data = np.hstack([source_mfcc[i%_s_len], aligned]) total_data.append(learn_data) i += 1 with open(pre_stored_pickle, 'wb') as output: pickle.dump(total_data, output) print("Make, ", pre_stored_pickle) print("Make pre-stored time = ", time.time() - timer_start , "[sec]") """ 全事前学習出力話者からラムダを推定する. ラムダは適応学習で変容する. """ S = len(total_data) D = int(total_data[0].shape[1] / 2) print("total_data[0].shape = ", total_data[0].shape) print("S = ", S) print("D = ", D) timer_start = time.time() if os.path.exists(pre_stored_gmm_init_pickle): print("exist, ", pre_stored_gmm_init_pickle) with open(pre_stored_gmm_init_pickle, mode='rb') as f: initial_gmm = pickle.load(f) print("open, ", pre_stored_gmm_init_pickle) print("Load initial_gmm time = ", time.time() - timer_start , "[sec]") else: initial_gmm = GMM(n_components = Mixtured, covariance_type = 'full') initial_gmm.fit(np.vstack(total_data)) with open(pre_stored_gmm_init_pickle, 'wb') as output: pickle.dump(initial_gmm, output) print("Make, ", initial_gmm) print("Make initial_gmm time = ", time.time() - timer_start , "[sec]") weights = initial_gmm.weights_ source_means = initial_gmm.means_[:, :D] target_means = initial_gmm.means_[:, D:] covarXX = initial_gmm.covars_[:, :D, :D] covarXY = initial_gmm.covars_[:, :D, D:] covarYX = initial_gmm.covars_[:, D:, :D] covarYY = initial_gmm.covars_[:, D:, D:] fitted_source = source_means fitted_target = target_means """ SVはGMMスーパーベクトルで、各pre-stored学習における出力話者について平均ベクトルを推定する。 GMMの学習を見てみる必要があるか? 
""" timer_start = time.time() if os.path.exists(pre_stored_sv_npy): print("exist, ", pre_stored_sv_npy) sv = np.load(pre_stored_sv_npy) print("open, ", pre_stored_sv_npy) print("Load pre_stored_sv time = ", time.time() - timer_start , "[sec]") else: sv = [] for i in range(S): gmm = GMM(n_components = Mixtured, params = 'm', init_params = '', covariance_type = 'full') gmm.weights_ = initial_gmm.weights_ gmm.means_ = initial_gmm.means_ gmm.covars_ = initial_gmm.covars_ gmm.fit(total_data[i]) sv.append(gmm.means_) sv = np.array(sv) np.save(pre_stored_sv_npy, sv) print("Make pre_stored_sv time = ", time.time() - timer_start , "[sec]") """ 各事前学習出力話者のGMM平均ベクトルに対して主成分分析(PCA)を行う. PCAで求めた固有値と固有ベクトルからeigenvectorsとbiasvectorsを作る. """ timer_start = time.time() #source_pca source_n_component, source_n_features = sv[:, :, :D].reshape(S, Mixtured*D).shape # 標準化(分散を1、平均を0にする) source_stdsc = StandardScaler() # 共分散行列を求める source_X_std = source_stdsc.fit_transform(sv[:, :, :D].reshape(S, Mixtured*D)) # PCAを行う source_cov = source_X_std.T @ source_X_std / (source_n_component - 1) source_W, source_V_pca = np.linalg.eig(source_cov) print(source_W.shape) print(source_V_pca.shape) # データを主成分の空間に変換する source_X_pca = source_X_std @ source_V_pca print(source_X_pca.shape) #target_pca target_n_component, target_n_features = sv[:, :, D:].reshape(S, Mixtured*D).shape # 標準化(分散を1、平均を0にする) target_stdsc = StandardScaler() #共分散行列を求める target_X_std = target_stdsc.fit_transform(sv[:, :, D:].reshape(S, Mixtured*D)) #PCAを行う target_cov = target_X_std.T @ target_X_std / (target_n_component - 1) target_W, target_V_pca = np.linalg.eig(target_cov) print(target_W.shape) print(target_V_pca.shape) # データを主成分の空間に変換する target_X_pca = target_X_std @ target_V_pca print(target_X_pca.shape) eigenvectors = source_X_pca.reshape((Mixtured, D, S)), target_X_pca.reshape((Mixtured, D, S)) source_bias = np.mean(sv[:, :, :D], axis=0) target_bias = np.mean(sv[:, :, D:], axis=0) biasvectors = source_bias.reshape((Mixtured, D)), target_bias.reshape((Mixtured, D)) print("Do PCA time = ", time.time() - timer_start , "[sec]") """ 声質変換に用いる変換元音声と目標音声を読み込む. 
""" timer_start = time.time() source_mfcc_for_convert = [] source_sp_for_convert = [] source_f0_for_convert = [] source_ap_for_convert = [] fs_source = None for name in sorted(glob.iglob(for_convert_source, recursive=True)): print("source = ", name) x_source, fs_source = sf.read(name) f0_source, sp_source, ap_source = analyse_by_world_with_harverst(x_source, fs_source) mfcc_source = MFCC(fs_source) #mfcc_s_tmp = mfcc_s.mfcc(sp) #source_mfcc_for_convert = np.hstack([mfcc_s_tmp, mfcc_s.delta(mfcc_s_tmp)]) source_mfcc_for_convert.append(mfcc_source.mfcc(sp_source)) source_sp_for_convert.append(sp_source) source_f0_for_convert.append(f0_source) source_ap_for_convert.append(ap_source) target_mfcc_for_fit = [] target_f0_for_fit = [] target_ap_for_fit = [] for name in sorted(glob.iglob(for_convert_target, recursive=True)): print("target = ", name) x_target, fs_target = sf.read(name) f0_target, sp_target, ap_target = analyse_by_world_with_harverst(x_target, fs_target) mfcc_target = MFCC(fs_target) #mfcc_target_tmp = mfcc_target.mfcc(sp_target) #target_mfcc_for_fit = np.hstack([mfcc_t_tmp, mfcc_t.delta(mfcc_t_tmp)]) target_mfcc_for_fit.append(mfcc_target.mfcc(sp_target)) target_f0_for_fit.append(f0_target) target_ap_for_fit.append(ap_target) # 全部numpy.arrrayにしておく source_data_mfcc = np.array(source_mfcc_for_convert) source_data_sp = np.array(source_sp_for_convert) source_data_f0 = np.array(source_f0_for_convert) source_data_ap = np.array(source_ap_for_convert) target_mfcc = np.array(target_mfcc_for_fit) target_f0 = np.array(target_f0_for_fit) target_ap = np.array(target_ap_for_fit) print("Load Input and Target Voice time = ", time.time() - timer_start , "[sec]") """ 適応話者学習を行う. つまり,事前学習出力話者から目標話者の空間を作りだす. 適応話者文数ごとにfitted_targetを集めるのは未実装. """ timer_start = time.time() epoch=100 py = GMM(n_components = Mixtured, covariance_type = 'full') py.weights_ = weights py.means_ = target_means py.covars_ = covarYY fitted_target = None for i in range(len(target_mfcc)): print("adaptation = ", i+1, "/", len(target_mfcc)) target = target_mfcc[i] for x in range(epoch): print("epoch = ", x) predict = py.predict_proba(np.atleast_2d(target)) y = np.sum([predict[:, i: i + 1] * (target - biasvectors[1][i]) for i in range(Mixtured)], axis = 1) gamma = np.sum(predict, axis = 0) left = np.sum([gamma[i] * np.dot(eigenvectors[1][i].T, np.linalg.solve(py.covars_, eigenvectors[1])[i]) for i in range(Mixtured)], axis=0) right = np.sum([np.dot(eigenvectors[1][i].T, np.linalg.solve(py.covars_, y)[i]) for i in range(Mixtured)], axis = 0) weight = np.linalg.solve(left, right) fitted_target = np.dot(eigenvectors[1], weight) + biasvectors[1] py.means_ = fitted_target print("Load Input and Target Voice time = ", time.time() - timer_start , "[sec]") """ 変換に必要なものを残しておく. """ np.save(save_for_evgmm_covarXX, covarXX) np.save(save_for_evgmm_covarYX, covarYX) np.save(save_for_evgmm_fitted_source, fitted_source) np.save(save_for_evgmm_fitted_target, fitted_target) np.save(save_for_evgmm_weights, weights) np.save(save_for_evgmm_source_means, source_means) ```
# 基于注意力的神经机器翻译 此笔记本训练一个将爱沙尼亚语翻译为英语的序列到序列(sequence to sequence,简写为 seq2seq)模型。此例子难度较高,需要对序列到序列模型的知识有一定了解。 训练完此笔记本中的模型后,你将能够输入一个爱沙尼亚语句子,例如 *"Oot!"*,并返回其英语翻译 *"Stop!"* 对于一个简单的例子来说,翻译质量令人满意。但是更有趣的可能是生成的注意力图:它显示在翻译过程中,输入句子的哪些部分受到了模型的注意。 <img src="https://tensorflow.google.cn/images/spanish-english.png" alt="spanish-english attention plot"> 请注意:运行这个例子用一个 P100 GPU 需要花大约 10 分钟。 ``` import tensorflow as tf import matplotlib.pyplot as plt import matplotlib.ticker as ticker from sklearn.model_selection import train_test_split import unicodedata import re import numpy as np import os import io import time ``` ## 下载和准备数据集 我们将使用 http://www.manythings.org/anki/ 提供的一个语言数据集。这个数据集包含如下格式的语言翻译对: ``` May I borrow this book? ¿Puedo tomar prestado este libro? ``` 这个数据集中有很多种语言可供选择。我们将使用英语 - 爱沙尼亚语数据集。为方便使用,我们在谷歌云上提供了此数据集的一份副本。但是你也可以自己下载副本。下载完数据集后,我们将采取下列步骤准备数据: 1. 给每个句子添加一个 *开始* 和一个 *结束* 标记(token)。 2. 删除特殊字符以清理句子。 3. 创建一个单词索引和一个反向单词索引(即一个从单词映射至 id 的词典和一个从 id 映射至单词的词典)。 4. 将每个句子填充(pad)到最大长度。 ``` ''' # 下载文件 path_to_zip = tf.keras.utils.get_file( 'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip', extract=True) path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt" ''' path_to_file = "./lan/est.txt" # 将 unicode 文件转换为 ascii def unicode_to_ascii(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') def preprocess_sentence(w): w = unicode_to_ascii(w.lower().strip()) # 在单词与跟在其后的标点符号之间插入一个空格 # 例如: "he is a boy." => "he is a boy ." # 参考:https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation w = re.sub(r"([?.!,¿])", r" \1 ", w) w = re.sub(r'[" "]+', " ", w) # 除了 (a-z, A-Z, ".", "?", "!", ","),将所有字符替换为空格 w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w) w = w.rstrip().strip() # 给句子加上开始和结束标记 # 以便模型知道何时开始和结束预测 w = '<start> ' + w + ' <end>' return w en_sentence = u"May I borrow this book?" sp_sentence = u"¿Puedo tomar prestado este libro?" print(preprocess_sentence(en_sentence)) print(preprocess_sentence(sp_sentence).encode('utf-8')) # 1. 去除重音符号 # 2. 清理句子 # 3. 
返回这样格式的单词对:[ENGLISH, SPANISH] def create_dataset(path, num_examples): lines = io.open(path, encoding='UTF-8').read().strip().split('\n') word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]] return zip(*word_pairs) en, sp = create_dataset(path_to_file, None) print(en[-1]) print(sp[-1]) def max_length(tensor): return max(len(t) for t in tensor) def tokenize(lang): lang_tokenizer = tf.keras.preprocessing.text.Tokenizer( filters='') lang_tokenizer.fit_on_texts(lang) tensor = lang_tokenizer.texts_to_sequences(lang) tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor, padding='post') return tensor, lang_tokenizer def load_dataset(path, num_examples=None): # 创建清理过的输入输出对 targ_lang, inp_lang = create_dataset(path, num_examples) input_tensor, inp_lang_tokenizer = tokenize(inp_lang) target_tensor, targ_lang_tokenizer = tokenize(targ_lang) return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer ``` ### 限制数据集的大小以加快实验速度(可选) 在超过 10 万个句子的完整数据集上训练需要很长时间。为了更快地训练,我们可以将数据集的大小限制为 3 万个句子(当然,翻译质量也会随着数据的减少而降低): ``` # 尝试实验不同大小的数据集 num_examples = 30000 input_tensor, target_tensor, inp_lang, targ_lang = load_dataset(path_to_file, num_examples) # 计算目标张量的最大长度 (max_length) max_length_targ, max_length_inp = max_length(target_tensor), max_length(input_tensor) # 采用 80 - 20 的比例切分训练集和验证集 input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2) # 显示长度 print(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val)) def convert(lang, tensor): for t in tensor: if t!=0: print ("%d ----> %s" % (t, lang.index_word[t])) print ("Input Language; index to word mapping") convert(inp_lang, input_tensor_train[0]) print () print ("Target Language; index to word mapping") convert(targ_lang, target_tensor_train[0]) ``` ### 创建一个 tf.data 数据集 ``` BUFFER_SIZE = len(input_tensor_train) BATCH_SIZE = 64 steps_per_epoch = len(input_tensor_train)//BATCH_SIZE embedding_dim = 256 units = 1024 vocab_inp_size = len(inp_lang.word_index)+1 vocab_tar_size = len(targ_lang.word_index)+1 dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE) dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) example_input_batch, example_target_batch = next(iter(dataset)) example_input_batch.shape, example_target_batch.shape ``` ## 编写编码器 (encoder) 和解码器 (decoder) 模型 实现一个基于注意力的编码器 - 解码器模型。关于这种模型,你可以阅读 TensorFlow 的 [神经机器翻译 (序列到序列) 教程](https://github.com/tensorflow/nmt)。本示例采用一组更新的 API。此笔记本实现了上述序列到序列教程中的 [注意力方程式](https://github.com/tensorflow/nmt#background-on-the-attention-mechanism)。下图显示了注意力机制为每个输入单词分配一个权重,然后解码器将这个权重用于预测句子中的下一个单词。下图和公式是 [Luong 的论文](https://arxiv.org/abs/1508.04025v5)中注意力机制的一个例子。 <img src="https://tensorflow.google.cn/images/seq2seq/attention_mechanism.jpg" width="500" alt="attention mechanism"> 输入经过编码器模型,编码器模型为我们提供形状为 *(批大小,最大长度,隐藏层大小)* 的编码器输出和形状为 *(批大小,隐藏层大小)* 的编码器隐藏层状态。 下面是所实现的方程式: <img src="https://tensorflow.google.cn/images/seq2seq/attention_equation_0.jpg" alt="attention equation 0" width="800"> <img src="https://tensorflow.google.cn/images/seq2seq/attention_equation_1.jpg" alt="attention equation 1" width="800"> 本教程的编码器采用 [Bahdanau 注意力](https://arxiv.org/pdf/1409.0473.pdf)。在用简化形式编写之前,让我们先决定符号: * FC = 完全连接(密集)层 * EO = 编码器输出 * H = 隐藏层状态 * X = 解码器输入 以及伪代码: * `score = FC(tanh(FC(EO) + FC(H)))` * `attention weights = softmax(score, axis = 1)`。 Softmax 默认被应用于最后一个轴,但是这里我们想将它应用于 *第一个轴*, 因为分数 (score) 的形状是 
*(批大小,最大长度,隐藏层大小)*。最大长度 (`max_length`) 是我们的输入的长度。因为我们想为每个输入分配一个权重,所以 softmax 应该用在这个轴上。 * `context vector = sum(attention weights * EO, axis = 1)`。选择第一个轴的原因同上。 * `embedding output` = 解码器输入 X 通过一个嵌入层。 * `merged vector = concat(embedding output, context vector)` * 此合并后的向量随后被传送到 GRU 每个步骤中所有向量的形状已在代码的注释中阐明: ``` class Encoder(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz): super(Encoder, self).__init__() self.batch_sz = batch_sz self.enc_units = enc_units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = tf.keras.layers.GRU(self.enc_units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform') def call(self, x, hidden): x = self.embedding(x) output, state = self.gru(x, initial_state = hidden) return output, state def initialize_hidden_state(self): return tf.zeros((self.batch_sz, self.enc_units)) encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE) # 样本输入 sample_hidden = encoder.initialize_hidden_state() sample_output, sample_hidden = encoder(example_input_batch, sample_hidden) print ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape)) print ('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape)) class BahdanauAttention(tf.keras.layers.Layer): def __init__(self, units): super(BahdanauAttention, self).__init__() self.W1 = tf.keras.layers.Dense(units) self.W2 = tf.keras.layers.Dense(units) self.V = tf.keras.layers.Dense(1) def call(self, query, values): # 隐藏层的形状 == (批大小,隐藏层大小) # hidden_with_time_axis 的形状 == (批大小,1,隐藏层大小) # 这样做是为了执行加法以计算分数 hidden_with_time_axis = tf.expand_dims(query, 1) # 分数的形状 == (批大小,最大长度,1) # 我们在最后一个轴上得到 1, 因为我们把分数应用于 self.V # 在应用 self.V 之前,张量的形状是(批大小,最大长度,单位) score = self.V(tf.nn.tanh( self.W1(values) + self.W2(hidden_with_time_axis))) # 注意力权重 (attention_weights) 的形状 == (批大小,最大长度,1) attention_weights = tf.nn.softmax(score, axis=1) # 上下文向量 (context_vector) 求和之后的形状 == (批大小,隐藏层大小) context_vector = attention_weights * values context_vector = tf.reduce_sum(context_vector, axis=1) return context_vector, attention_weights attention_layer = BahdanauAttention(10) attention_result, attention_weights = attention_layer(sample_hidden, sample_output) print("Attention result shape: (batch size, units) {}".format(attention_result.shape)) print("Attention weights shape: (batch_size, sequence_length, 1) {}".format(attention_weights.shape)) class Decoder(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz): super(Decoder, self).__init__() self.batch_sz = batch_sz self.dec_units = dec_units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = tf.keras.layers.GRU(self.dec_units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform') self.fc = tf.keras.layers.Dense(vocab_size) # 用于注意力 self.attention = BahdanauAttention(self.dec_units) def call(self, x, hidden, enc_output): # 编码器输出 (enc_output) 的形状 == (批大小,最大长度,隐藏层大小) context_vector, attention_weights = self.attention(hidden, enc_output) # x 在通过嵌入层后的形状 == (批大小,1,嵌入维度) x = self.embedding(x) # x 在拼接 (concatenation) 后的形状 == (批大小,1,嵌入维度 + 隐藏层大小) x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1) # 将合并后的向量传送到 GRU output, state = self.gru(x) # 输出的形状 == (批大小 * 1,隐藏层大小) output = tf.reshape(output, (-1, output.shape[2])) # 输出的形状 == (批大小,vocab) x = self.fc(output) return x, state, attention_weights decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE) sample_decoder_output, 
_, _ = decoder(tf.random.uniform((64, 1)), sample_hidden, sample_output) print ('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape)) ``` ## 定义优化器和损失函数 ``` optimizer = tf.keras.optimizers.Adam() loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_mean(loss_) ``` ## 检查点(基于对象保存) ``` checkpoint_dir = './training_checkpoints' checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") checkpoint = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder) ``` ## 训练 1. 将 *输入* 传送至 *编码器*,编码器返回 *编码器输出* 和 *编码器隐藏层状态*。 2. 将编码器输出、编码器隐藏层状态和解码器输入(即 *开始标记*)传送至解码器。 3. 解码器返回 *预测* 和 *解码器隐藏层状态*。 4. 解码器隐藏层状态被传送回模型,预测被用于计算损失。 5. 使用 *教师强制 (teacher forcing)* 决定解码器的下一个输入。 6. *教师强制* 是将 *目标词* 作为 *下一个输入* 传送至解码器的技术。 7. 最后一步是计算梯度,并将其应用于优化器和反向传播。 ``` @tf.function def train_step(inp, targ, enc_hidden): loss = 0 with tf.GradientTape() as tape: enc_output, enc_hidden = encoder(inp, enc_hidden) dec_hidden = enc_hidden dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1) # 教师强制 - 将目标词作为下一个输入 for t in range(1, targ.shape[1]): # 将编码器输出 (enc_output) 传送至解码器 predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output) loss += loss_function(targ[:, t], predictions) # 使用教师强制 dec_input = tf.expand_dims(targ[:, t], 1) batch_loss = (loss / int(targ.shape[1])) variables = encoder.trainable_variables + decoder.trainable_variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables)) return batch_loss EPOCHS = 10 for epoch in range(EPOCHS): start = time.time() enc_hidden = encoder.initialize_hidden_state() total_loss = 0 for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)): batch_loss = train_step(inp, targ, enc_hidden) total_loss += batch_loss if batch % 100 == 0: print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1, batch, batch_loss.numpy())) # 每 2 个周期(epoch),保存(检查点)一次模型 if (epoch + 1) % 2 == 0: checkpoint.save(file_prefix = checkpoint_prefix) print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / steps_per_epoch)) print('Time taken for 1 epoch {} sec\n'.format(time.time() - start)) ``` ## 翻译 * 评估函数类似于训练循环,不同之处在于在这里我们不使用 *教师强制*。每个时间步的解码器输入是其先前的预测、隐藏层状态和编码器输出。 * 当模型预测 *结束标记* 时停止预测。 * 存储 *每个时间步的注意力权重*。 请注意:对于一个输入,编码器输出仅计算一次。 ``` def evaluate(sentence): attention_plot = np.zeros((max_length_targ, max_length_inp)) sentence = preprocess_sentence(sentence) inputs = [inp_lang.word_index[i] for i in sentence.split(' ')] inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post') inputs = tf.convert_to_tensor(inputs) result = '' hidden = [tf.zeros((1, units))] enc_out, enc_hidden = encoder(inputs, hidden) dec_hidden = enc_hidden dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0) for t in range(max_length_targ): predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out) # 存储注意力权重以便后面制图 attention_weights = tf.reshape(attention_weights, (-1, )) attention_plot[t] = attention_weights.numpy() predicted_id = tf.argmax(predictions[0]).numpy() result += targ_lang.index_word[predicted_id] + ' ' if targ_lang.index_word[predicted_id] == '<end>': return result, sentence, attention_plot # 预测的 ID 被输送回模型 dec_input = tf.expand_dims([predicted_id], 0) return result, sentence, attention_plot # 注意力权重制图函数 def 
plot_attention(attention, sentence, predicted_sentence): fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(1, 1, 1) ax.matshow(attention, cmap='viridis') fontdict = {'fontsize': 14} ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90) ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict) ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) plt.show() def translate(sentence): result, sentence, attention_plot = evaluate(sentence) print('Input: %s' % (sentence)) print('Predicted translation: {}'.format(result)) attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))] plot_attention(attention_plot, sentence.split(' '), result.split(' ')) ``` ## 恢复最新的检查点并验证 ``` # 恢复检查点目录 (checkpoint_dir) 中最新的检查点 checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)) translate(u'hace mucho frio aqui.') translate(u'esta es mi vida.') translate(u'¿todavia estan en casa?') # 错误的翻译 translate(u'trata de averiguarlo.') ```
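To make the shape bookkeeping of the Bahdanau attention described above concrete, here is a minimal NumPy sketch (not part of the original notebook; the weight matrices are just random stand-ins for the Dense layers) that mirrors the steps score → softmax over the max_length axis → context vector:

```
import numpy as np

# Made-up dimensions matching the notebook: batch=64, max_length=16, units=1024
batch, max_len, units = 64, 16, 1024

enc_output = np.random.randn(batch, max_len, units)   # EO: (batch, max_length, units)
dec_hidden = np.random.randn(batch, units)            # H:  (batch, units)

# Random stand-ins for the FC (Dense) layers W1, W2, V
W1 = np.random.randn(units, units)
W2 = np.random.randn(units, units)
V = np.random.randn(units, 1)

hidden_with_time_axis = dec_hidden[:, np.newaxis, :]  # (batch, 1, units), broadcasts over max_length

# score = V(tanh(W1(EO) + W2(H)))  ->  (batch, max_length, 1)
score = np.tanh(enc_output @ W1 + hidden_with_time_axis @ W2) @ V

# softmax over axis=1 (the max_length axis): one weight per input position, summing to 1
weights = np.exp(score - score.max(axis=1, keepdims=True))
weights = weights / weights.sum(axis=1, keepdims=True)

# context vector: weighted sum over the max_length axis  ->  (batch, units)
context = (weights * enc_output).sum(axis=1)

print(score.shape, weights.shape, context.shape)  # (64, 16, 1) (64, 16, 1) (64, 1024)
```

Softmaxing over axis 1 is exactly what makes the weights for each example sum to one across input positions, which is why the context vector ends up with shape (batch, units).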
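The loss function defined in the training section above also masks out padding: any position whose target id is 0 contributes nothing to the average. Here is a tiny NumPy sketch of that masking logic (not from the original notebook; the ids and logits are made up):

```
import numpy as np

def masked_nll(real_ids, log_probs):
    """real_ids: (batch,) integer target ids; log_probs: (batch, vocab) log-probabilities."""
    per_example = -log_probs[np.arange(len(real_ids)), real_ids]   # cross-entropy per position
    mask = (real_ids != 0).astype(per_example.dtype)               # 0 wherever the target is padding
    return np.mean(per_example * mask)

vocab = 5
real = np.array([2, 0, 4])                                         # the middle position is padding (id 0)
logits = np.random.randn(3, vocab)
log_probs = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))  # log-softmax
print(masked_nll(real, log_probs))
```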
<a href="https://colab.research.google.com/github/ervinismu/machine-learning/blob/master/ML0120EN_5_1_Review_Autoencoders.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <h1>AUTOENCODERS</h1> Welcome to this notebook about autoencoders. In this notebook you will find an explanation of what is an autoencoder, how it works, and see an implementation of an autoencoder in TensorFlow. # Table of Contents <p>- <a href="#ref1">Introduction</a></p> <p>- <a href="#ref2">Feature Extraction and Dimensionality Reduction</a></p> <p>- <a href="#ref3">Autoencoder Structure</a></p> <p>- <a href="#ref4">Performance</a></p> <p>- <a href="#ref5">Training: Loss Function</a></p> <p>- <a href="#ref6">Code</a></p> <p></p> By the end of this notebook, you should be able to create simple autoencoders and how to apply them in problems. </div> <br> ---------------- <a id="ref1"></a> # Introduction An autoencoder, also known as autoassociator or Diabolo networks, is a artificial neural network employed to recreate the given input. It takes a set of **unlabeled** inputs, encodes them and then tries to extract the most valuable information from them. They are used for feature extraction, learning generative models of data, dimensionality reduction and can be used for compression. A 2006 paper named Reducing the Dimensionality of Data with Neural Networks, done by G. E. Hinton and R. R. Salakhutdinov, showed better results than years of refining other types of network, and was a breakthrough in the field of Neural Networks, a field that was "stagnant" for 10 years. Now, autoencoders, based on Restricted Boltzmann Machines, are employed in some of the largest deep learning applications. They are the building blocks of Deep Belief Networks (DBN). <img src = "https://ibm.box.com/shared/static/xlkv9v7xzxhjww681dq3h1pydxcm4ktp.png" style="width: 350px;"/> <a id="ref2"></a> # Feature Extraction and Dimensionality Reduction An example given by Nikhil Buduma in KdNuggets (<a href="http://www.kdnuggets.com/2015/03/deep-learning-curse-dimensionality-autoencoders.html">link</a>) can explain the utility of this type of Neural Network with excellence. Say that you want to extract what feeling the person in a photography is feeling. Using as an example the following 256x256 grayscale picture: <img src = "https://ibm.box.com/shared/static/r5knpow4bk2farlvxia71e9jp2f2u126.png" /> But then we start facing a bottleneck! This image being 256x256 correspond with an input vector of 65536 dimensions! If we used an image produced with convential cellphone cameras, that generates images of 4000 x 3000 pixels, we would have 12 million dimensions to analyse. This bottleneck is further problematized as the difficulty of a machine learning problem is increased as more dimensions are involved. According to a 1982 study by C.J. Stone (<a href="http://www-personal.umich.edu/~jizhu/jizhu/wuke/Stone-AoS82.pdf">link</a>), the time to fit a model, at best, is: <div class="alert alert-block alert-info" style="margin-top: 20px"> <center><font size = 6><strong>$m^{-p/(2p+d)}$</strong></font></center> <br> Where: <br> m: Number of data points <br> d: Dimensionality of the data <br> p: Parameter that depends on the model </div> As you can see, it increases exponentially! Returning to our example, we don't need to use all of the 65,536 dimensions to classify an emotion. 
A human identify emotions according to some specific facial expression, some **key features**, like the shape of the mouth and eyebrows. <img src="https://ibm.box.com/shared/static/m8urvuqujkt2vt1ru1fnslzh24pv7hn4.png" height="256" width="256"/> -------------------------------------- <a id="ref3"></a> # Autoencoder Structure <img src="https://ibm.box.com/shared/static/no7omt2jhqvv7uuls7ihnzikyl9ysnfp.png" style="width: 400px;"/> An autoencoder can be divided in two parts, the **encoder** and the **decoder**. The encoder needs to compress the representation of an input. In this case we are going to compress the face of our actor, that consists of 2000 dimensional data to only 30 dimensions, taking some steps between this compression. The decoder is a reflection of the encoder network. It works to recreate the input, as closely as possible. It has an important role during training, to force the autoencoder to select the most important features in the compressed representation. -------------------------------------- <a id="ref4"></a> # Performance After the training has been done, you can use the encoded data as a reliable dimensionally-reduced data, applying it to any problems that a dimensionality reduction problem seem to fit. <img src="https://ibm.box.com/shared/static/yt3xyon4g2jyw1w9qup1mvx7cgh28l64.png"/> This image was extracted from the Hinton paper comparing the two-dimensional reduction for 500 digits of the MNIST, with PCA on the left and autoencoder on the right. We can see that the autoencoder provided us with a better separation of data. <hr> <a id="ref5"></a> # Training: Loss function An autoencoder uses the Loss function to properly train the network. The Loss function will calculate the differences between our output and the expected results. After that, we can minimize this error doing gradient descent. There are more than one type of Loss function, it depends on the type of data. ### Binary Values: $$l(f(x)) = - \sum_{k} (x_k log(\hat{x}_k) + (1 - x_k) \log (1 - \hat{x}_k) \ )$$ For binary values, we can use an equation based on the sum of Bernoulli's cross-entropy. $x_k$ is one of our inputs and $\hat{x}_k$ is the respective output. We use this function so that if $x_k$ equals to one, we want to push $\hat{x}_k$ as close as possible to one. The same if $x_k$ equals to zero. If the value is one, we just need to calculate the first part of the formula, that is, $- x_k log(\hat{x}_k)$. Which, turns out to just calculate $- log(\hat{x}_k)$. And if the value is zero, we need to calculate just the second part, $(1 - x_k) \log (1 - \hat{x}_k) \ )$ - which turns out to be $log (1 - \hat{x}_k) $. ### Real values: $$l(f(x)) = - 1/2\sum_{k} (\hat{x}_k- x_k \ )^2$$ As the above function would behave badly with inputs that are not 0 or 1, we can use the sum of squared differences for our Loss function. If you use this loss function, it's necessary that you use a linear activation function for the output layer. As it was with the above example, $x_k$ is one of our inputs and $\hat{x}_k$ is the respective output, and we want to make our output as similar as possible to our input. ### Loss Gradient: $$\nabla_{\hat{a}(x^{(t)})} \ l( \ f(x^{(t)})) = \hat{x}^{(t)} - x^{(t)} $$ We use the gradient descent to reach the local minumum of our function $l( \ f(x^{(t)})$, taking steps towards the negative of the gradient of the function in the current point. Our function talks about the preactivation of the output layer $(\nabla_{\hat{a}(x^{(t)})})$ of the loss $l( \ f(x^{(t)})$. 
It's actually a simple formula, it just calculates the difference between our output $\hat{x}^{(t)}$ and our input $x^{(t)}$. Then our network just backpropagates our gradient $\nabla_{\hat{a}(x^{(t)})} \ l( \ f(x^{(t)}))$ through the network using **backpropagation**. ------------------- <a id="ref6"></a> # Code For this part, we walk through a lot of Python 2.7.11 code. We are going to use the MNIST dataset for our example. The following code was created by Aymeric Damien. You can find some of his code in [here](https://github.com/aymericdamien). There are just some modifications for us to import the datasets to Jupyter Notebooks. Let's call our imports and make the MNIST data available to use. ``` from __future__ import division, print_function, absolute_import import tensorflow as tf import numpy as np import matplotlib.pyplot as plt %matplotlib inline # Import MINST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) ``` Now, let's give the parameters that are going to be used by our NN. ``` learning_rate = 0.01 training_epochs = 20 batch_size = 256 display_step = 1 examples_to_show = 10 # Network Parameters n_hidden_1 = 256 # 1st layer num features n_hidden_2 = 128 # 2nd layer num features n_input = 784 # MNIST data input (img shape: 28*28) # tf Graph input (only pictures) X = tf.placeholder("float", [None, n_input]) weights = { 'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])), 'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])), 'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])), } biases = { 'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])), 'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])), 'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])), 'decoder_b2': tf.Variable(tf.random_normal([n_input])), } ``` Now we need to create our encoder. For this, we are going to use sigmoidal functions. Sigmoidal functions continue to deliver great results with this type of networks. This is due to having a good derivative that is well-suited to backpropagation. We can create our encoder using the sigmoidal function like this: ``` # Building the encoder def encoder(x): # Encoder first layer with sigmoid activation #1 layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1'])) # Encoder second layer with sigmoid activation #2 layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2'])) return layer_2 ``` And the decoder: You can see that the layer_1 in the encoder is the layer_2 in the decoder and vice-versa. ``` # Building the decoder def decoder(x): # Decoder first layer with sigmoid activation #1 layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1'])) # Decoder second layer with sigmoid activation #2 layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2'])) return layer_2 ``` Let's construct our model. In the variable `cost` we have the loss function and in the `optimizer` variable we have our gradient used for backpropagation. ``` # Construct model encoder_op = encoder(X) decoder_op = decoder(encoder_op) # Prediction y_pred = decoder_op # Targets (Labels) are the input data. 
y_true = X # Define loss and optimizer, minimize the squared error cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2)) optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost) # Initializing the variables init = tf.global_variables_initializer() ``` The training will run for 20 epochs. ``` # Launch the graph # Using InteractiveSession (more convenient while using Notebooks) sess = tf.InteractiveSession() sess.run(init) total_batch = int(mnist.train.num_examples/batch_size) # Training cycle for epoch in range(training_epochs): # Loop over all batches for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # Run optimization op (backprop) and cost op (to get loss value) _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs}) # Display logs per epoch step if epoch % display_step == 0: print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c)) print("Optimization Finished!") ``` Now, let's apply encode and decode for our tests. ``` # Applying encode and decode over test set encode_decode = sess.run( y_pred, feed_dict={X: mnist.test.images[:examples_to_show]}) ``` Let's simply visualize our graphs! ``` # Compare original images with their reconstructions f, a = plt.subplots(2, 10, figsize=(10, 2)) for i in range(examples_to_show): a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28))) a[1][i].imshow(np.reshape(encode_decode[i], (28, 28))) ``` As you can see, the reconstructions were successful, although some noise was added to the images. ## Want to learn more? Running deep learning programs usually needs a high performance platform. PowerAI speeds up deep learning and AI. Built on IBM's Power Systems, PowerAI is a scalable software platform that accelerates deep learning and AI with blazing performance for individual users or enterprises. The PowerAI platform supports popular machine learning libraries and dependencies including Tensorflow, Caffe, Torch, and Theano. You can download a [free version of PowerAI](https://cocl.us/ML0120EN_PAI). Also, you can use Data Science Experience to run these notebooks faster with bigger datasets. Data Science Experience is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, DSX enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of DSX users today with a free account at [Data Science Experience](https://cocl.us/ML0120EN_DSX). This is the end of this lesson. Hopefully, now you have a deeper and intuitive understanding regarding the autoencoder model. Thank you for reading this notebook, and good luck on your studies. # Thanks for completing this lesson!
Authors: - <a href = "https://www.linkedin.com/in/franciscomagioli">Francisco Magioli</a> - <a href = "https://ca.linkedin.com/in/erich-natsubori-sato">Erich Natsubori Sato</a> - Gabriel Garcez Barros Souza ### References: - https://en.wikipedia.org/wiki/Autoencoder - http://ufldl.stanford.edu/tutorial/unsupervised/Autoencoders/ - http://www.slideshare.net/billlangjun/simple-introduction-to-autoencoder - http://www.slideshare.net/danieljohnlewis/piotr-mirowski-review-autoencoders-deep-learning-ciuuk14 - https://cs.stanford.edu/~quocle/tutorial2.pdf - https://gist.github.com/hussius/1534135a419bb0b957b9 - http://www.deeplearningbook.org/contents/autoencoders.html - http://www.kdnuggets.com/2015/03/deep-learning-curse-dimensionality-autoencoders.html/ - https://www.youtube.com/watch?v=xTU79Zs4XKY - http://www-personal.umich.edu/~jizhu/jizhu/wuke/Stone-AoS82.pdf - Reducing the Dimensionality of Data with Neural Networks, G. E. Hinton, R. R. Salakhutdinov, Science 28 Jul 2006, Vol. 313, Issue 5786, pp. 504-507, DOI: 10.1126/science.1127647 - http://science.sciencemag.org/content/313/5786/504.full <hr> <p>Copyright &copy; 2017 IBM <a href="https://cognitiveclass.ai/?utm_source=ML0151&utm_medium=lab&utm_campaign=cclab">IBM Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
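Before moving on, here is a small NumPy sketch of the two reconstruction losses described in the autoencoder's "Training: Loss function" section above (not part of the original notebook; the input and reconstruction values are made up):

```
import numpy as np

def binary_cross_entropy(x, x_hat, eps=1e-12):
    # l(f(x)) = -sum_k [ x_k log(x_hat_k) + (1 - x_k) log(1 - x_hat_k) ]
    x_hat = np.clip(x_hat, eps, 1 - eps)   # avoid log(0)
    return -np.sum(x * np.log(x_hat) + (1 - x) * np.log(1 - x_hat))

def squared_error(x, x_hat):
    # l(f(x)) = 1/2 * sum_k (x_hat_k - x_k)^2
    return 0.5 * np.sum((x_hat - x) ** 2)

x = np.array([0.0, 1.0, 1.0, 0.0])       # binary input
x_hat = np.array([0.1, 0.9, 0.8, 0.2])   # reconstruction
print(binary_cross_entropy(x, x_hat))    # small when the reconstruction agrees with the input
print(squared_error(x, x_hat))
```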
# The Variational Quantum Linear Solver ``` import qiskit from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister from qiskit import Aer, transpile, assemble import math import random import numpy as np from scipy.optimize import minimize ``` ## 1. Introduction The Variational Quantum Linear Solver, or the VQLS is a variational quantum algorithm that utilizes VQE in order to solve systems of linear equations more efficiently than classical computational algorithms. Specifically, if we are given some matrix $\textbf{A}$, such that $\textbf{A} |\textbf{x}\rangle \ = \ |\textbf{b}\rangle$, where $|\textbf{b}\rangle$ is some known vector, the VQLS algorithm is theoretically able to find a normalized $|x\rangle$ that is proportional to $|\textbf{x}\rangle$, which makes the above relationship true. The output of this algorithm is identical to that of the HHL Quantum Linear-Solving Algorithm, except, while HHL provides a much more favourable computation speedup over VQLS, the variational nature of our algorithm allows for it to be performed on NISQ quantum computers, while HHL would require much more robust quantum hardware, and many more qubits. ## 2. The Algorithm To begin, the inputs into this algorithm are evidently the matrix $\textbf{A}$, which we have to decompose into a linear combination of unitaries with complex coefficients: $$A \ = \ \displaystyle\sum_{n} c_n \ A_n$$ Where each $A_n$ is some unitary, and some unitary $U$ that prepares state $|\textbf{b}\rangle$ from $|0\rangle$. Now, recall the general structure of a variational quantum algorithm. We have to construct a quantum cost function, which can be evaluated with a low-depth parameterized quantum circuit, then output to the classical optimizer. This allows us to search a parameter space for some set of parameters $\alpha$, such that $|\psi(\alpha)\rangle \ = \ \frac{|\textbf{x}\rangle}{|| \textbf{x} ||}$, where $|\psi(k)\rangle$ is the output of out quantum circuit corresponding to some parameter set $k$. Before we actually begin constructing the cost function, let's take a look at a "high level" overview of the sub-routines within this algorithm, as illustrated in this image from the original paper: ![alt text](images/bro.png) So essentially, we start off with a qubit register, with each qubit initialized to $|0\rangle$. Our algorithm takes its inputs, then prepares and evaluates the cost function, starting with the creation of some ansatz $V(\alpha)$. If the computed cost is greater than some parameter $\gamma$, the algorithm is run again with updated parameters, and if not, the algorithm terminates, and the ansatz is calculated with the optimal parameters (determined at termination). This gives us the state vector that minimizes our cost function, and therefore the normalized form of $|\textbf{x}\rangle$. ## 3. Qiskit Implementation ### Fixed Hardware Ansatz Let's start off by considering the ansatz $V(\alpha)$, which is just a circuit that prepares some arbitrary state $|\psi(k)\rangle$. This allows us to "search" the state space by varying some set of parameters, $k$. 
Anyways, the ansatz that we will use for this implementation is given as follows: ``` def apply_fixed_ansatz(qubits, parameters): for iz in range (0, len(qubits)): circ.ry(parameters[0][iz], qubits[iz]) circ.cz(qubits[0], qubits[1]) circ.cz(qubits[2], qubits[0]) for iz in range (0, len(qubits)): circ.ry(parameters[1][iz], qubits[iz]) circ.cz(qubits[1], qubits[2]) circ.cz(qubits[2], qubits[0]) for iz in range (0, len(qubits)): circ.ry(parameters[2][iz], qubits[iz]) circ = QuantumCircuit(3) apply_fixed_ansatz([0, 1, 2], [[1, 1, 1], [1, 1, 1], [1, 1, 1]]) circ.draw() ``` This is called a **fixed hardware ansatz**: the configuration of quantum gates remains the same for each run of the circuit, all that changes are the parameters. Unlike the QAOA ansatz, it is not composed solely of Trotterized Hamiltonians. The applications of $Ry$ gates allow us to search the state space, while the $CZ$ gates create "interference" between the different qubit states. Now, it makes sense for us to consider the actual **cost function**. The goal of our algorithm will be to minimize cost, so when $|\Phi\rangle \ = \ \textbf{A} |\psi(k)\rangle$ is very close to $|\textbf{b}\rangle$, we want our cost function's output to be very small, and when the vectors are close to being orthogonal, we want the cost function to be very large. Thus, we introduce the "projection" Hamiltonian: $$H_P \ = \ \mathbb{I} \ - \ |b\rangle \langle b|$$ Where we have: $$C_P \ = \ \langle \Phi | H_P | \Phi \rangle \ = \ \langle \Phi | (\mathbb{I} \ - \ |b\rangle \langle b|) |\Phi \rangle \ = \ \langle \Phi | \Phi \rangle \ - \ \langle \Phi |b\rangle \langle b | \Phi \rangle$$ Notice how the second term tells us "how much" of $|\Phi\rangle$ lies along $|b\rangle$. We then subtract this from another number to get the desired low number when the inner product of $|\Phi\rangle$ and $|b\rangle$ is greater (they agree more), and the opposite for when they are close to being orthogonal. This is looking good so far! However, there is still one more thing we can do to increase the accuracy of the algorithm: normalizing the cost function. This is due to the fact that if $|\Phi\rangle$ has a small norm, then the cost function will still be low, even if it does not agree with $|\textbf{b}\rangle$. Thus, we replace $|\Phi\rangle$ with $\frac{|\Phi\rangle}{\sqrt{\langle \Phi | \Phi \rangle}}$: $$\hat{C}_P \ = \ \frac{\langle \Phi | \Phi \rangle}{\langle \Phi | \Phi \rangle} \ - \ \frac{\langle \Phi |b\rangle \langle b | \Phi \rangle}{\langle \Phi | \Phi \rangle} \ = \ 1 \ - \ \frac{\langle \Phi |b\rangle \langle b | \Phi \rangle}{\langle \Phi | \Phi \rangle} \ = \ 1 \ - \ \frac{|\langle b | \Phi \rangle|^2}{\langle \Phi | \Phi \rangle}$$ Ok, so, we have prepared our state $|\psi(k)\rangle$ with the ansatz. Now, we have two values to calculate in order to evaluate the cost function, namely $|\langle b | \Phi \rangle|^2$ and $\langle \Phi | \Phi \rangle$. Luckily, a nifty little quantum subroutine called the **Hadamard Test** allows us to do this! 
Essentially, if we have some unitary $U$ and some state $|\phi\rangle$, and we want to find the expectation value of $U$ with respect to the state, $\langle \phi | U | \phi \rangle$, then we can evaluate the following circuit: <br><br> ![image1](images/h.png) <br><br> Then, the probability of measuring the first qubit to be $0$ is equal to $\frac{1}{2} (1 \ + \ \text{Re}\langle U \rangle)$ and the probability of measuring $1$ is $\frac{1}{2} (1 \ - \ \text{Re}\langle U \rangle)$, so subtracting the two probabilities gives us $\text{Re} \langle U \rangle$. Luckily, the matrices we will be dealing with when we test this algorithm are completely real, so $\text{Re} \langle U \rangle \ = \ \langle U \rangle$, for this specific implementation. Here is how the Hadamard test works. By the circuit diagram, we have as our general state vector: <br> $$\frac{|0\rangle \ + \ |1\rangle}{\sqrt{2}} \ \otimes \ |\psi\rangle \ = \ \frac{|0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ |\psi\rangle}{\sqrt{2}}$$ <br> Applying our controlled unitary: <br> $$\frac{|0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ |\psi\rangle}{\sqrt{2}} \ \rightarrow \ \frac{|0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ U|\psi\rangle}{\sqrt{2}}$$ <br> Then applying the Hadamard gate to the first qubit: <br> $$\frac{|0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ U|\psi\rangle}{\sqrt{2}} \ \rightarrow \ \frac{1}{2} \ \big[ |0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ |\psi\rangle \ + \ |0\rangle \ \otimes \ U|\psi\rangle \ - \ |1\rangle \ \otimes \ U|\psi\rangle \big]$$ <br> $$\Rightarrow \ |0\rangle \ \otimes \ (\mathbb{I} \ + \ U)|\psi\rangle \ + \ |1\rangle \ \otimes \ (\mathbb{I} \ - \ U)|\psi\rangle$$ <br> When we take a measurement of the first qubit, remember that in order to find the probability of measuring $0$, we must take the inner product of the state vector with $|0\rangle$, then multiply by its complex conjugate (see the quantum mechanics section if you are not familiar with this). The same follows for the probability of measuring $1$. Thus, we have: <br> $$P(0) \ = \ \frac{1}{4} \ \langle \psi | (\mathbb{I} \ + \ U) (\mathbb{I} \ + \ U^{\dagger}) |\psi\rangle \ = \ \frac{1}{4} \ \langle \psi | (\mathbb{I}^2 \ + U \ + \ U^{\dagger} \ + \ U^{\dagger} U) |\psi\rangle \ = \ \frac{1}{4} \ \langle \psi | (2\mathbb{I} \ + U \ + \ U^{\dagger}) |\psi\rangle$$ <br> $$\Rightarrow \ \frac{1}{4} \Big[ 2 \ + \ \langle \psi | U^{\dagger} | \psi \rangle \ + \ \langle \psi | U | \psi \rangle \Big] \ = \ \frac{1}{4} \Big[ 2 \ + \ (\langle \psi | U | \psi \rangle)^{*} \ + \ \langle \psi | U | \psi \rangle \Big] \ = \ \frac{1}{2} (1 \ + \ \text{Re} \ \langle \psi | U | \psi \rangle)$$ <br> By a similar procedure, we get: <br> $$P(1) \ = \ \frac{1}{2} \ (1 \ - \ \text{Re} \ \langle \psi | U | \psi \rangle)$$ <br> And so, by taking the difference: <br> $$P(0) \ - \ P(1) \ = \ \text{Re} \ \langle \psi | U | \psi \rangle$$ <br> Cool! Now, we can actually implement this for the two values we have to compute. 
Starting with $\langle \Phi | \Phi \rangle$, we have: <br> $$\langle \Phi | \Phi \rangle \ = \ \langle \psi(k) | A^{\dagger} A |\psi(k) \rangle \ = \ \langle 0 | V(k)^{\dagger} A^{\dagger} A V(k) |0\rangle \ = \ \langle 0 | V(k)^{\dagger} \Big( \displaystyle\sum_{n} c_n \ A_n \Big)^{\dagger} \Big( \displaystyle\sum_{n} c_n \ A_n \Big) V(k) |0\rangle$$ <br> $$\Rightarrow \ \langle \Phi | \Phi \rangle \ = \ \displaystyle\sum_{m} \displaystyle\sum_{n} c_m^{*} c_n \langle 0 | V(k)^{\dagger} A_m^{\dagger} A_n V(k) |0\rangle$$ <br> and so our task becomes computing every possible term $\langle 0 | V(k)^{\dagger} A_m^{\dagger} A_n V(k) |0\rangle$ using the Hadamard test. This requires us to prepare the state $V(k) |0\rangle$, and then perform controlled operations with some control-auxiliary qubits for the unitary matrices $A_m^{\dagger}$ and $A_n$. We can implement this in code: ``` # Creates the Hadamard test def had_test(gate_type, qubits, auxiliary_index, parameters): circ.h(auxiliary_index) apply_fixed_ansatz(qubits, parameters) for ie in range (0, len(gate_type[0])): if (gate_type[0][ie] == 1): circ.cz(auxiliary_index, qubits[ie]) for ie in range (0, len(gate_type[1])): if (gate_type[1][ie] == 1): circ.cz(auxiliary_index, qubits[ie]) circ.h(auxiliary_index) circ = QuantumCircuit(4) had_test([[0, 0, 0], [0, 0, 1]], [1, 2, 3], 0, [[1, 1, 1], [1, 1, 1], [1, 1, 1]]) circ.draw() ``` The reason why we are applying two different "gate_types" is because this represents the pairs of gates shown in the expanded form of $\langle \Phi | \Phi \rangle$. It is also important to note that for the purposes of this implementation (the systems of equations we will actually be solving, we are only concerned with the gates $Z$ and $\mathbb{I}$, so I only include support for these gates (The code includes number "identifiers" that signify the application of different gates, $0$ for $\mathbb{I}$ and $1$ for $Z$). Now, we can move on to the second value we must calculate, which is $|\langle b | \Phi \rangle|^2$. We get: <br> $$|\langle b | \Phi \rangle|^2 \ = \ |\langle b | A V(k) | 0 \rangle|^2 \ = \ |\langle 0 | U^{\dagger} A V(k) | 0 \rangle|^2 \ = \ \langle 0 | U^{\dagger} A V(k) | 0 \rangle \langle 0 | V(k)^{\dagger} A^{\dagger} U |0\rangle$$ <br> All we have to do now is the same expansion as before for the product $\langle 0 | U^{\dagger} A V(k) | 0 \rangle \langle 0 | V(k)^{\dagger} A^{\dagger} U |0\rangle$: <br> $$\langle 0 | U^{\dagger} A V(k) | 0 \rangle^2 \ = \ \displaystyle\sum_{m} \displaystyle\sum_{n} c_m^{*} c_n \langle 0 | U^{\dagger} A_n V(k) | 0 \rangle \langle 0 | V(k)^{\dagger} A_m^{\dagger} U |0\rangle$$ <br> Now, again, for the purposes of this demonstration, we will soon see that all the outputs/expectation values of our implementation will be real, so we have: <br> $$\Rightarrow \ \langle 0 | U^{\dagger} A V(k) | 0 \rangle \ = \ (\langle 0 | U^{\dagger} A V(k) | 0 \rangle)^{*} \ = \ \langle 0 | V(k)^{\dagger} A^{\dagger} U |0\rangle$$ <br> Thus, in this particular implementation: <br> $$|\langle b | \Phi \rangle|^2 \ = \ \displaystyle\sum_{m} \displaystyle\sum_{n} c_m c_n \langle 0 | U^{\dagger} A_n V(k) | 0 \rangle \langle 0 | U^{\dagger} A_m V(k) | 0 \rangle$$ <br> There is a sophisticated way of solving for this value, using a newly-proposed subroutine called the **Hadamard Overlap Test** (see cited paper), but for this tutorial, we will just be using a standard Hadamard Test, where we control each matrix. This unfortunately requires the use of an extra auxiliary qubit. 
We essentially just place a control on each of the gates involved in the auxiliary, the $|b\rangle$ preparation unitary, and the $A_n$ unitaries. We get something like this for the controlled-ansatz: ``` # Creates controlled anstaz for calculating |<b|psi>|^2 with a Hadamard test def control_fixed_ansatz(qubits, parameters, auxiliary, reg): for i in range (0, len(qubits)): circ.cry(parameters[0][i], qiskit.circuit.Qubit(reg, auxiliary), qiskit.circuit.Qubit(reg, qubits[i])) circ.ccx(auxiliary, qubits[1], 4) circ.cz(qubits[0], 4) circ.ccx(auxiliary, qubits[1], 4) circ.ccx(auxiliary, qubits[0], 4) circ.cz(qubits[2], 4) circ.ccx(auxiliary, qubits[0], 4) for i in range (0, len(qubits)): circ.cry(parameters[1][i], qiskit.circuit.Qubit(reg, auxiliary), qiskit.circuit.Qubit(reg, qubits[i])) circ.ccx(auxiliary, qubits[2], 4) circ.cz(qubits[1], 4) circ.ccx(auxiliary, qubits[2], 4) circ.ccx(auxiliary, qubits[0], 4) circ.cz(qubits[2], 4) circ.ccx(auxiliary, qubits[0], 4) for i in range (0, len(qubits)): circ.cry(parameters[2][i], qiskit.circuit.Qubit(reg, auxiliary), qiskit.circuit.Qubit(reg, qubits[i])) q_reg = QuantumRegister(5) circ = QuantumCircuit(q_reg) control_fixed_ansatz([1, 2, 3], [[1, 1, 1], [1, 1, 1], [1, 1, 1]], 0, q_reg) circ.draw() ``` Notice the extra qubit, `q0_4`. This is an auxiliary, and allows us to create a $CCZ$ gate, as is shown in the circuit. Now, we also have to create the circuit for $U$. In our implementation, we will pick $U$ as: <br> $$U \ = \ H_1 H_2 H_3$$ <br> Thus, we have: ``` def control_b(auxiliary, qubits): for ia in qubits: circ.ch(auxiliary, ia) circ = QuantumCircuit(4) control_b(0, [1, 2, 3]) circ.draw() ``` Finally, we construct our new Hadamard test: ``` # Create the controlled Hadamard test, for calculating <psi|psi> def special_had_test(gate_type, qubits, auxiliary_index, parameters, reg): circ.h(auxiliary_index) control_fixed_ansatz(qubits, parameters, auxiliary_index, reg) for ty in range (0, len(gate_type)): if (gate_type[ty] == 1): circ.cz(auxiliary_index, qubits[ty]) control_b(auxiliary_index, qubits) circ.h(auxiliary_index) q_reg = QuantumRegister(5) circ = QuantumCircuit(q_reg) special_had_test([[0, 0, 0], [0, 0, 1]], [1, 2, 3], 0, [[1, 1, 1], [1, 1, 1], [1, 1, 1]], q_reg) circ.draw() ``` This is for the specific implementation when all of our parameters are set to $1$, and the set of gates $A_n$ is simply `[0, 0, 0]`, and `[0, 0, 1]`, which corresponds to the identity matrix on all qubits, as well as the $Z$ matrix on the third qubit (with my "code notation"). Now, we are ready to calculate the final cost function. This simply involves us taking the products of all combinations of the expectation outputs from the different circuits, multiplying by their respective coefficients, and arranging into the cost function that we discussed previously! 
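As a quick classical sanity check of the Hadamard-test identity $P(0) \ - \ P(1) \ = \ \text{Re} \ \langle \psi | U | \psi \rangle$ derived above, here is a small NumPy simulation (not part of the original notebook; the unitary and state are arbitrary examples). The full quantum implementation of the cost function follows right after.

```
import numpy as np

def hadamard_test_probs(U, psi):
    """Classically simulate a one-auxiliary-qubit Hadamard test on state psi with unitary U."""
    n = len(psi)
    H = np.array([[1.0, 1.0], [1.0, -1.0]]) / np.sqrt(2)
    state = np.kron(H @ np.array([1.0, 0.0]), psi)            # |+> on the auxiliary, tensored with |psi>
    controlled_U = np.block([[np.eye(n), np.zeros((n, n))],
                             [np.zeros((n, n)), U]])          # apply U only when the auxiliary is |1>
    state = controlled_U @ state
    state = np.kron(H, np.eye(n)) @ state                     # final Hadamard on the auxiliary
    p0 = np.sum(np.abs(state[:n]) ** 2)                       # auxiliary measured as 0
    p1 = np.sum(np.abs(state[n:]) ** 2)                       # auxiliary measured as 1
    return p0, p1

U = np.kron(np.diag([1.0, -1.0]), np.eye(2))                  # Z on one qubit of a 2-qubit register
psi = np.array([1.0, 2.0, 3.0, 4.0]) / np.sqrt(30.0)          # an arbitrary real 2-qubit state
p0, p1 = hadamard_test_probs(U, psi)
print(p0 - p1)                                                # about -0.6667
print(psi @ U @ psi)                                          # Re<psi|U|psi>, should match
```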
``` # Implements the entire cost function on the quantum circuit def calculate_cost_function(parameters): global opt overall_sum_1 = 0 parameters = [parameters[0:3], parameters[3:6], parameters[6:9]] for i in range(0, len(gate_set)): for j in range(0, len(gate_set)): global circ qctl = QuantumRegister(5) qc = ClassicalRegister(5) circ = QuantumCircuit(qctl, qc) backend = Aer.get_backend('statevector_simulator') multiply = coefficient_set[i]*coefficient_set[j] had_test([gate_set[i], gate_set[j]], [1, 2, 3], 0, parameters) t_circ = transpile(circ, backend) qobj = assemble(t_circ) job = backend.run(qobj) result = job.result() outputstate = np.real(result.get_statevector(circ, decimals=100)) o = outputstate m_sum = 0 for l in range (0, len(o)): if (l%2 == 1): n = o[l]**2 m_sum+=n overall_sum_1+=multiply*(1-(2*m_sum)) overall_sum_2 = 0 for i in range(0, len(gate_set)): for j in range(0, len(gate_set)): multiply = coefficient_set[i]*coefficient_set[j] mult = 1 for extra in range(0, 2): qctl = QuantumRegister(5) qc = ClassicalRegister(5) circ = QuantumCircuit(qctl, qc) backend = Aer.get_backend('statevector_simulator') if (extra == 0): special_had_test(gate_set[i], [1, 2, 3], 0, parameters, qctl) if (extra == 1): special_had_test(gate_set[j], [1, 2, 3], 0, parameters, qctl) t_circ = transpile(circ, backend) qobj = assemble(t_circ) job = backend.run(qobj) result = job.result() outputstate = np.real(result.get_statevector(circ, decimals=100)) o = outputstate m_sum = 0 for l in range (0, len(o)): if (l%2 == 1): n = o[l]**2 m_sum+=n mult = mult*(1-(2*m_sum)) overall_sum_2+=multiply*mult print(1-float(overall_sum_2/overall_sum_1)) return 1-float(overall_sum_2/overall_sum_1) ``` This code may look long and daunting, but it isn't! In this simulation, I'm taking a **numerical** approach, where I'm calculating the amplitude squared of each state corresponding to a measurement of the auxiliary Hadamard test qubit in the $1$ state, then calculating $P(0) \ - \ P(1) \ = \ 1 \ - \ 2P(1)$ with that information. This is very exact, but is not realistic, as a real quantum device would have to sample the circuit many times to generate these probabilities (I'll discuss sampling later). In addition, this code is not completely optimized (it completes more evaluations of the quantum circuit than it has to), but this is the simplest way in which the code can be implemented, and I will be optimizing it in an update to this tutorial in the near future. The final step is to actually use this code to solve a real linear system. We will first be looking at the example: <br> $$A \ = \ 0.45 Z_3 \ + \ 0.55 \mathbb{I}$$ <br> In order to minimize the cost function, we use the COBYLA optimizer method, which we repeatedly applying. Our search space for parameters is determined by $\frac{k}{1000} \ k \ \in \ \{0, \ 3000\}$, which is initially chosen randomly. We will run the optimizer for $200$ steps, then terminate and apply the ansatz for our optimal parameters, to get our optimized state vector! In addition, we will compute some post-processing, to see if our algorithm actually works! In order to do this, we will apply $A$ to our optimal vector $|\psi\rangle_o$, normalize it, then calculate the inner product squared of this vector and the solution vector, $|b\rangle$! 
We can put this all into code as: ``` coefficient_set = [0.55, 0.45] gate_set = [[0, 0, 0], [0, 0, 1]] out = minimize(calculate_cost_function, x0=[float(random.randint(0,3000))/1000 for i in range(0, 9)], method="COBYLA", options={'maxiter':200}) print(out) out_f = [out['x'][0:3], out['x'][3:6], out['x'][6:9]] circ = QuantumCircuit(3, 3) apply_fixed_ansatz([0, 1, 2], out_f) backend = Aer.get_backend('statevector_simulator') t_circ = transpile(circ, backend) qobj = assemble(t_circ) job = backend.run(qobj) result = job.result() o = result.get_statevector(circ, decimals=10) a1 = coefficient_set[1]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,-1,0,0,0], [0,0,0,0,0,-1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]]) a2 = coefficient_set[0]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,1,0], [0,0,0,0,0,0,0,1]]) a3 = np.add(a1, a2) b = np.array([float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8))]) print((b.dot(a3.dot(o)/(np.linalg.norm(a3.dot(o)))))**2) ``` As you can see, our cost function has achieved a fairly low value of `0.03273673575407443`, and when we calculate our classical cost function, we get `0.96776862579723`, which agrees perfectly with what we measured, the vectors $|\psi\rangle_o$ and $|b\rangle$ are very similar! Let's do another test! This time, we will keep $|b\rangle$ the same, but we will have: <br> $$A \ = \ 0.55 \mathbb{I} \ + \ 0.225 Z_2 \ + \ 0.225 Z_3$$ Again, we run our optimization code: ``` coefficient_set = [0.55, 0.225, 0.225] gate_set = [[0, 0, 0], [0, 1, 0], [0, 0, 1]] out = minimize(calculate_cost_function, x0=[float(random.randint(0,3000))/1000 for i in range(0, 9)], method="COBYLA", options={'maxiter':200}) print(out) out_f = [out['x'][0:3], out['x'][3:6], out['x'][6:9]] circ = QuantumCircuit(3, 3) apply_fixed_ansatz([0, 1, 2], out_f) backend = Aer.get_backend('statevector_simulator') t_circ = transpile(circ, backend) qobj = assemble(t_circ) job = backend.run(qobj) result = job.result() o = result.get_statevector(circ, decimals=10) a1 = coefficient_set[2]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,-1,0,0,0], [0,0,0,0,0,-1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]]) a0 = coefficient_set[1]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,-1,0,0,0,0,0], [0,0,0,-1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]]) a2 = coefficient_set[0]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,1,0], [0,0,0,0,0,0,0,1]]) a3 = np.add(np.add(a2, a0), a1) b = np.array([float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8))]) print((b.dot(a3.dot(o)/(np.linalg.norm(a3.dot(o)))))**2) ``` Again, very low error, and the classical cost function agrees! Great, so it works! Now, we have found that this algorithm works **in theory**. I tried to run some simulations with a circuit that samples the circuit instead of calculating the probabilities numerically. Now, let's try to **sample** the quantum circuit, as a real quantum computer would do! 
For some reason, this simulation would only converge somewhat well for a ridiculously high number of "shots" (runs of the circuit, in order to calculate the probability distribution of outcomes). I think that this is mostly to do with limitations in the classical optimizer (COBYLA), due to the noisy nature of sampling a quantum circuit (a measurement with the same parameters won't always yield the same outcome). Luckily, there are other optimizers that are built for noisy functions, such as SPSA, but we won't be looking into that in this tutorial. Let's try our sampling for our second value of $A$, with the same matrix $U$: ``` #Implements the entire cost function on the quantum circuit (sampling, 100000 shots) def calculate_cost_function(parameters): global opt overall_sum_1 = 0 parameters = [parameters[0:3], parameters[3:6], parameters[6:9]] for i in range(0, len(gate_set)): for j in range(0, len(gate_set)): global circ qctl = QuantumRegister(5) qc = ClassicalRegister(1) circ = QuantumCircuit(qctl, qc) backend = Aer.get_backend('qasm_simulator') multiply = coefficient_set[i]*coefficient_set[j] had_test([gate_set[i], gate_set[j]], [1, 2, 3], 0, parameters) circ.measure(0, 0) t_circ = transpile(circ, backend) qobj = assemble(t_circ, shots=10000) job = backend.run(qobj) result = job.result() outputstate = result.get_counts(circ) if ('1' in outputstate.keys()): m_sum = float(outputstate["1"])/100000 else: m_sum = 0 overall_sum_1+=multiply*(1-2*m_sum) overall_sum_2 = 0 for i in range(0, len(gate_set)): for j in range(0, len(gate_set)): multiply = coefficient_set[i]*coefficient_set[j] mult = 1 for extra in range(0, 2): qctl = QuantumRegister(5) qc = ClassicalRegister(1) circ = QuantumCircuit(qctl, qc) backend = Aer.get_backend('qasm_simulator') if (extra == 0): special_had_test(gate_set[i], [1, 2, 3], 0, parameters, qctl) if (extra == 1): special_had_test(gate_set[j], [1, 2, 3], 0, parameters, qctl) circ.measure(0, 0) t_circ = transpile(circ, backend) qobj = assemble(t_circ, shots=10000) job = backend.run(qobj) result = job.result() outputstate = result.get_counts(circ) if ('1' in outputstate.keys()): m_sum = float(outputstate["1"])/100000 else: m_sum = 0 mult = mult*(1-2*m_sum) overall_sum_2+=multiply*mult print(1-float(overall_sum_2/overall_sum_1)) return 1-float(overall_sum_2/overall_sum_1) coefficient_set = [0.55, 0.225, 0.225] gate_set = [[0, 0, 0], [0, 1, 0], [0, 0, 1]] out = minimize(calculate_cost_function, x0=[float(random.randint(0,3000))/1000 for i in range(0, 9)], method="COBYLA", options={'maxiter':200}) print(out) out_f = [out['x'][0:3], out['x'][3:6], out['x'][6:9]] circ = QuantumCircuit(3, 3) apply_fixed_ansatz([0, 1, 2], out_f) backend = Aer.get_backend('statevector_simulator') t_circ = transpile(circ, backend) qobj = assemble(t_circ) job = backend.run(qobj) result = job.result() o = result.get_statevector(circ, decimals=10) a1 = coefficient_set[2]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,-1,0,0,0], [0,0,0,0,0,-1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]]) a0 = coefficient_set[1]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,-1,0,0,0,0,0], [0,0,0,-1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]]) a2 = coefficient_set[0]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,1,0], [0,0,0,0,0,0,0,1]]) a3 = np.add(np.add(a2, a0), a1) b = 
np.array([float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8))]) print((b.dot(a3.dot(o)/(np.linalg.norm(a3.dot(o)))))**2) ``` So as you can see, not amazing, our solution is still off by a fairly significant margin ($3.677\%$ error isn't awful, but ideally, we want it to be **much** closer to 0). Again, I think this is due to the optimizer itself, not the actual quantum circuit. I will be making an update to this Notebook once I figure out how to correct this problem (likely with the introduction of a noisy optimizer, as I previously mentioned). ## 4. Acknowledgements This implementation is based on the work presented in the research paper "Variational Quantum Linear Solver: A Hybrid Algorithm for Linear Systems", written by Carlos Bravo-Prieto, Ryan LaRose, M. Cerezo, Yiğit Subaşı, Lukasz Cincio, and Patrick J. Coles, which is available at [this](https://arxiv.org/abs/1909.05820) link. Special thanks to Carlos Bravo-Prieto for personally helping me out, by answering some of my questions concerning the paper! ``` import qiskit qiskit.__qiskit_version__ ```
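As a closing aside on the noisy-optimizer point raised earlier: SPSA needs only two (noisy) cost evaluations per iteration, independent of the number of parameters, which is what makes it attractive for the sampled cost function. Below is a minimal, self-contained sketch of the textbook SPSA update rule — the hyper-parameters are illustrative defaults rather than tuned values, and this is not the optimizer actually used in this notebook.

```
import numpy as np

# Minimal SPSA sketch (illustrative hyper-parameters, not part of the notebook).
# `cost` is any noisy scalar function of a parameter vector, for example the
# sampled calculate_cost_function above; `x0` is the initial parameter vector.
def spsa_minimize(cost, x0, maxiter=200, a=0.2, c=0.1, A=20, alpha=0.602, gamma=0.101):
    x = np.array(x0, dtype=float)
    for k in range(maxiter):
        ak = a / (k + 1 + A) ** alpha                         # decaying step size
        ck = c / (k + 1) ** gamma                             # decaying perturbation size
        delta = np.random.choice([-1.0, 1.0], size=x.shape)   # random +/-1 directions
        grad = (cost(x + ck * delta) - cost(x - ck * delta)) / (2 * ck * delta)
        x = x - ak * grad                                     # update all parameters at once
    return x
```

The returned vector could then take the place of `out['x']` in the post-processing cells above.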
# Package ``` import matplotlib as mpl import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split %matplotlib inline import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras from tensorflow.keras.utils import plot_model from tensorflow.keras import backend as K from tensorflow.keras.layers import Dense, Input, Conv2D, Conv2DTranspose, Flatten, Reshape from tensorflow.keras.models import Model from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint print(tf.__version__) print(keras.__version__) ``` # Variables ``` height = 32 width = 32 channels = 3 latent_dim = 256 batch_size = 10 epochs = 20 ``` # Function and Model ``` def PreprocessData(X_Train, X_Test): #Normalize picture X_Train = X_Train.astype('float32') X_Test = X_Test.astype('float32') X_Train = ((X_Train - np.min(X_Train)) / (np.max(X_Train) - np.min(X_Train)) - 0.5) * 2 X_Test = ((X_Test - np.min(X_Test)) / (np.max(X_Test) - np.min(X_Test)) - 0.5) * 2 #split training data into training and validation set X_Train_gray = rgb_2_gray(X_Train).reshape(-1,32, 32, 1) X_Test_gray = rgb_2_gray(X_Test).reshape(-1,32, 32, 1) X_train_gray, X_valid_gray, X_train, X_valid = train_test_split(X_Train_gray, X_Train, test_size=0.2, random_state=7, shuffle= True) return ((X_train_gray, X_train), (X_valid_gray, X_valid), (X_Test_gray, X_Test)) def Aug_Data(X_train, Y_train, X_valid, Y_valid, batch_size): train_datagen = keras.preprocessing.image.ImageDataGenerator( shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True, ) validation_datagen = keras.preprocessing.image.ImageDataGenerator() train_generator = train_datagen.flow(X_train, Y_train, batch_size=batch_size) valid_generator = validation_datagen.flow(X_valid, Y_valid, batch_size=batch_size) return train_generator, valid_generator def rgb_2_gray(image): return np.dot(image[..., :3], [0.299, 0.587, 0.114]) def encoder(input_shape): inputs = Input(shape=input_shape) x = inputs x = Conv2D(filters=64, kernel_size=3, strides=2, activation='relu', padding='same')(x) x = Conv2D(filters=128, kernel_size=3, strides=2, activation='relu', padding='same')(x) x = Conv2D(filters=256, kernel_size=3, strides=2, activation='relu', padding='same')(x) shape = K.int_shape(x) x = Flatten()(x) latent = Dense(latent_dim)(x) return shape, inputs, Model(inputs, latent) def decoder(latent_dim, shape): latent_inputs = Input(shape=(latent_dim)) x = Dense(shape[1]*shape[2]*shape[3])(latent_inputs) x = Reshape((shape[1], shape[2], shape[3]))(x) x = Conv2DTranspose(filters=256, kernel_size=3, strides = 2, activation='relu', padding='same')(x) x = Conv2DTranspose(filters=128, kernel_size=3, strides = 2, activation='relu', padding='same')(x) x = Conv2DTranspose(filters=64, kernel_size=3, strides = 2, activation='relu', padding='same')(x) output = Conv2DTranspose(filters=3, kernel_size=3, activation='sigmoid', padding='same')(x) return Model(latent_inputs, output) ``` # Data Preprocess ``` cifar100 = keras.datasets.cifar100.load_data() (x_train_all, y_train_all), (x_test, y_test) = cifar100 (X_train, Y_train), (X_valid, Y_valid), (X_test, Y_test) = PreprocessData(x_train_all, x_test) train_generator, valid_generator = Aug_Data(X_train, Y_train, X_valid, Y_valid, batch_size) print(X_train.shape) print(Y_train.shape) ``` # Training Process ``` earlystop = keras.callbacks.EarlyStopping(monitor='loss', patience=10, restore_best_weights=True, verbose=1) cp = keras.callbacks.ModelCheckpoint(filepath = 'best_model.h5',save_best_only = True,verbose=1) 
lr = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.2, patience=5, min_delta=0.0001) callbacks = [earlystop, cp, lr] shape, inputs, Encoder = encoder((height, width, 1)) Decoder = decoder(latent_dim, shape) autoencoder = Model(inputs, Decoder(Encoder(inputs))) autoencoder.compile(optimizer='adam', loss='mse' , metrics=['accuracy']) plot_model(Encoder, to_file='encoder.png', show_shapes=True) plot_model(Decoder, to_file='decoder.png', show_shapes=True) plot_model(autoencoder, to_file='autoencoder.png', show_shapes=True) history = autoencoder.fit(train_generator, epochs=epochs, steps_per_epoch=X_train.shape[0]//batch_size, validation_data=valid_generator, validation_steps=len(valid_generator) // batch_size, callbacks=callbacks, verbose=1) ``` # Test ``` autoencoder.evaluate(X_test, Y_test) ``` # Cifar10 ``` cifar10 = keras.datasets.cifar10.load_data() (x_train_all, y_train_all), (x_test, y_test) = cifar10 (X_train, Y_train), (X_valid, Y_valid), (X_test, Y_test) = PreprocessData(x_train_all, x_test) train_generator, valid_generator = Aug_Data(X_train, Y_train, X_valid, Y_valid, batch_size) print(X_train.shape) print(Y_train.shape) shape, inputs, Encoder = encoder((height, width, 1)) Decoder = decoder(latent_dim, shape) autoencoder = Model(inputs, Decoder(Encoder(inputs))) autoencoder.compile(optimizer='adam', loss='mse' , metrics=['accuracy']) history = autoencoder.fit(train_generator, epochs=epochs, steps_per_epoch=X_train.shape[0]//batch_size, validation_data=valid_generator, validation_steps=len(valid_generator) // batch_size, callbacks=callbacks, verbose=1) ``` # Test ``` autoencoder.evaluate(X_test, Y_test) %who int ```
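A quick qualitative check of the colorization is to run a few grayscale test images through the trained model and plot them next to the true color images. The snippet below is only a sketch of such a check (it is not part of the original notebook); note that `PreprocessData` scales the data to $[-1, 1]$, so inputs and targets are shifted back to $[0, 1]$ for display, while the decoder's sigmoid output is already in $[0, 1]$.

```
import numpy as np
import matplotlib.pyplot as plt

# Sketch (not part of the original notebook): show grayscale inputs, the
# model's colorized outputs, and the ground-truth color images side by side.
n = 5
preds = autoencoder.predict(X_test[:n])
fig, axes = plt.subplots(3, n, figsize=(2 * n, 6))
for i in range(n):
    axes[0, i].imshow((X_test[i, :, :, 0] + 1) / 2, cmap='gray')  # grayscale input, rescaled to [0, 1]
    axes[1, i].imshow(np.clip(preds[i], 0, 1))                    # model output (sigmoid, already in [0, 1])
    axes[2, i].imshow((Y_test[i] + 1) / 2)                        # ground-truth color, rescaled to [0, 1]
    for row in range(3):
        axes[row, i].axis('off')
plt.show()
```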
## Methods 3 - the biogeochemical and transport model parameters identification, validation The resulting biogeochemical model has 51 parameters in total whose values need identification. Also, we have to establish the parameters required by the transport model: 1) the advective exchange coefficient $K_{h}$, which to a large degree limits OM production in the 1-dimensional model ($K_{h}$ defines nutrient inputs required for local primary production) and 2) the sediment dispersion coefficient ($kz_{\text{dispersion}}$) and sediment porosity ($\phi$), which determine vertical mixing in the sediment domain. ### Identification of organic matter production and degradation parameter values The Non-Linear Least-Squares Fitting method is applied to find the horizontal diffusivity coefficient $K_{h}$, photosynthetic efficiency at low irradiance ($\alpha$), the maximum hourly rate of photosynthesis normalized to chlorophyll biomass ($p_{m}^{B}$), half-saturation constants for nutrient uptake by autotrophs ($Ks_{PO_{4}^{3 -}}$, $Ks_{\text{Si}}$, $Ks_{NH_{4}^{+}}$, $Ks_{NO_{x}}$), autotrophs mortality coefficient ($K_{phy\_ mortality}$), and three coefficients controlling heterotroph rates of grazing and dying: heterotroph grazing on autotrophs $K_{het\_ phy}$, a half-saturation constant of heterotroph to autotroph ratio $Ks_{het\_ phy\_ ratio}$, a heterotroph rate of mortality $K_{het\_ mortality}$. A chi-square statistic (a cost function) is constructed using $\text{Chlorophyll a}$ data as a target variable (output, or $y$). These parameters are responsible for the autotrophs' seasonality and primary production. The values of the heterotroph grazing on the $\text{POM}$ rate $K_{het\_ pom}$ and a half-saturation constant for the heterotroph to $\text{POM}$ ratio $Ks_{het\_ pom\_ ratio}$ are not determined separately, but the corresponding values for phytoplankton are adopted. To use the Non-Linear Least-Squares Fitting method, the biogeochemical model is implemented in Python as a box model (which consists of only one layer) and then the LMFIT module ([Newville et al., 2014]) is applied. The biogeochemical model is in `src/brom_functions.py` file. Biogeochemical model parameters identification routines are in `s_3_biogeochemical_model_parameters_identification.ipynb`. In the Python box model, the same reactions responsible for autotroph growth are present as in the original model, but the grid is restricted to a single layer permanently mixed box. The parameters are identified for the box model and then applied to the multilayer model (which is written in Fortran). All results from multilayer model are tested to fit the Wadden Sea total OM production estimation approximately 309 $\text{g m}^{- 2}\ \text{year}^{- 1}$ according to ([van Beusekom et al., 1999]) - `s_4_OM_production_validation.ipynb` (if not the production is adjusted by changing the maximum hourly rate of photosynthesis ($p_{m}^{B}$)). Along with additional advective input of OM (110 $\text{g m}^{- 2}\ \text{year}^{- 1}$) according to ([van Beusekom et al., 1999]) the total OM input into the water domain during a year equals the total remineralization (419 $\text{g m}^{2}\ \text{year}^{- 1}$) reported in a carbon budget of the Sylt-Rømø basin ([van Beusekom et al., 1999]). We use this value as an approximation of the total OM available for remineralization in the Wadden Sea. 
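The fitting step itself can be sketched briefly: the residual is the difference between the box model's predicted $\text{Chlorophyll a}$ and the observations, and LMFIT varies the listed parameters to minimize its sum of squares. The snippet below only illustrates this workflow with a toy seasonal curve standing in for the box model — the actual implementation is in `src/brom_functions.py` and `s_3_biogeochemical_model_parameters_identification.ipynb`, and the parameter names and values here are not the identified ones.

```
import numpy as np
from lmfit import minimize, Parameters

# Toy illustration of the identification step (a simple bloom curve stands in
# for the real box model; parameter names and values are illustrative only).
days = np.arange(365)
chl_obs = 5 + 4 * np.exp(-((days - 120) / 30) ** 2)         # synthetic "observed" Chlorophyll a

def box_model_chl(params, days):
    v = params.valuesdict()
    return 5 + v['pbm'] * np.exp(-((days - 120) / (300 * v['alpha'])) ** 2)

def residual(params, days, data):
    return box_model_chl(params, days) - data               # LMFIT minimizes the squared sum

params = Parameters()
params.add('pbm', value=2.0, min=0)                         # maximum hourly rate of photosynthesis
params.add('alpha', value=0.05, min=0)                      # photosynthetic efficiency
out = minimize(residual, params, args=(days, chl_obs), method='leastsq')
print(out.params['pbm'].value, out.params['alpha'].value)   # recovers ~4 and ~0.1
```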
Thus, the model parameters related to organic matter production are identified to fit the seasonality of $\text{Chlorophyll a}$ concentrations and the total OM input to the Wadden Sea. [Newville et al., 2014]: https://dx.doi.org/10.5281/zenodo.11813 [van Beusekom et al., 1999]: https://link.springer.com/article/10.1007/BF02764176 | Parameter | Notation | Units | Value (Range) | Source | |:-----------:|:-----------:|:-----------:|:------------------:|:-----------:| | Photosynthetic effeciency at low irradiance | $\alpha$ | $$\text{mg}\ \text{C}\ (\text{mg}\ \text{Chl a}\ \text{h})^{- 1}\ (\mu M\ \text{quanta}\ m^{- 2}\ s^{- 1})^{- 1}$$ | 0.089 | LMFIT | | Maximum hourly rate of photosynthesis | $p_{m}^{B}$ | $$\text{mg}\ C\ (\text{mg}\ \text{Chl a}\ h)^{- 1}$$ | 2.6 - 2.96 | LMFIT | | Half-saturation constant of $\text{PO}_{4}^{3 -}$ uptake by $\text{Phy}$ | $Ks_{\text{PO}_{4}^{3 -}}$ | $${\text{mM }\text{P m}}^{- 3}$$ | 0.1 | LMFIT | | Half-saturation constant of $\text{Si}$ uptake by $\text{Phy}$ | $Ks_{\text{Si}}$ | $$\text{mM Si m}^{-3}$$ | 0.1 | LMFIT | | Half-saturation constant of $\text{NH}_{4}^{+}$ uptake by $\text{Phy}$ | $Ks_{\text{NH}_{4 }^{+}}$ | $${\text{mM}\text{N m}}^{- 3}$$ | 7 | LMFIT | | Half-saturation constant of $\text{NO}_{2}^{-}$ and $\text{NO}_{3}^{-}$ uptake by $\text{Phy}$ | $Ks_{NO_{x}}$ | $${\text{mM}\text{N m}}^{- 3}$$ | 14.9 | LMFIT | | $\text{Phy}$ rate of mortality | $K_{phy\_mortality}$ | $d^{- 1}$ | 1e-5 | LMFIT | | $\text{Phy}$ rate of excretion | $K_{phy\_excrete}$ | $d^{- 1}$ | 0.015 | ([Yakushev et al., 2017]) | | $\text{Het}$ grazing on $\text{Phy}$ | $K_{het\_phy}$ | $d^{- 1}$ | 0.2 | LMFIT | | Half-saturation constant of $\text{Het}$ to $\text{Phy}$ ratio | $Ks_{het\_phy\_ ratio}$ | - | 0.3 | LMFIT | | $\text{Het}$ grazing on $\text{POM}$ rate | $K_{het\_pom}$ | $d^{- 1}$ | 0.2 | LMFIT | | Half-saturation constant of $\text{Het}$ to $\text{POM}$ ratio | $Ks_{het\_pom\_ ratio}$ | - | 0.3 | LMFIT | | $\text{Het}$ rate of respiration | $K_{het\_mortality}$ | $d^{- 1}$ | 0.015 | ([Yakushev et al., 2017]) | | $\text{Het}$ rate of mortality | $K_{het\_mortality}$ | $d^{- 1}$ | 0.0225 | LMFIT | | $\text{Het}$ food absorbency | $\text{Uz}$ | - | 0.5 | ([Yakushev et al., 2017]) | | $\text{Het}$ ratio between dissolved and particulate excretion | $\text{Hz}$ | - | 0.5 | ([Yakushev et al., 2017]) | [Yakushev et al., 2017]: https://doi.org/10.5194/gmd-10-453-2017 OM oxygen respiration rate and sulfate reduction rate coefficients are adjusted to fit oxygen consumption rate and sulfate reduction rate measured in sandy intertidal sediments of Sylt-Rømø Basin, Wadden Sea reported by [de Beer et al. (2005)]. Denitrification rate coefficients are adopted from relative cell yield values from [Krumins et al., (2013)]. [de Beer et al. 
(2005)]: https://doi.org/10.4319/lo.2005.50.1.0113 [Krumins et al., (2013)]: https://doi.org/10.5194/bg-10-371-2013 | Parameter | Notation | Units | Value (Range) | Source | |:-----------:|:-----------:|:-----------:|:------------------:|:-----------:| | $\text{POM}$ to $\text{DOM}$ autolysis | $$K_{pom\_ dom}$$ | $$d^{- 1}$$ | 0.15 | ([Yakushev et al., 2017]) | | $\text{DOM}$ oxygen respiration | $$K_{O_{2}dom\_ hydrolysis}$$ | $$d^{- 1}$$ | 0.1 | see text | | $\text{POM}$ oxygen respiration | $$K_{O_{2}pom\_ hydrolysis}$$ | $$d^{- 1}$$ | 0.002 | see text | | Half-saturation constant of $\text{O}_{2}$ for OM oxygen respiration | $$Ks_{O_{2}}$$ | $$\text{mM O}_{2}\ m^{- 3}$$ | 1 | ([Yakushev et al., 2017]) | | $\text{DOM}$ denitrification 1st stage | $$K_{NO_{3}^{-}dom\_ hydrolysis}$$ | $$d^{- 1}$$ | 0.075 | see text | | $\text{POM}$ denitrification 1st stage | $$K_{NO_{3}^{-}pom\_ hydrolysis}$$ | $$d^{- 1}$$ | 0.0015 | see text | | Half-saturation constant of $\text{NO}_{3}^{-}$ for OM denitrification | $$Ks_{NO_{3}^{-}}$$ | $$\text{mM NO}_{3}^{-}\ m^{- 3}$$ | 0.1 | ([Yakushev et al., 2017]) | | Half-saturation constant of $\text{O}_{2}$ for OM denitrification | $$Ks_{O_{2}\text{forN}O_{3}^{-}}$$ | $$\text{mM O}_{2}\ m^{- 3}$$ | 10 | ([Yakushev et al., 2017]) | | $\text{DOM}$ denitrification 2st stage | $$K_{NO_{2}^{-}dom\_ hydrolysis}$$ | $$d^{- 1}$$ | 0.075 | see text | | $\text{POM}$ denitrification 2st stage | $$K_{NO_{2}^{-}pom\_ hydrolysis}$$ | $$d^{- 1}$$ | 0.0015 | see text | | Half-saturation constant of $\text{NO}_{2}^{-}$ for $\text{OM}$ denitrification | $$Ks_{NO_{2}^{-}}$$ | $$\text{mM NO}_{2}^{-}\ m^{- 3}$$ | 0.1 | ([Yakushev et al., 2017]) | | $\text{DOM}$ sulfate reduction | $$K_{SO_{4}^{2 -}dom\_ hydrolysis}$$ | $$d^{- 1}$$ | 0.1 | see text | | $\text{POM}$ sulfate reduction | $$K_{SO_{4}^{2 -}pom\_ hydrolysis}$$ | $$d^{- 1}$$ | 0.002 | see text | | Half-saturation constant of $\text{SO}_{4}^{2 -}$ for OM sulfate reduction | $$Ks_{SO_{4}^{2 -}}$$ | $$\text{mM SO}_{4}^{2 -}\ m^{- 3}$$ | 1 | ([Yakushev et al., 2017]) | | Half-saturation constant of $\text{O}_{2}$ for OM sulfate reduction | $$Ks_{O_{2}\text{forS}O_{4}^{2 -}}$$ | $$\text{mM O}_{2}\ m^{- 3}$$ | 25 | ([Yakushev et al., 2017]) | | Half-saturation constant of $\text{NO}_{3}^{-}$ for OM sulfate reduction | $$Ks_{NO_{3}\text{forS}O_{4}^{2 -}}$$ | $$\text{mM NO}_{3}^{-}\ m^{- 3}$$ | 5 | ([Yakushev et al., 2017]) | | Reference temperature | $$T_{\text{ref}}$$ | $^{\circ}$C | 2 | | | Temperature factor | $$q_{10}$$ | - | 2 | ([Soetaert and Herman, 2009]) | [Yakushev et al., 2017]: https://doi.org/10.5194/gmd-10-453-2017 [Soetaert and Herman, 2009]: https://www.springer.com/gp/book/9781402086236 ### Identification of dispersion coefficient $\mathbf{k}\mathbf{z}_{\mathbf{\text{dispersion}}}$ and other parameter values The Wadden sea sediments can be roughly separated into two zones with different permeability: sands cover approximately 70$\%$, and muds cover approximately 30$\%$ ([de Beer et al., 2005]). About 50$\%$ of the sediments in the Wadden Sea are exposed during low tide, and tidal flats consist mostly of sands ([de Beer et al., 2005]). While the muddy environment is reported to have higher OM content, the sands are more permeable for electron acceptors and for new organic material from the overlying water ([de Beer et al., 2005]). Alkalinity generation, which needs both supply of electron acceptors and OM occurs mostly in sandy environments ([de Beer et al., 2005]). 
Thus, according to the goal of the study to reproduce conditions in the Wadden Sea, which favors the maximum amount of alkalinity generation, we assume our sediments consist of sand. [de Beer et al., 2005]: https://doi.org/10.4319/lo.2005.50.1.0113 We do not include explicit tidal dynamics into our calculations. Instead, we introduce a range of dispersion coefficients ([Boudreau, 1997]) in the sediment domain to reproduce different vertical mixing conditions. The average porosity ($\phi$) of the upper 10 cm of sandy sediments in the Wadden Sea is approximately 0.43 - roughly the average value of all porosity values found in ([Jensen et al., 1996]; [de Beer et al., 2005]). Many different vertical mixing regimes in sandy sediments of the Wadden Sea exist. [Neumann et al., (2017)] using estimations of the vertical advective fluxes of nitrates in sediments of the German Bight calculated Peclet numbers for permeable sediments, which varied from 1 to 1000. Using a relation between dispersion coefficients and Peclet numbers ([Boudreau, 1997]) it is possible to evaluate the range for dispersion coefficient values. For Peclet numbers around 1, the dispersion coefficient is approximately equal to the molecular diffusion coefficient of approximately 1 $\cdot$ 10$^{- 9}\text{m}^{2}\text{sec}^{- 1}$. For Peclet numbers around 1000, the dispersion coefficient is approximately 2500 times larger than the molecular diffusion coefficient. Therefore, we applied a wide range for $kz_{\text{dispersion}}$ in our series of runs, simulating different vertical mixing conditions in sediments starting from 0. Hence, we can reproduce different alkalinity fluxes from those regions of the Wadden Sea with mostly advective vertical mixing conditions in sediments to regions with diffusive vertical mixing. We cannot apply these calculations to specific regions since the vertical advective conditions can change significantly within short distances. Using this approach, we can determine the possible range of values of $\text{TA}$ and $\text{TA}$ fluxes at the SWI in the Wadden Sea. [Boudreau, 1997]: https://www.academia.edu/3695121/Diagenetic_Models_and_Their_Implementation [de Beer et al., 2005]: https://doi.org/10.4319/lo.2005.50.1.0113 [Jensen et al., 1996]: https://doi.org/10.3354/ame011181 [Neumann et al., (2017)]: https://doi.org/10.1016/j.seares.2017.06.012 | Parameter | Notation | Units | Value (Range) | Source | |:-----------:|:-----------:|:-----------:|:------------------:|:-----------:| | Horizontal diffusivity coefficient | $$K_{h}$$ | $$\mathrm{m}^{\mathrm{2}}\mathrm{s}^{\mathrm{- 1}}$$ | 713 | LMFIT | | Vertical dispersion coefficient in sediments | $$kz_{\text{dispersion}}$$ | $$\mathrm{m}^{\mathrm{2}}\mathrm{s}^{\mathrm{- 1}}$$ | 1e-9 - 35e-9 | see text | | Porosity | $$\phi$$ | - | 0.43 | see text | The reaction parameters for anammox, nitrification and the sulfur cycle are taken from ([Yakushev et al., 2017]). Anammox does not change $\text{TA}$ directly (it does not affect the total charge in $\text{TA}_{\text{ec}}$), the loss of nitrogen compounds is compensated by the horizontal advection reproduced in the transport model. The rest of the parameters are adapted from ([Yakushev et al., 2017]). Uncertainties due to adaptation of some parameters from literature are compensated by using parameter values identified explicitly by the Non-Linear Least-Squares Fitting method. 
[Yakushev et al., 2017]: https://doi.org/10.5194/gmd-10-453-2017 ### Validation According to the reasoning provided in the Methods 1 section the most important reactions for alkalinity generation are OM degradation reactions. For the proper alkalinity evaluation, apart from the rates of OM degradation rates we should have the correct values of OM production and its timing. As mentioned previously, we identified the parameters (and forcing) of the transport and biogeochemical models to fit the seasonality of $\text{Chlorophyll a}$ concentrations and the total OM input to the Wadden Sea. The rates of OM degradation reactions are also identified to fit the reported values. However, there are still several factors that can influence a maximum alkalinity generation due to biogeochemical reactions in the Wadden Sea assessment. The Wadden Sea has diverse morphology and hydrodynamics. Tidal basins of the Wadden Sea are composed of sands so, as mentioned previously, they are the main candidates for the most important TA generators. Instead of modeling complicated tidal basins hydrodynamics we apply the variety of vertical mixing conditions in sediments and use the Wadden Sea average depth (which is 2.5 meters) to calculate corresponding TA concentrations for different mixing conditions. Thus, we normalize resulting TA values to the average depth of the Wadden Sea. For the sake of simplicity, we skip the changing water levels and different depths during high tide in different tidal basins. The actual process of sedimentary alkalinity generation in the coastal area, such as the Wadden Sea is split into different stages depending on the tidal phase. Alkalinity generation requires new organics and oxidizers, which incoming tide delivers. During air exposure, there are stagnant conditions in the sediments ([de Beer et al., 2005]; [Al-Raei et al., 2009]), which means that no additional organic matter and electron acceptors are available. Therefore, low tide means sedimentary biogeochemical processes get fewer reagents for reactions so that it can cause less extensive OM degradation rates. The simplification to skip low tides dynamics should not underestimate alkalinity generation, so it is in the scope of the goals of the study. The summary of simplifications applied in the multilayer box model is presented in the following table. [de Beer et al., 2005]: https://doi.org/10.4319/lo.2005.50.1.0113 [Al-Raei et al., 2009]: https://doi.org/10.1007/s10236-009-0186-5 | The Wadden Sea | The multilayer box | |:--------------:|:------------------:| |Extensive mixing with the surrounding North Sea|Horizontal diffusive exchange with an external box| |Different mixing in sediments in different spots <br>of the Wadden Sea and in different tidal phases | Separate calculations for the range of dispersion coefficients <br>to reproduce different mixing regimes in sediments| |Varying depth due to tides|A constant depth of 2.5 meters| To check whether the applied simplifications do not underestimate TA production we can evaluate the actual Wadden Sea morphology and hydrodynamics influence on the alkalinity generation considering the several setups with different depths and mixing in sediments. The tidal amplitude in the Wadden Sea is about 1.5 meters at the northern and western edges of the region and about 3 to 4 m in the inner German Bight, average tidal basins' tides are up to approximately 2.5 meters high ([van Beusekom et al., 2001]). 
Thus, to understand the influence of the changing water level in tidal basins of the Wadden Sea with different depths, let's consider three setups: where the seawater depth during high tide reaches 0.5 meters, 1.5 meters, and 2.5 meters. To implement a low tide behavior we introduce a periodic mixing in sediments when there is no mixing in sediments and between sediments and the water column during part of each day a year. For example, in the tidal flat with the depth of 0.5 meters during high tide the most of a day (let's say 2/3) the sediments are exposed to the air (so mixing is 1/3 of a day). During the no mixing period all other processes (biogeochemical in both sediments and the water column, mixing between layers of the water column) are still active. For the depth of 2.5 meters, there will be a whole day mixing between seawater and sediments. Therefore, we perform three runs for three different setups with different depths and different mixing timing, but with all other similar parametrization to understand whether our basic simplification does not underestimate TA generation. [van Beusekom et al., 2001]: https://www.waddensea-worldheritage.org/resources/ecosystem-14-wadden-sea-specific-eutrophication-criteria |mixing period: |0.5 meters|1.5 meters|2.5 meters| |:-|:-:|:-:|:-:| |1/3 of a day|x|o|o| |1/2 of a day|o|x|o| |a whole day |o|o|x| ``` import src.plot_functions as pf result = list(map(pf.extract_alk, (('data/validation/d_0p5_om+prod_0p33mix/water.nc', 0.125), ('data/validation/d_1p5_om+prod_0p5mix/water.nc', 0.375), ('data/validation/d_2p5_om+prod_mix/water.nc', 0.625)))) results = list(zip(result, ('0.5 meters', '1.5 meters', '2.5 meters'))) pf.show_alk(results) ``` **Figure M3-1**. Alkalinity profiles for the three setups with different depths during high tide and different mixing conditions. Blue line - mixing during 1/3 of a day. Orange line - 1/2 of a day. Green line - a whole day. Figure M3-1 provides that our simplifications do not underestimate TA generation. Shallower areas with less extensive mixing between sediments and seawater generate less alkalinity. Deeper areas generate more alkalinity. These changes are due to different amount of OM available for degradation. Deeper areas get more organic matter, so more organic matter is available for denitrification and sulfate reduction reactions. ``` import src.plot_functions as pf result = list(map(pf.extract_alk, (('data/validation/d_2p5_om+prod_0p33mix/water.nc', 0.625), ('data/validation/d_2p5_om+prod_0p5mix/water.nc', 0.625), ('data/validation/d_2p5_om+prod_mix/water.nc', 0.625)))) results = list(zip(result, ('0.33', '0.5', '1'))) pf.show_alk(results) ``` **Figure M3-2**. Alkalinity profiles for the three setups with different mixing conditions but the same depth of 2.5 meters. Blue line - mixing during 1/3 of a day. Orange line - 1/2 of a day. Green line - a whole day. Figure M3-2 shows different mixing conditions influence on a year TA profile. One can see that shapes of the curves are various, but the maximum values are approximately similar.
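The periodic mixing used for these validation runs amounts to switching the sediment dispersion coefficient off for a fixed fraction of each day. The fragment below is only an illustration of that switching logic (the model itself is written in Fortran, so the names and structure here are not the actual implementation).

```
# Illustration of the on/off mixing in the validation runs: the sediment
# dispersion coefficient is active only while the flat is submerged
# (1/3, 1/2 or a whole day) and zero during air exposure.
def kz_dispersion_at(time_days, kz_dispersion, mixing_fraction):
    time_of_day = time_days % 1.0
    if time_of_day < mixing_fraction:    # submerged: mixing between sediments and water
        return kz_dispersion
    return 0.0                           # exposed: stagnant conditions, no vertical mixing

print(kz_dispersion_at(10.2, 35e-9, 1/3))   # 3.5e-08, inside the mixing window
print(kz_dispersion_at(10.8, 35e-9, 1/3))   # 0.0, sediments exposed
```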
Probability theory is a cornerstone for machine learning. We can think of quantum states as probability distributions with certain properties that make them different from our classical notion of probabilities. Contrasting these properties is an easy and straightforward introduction to the most basic concepts we need in quantum computing. Apart from probability theory, linear algebra is also critical for many learning protocols. As we will see, geometry and probabilities are intrinsically linked in quantum computing, but geometric notions are also familiar in dealing with classical probability distributions. This notebook first talks about classical probabilities and stochastic vectors, and introduces quantum states as a natural generalization. Throughout this course, we will assume finite probability distributions and finite dimensional spaces. This significantly simplifies notation and most quantum computers operate over finite dimensional spaces, so we do not lose much in generality. # Classical probability distributions Let us toss a biased coin. Without getting too technical, we can associate a random variable $X$ with the output: it takes the value 0 for heads and the value 1 for tails. We get heads with probability $P(X=0) = p_0$ and tails with $P(X=1) = p_1$ for each toss of the coin. In classical, Kolmogorovian probability theory, $p_i\geq 0$ for all $i$, and the probabilities sum to one: $\sum_i p_i = 1$. Let's sample this distribution ``` import numpy as np n_samples = 100 p_1 = 0.2 x_data = np.random.binomial(1, p_1, (n_samples,)) print(x_data) ``` We naturally expect that the empirically observed frequencies also sum to one: ``` frequency_of_zeros, frequency_of_ones = 0, 0 for x in x_data: if x: frequency_of_ones += 1/n_samples else: frequency_of_zeros += 1/n_samples print(frequency_of_ones+frequency_of_zeros) ``` Since $p_0$ and $p_1$ must be non-negative, all possible probability distributions are restricted to the positive orthant. The normalization constraint puts every possible distribution on a straight line. This plot describes all possible probability distributions by biased and unbiased coins. ``` import matplotlib.pyplot as plt %matplotlib inline p_0 = np.linspace(0, 1, 100) p_1 = 1-p_0 fig, ax = plt.subplots() ax.set_xlim(-1.2, 1.2) ax.set_ylim(-1.2, 1.2) ax.spines['left'].set_position('center') ax.spines['bottom'].set_position('center') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.set_xlabel("$p_0$") ax.xaxis.set_label_coords(1.0, 0.5) ax.set_ylabel("$p_1$") ax.yaxis.set_label_coords(0.5, 1.0) plt.plot(p_0, p_1) ``` We may also arrange the probabilities in a vector $\vec{p} = \begin{bmatrix} p_0 \\ p_1 \end{bmatrix}$. Here, for notational convenience, we put an arrow above the variable representing the vector, to distinguish it from scalars. You will see that quantum states also have a standard notation that provides convenience, but goes much further in usefulness than the humble arrow here. A vector representing a probability distribution is called a *stochastic vector*. The normalization constraint essentially says that the norm of the vector is restricted to one in the $l_1$ norm. In other words, $||\vec{p}||_1 = \sum_i |p_i| = 1$. This would be the unit circle in the $l_1$ norm, but since $p_i\geq 0$, we are restricted to a quarter of the unit circle, just as we plotted above. 
We can easily verify this with numpy's norm function: ``` p = np.array([[0.8], [0.2]]) np.linalg.norm(p, ord=1) ``` We know that the probability of heads is just the first element in the $\vec{p}$, but since it is a vector, we could use linear algebra to extract it. Geometrically, it means that we project the vector to the first axis. This projection is described by the matrix $\begin{bmatrix} 1 & 0\\0 & 0\end{bmatrix}$. The length in the $l_1$ norm gives the sought probability: ``` Π_0 = np.array([[1, 0], [0, 0]]) np.linalg.norm(Π_0.dot(p), ord=1) ``` We can repeat the process to get the probability of tails: ``` Π_1 = np.array([[0, 0], [0, 1]]) np.linalg.norm(Π_1.dot(p), ord=1) ``` The two projections play an equivalent role to the values 0 and 1 when we defined the probability distribution. In fact, we could define a new random variable called $\Pi$ that can take the projections $\Pi_0$ and $\Pi_1$ as values and we would end up with an identical probability distribution. This may sound convoluted and unnatural, but the measurement in quantum mechanics is essentially a random variable that takes operator values, such as projections. What happens when we want to transform a probability distribution to another one? For instance, to change the bias of a coin, or to describe the transition of a Markov chain. Since the probability distribution is also a stochastic vector, we can apply a matrix on the vector, where the matrix has to fulfill certain conditions. A left *stochastic matrix* will map stochastic vectors to stochastic vectors when multiplied from the left: its columns add up to one. In other words, it maps probability distributions to probability distributions. For example, starting with a unbiased coin, the map $M$ will transform the distribution to a biased coin: ``` p = np.array([[.5], [.5]]) M = np.array([[0.7, 0.6], [0.3, 0.4]]) np.linalg.norm(M.dot(p), ord=1) ``` One last concept that will come handy is entropy. A probability distribution's entropy is defined as $H(p) = - \sum_i p_i \log p_i$. Let us plot it over all possible probability distributions of coin tosses: ``` ϵ = 10e-10 p_0 = np.linspace(ϵ, 1-ϵ, 100) p_1 = 1-p_0 H = -(p_0*np.log(p_0) + p_1*np.log(p_1)) fig, ax = plt.subplots() ax.set_xlim(0, 1) ax.set_ylim(0, -np.log(0.5)) ax.set_xlabel("$p_0$") ax.set_ylabel("$H$") plt.plot(p_0, H) plt.axvline(x=0.5, color='k', linestyle='--') ``` Here we can see that the entropy is maximal for the unbiased coin. This is true in general: the entropy peaks for the uniform distribution. In a sense, this is the most unpredictable distribution: if we get heads with probability 0.2, betting tails is a great idea. On the other hand, if the coin is unbiased, then a deterministic strategy is of little help in winning. Entropy quantifies this notion of surprise and unpredictability. # Quantum states A classical coin is a two-level system: it is either heads or tails. At a first look a quantum state is a probability distribution, and the simplest case is a two-level state, which we call a qubit. Just like the way we can write the probability distribution as a column vector, we can write a quantum state as a column vector. For notational convenience that will become apparent later, we write the label of a quantum state in what is called a ket in the Dirac notation. So for instance, for some qubit, we can write $$ |\psi\rangle = \begin{bmatrix} a_0 \\ a_1 \\ \end{bmatrix}. $$ In other words, a ket is just a column vector, exactly like the stochastic vector in the classical case. 
Instead of putting an arrow over the name of the variable to express that it is a vector, we use the ket to say that it is a column vector that represents a quantum state. There's more to this notation, as we will see. The key difference to classical probability distributions and stochastic vectors is the normalization constraint. The square sum of their absolute values adds up to 1: $$ \sqrt{|a_0|^2+|a_1|^2}=1, $$ where $a_0, a_1\in \mathbb{C}$. In other words, we are normalizing in the $l_2$ norm instead of the $l_1$ norm. Furthermore, we are no longer restricted to the positive orthant: the components of the quantum state vector, which we call *probability amplitudes*, are complex valued. Let us introduce two special qubits, corresponding to the canonical basis vectors in two dimensions: $|0\rangle$ and $|1\rangle$. $$ |0\rangle = \begin{bmatrix} 1 \\ 0 \\ \end{bmatrix}, \,\,\, |1\rangle = \begin{bmatrix} 0 \\ 1 \\ \end{bmatrix}. $$ This basis is also called the computational basis in quantum computing. We can expand an arbitrary qubit state in this basis: $$ |\psi\rangle = \begin{bmatrix} a_0 \\ a_1 \\ \end{bmatrix}=a_0\begin{bmatrix} 1 \\ 0 \\ \end{bmatrix} + a_1\begin{bmatrix} 0 \\ 1 \\ \end{bmatrix}= a_0|0\rangle+a_1|1\rangle. $$ This expansion in a basis is called a superposition. If we sample the qubit state, we obtain the outcome 0 with probability $|a_0|^2$, and 1 with probability $|a_1|^2$. This is known as the Born rule; you will learn more about measurements and this rule in a subsequent notebook. For now, let's take a look at how we can simulate classical coin tossing on a quantum computer. Let's start with a completely biased case where we get heads with probability 1. This means that our qubit $|\psi\rangle=|0\rangle$. We create a circuit of a single qubit and a single classical register where the results of the sampling (measurements) go. ``` from pyquil import Program, get_qc from pyquil.gates import * import numpy as np from forest_tools import * π = np.pi qvm_server, quilc_server, fc = init_qvm_and_quilc('/home/local/bin/qvm', '/home/local/bin/quilc') circuit = Program() ``` Any qubit is initialized in $|0\rangle$, so if we measure it rightaway, we should get our maximally biased coin. ``` ro = circuit.declare('ro', 'BIT', 1) circuit += MEASURE(0, ro[0]) ``` Let us execute it a hundred times and study the result ``` qc = get_qc('1q-qvm', connection=fc) circuit.wrap_in_numshots_loop(100) executable = qc.compile(circuit) result = qc.run(executable) result.T ``` As expected, all of our outcomes are 0. To understand the possible quantum states, we use the Bloch sphere visualization. Since the probability amplitudes are complex and there are two of them for a single qubit, this would require a four-dimensional space. Now since the vectors are normalized, this removes a degree of freedom, allowing a three-dimensional representation with an appropriate embedding. This embedding is the Bloch sphere. It is slightly different than an ordinary sphere in three dimensions: we identify the north pole with the state $|0\rangle$, and the south pole with $|1\rangle$. In other words, two orthogonal vectors appear as if they were on the same axis -- the axis Z. The computational basis is just one basis: the axes X and Y represent two other bases. Any point on the surface of this sphere is a valid quantum state. This is also true the other way around: every pure quantum state is a point on the Bloch sphere. 
Here it 'pure' is an important technical term and it essentially means that the state is described by a ket (column vector). Later in the course we will see other states called mix states that are not described by a ket (you will see later that these are inside the Bloch sphere). To make it less abstract, let's plot our $|0\rangle$ on the Bloch sphere: ``` from pyquil.api import WavefunctionSimulator wf_sim = WavefunctionSimulator(connection=fc) wavefunction = wf_sim.wavefunction(circuit) plot_quantum_state(wavefunction.amplitudes) ``` Compare this sphere with the straight line in the positive orthant that describes all classical probability distributions of coin tosses. You can already see that there is a much richer structure in the quantum probability space. Let us pick another point on the Bloch sphere, that is, another distribution. Let's transform the state $|0\rangle$ to $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$. This corresponds to the unbiased coin, since we will get 0 with probability $|\frac{1}{\sqrt{2}}|^2=1/2$, and the other way around. There are many ways to do this transformation. We pick a rotation around the Y axis by $\pi/2$, which corresponds to the matrix $\frac{1}{\sqrt{2}}\begin{bmatrix} 1 & -1\\1 & 1\end{bmatrix}$. ``` circuit = Program() circuit += RY(π/2, 0) results = qc.run_and_measure(circuit, trials=100) plot_histogram(results) ``` To get an intuition why it is called a rotation around the Y axis, let's plot it on the Bloch sphere: ``` wavefunction = wf_sim.wavefunction(circuit) plot_quantum_state(wavefunction.amplitudes) ``` It does exactly what it says: it rotates from the north pole of the Bloch sphere. Why is interesting to have complex probability amplitudes instead of non-negative real numbers? To get some insight, take a look what happens if we apply the same rotation to $|1\rangle$. To achieve this, first we flip $|0\rangle$ to $|1\rangle$ by applying a NOT gate (denoted by X in quantum computing) and then the rotation. ``` circuit = Program() circuit += X(0) circuit += RY(π/2, 0) wavefunction = wf_sim.wavefunction(circuit) plot_quantum_state(wavefunction.amplitudes) ``` You can verify that the result is $\frac{1}{\sqrt{2}}(-|0\rangle + |1\rangle)$. That is, the exact same state as before, except that the first term got a minus sign: it is a negative probability amplitude. Note that the difference cannot be observed from the statistics: ``` results = qc.run_and_measure(circuit, trials=100) plot_histogram(results) ``` It still looks like an approximately biased coin. Yet, that negative sign -- or any complex value -- is what models *interference*, a critically important phenomenon where probability amplitudes can interact in a constructive or a destructive way. To see this, if we apply the rotation twice in a row on $|0\rangle$, we get another deterministic output, $|1\rangle$, although in between the two, it was some superposition. ``` circuit = Program() circuit += RY(π/2, 0) circuit += RY(π/2, 0) results = qc.run_and_measure(circuit, trials=100) plot_histogram(results) ``` Many quantum algorithms exploit interference, for instance, the seminal [Deutsch-Josza algorithm](https://en.wikipedia.org/wiki/Deutsch–Jozsa_algorithm), which is among the simplest to understand its significance. # More qubits and entanglement We have already seen that quantum states are probability distributions normed to 1 in the $l_2$ norm and we got a first peek at interference. If we introduce more qubits, we see another crucial quantum effect emerging. 
To do that, we first have to define how we write down the column vector for describing two qubits. We use a tensor product, which, in the case of qubits, is equivalent to the Kronecker product. Given two qubits, $|\psi\rangle=\begin{bmatrix}a_0\\a_1\end{bmatrix}$ and $|\psi'\rangle=\begin{bmatrix}b_0\\b_1\end{bmatrix}$, their product is $|\psi\rangle\otimes|\psi'\rangle=\begin{bmatrix}a_0b_0\\ a_0b_1\\ a_1b_0\\ a_1b_1\end{bmatrix}$. Imagine that you have two registers $q_0$ and $q_1$, each can hold a qubit, and both qubits are in the state $|0\rangle$. Then this composite state would be described by according to this product rule as follows: ``` q0 = np.array([[1], [0]]) q1 = np.array([[1], [0]]) np.kron(q0, q1) ``` This is the $|0\rangle\otimes|0\rangle$ state, which we often abbreviate as $|00\rangle$. The states $|01\rangle$, $|10\rangle$, and $|11\rangle$ are defined analogously, and the four of them give the canonical basis of the four dimensional complex space, $\mathbb{C}^2\otimes\mathbb{C}^2$. Now comes the interesting and counter-intuitive part. In machine learning, we also work we high-dimensional spaces, but we never construct it as a tensor product: it is typically $\mathbb{R}^d$ for some dimension $d$. The interesting part of writing the high-dimensional space as a tensor product is that not all vectors in can be written as a product of vectors in the component space. Take the following state: $|\phi^+\rangle = \frac{1}{\sqrt{2}}(|00\rangle+|11\rangle)$. This vector is clearly in $\mathbb{C}^2\otimes\mathbb{C}^2$, since it is a linear combination of two of the basis vector in this space. Yet, it cannot be written as $|\psi\rangle\otimes|\psi'\rangle$ for some $|\psi\rangle$, $|\psi'\rangle\in\mathbb{C}^2$. To see this, assume that it can be written in this form. Then $$ |\phi^+\rangle = \frac{1}{\sqrt{2}}(|00\rangle+|11\rangle) = \begin{bmatrix}a_0b_0\\ a_0b_1\\ a_1b_0\\ a_1b_1\end{bmatrix} = a_0b_0|00\rangle + a_0b_1|01\rangle + a_1b_0|10\rangle + a_1b_1|11\rangle. $$ $|01\rangle$ and $|10\rangle$ do not appear on the left-hand side, so their coefficients must be zero: $a_1b_0=0$ and $a_0b_1=0$. This leads to a contradiction, since $a_1$ cannot be zero ($a_1b_1=1$), so $b_0$ must be zero, but $a_0b_0=1$. Therefore $|\phi^+\rangle$ cannot be written as a product. States that cannot be written as a product are called entangled states. This is the mathematical form of describing a phenomenon of strong correlations between random variables that exceed what is possible classically. Entanglement plays a central role in countless quantum algorithms. A simple example is [quantum teleportation](https://en.wikipedia.org/wiki/Quantum_teleportation). We will also see its applications in quantum machine learning protocols. We will have a closer look at entanglement in a subsequent notebook on measurements, but as a teaser, let us look at the measurement statistics of the $|\phi^+\rangle$ state. The explanation of the circuit preparing it will also come in a subsequent notebook. ``` qc = get_qc('2q-qvm', connection=fc) circuit = Program() circuit += H(0) circuit += CNOT(0, 1) ro = circuit.declare('ro', 'BIT', 2) circuit += MEASURE(0, ro[0]) circuit += MEASURE(1, ro[1]) circuit.wrap_in_numshots_loop(100) executable = qc.compile(circuit) result = qc.run(executable) plot_histogram(result) ``` Notice that 01 or 10 never appear in the measurement statistics. 
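We can back this up with a short classical calculation, independent of the circuit above: writing out the amplitudes of $|\phi^+\rangle$ explicitly and applying the Born rule gives probability $1/2$ for each of 00 and 11 and exactly zero for 01 and 10.

```
import numpy as np

# Build |phi+> = (|00> + |11>)/sqrt(2) directly and apply the Born rule.
ket_00 = np.kron(np.array([[1], [0]]), np.array([[1], [0]]))
ket_11 = np.kron(np.array([[0], [1]]), np.array([[0], [1]]))
phi_plus = (ket_00 + ket_11) / np.sqrt(2)

probabilities = np.abs(phi_plus) ** 2          # Born rule: |amplitude|^2
for label, p in zip(['00', '01', '10', '11'], probabilities.flatten()):
    print(label, p)                            # 0.5, 0.0, 0.0, 0.5
```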
``` qvm_server.terminate() quilc_server.terminate() ``` # Further reading Chapter 9 in Quantum Computing since Democritus by Scott Aaronson describes a similar approach to understanding quantum states -- in fact, the interference example was lifted from there.
``` import pandas as pd import numpy as np ``` # Data I/O * csv * json * hdf * html * many more Now, let's assume we have already loaded our dataset into pandas. The next job is to do meaningful analysis on the data. # Inspecting data ``` from urllib import request url = 'https://raw.githubusercontent.com/MPIDS/Python-Course/master/other/pandas/amazonianBirds_climate.csv' data = request.urlopen(url) data = data.read() st = data.decode('utf-8') # saving data import os datadir = os.path.join(os.getcwd(), '..', 'data') birds_filepath = os.path.join(datadir, 'amazonianBirds_climate.csv') with open(birds_filepath, 'w') as f: f.write(st) df = pd.read_csv(birds_filepath, parse_dates={'datetime':[1,2]}, error_bad_lines=False) df.head() df.shape df.columns df.sample(n = 4) df.mean() ``` # Selection ``` df = pd.read_csv(birds_filepath, parse_dates={'datetime':[1,2]}, na_values = ['?'], error_bad_lines=False) df.head() ``` ## Getting rows by row number ``` df.iloc[0:4] ``` **Note**: This syntax also works, but see caveats below. ```python df[0:4]``` ## Sorting data by date ``` df = df.sort_values('datetime') df.head() ``` ## Filtering out bad data (Boolean indexing) *we saw it already in the morning* ``` df = df[df['datetime']>='1970-01-01'] df.head() %%latex \[ \texttt{df = df[}\underbrace{\texttt{df['datetime']>='1970-01-01'}}_{\texttt{Boolean array}}] \] ``` ### Quite complex filtering is also possible ``` df[(df['datetime']>'2012-07-01') & (df['datetime']<'2012-08-01')].head() ``` ### Transforming data ``` df['elevation'].astype(float) def coerce_float(x): try: return float(x) except ValueError: return None df['elevation'] = df['elevation'].apply(coerce_float) ``` ## Reindex by datetime ``` df['datetime'] = pd.to_datetime(df['datetime'], errors='coerce') df = df[pd.notnull(df.datetime)] df.head() df = df.set_index('datetime') df.head() ``` ## Selecting by **index label** ``` df.loc['1990-07-01':'1992-12-01'] ``` This is equivalent to ``` df['1990-07-01':'1992-12-01'] ``` **WARNING:** The `.loc` syntax is infinitely preferable for selecting by label, as then you avoid pitfalls like: ``` td = pd.DataFrame(np.random.randint(10, size = (8,4)), index = range(3,11), columns=['A', 'B', 'C', 'D']) td td[0:4] # == td.iloc[0:4] ``` This does not return rows with index labels between 0 and 4; `.loc`, however, does ``` td.loc[0:4] ``` Why: the slicing operator `[:]` tries `iloc` first, then falls back to `loc` ### Slicing with increments ``` td td.iloc[:5:2, :] td.iloc[::-1, :] ``` Weird behaviour: ``` td.iloc[:1:-1, :] td.loc[3:7:2, :] ``` **WARNING:** Unlike Python's array indexing, `df.loc` *includes the endpoints* of its slices (`df.iloc`, like Python, excludes the stop position) ## Benefits of indexing ``` df = pd.read_csv(birds_filepath, parse_dates={'datetime':[1,2]}, error_bad_lines=False) df.head() %time df[(df['datetime'] > '2011-02-24') & (df['datetime'] < '2013-04-28') ].count() idf = df.copy() idf['datetime'] = pd.to_datetime(idf['datetime'], errors='coerce') idf = idf[pd.notnull(df.datetime)] idf = idf.set_index('datetime') idf.head() %time idf['2011-02-24':'2013-04-28']['latitude'].count() ``` ## Selecting by columns ``` df.loc[:, ['recordist', 'latitude', 'longitude']].head() ``` # Transforming data ## Adding a column ``` df = pd.read_csv(birds_filepath, parse_dates={'datetime':[1,2]}, error_bad_lines=False) df['datetime'] = pd.to_datetime(df['datetime'], errors='coerce') df = df[pd.notnull(df.datetime)] df = df.set_index('datetime') df.head() df.loc[:, 'year'] = df.index.year df.head() ``` ## Applying transformations on a
column, revisited ``` df.loc[:, 'year'] = df.loc[:, 'year'] - df.loc[:, 'year'].min() df.head() ``` ## Append ``` df1 = pd.DataFrame(np.random.randint(5, size=(4,6)), columns=list('ABCDEF')) df2 = pd.DataFrame(np.random.randint(5, size=(4,6)), columns=list('ABCDEF')) df1 df2 df3 = df1.append(df2) df3 ``` Now df3 has a nonsensical index: ``` df3.loc[2:3] ``` We have to reindex: ``` df3 df3.index = range(len(df3)) ``` ## Concatenation ``` df1 = pd.DataFrame(np.random.randint(5, size=(4,6)), columns=list('ABCDEF')) df2 = pd.DataFrame(np.random.randint(5, size=(4,6)), columns=list('ABCDEF')) df3 = pd.DataFrame(np.random.randint(5, size=(4,6)), columns=list('ABCDEF')) pd.concat([df1, df2, df3]) ``` ## Join ### Combining two tables with some common column(s) ``` from urllib import request from bs4 import BeautifulSoup import io ``` Before running the next cell, you will need to install a Python package called `lxml`. ``` !pip install --user lxml fossil_fuel_url = 'http://www.worldatlas.com/articles/countries-the-most-dependent-on-fossil-fuels.html' response = request.urlopen(fossil_fuel_url) data = response.read() ff_st = data.decode('utf-8') soup = BeautifulSoup(ff_st, "lxml") fossil = str(soup.find_all('table', attrs={'data-role':"table"})[0]) fdf = pd.read_html(io.StringIO(fossil))[0] fdf.head() renewable_url = 'https://en.wikipedia.org/wiki/List_of_countries_by_electricity_production_from_renewable_sources' response = request.urlopen(renewable_url) data = response.read() ff_st = data.decode('utf-8') soup = BeautifulSoup(ff_st, "lxml") renew = str(soup.find_all('table', class_='wikitable sortable')[0]) rdf = pd.read_html(io.StringIO(renew), header=0)[0] rdf.head() df1 = fdf.copy() df2 = rdf.copy() df3 = pd.merge(df1, df2, left_on = 'Country Name', right_on='Country') df3.head() ``` #### By default, join uses the intersection of the join keys ``` len(set(df1['Country Name'])), len(set(df2['Country'])), len(set(df1['Country Name'])&set(df2['Country'])) df3.shape ``` #### To have the union of the keys, use the `how='outer'` option: ``` df4 = pd.merge(df1, df2, left_on = 'Country Name', right_on='Country', how = 'outer') df4.head() ``` All countries in `df1` and `df2` are present, with rows that are not in both tables filled with `NaN` values. ``` len(set(df1['Country Name'])), len(set(df2['Country'])), len(set(df1['Country Name'])|set(df2['Country'])) df4.shape ``` #### It is also possible to have *only* the keys in the left table (or right) ``` df5 = pd.merge(df1, df2, left_on = 'Country Name', right_on='Country', how = 'left') df5.head() ``` All countries in `df1` are present, even if they are not in `df2`; but not the other way round. ### Join (AKA, merging on index) ``` idf1 = df1.set_index('Country Name') idf2 = df2.set_index('Country') idf1.join(idf2) ``` **Warning:** One major difference between `merge` and `join`: by default (i.e. without `how=<kind>` specified), `merge` does an **inner join**, but `join` does a **left outer join**. **Note:** Indexing is better for performing searches.
``` %timeit -n 1 idf1.join(idf2) %timeit -n 1 pd.merge(df1, df2, left_on = 'Country Name', right_on='Country', how = 'left') ``` This is, of course, also true outside of merge operations ``` df2 ``` # Aggregation ``` population_url = 'https://en.wikipedia.org/wiki/List_of_cities_proper_by_population' response = request.urlopen(population_url) data = response.read() pop_st = data.decode('utf-8') soup = BeautifulSoup(pop_st, "lxml") pops = str(soup.find_all('table', class_='sortable wikitable')[0]) popdf= pd.read_html(io.StringIO(pops), header=0)[0] popdf.head() popdf = popdf.loc[:, ['City', 'Population density (/km²)', 'Country']] popdf.rename(columns={'Population density (/km²)': 'population density'}, inplace=True) ``` ## How to obtain the number of cities per country with population density higher than 3000 ``` x = popdf[popdf['population density']>3000] gr = x.groupby('Country') gr.aggregate('count') ``` ### Using different aggregators ``` gr.aggregate({'City':'count', 'population density':'mean'}) ``` `groupby.describe` gives a very succinct statistical summary of your dataset ``` gr.describe() ``` ## The result of `groupby` is a DataFrame with a `MultiIndex` ``` stats = gr.describe() stats.index ``` ### This is a very powerful tool to store hierarchical data ``` stats ``` ## Selecting in a MultiIndexed DataFrame ``` stats.loc['Vietnam'] stats.loc['Vietnam', 'mean'] ``` ### Slicing works, too ``` stats.loc['Afghanistan':'Brazil'] ``` ## Getting a "flat" DataFrame back: `reset_index()` ### Obtaining the top 10 countries by average population density in urban areas ``` x = stats.sort_values('population density', ascending=False) x y = x.reset_index() y.head() y[y['level_1']=='mean'][0:10] ``` That's it! Time for some exercises. # Exercises ## Solar energy **Relevant concepts:** * BeautifulSoup * read_html * df.join * df.groupby, df.aggregate Use the [UN data portal](http://data.un.org) to download datasets on [Worldwide sunshine](http://data.un.org/Data.aspx?q=Korea&d=CLINO&f=ElementCode%3A15%3BCountryCode%3AKO), [solar electricity production](http://data.un.org/Data.aspx?d=EDATA&f=cmID%3AES) and [total Electricity production](http://data.un.org/Data.aspx?d=EDATA&f=cmID%3AEC). Compute 1. Year-by-year change in solar electricity output by country. 2. How do sunshine and the fractional share of solar in total electricity output correlate? ## Beijing PM2.5 **Relevant concepts:** * integer based/label based slicing * df.groupby Read the [Beijing PM2.5 Data Data Set](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data). Plot: 1. Average pm2.5 count in each month. Plot separate curves for each year. 2. Plot the correlation between pm2.5 and temperature. ``` import matplotlib matplotlib.rcParams['figure.figsize'] = (8,6) %matplotlib inline from importlib import reload reload(plt) import pandas as pd from io import StringIO from urllib import request url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv' ``` ## GPS tracks Download the GPS tracks dataset from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/GPS+Trajectories). Read the two csvs `go_track_tracks.csv` and `go_track_trackspoints.csv`. The first one contains a list of GPS tracks, while the second file contains the individual points in each GPS track. Determine: 1. The origin and destination of each vehicle. 2. The top speed of each vehicle (choose a suitable time interval, otherwise you will get a non-smooth velocity profile). 3.
The fraction of cars and buses (separately) that exceeded 80 km/h. ## Container in Sea Download the dataset at [http://data.deutschebahn.com/dataset/data-sensordaten-schenker-seefrachtcontainer](http://data.deutschebahn.com/dataset/data-sensordaten-schenker-seefrachtcontainer). The dataset contains GPS traces from containers on cargo ships. 1. Plot the trajectories of each cargo ship. 2. Compute the distance travelled each day by each ship. 3. **(Extra challenge)** Visualize the trajectories on top of a world map by using `bokeh` or `plotly`. ``` url = 'http://download-data.deutschebahn.com/static/datasets/sensordaten_schenker/161209_Schenker_Sensordaten.csv' ```
# **Midterm (TTS) Instructions** Please save the file as **TTS_NIM.ipynb**. Write the code needed to produce the requested output. When you are done, upload it to GClass. Don't be late...late submissions will lose midterm points. Have fun! ### **1. Import Libraries** ``` # Import the libraries you need (pandas, numpy, matplotlib, seaborn, and missingno) import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sea import missingno as msno # Load the dataset from (https://github.com/adelnehme/python-for-spreadsheet-users-webinar/blob/master/datasets/airbnb.csv?raw=true) and store it in a variable named data data = pd.read_csv ('https://github.com/adelnehme/python-for-spreadsheet-users-webinar/blob/master/datasets/airbnb.csv?raw=true', index_col ='Unnamed: 0') # Look at the top 5 rows of the "data" DataFrame data.head() # Check the data types in "data" data.dtypes ``` ### **2. Cleaning the Data** **Challenge 1**: Replace `coordinates` with `latitude` and `longitude` columns ``` # Remove the "(" and ")" characters from the coordinates column data['coordinates'] = data['coordinates'].str.replace("(","") data['coordinates'] = data['coordinates'].str.replace(")","") # Print the top 5 values of the coordinates column data['coordinates'].head() # Split the coordinates column and name the result lat_long lat_long = data['coordinates'].str.split(",", expand = True) # Put lat_long[0] into latitude and lat_long[1] into longitude data['latitude']=lat_long[0] data['longitude']=lat_long[1] # Print the data data.head(10) # Convert the latitude and longitude values to FLOAT data['latitude'] = data['latitude'].astype(float) data['longitude'] = data['longitude'].astype(float) # Check the data types once more data.dtypes # Drop the coordinates column yg_dihapus = ['coordinates'] data.drop(yg_dihapus, inplace = True, axis = 1) # Print the data once more data.head(5) ``` **Challenge 2**: Remove `$` from the `price` column and convert it to `float` ``` # Remove the dollar sign ($) from the price column before we convert it to FLOAT data['price'] = data['price'].str.strip('$') # Print to check whether the price column has changed data['price'].head(5) # Convert the price column to FLOAT data['price'] = data['price'].astype(float) # Compute the mean of the price column after conversion data['price'].mean() # Visualize the distribution of the price column sea.displot(data['price'], bins = 20) ``` **Challenge 3**: Let's make the `room_type` column properly categorical.
``` # Print so we know which categories exist in the `room_type` column data['room_type'].unique() # Convert everything to lowercase data['room_type']=data['room_type'].str.lower() data['room_type'].unique() # Remove leading and trailing whitespace data['room_type']=data['room_type'].str.strip() data['room_type'].unique() # Map to the categories 'Shared room', 'Entire place', 'Private room' and 'Hotel room' mapping = {'private room' : 'Private Room', 'private' : 'Private Room', 'entire home/apt' : 'Entire Place', 'shared room' : 'Shared Room', 'home' : 'Hotel Room'} for key in mapping.keys(): data['room_type']=data['room_type'].str.replace(key, mapping[key]) # Check the top 5 rows to see the changes data.head(5) ``` **Challenge 4**: Let's clean up the **missing data** ``` pip install missingno # Visualize missingness msno.matrix(data) plt.show() # Visualize missingness sorted by rating msno.matrix(data.sort_values(by ='rating')) plt.show() # Show it as a bar plot msno.bar(data) # Check the DataFrame rows that have missing values in the rating, number_of_stays, 5_stars, reviews_per_month columns data[data['rating'].isna()].describe() # Impute the missing data data = data.fillna({'reviews_per_month':0, 'number_of_stays':0, '5_stars':0}) # Create a new column named dinilai (rated) dinilai = np.where(data['rating'].isna() == True, 0, 1) data['dinilai'] = dinilai # Check the data data.head() ``` **Challenge 5**: Drop the following columns 1. neighbourhood_full 2. number_of_reviews 3. last_review 4. reviews_per_month 5. availability_365 6. number_of_stays 7. 5_stars 8. listing_added ``` # Drop some columns that are not needed yg_dihapus2 = ['neighbourhood_full','number_of_reviews', 'last_review', 'reviews_per_month', 'availability_365', 'number_of_stays', '5_stars', 'listing_added'] data.drop(yg_dihapus2, inplace = True, axis = 1) # Show the data data.head(10) ```
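As a side note, the chained `str.replace` loop in Challenge 3 performs substring replacement, which can misbehave when one key is a substring of another. A minimal alternative sketch (not part of the original exam answer, reusing the same mapping defined above) maps whole cell values with `Series.replace` instead:

```python
# Map whole cell values at once instead of replacing substrings in a loop.
room_type_map = {
    'private room': 'Private Room',
    'private': 'Private Room',
    'entire home/apt': 'Entire Place',
    'shared room': 'Shared Room',
    'home': 'Hotel Room',
}
data['room_type'] = data['room_type'].replace(room_type_map)
data['room_type'].unique()
```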
# Computing with Shared-memory HLS Cores in PYNQ This simple notebook demonstrates using an HLS core that communicates data via a shared-memory region. The core implemented in the fabric is a simple matrix multiply core. The HLS is below: ```C++ #include "mmult.hpp" // mmult() // Implements a simple matrix-multiply function in HLS // Parameters: // A - mata_t // A 2-dimensional array of mata_t values to be multiplied // // BT - matb_t // A 2-dimensional array of matb_t values to be multiplied // BT is the transpose of B // // C - matc_t // Matrix multiply output definition // // The dimensions of the arrays are defined in mmult.hpp. void mmult(const mata_t A [A_ROWS][A_COLS], const matb_t BT [B_COLS][B_ROWS], matc_t C [A_ROWS][B_COLS]){ /* Define a new AXI-Lite bus named CTRL for offset arguments, and HLS Status/Control registers (return)*/ #pragma HLS INTERFACE s_axilite port=return bundle=CTRL /* Define a new AXI4 Master bus named DATA for memory ports A, BT, and C. The argument offset=slave specifies that the pointers (offset) of A, BT, and C can be set using register writes in the CTRL axi slave port */ #pragma HLS INTERFACE m_axi port=A offset=slave bundle=DATA #pragma HLS INTERFACE m_axi port=BT offset=slave bundle=DATA #pragma HLS INTERFACE m_axi port=C offset=slave bundle=DATA // We use the log2 functions in mmult.hpp to determine the correct size // of the index variables i, j, and k. Typically, vivado will size these // correctly ap_uint<pynq::log2(A_ROWS) + 1> i = 0; ap_uint<pynq::log2(B_COLS) + 1> j = 0; ap_uint<pynq::log2(A_COLS) + 1> k = 0; // Perform a simple matrix-multiply with three nested for-loops for(i = 0; i < A_ROWS; ++i){ for(j = 0; j < B_COLS; ++j){ matc_t sum = 0; // Accumulate over the shared dimension (A_COLS == B_ROWS) for(k = 0; k < A_COLS; ++k){ #pragma HLS PIPELINE sum += A[i][k]*BT[j][k]; } C[i][j] = sum; } } } ``` The HLS Core automatically transposes B for you. The documentation for the PYNQ Overlay can be found by running: ``` from pynqhls.sharedmem import sharedmemOverlay overlay = sharedmemOverlay('sharedmem.bit') sharedmemOverlay? ``` To begin, generate the random matrices A and B. Compute their product using NumPy's matmul function. ``` import numpy as np A = np.random.randint(-10, 10, size=(10,10)) B = np.random.randint(-10, 10, size=(10,10)) C = np.matmul(A, B) ``` Next, run the overlay: ``` CHLS = overlay.run(A, B) ``` Finally, compare the results: ``` if(np.array_equal(CHLS, C)): print("Results are equal!") else: print("Results are not equal!") ```
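For reference, here is a short numpy sketch of the arithmetic the HLS triple loop performs: since the core consumes BT (the transpose of B), each output element C[i][j] is the dot product of row i of A with row j of BT, which coincides with `np.matmul(A, B)`.

```python
import numpy as np

A = np.random.randint(-10, 10, size=(10, 10))
B = np.random.randint(-10, 10, size=(10, 10))
BT = B.T

# What the HLS loops compute: C[i][j] = sum_k A[i][k] * BT[j][k]
C_loop = np.zeros((10, 10), dtype=A.dtype)
for i in range(10):
    for j in range(10):
        C_loop[i, j] = np.sum(A[i, :] * BT[j, :])

print(np.array_equal(C_loop, np.matmul(A, B)))  # True
```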
``` # This is the code used to generate the figures in: # Gene regulatory network reconstruction using single-cell RNA sequencing of barcoded genotypes in diverse environments # https://doi.org/10.1101/581678 # The data files to run this script are located on Zenodo # https://zenodo.org/record/3354412 # Load modules from inferelator import inferelator_workflow, inferelator_verbose_level, MPControl, CrossValidationManager # Set verbosity level to "Talky" inferelator_verbose_level(1) # Set the location of the input data and the desired location of the output files DATA_DIR = '../data/yeast' OUTPUT_DIR = '~/jackson_2019/' EXPRESSION_FILE_NAME = '103118_SS_Data.tsv.gz' GENE_METADATA_FILE_NAME = 'orfs.tsv' METADATA_COLUMNS = ['TF', 'strain', 'date', 'restriction', 'mechanism', 'time'] YEASTRACT_PRIOR = "YEASTRACT_20190713_BOTH.tsv" TF_NAMES = "tf_names_gold_standard.txt" YEASTRACT_TF_NAMES = "tf_names_yeastract.txt" # Start Multiprocessing Engine n_cores_local = 10 local_engine = True # Multiprocessing uses the pathos implementation of multiprocessing (with dill instead of cPickle) # This is suited for a single computer, but will likely be too slow for the example here if __name__ == '__main__' and local_engine: MPControl.set_multiprocess_engine("multiprocessing") MPControl.client.processes = n_cores_local MPControl.connect() # Define the general run parameters used for all figures def set_up_workflow(wkf): wkf.set_file_paths(input_dir=DATA_DIR, output_dir=OUTPUT_DIR, expression_matrix_file='103118_SS_Data.tsv.gz', gene_metadata_file='orfs.tsv', gold_standard_file='gold_standard.tsv', priors_file='gold_standard.tsv', tf_names_file=TF_NAMES) wkf.set_file_properties(extract_metadata_from_expression_matrix=True, expression_matrix_metadata=METADATA_COLUMNS, expression_matrix_columns_are_genes=True, gene_list_index="SystematicName") wkf.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True, cv_split_ratio=0.5) wkf.set_run_parameters(num_bootstraps=5) wkf.set_count_minimum(0.05) wkf.add_preprocess_step("log2") return wkf def set_up_fig5a(wkf): cv_wrap = CrossValidationManager(wkf) cv_wrap.add_gridsearch_parameter('random_seed', list(range(42, 52))) return cv_wrap def set_up_fig5b(wkf): cv_wrap = CrossValidationManager(wkf) cv_wrap.add_gridsearch_parameter('random_seed', list(range(42, 52))) cv_wrap.add_size_subsampling([0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 1], seed=86) return cv_wrap def yeastract(wkf): wkf.set_file_paths(tf_names_file=YEASTRACT_TF_NAMES, priors_file=YEASTRACT_PRIOR) # Figure 5A: Shuffled Priors worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.set_shuffle_parameters(shuffle_prior_axis=0) worker.append_to_path('output_dir', 'figure_5a_shuffled') set_up_fig5a(worker).run() # Figure 5A: Random Data worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.set_file_paths(expression_matrix_file='110518_SS_NEG_Data.tsv.gz') worker.append_to_path('output_dir', 'figure_5a_neg_data') set_up_fig5a(worker).run() # Figure 5A: No Imputation worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.append_to_path('output_dir', 'figure_5a_no_impute') set_up_fig5a(worker).run() # Figure 5A: MAGIC worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.set_file_paths(expression_matrix_file='MAGIC_DATA.tsv.gz') worker.preprocessing_workflow = list() worker.append_to_path('output_dir', 'figure_5a_magic') 
set_up_fig5a(worker).run() # Figure 5A: scImpute worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.set_file_paths(expression_matrix_file='SCIMPUTE_DATA.tsv.gz') worker.append_to_path('output_dir', 'figure_5a_scImpute') set_up_fig5a(worker).run() # Figure 5A: VIPER worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.set_file_paths(expression_matrix_file='VIPER_DATA.tsv.gz') worker.append_to_path('output_dir', 'figure_5a_VIPER') set_up_fig5a(worker).run() # Figure 5B: ATAC-Seq prior worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.set_file_paths(priors_file="yeast-motif-prior.tsv", gold_standard_file="gold_standard.tsv") worker.append_to_path('output_dir', 'figure_5b_atac') set_up_fig5b(worker).run() # Figure 5B: Bussemaker worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.set_file_paths(priors_file="Bussemaker_pSAM_priors.tsv", gold_standard_file="gold_standard.tsv") worker.append_to_path('output_dir', 'figure_5b_atac') set_up_fig5b(worker).run() # Figure 5B: No Priors worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.set_tfa(tfa_driver=False) worker.append_to_path('output_dir', 'figure_5b_no_priors') set_up_fig5b(worker).run() # Figure 5B: Gold Standard worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.append_to_path('output_dir', 'figure_5b_gold_standard_cv') set_up_fig5b(worker).run() # Figure 5B: YEASTRACT worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) yeastract(worker) worker.append_to_path('output_dir', 'figure_5b_yeastract') set_up_fig5b(worker).run() # Figure 5C: Condition Specific worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) worker.append_to_path('output_dir', 'figure_5c_conditions') cv_wrap = CrossValidationManager(worker) cv_wrap.add_gridsearch_parameter('random_seed', list(range(42, 52))) cv_wrap.add_grouping_dropin("Condition", group_size=500) cv_wrap.run() del cv_wrap # Figure 5D: Single Task Learning (use the top-level imports, consistent with the other figures) worker = set_up_workflow(inferelator_workflow(regression="bbsr", workflow="single-cell")) yeastract(worker) worker.append_to_path('output_dir', 'figure_5d_stl') cv_wrap = CrossValidationManager(worker) cv_wrap.add_gridsearch_parameter('random_seed', list(range(52, 62))) cv_wrap.run() del cv_wrap # Figure 5D: BBSR By Task Learning worker = set_up_workflow(inferelator_workflow(regression="bbsr-by-task", workflow="multitask")) yeastract(worker) worker.append_to_path('output_dir', 'figure_5d_mtl_bbsr') cv_wrap = CrossValidationManager(worker) cv_wrap.add_gridsearch_parameter('random_seed', list(range(52, 62))) cv_wrap.run() del cv_wrap # Figure 5D: Multi Task Learning worker = set_up_workflow(inferelator_workflow(regression="amusr", workflow="multitask")) yeastract(worker) worker.append_to_path('output_dir', 'figure_5d_mtl_amusr') cv_wrap = CrossValidationManager(worker) cv_wrap.add_gridsearch_parameter('random_seed', list(range(52, 62))) cv_wrap.run() del cv_wrap # Figure 6: Final Network worker = set_up_workflow(inferelator_workflow(regression="amusr", workflow="multitask")) yeastract(worker) worker.set_file_paths(gold_standard_file="YEASTRACT_Both_20181118.tsv") worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=False, cv_split_ratio=None)
worker.set_run_parameters(num_bootstraps=50, random_seed=100) worker.append_to_path('output_dir', 'figure_6_final') final_network = worker.run() del worker ```
Plot the maximum margin separating hyperplane within a two-class separable dataset using a linear Support Vector Machines classifier trained using SGD. #### New to Plotly? Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! ### Version ``` import sklearn sklearn.__version__ ``` ### Imports This tutorial imports [SGDClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.SGDClassifier) and [make_blobs](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html#sklearn.datasets.make_blobs). ``` import plotly.plotly as py import plotly.graph_objs as go import numpy as np from sklearn.linear_model import SGDClassifier from sklearn.datasets.samples_generator import make_blobs ``` ### Calculations ``` # we create 50 separable points X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60) # fit the model clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane xx = np.linspace(-1, 5, 10) yy = np.linspace(-1, 5, 10) X1, X2 = np.meshgrid(xx, yy) Z = np.empty(X1.shape) for (i, j), val in np.ndenumerate(X1): x1 = val x2 = X2[i, j] p = clf.decision_function([[x1, x2]]) Z[i, j] = p[0] ``` ### Plot Results ``` levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] cmap = [[0, 'black'],[1, 'white']] trace = go.Contour(x=xx, y=yy, z=Z, colorscale=cmap, showscale=False, ncontours=4, contours=dict(coloring='lines', start=-1, size=1, end=2 ), ) trace1 = go.Scatter(x=X[:, 0], y=X[:, 1], mode='markers', marker=dict(color=X[:, 0], colorscale='Viridis', showscale=False, line=dict(color='black', width=1)) ) data = [trace, trace1] layout = go.Layout(xaxis=dict(zeroline=False, showgrid=False), yaxis=dict(zeroline=False, showgrid=False), hovermode='closest') fig = go.Figure(data=data, layout=layout) py.iplot(fig) from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'SGD Maximum Margin Separating Hyperplane.ipynb', 'scikit-learn/plot-sgd-separating-hyperplane/', 'SGD Maximum Margin Separating Hyperplane | plotly', ' ', title = 'SGD Maximum Margin Separating Hyperplane | plotly', name = 'SGD Maximum Margin Separating Hyperplane', has_thumbnail='true', thumbnail='thumbnail/sgd.jpg', language='scikit-learn', page_type='example_index', display_as='linear_models', order=4, ipynb= '~Diksha_Gabha/3173') ```
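As a small addition to the scikit-learn example above (a sketch, assuming `clf` has been fit as in the Calculations section), the dashed contours at decision-function values of +1 and -1 mark the margin boundaries, and the margin width can be read off the fitted coefficients:

```python
# Inspect the fitted hyperplane w.x + b = 0.
w = clf.coef_[0]
b = clf.intercept_[0]
print("w =", w, "b =", b)

# The contours at decision_function = +/-1 are the margin boundaries;
# their geometric separation is 2 / ||w||.
print("margin width:", 2 / np.linalg.norm(w))
```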
<a href="https://colab.research.google.com/github/qwerlarlgus/CNN-Cats-Dogs/blob/main/4_2_aug_pretrained.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` from google.colab import drive drive.mount('/content/drive') %matplotlib inline !ls -l !cp ./drive/MyDrive/training_data.zip . !unzip training_data.zip import glob import numpy as np import matplotlib.pyplot as plt from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img IMG_DIM = (150, 150) train_files = glob.glob('training_data/*') train_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in train_files] train_imgs = np.array(train_imgs) train_labels = [fn.split('/')[1].split('.')[0].strip() for fn in train_files] validation_files = glob.glob('validation_data/*') validation_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in validation_files] validation_imgs = np.array(validation_imgs) validation_labels = [fn.split('/')[1].split('.')[0].strip() for fn in validation_files] print('Train dataset shape:', train_imgs.shape, '\tValidation dataset shape:', validation_imgs.shape) train_imgs_scaled = train_imgs.astype('float32') validation_imgs_scaled = validation_imgs.astype('float32') train_imgs_scaled /= 255 validation_imgs_scaled /= 255 batch_size = 50 num_classes = 2 epochs = 150 input_shape = (150, 150, 3) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() le.fit(train_labels) # encode wine type labels train_labels_enc = le.transform(train_labels) validation_labels_enc = le.transform(validation_labels) print(train_labels[0:5], train_labels_enc[0:5]) train_datagen = ImageDataGenerator( zoom_range=0.3, rotation_range=50, # rescale=1./255, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, fill_mode='nearest') val_datagen = ImageDataGenerator() # rescale=1./255 train_generator = train_datagen.flow(train_imgs, train_labels_enc, batch_size=30) val_generator = val_datagen.flow(validation_imgs, validation_labels_enc, batch_size=20) from tensorflow.keras.applications import vgg16 from tensorflow.keras.models import Model import tensorflow.keras vgg = vgg16.VGG16(include_top=False, weights='imagenet', input_shape=input_shape) output = vgg.layers[-1].output output = tensorflow.keras.layers.Flatten()(output) vgg_model = Model(vgg.input, output) vgg_model.trainable = False for layer in vgg_model.layers: layer.trainable = False vgg_model.summary() import pandas as pd pd.set_option('max_colwidth', -1) layers = [(layer, layer.name, layer.trainable) for layer in vgg_model.layers] pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable']) print("Trainable layers:", vgg_model.trainable_weights) bottleneck_feature_example = vgg.predict(train_imgs_scaled[0:1]) print(bottleneck_feature_example.shape) plt.imshow(bottleneck_feature_example[0][:,:,0]) def get_bottleneck_features(model, input_imgs): features = model.predict(input_imgs, verbose=0) return features train_features_vgg = get_bottleneck_features(vgg_model, train_imgs_scaled) validation_features_vgg = get_bottleneck_features(vgg_model, validation_imgs_scaled) print('Train Bottleneck Features:', train_features_vgg.shape, '\tValidation Bottleneck Features:', validation_features_vgg.shape) from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer from tensorflow.keras.models import Sequential from tensorflow.keras import optimizers model = Sequential() 
model.add(vgg_model) model.add(Dense(64, activation='relu', input_dim=input_shape)) model.add(Dropout(0.3)) model.add(Dense(64, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=2e-5), metrics=['accuracy']) model.summary() history = model.fit_generator(train_generator, epochs=100, validation_data=val_generator, verbose=1) f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) t = f.suptitle('Pre-trained CNN (Transfer Learning) Performance', fontsize=12) f.subplots_adjust(top=0.85, wspace=0.3) epoch_list = list(range(1,101)) ax1.plot(epoch_list, history.history['accuracy'], label='Train Accuracy') ax1.plot(epoch_list, history.history['val_accuracy'], label='Validation Accuracy') ax1.set_xticks(np.arange(0, 101, 5)) ax1.set_ylabel('Accuracy Value') ax1.set_xlabel('Epoch') ax1.set_title('Accuracy') l1 = ax1.legend(loc="best") ax2.plot(epoch_list, history.history['loss'], label='Train Loss') ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss') ax2.set_xticks(np.arange(0, 101, 5)) ax2.set_ylabel('Loss Value') ax2.set_xlabel('Epoch') ax2.set_title('Loss') l2 = ax2.legend(loc="best") model.save('4-2-augpretrained_cnn.h5') ```
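A minimal sketch of how the saved model might be reused for inference later; the image path below is a placeholder, and the class mapping assumes `LabelEncoder` encoded the labels alphabetically:

```python
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np

model = load_model('4-2-augpretrained_cnn.h5')

# 'some_image.jpg' is a hypothetical path, not part of the original dataset.
# The generators above fed raw pixel values (the rescale argument is commented
# out), so no division by 255 is applied here either.
img = img_to_array(load_img('some_image.jpg', target_size=(150, 150)))
prob = model.predict(np.expand_dims(img, axis=0))[0][0]

# Assuming the alphabetical encoding cat -> 0, dog -> 1.
print('dog' if prob > 0.5 else 'cat', prob)
```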
github_jupyter
``` import os import itertools import math from collections import defaultdict from itertools import product import pandas as pd import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec import seaborn as sns plt.style.use(["science"]) results_folder = "../results/drift/" models = os.listdir(results_folder) models = ["MLP", "LSTM", "CNN", "TCN"] datasets = [d.replace(".csv", "") for d in os.listdir(results_folder + models[0])] print( f"{len(models)} models and {len(datasets)} datasets\n", f'\tmodels: {", ".join(models)}\n', f'\tdatasets: {", ".join(datasets)}', ) colors_list = ["#0051a2", "#ffd44f", "#f4777f", "#93003a", "#97964a"] colors = {m: colors_list[i] for i, m in enumerate(models)} data = defaultdict(dict) for d, m in itertools.product(datasets, models): df = pd.read_csv("{}/{}/{}.csv".format(results_folder, m, d)) data[d][m] = df.copy() means = {d: {m: data[d][m].metric.mean() for m in models} for d in datasets} pd.DataFrame(means).T ``` ## Plot Results ``` plt.rcParams.update({"font.size": 7}) fig_width_pt = 347.12354 * 1.0 # Get this from LaTeX using \showthe\columnwidth inches_per_pt = 1.0 / 72.27 # Convert pt to inches golden_mean = (math.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio fig_width = fig_width_pt * inches_per_pt # width in inches fig_height = 2.0 * fig_width * golden_mean # height in inches fig_size = [fig_width, fig_height] selected_datasets = [ "RTGa", "RTGa3", "RTGa6", "ARGWa-F1F4", "ARGWa-F2F5F8", "SEAa-F2F4", "RTGg", "RTGg3", "RTGg6", "ARGWg-F1F4", "ARGWg-F2F5F8", "SEAg-F2F4", "RBFi-slow", "RBFi-fast", "LED-4", ] fig, big_axes = plt.subplots(nrows=5, ncols=1, figsize=fig_size) for row, big_ax in enumerate(big_axes, start=0): if row == 0: big_ax.set_title("Abrupt", fontsize=8) if row == 2: big_ax.set_title("Gradual", fontsize=8) if row == 4: big_ax.set_title("Incremental", fontsize=8) # Turn off axis lines and ticks of the big subplot # obs alpha is 0 in RGBA string! 
# big_ax.tick_params(labelcolor=(1.,1.,1., 0.0), top='off', bottom='off', left='off', right='off') # removes the white frame # big_ax._frameon = False subplots = [ ((5, 6), (0, 0), 2, "y"), ((5, 6), (0, 2), 2, "t-Abrupt"), ((5, 6), (0, 4), 2, ""), ((5, 6), (1, 0), 2, "y"), ((5, 6), (1, 2), 2, ""), ((5, 6), (1, 4), 2, ""), ((5, 6), (2, 0), 2, "y"), ((5, 6), (2, 2), 2, "t-Gradual"), ((5, 6), (2, 4), 2, ""), ((5, 6), (3, 0), 2, ""), ((5, 6), (3, 2), 2, ""), ((5, 6), (3, 4), 2, ""), ((5, 6), (4, 0), 2, "yx"), ((5, 6), (4, 2), 2, "xt-Incremental"), ((5, 6), (4, 4), 2, "x"), ] ax = [] for sh, loc, colspan, opt in subplots: axi = plt.subplot2grid(sh, loc, colspan=colspan, fig=fig) axi.set_xticklabels([]) axi.set_yticklabels([]) axi.set_ylim(-0.1, 1.1) axi.set_yticks([0.0, 0.5, 1.0]) if "x" in opt: axi.set_xticks([int(j / 2 * 100000) for j in range(11)], minor=True) axi.set_xticks([0, 250e3, 500e3], minor=False) axi.set_xticklabels([str(0), "", str(int(500e3))]) if "y" in opt: axi.set_ylabel("Kappa") axi.set_yticklabels(["0.0", "0.5", "1.0"]) ax.append(axi) for i, d in enumerate(selected_datasets): if i == 1: ax[i].set_title(r"\textbf{Abrupt}" + "\n" + d) elif i == 7: ax[i].set_title(r"\textbf{Gradual}" + "\n" + d) elif i == 13: ax[i].set_title(r"\textbf{Incremental}" + "\n" + d) else: ax[i].set_title(d) for m in models: ax[i].plot( data[d][m].instances, data[d][m].metric, label=m, c=colors[m], linewidth=0.7, alpha=0.8, ) ax[0].legend(ncol=len(models), loc=(0.4, -6.85)) plt.subplots_adjust(hspace=0.575) # plt.tight_layout() fig.savefig("images/drift-results-all.png", format="png", dpi=500) plt.show() plt.rcParams.update({"font.size": 7}) fig_width_pt = 347.12354 * 1.0 # Get this from LaTeX using \showthe\columnwidth inches_per_pt = 1.0 / 72.27 # Convert pt to inches golden_mean = (math.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio fig_width = fig_width_pt * inches_per_pt # width in inches fig_height = 3.5 * fig_width * golden_mean # height in inches fig_size = [fig_width, fig_height] selected_datasets = [ "RTGa", "RTGa3", "RTGa6", "ARGWa-F1F4", "ARGWa-F2F5F8", "SEAa-F2F4", "RTGg", "RTGg3", "RTGg6", "ARGWg-F1F4", "ARGWg-F2F5F8", "SEAg-F2F4", "RBFi-slow", "RBFi-fast", "LED-4", ] n = 3 # number of double-rows m = 4 # number of columns t = 0.9 # 1-t == top space b = 0.1 # bottom space (both in figure coordinates) msp = 0.3 # minor spacing sp = 1.0 # major spacing offs = (1 + msp) * (t - b) / (3 * n + n * msp + (n - 1) * sp) # grid offset hspace = sp + msp + 2 # height space per grid gso = GridSpec(n, m, bottom=b + 2 * offs, top=t, hspace=hspace) gse = GridSpec(n, m, bottom=b + offs, top=t - offs, hspace=hspace) gsl = GridSpec(n, m, bottom=b, top=t - 2 * offs, hspace=hspace) subplots = [ (gso, 0, 2, "y"), (gso, 2, 2, ""), (gse, 0, 2, "y"), (gse, 2, 2, ""), (gsl, 0, 2, "y"), (gsl, 2, 2, ""), (gso, 4, 2, "y"), (gso, 6, 2, ""), (gse, 4, 2, "y"), (gse, 6, 2, ""), (gsl, 4, 2, "y"), (gsl, 6, 2, ""), (gso, 8, 2, "y"), (gso, 10, 2, ""), (gse, 9, 2, "x"), ] fig = plt.figure(figsize=fig_size) ax = [] for (gs, idx, ncol, opt), d in zip(subplots, selected_datasets): axi = fig.add_subplot(gs[idx : idx + ncol]) axi.set_xticklabels([]) axi.set_yticklabels([]) axi.set_ylim(-0.1, 1.1) axi.set_yticks([0.0, 0.5, 1.0]) axi.set_title(d) for m in models: axi.plot( data[d][m].instances, data[d][m].metric, label=m, c=colors[m], linewidth=0.7, alpha=0.8, ) if "x" in opt: axi.set_xticks([int(j / 2 * 100000) for j in range(11)], minor=True) axi.set_xticks([0, 250e3, 500e3], minor=False) axi.set_xticklabels([str(0), "", 
str(int(500e3))]) if "y" in opt: axi.set_ylabel("Kappa") axi.set_yticklabels(["0.0", "0.5", "1.0"]) if "t" in opt: axi.set_title( r"\normalsize\textbf{-- " + opt.split("-")[-1] + " --}" + "\n" + d ) ax.append(axi) ax[0].legend(ncol=len(models), loc=(0.25, -10.5)) plt.subplots_adjust(hspace=0.575) plt.tight_layout() fig.savefig("images/drift-results-all-2.png", format="png", dpi=500) plt.show() ``` ## Separated images for each drift type ``` # -- ABRUPT -- plt.rcParams.update({"font.size": 7}) fig_width_pt = 347.12354 * 1.0 # Get this from LaTeX using \showthe\columnwidth inches_per_pt = 1.0 / 72.27 # Convert pt to inches golden_mean = (math.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio fig_width = fig_width_pt * inches_per_pt # width in inches fig_height = 0.75 * fig_width * golden_mean # height in inches fig_size = [fig_width, fig_height] selected_datasets = [ "RTGa", "RTGa3", "RTGa6", "ARGWa-F1F4", "ARGWa-F2F5F8", "SEAa-F2F4", ] fig = plt.figure(figsize=fig_size) gs = GridSpec(2, 3 * 2) ax = [] for i, d in enumerate(selected_datasets): axi = fig.add_subplot(gs[i * 2 : (i * 2 + 2)]) axi.set_title(d) for m in models: axi.plot( data[d][m].instances / 1000, data[d][m].metric, label=m, c=colors[m], linewidth=0.7, alpha=0.8, ) axi.set_ylim((-0.1, 1.1)) axi.set_xticks([0, 250, 500]) axi.set_xticklabels([0, 250, 500]) if i == 2: # RTGa6 axi.set_xticks([0, 500, 1000]) axi.set_xticklabels([0, 500, 1000]) ax.append(axi) ax[0].set_ylabel("Kappa") ax[3].set_ylabel("Kappa") ax[1].set_yticklabels([]) ax[2].set_yticklabels([]) ax[4].set_yticklabels([]) ax[5].set_yticklabels([]) ax[4].set_xlabel(r"Processed instances ($\times 10^3$)") plt.subplots_adjust(hspace=0.5) ax[0].legend( ncol=len(models), loc=(0.35, 1.35), frameon=False, fancybox=False, shadow=True ) plt.savefig("images/abrupt_drift.pdf", dpi=500) # -- Gradual -- plt.rcParams.update({"font.size": 7}) fig_width_pt = 347.12354 * 1.0 # Get this from LaTeX using \showthe\columnwidth inches_per_pt = 1.0 / 72.27 # Convert pt to inches golden_mean = (math.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio fig_width = fig_width_pt * inches_per_pt # width in inches fig_height = 0.75 * fig_width * golden_mean # height in inches fig_size = [fig_width, fig_height] selected_datasets = [ "RTGg", "RTGg3", "RTGg6", "ARGWg-F1F4", "ARGWg-F2F5F8", "SEAg-F2F4", ] fig = plt.figure(figsize=fig_size) gs = GridSpec(2, 3 * 2) ax = [] for i, d in enumerate(selected_datasets): axi = fig.add_subplot(gs[i * 2 : (i * 2 + 2)]) axi.set_title(d) for m in models: axi.plot( data[d][m].instances / 1000, data[d][m].metric, label=m, c=colors[m], linewidth=0.7, alpha=0.8, ) axi.set_ylim((-0.1, 1.1)) axi.set_xticks([0, 250, 500]) axi.set_xticklabels([0, 250, 500]) if i == 2: # RTGa6 axi.set_xticks([0, 500, 1000]) axi.set_xticklabels([0, 500, 1000]) ax.append(axi) ax[0].set_ylabel("Kappa") ax[3].set_ylabel("Kappa") ax[1].set_yticklabels([]) ax[2].set_yticklabels([]) ax[4].set_yticklabels([]) ax[5].set_yticklabels([]) ax[4].set_xlabel(r"Processed instances ($\times 10^3$)") plt.subplots_adjust(hspace=0.5) plt.savefig("images/gradual_drift.pdf", dpi=500) # -- Incremental -- plt.rcParams.update({"font.size": 7}) fig_width_pt = 347.12354 * 1.0 # Get this from LaTeX using \showthe\columnwidth inches_per_pt = 1.0 / 72.27 # Convert pt to inches golden_mean = (math.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio fig_width = fig_width_pt * inches_per_pt # width in inches fig_height = 0.75 * fig_width * golden_mean # height in inches fig_size = [fig_width, fig_height] selected_datasets = ["RBFi-slow", "RBFi-fast", "LED-4"] 
fig = plt.figure(figsize=fig_size) gs = GridSpec(2, 3 * 2) ax = [] for i, d in enumerate(selected_datasets): axi = fig.add_subplot(gs[i * 2 : (i * 2 + 2)]) axi.set_title(d) for m in models: axi.plot( data[d][m].instances / 1000, data[d][m].metric, label=m, c=colors[m], linewidth=0.7, alpha=0.8, ) axi.set_ylim((-0.1, 1.1)) axi.set_xticks([0, 250, 500]) axi.set_xticklabels([0, 250, 500]) ax.append(axi) ax[0].set_ylabel("Kappa") ax[1].set_yticklabels([]) ax[2].set_yticklabels([]) ax[1].set_xlabel(r"Processed instances ($\times 10^3$)") plt.subplots_adjust(hspace=0.5) plt.savefig("images/incremental_drift.pdf", dpi=500) ``` ## Attempt to create a metric to evaluate concept drift recovery ``` from scipy import stats import numpy as np _data = [] for m in models: for row in data["RTGg"][m].values: _data.append([row[1], row[2], m]) _data = pd.DataFrame(_data, columns=["Instances", "Kappa", "Model"]) def estimate_maxima(data): kde = stats.gaussian_kde(data) no_samples = 1000 samples = np.linspace(0, 10, no_samples) probs = kde.evaluate(samples) maxima_index = probs.argmax() maxima = samples[maxima_index] return maxima drift = 250e3 df_metric = pd.DataFrame(columns=["Model", "Metric"]) df_metric_abs = pd.DataFrame(columns=["Model", "Metric"]) for m in models: mean1 = estimate_maxima( _data[(_data["Model"] == m) & (_data["Instances"] <= drift)].Kappa ) mean2 = estimate_maxima( _data[(_data["Model"] == m) & (_data["Instances"] >= drift)].Kappa ) mean_diff = max(mean1, mean2) - min(mean1, mean2) df_metric = df_metric.append({"Model": m, "Metric": mean1}, ignore_index=True) df_metric = df_metric.append({"Model": m, "Metric": mean2}, ignore_index=True) df_metric_abs = df_metric_abs.append( {"Model": m, "Metric": mean_diff}, ignore_index=True ) display(df_metric_abs) hue_order = ["MLP", "LSTM", "CNN", "TCN"] fig, ax = plt.subplots(nrows=1, ncols=6, figsize=(14, 4), sharey=True) gs = ax[1].get_gridspec() ax[1].remove() ax[2].remove() axbig = fig.add_subplot(gs[1:3]) sns.kdeplot( y="Kappa", data=_data[_data["Instances"] <= drift], hue="Model", ax=ax[0], hue_order=hue_order, palette=colors, ) sns.kdeplot( y="Kappa", data=_data[_data["Instances"] >= drift], hue="Model", ax=ax[3], hue_order=hue_order, palette=colors, ) sns.boxplot( x="Model", y="Metric", data=df_metric, ax=ax[4], hue_order=hue_order, palette=colors ) sns.barplot( x="Model", y="Metric", data=df_metric_abs, ax=ax[5], hue_order=hue_order, palette=colors, ) sns.lineplot( x="Instances", y="Kappa", data=_data, hue="Model", ax=axbig, hue_order=hue_order, palette=colors, ) axbig.axvline(x=drift, linestyle="--", color="grey") ax[0].get_legend().remove() ax[3].get_legend().remove() ax[0].invert_xaxis() ax[3].set_ylabel("") ax[4].set_ylabel("") axbig.set_ylim(ax[0].get_ylim()) axbig.set_yticklabels([]) axbig.set_ylabel("") plt.savefig("images/drift_evaluation_metric.png", dpi=500) plt.show() ```
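The recovery-metric cell above estimates the mode of the Kappa distribution with a Gaussian KDE before and after the drift point, but it accumulates rows with `DataFrame.append` (deprecated and removed in recent pandas releases) and samples the KDE on a 0–10 grid even though Kappa lies roughly in [-0.1, 1]. A minimal sketch of the same idea packaged as a reusable helper — the names `kde_mode` and `kappa_mode_shift` are ours, and the column names follow the `_data` frame built above:

```
import numpy as np
import pandas as pd
from scipy import stats


def kde_mode(values, grid=np.linspace(-0.1, 1.1, 1000)):
    """Location of the highest KDE density (a mode estimate) over the Kappa range."""
    kde = stats.gaussian_kde(values)
    return grid[kde.evaluate(grid).argmax()]


def kappa_mode_shift(frame, drift_point):
    """Per-model shift of the Kappa KDE mode before vs. after the drift point."""
    rows = []
    for model, grp in frame.groupby("Model"):
        before = kde_mode(grp.loc[grp["Instances"] <= drift_point, "Kappa"])
        after = kde_mode(grp.loc[grp["Instances"] >= drift_point, "Kappa"])
        rows.append({"Model": model, "Before": before, "After": after,
                     "Shift": abs(after - before)})
    return pd.DataFrame(rows)


# Usage with the _data frame defined above:
# kappa_mode_shift(_data, drift_point=250e3)
```

Collecting the rows in a plain list and building the DataFrame once avoids the deprecated append pattern while keeping the per-model before/after comparison identical.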
# Task 3

# Imports

```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

plt.style.use("ggplot")
plt.rcParams.update(
    {"font.size": 16, "axes.labelweight": "bold", "figure.figsize": (8, 6)}
)

## add any other packages that you need. You are free to use any packages for visualization.
```

## Part 1:

Recall that the final goal of this project is to build and deploy ensemble machine learning models in the cloud, where the features are the outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud.

**Your tasks:**

1. Read the data CSV from your S3 bucket.
2. Drop rows with NaNs.
3. Split the data into train (80%) and test (20%) portions with `random_state=123`.
4. Carry out EDA of your choice on the train split.
5. Train an ensemble machine learning model using `RandomForestRegressor` and evaluate it with a metric of your choice (e.g., `RMSE`), considering `Observed` (the `observed_rainfall` column) as the target.
6. Discuss your results. Are you getting better results with the ensemble model compared to the individual climate models?

> Recall that the individual columns in the data are predictions of different climate models.

```
## Depending on the permissions that you provided to your bucket you might need to provide your aws credentials
## to read from the bucket, if so provide with your credentials and pass as storage_options=aws_credentials
# aws_credentials = {"key": "","secret": "","token":""}
df = pd.read_csv("s3://mds-s3-25/output/ml_data_SYD.csv", index_col=0, parse_dates=True)
df.info()

df.dropna(inplace=True)
df.info()

train_df, test_df = train_test_split(df, test_size=0.2, random_state=123)

# EDA
train_df.describe(include="all")

# EDA
X_train_mean = train_df.mean(axis=0)
X_train_mean.plot(figsize=(12, 9), title="Mean Rainfall from Different Models");

X_train = train_df.drop(columns=["observed_rainfall"])
y_train = train_df["observed_rainfall"]
X_test = test_df.drop(columns=["observed_rainfall"])
y_test = test_df["observed_rainfall"]

# fit training set
rf_model = RandomForestRegressor(random_state=25)
rf_model.fit(X_train, y_train)

# predict on test set
y_predict = rf_model.predict(X_test)
print(y_predict)

# Calculate RMSE of the ensemble model
rmse = np.sqrt(mean_squared_error(y_test, y_predict))
rmse

# Calculate RMSE of the individual climate models
rmse_models = []
for c in X_test.columns:
    rmse_models.append(np.sqrt(mean_squared_error(y_test, X_test[c])))
rmse_models
```

### Discussion

Yes: the RMSE of the ensemble model is smaller than that of every individual climate model, so the ensemble gives better results.

## Part 2:

### Preparation for deploying model next week

***NOTE: Complete task 4 from milestone 3 before coming here.***

We found the best hyperparameter settings with MLlib (task 4 of milestone 3); here we use those same hyperparameters to train a scikit-learn model.

```
model = RandomForestRegressor(random_state=25, n_estimators=50, max_depth=5)
model.fit(X_train, y_train)

print(
    f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}"
)
print(
    f" Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}"
)

# ready to deploy
dump(model, "model.joblib")
```

***Upload model.joblib to S3 under the output folder. You choose how you want to upload it (using the CLI, SDK, or web console).***
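For the upload step, one SDK option is `boto3`. A sketch, assuming the same `mds-s3-25` bucket used for the CSV above and AWS credentials already configured in the environment (the equivalent CLI command would be `aws s3 cp model.joblib s3://mds-s3-25/output/model.joblib`):

```
import boto3

# Upload the serialized model next to the other pipeline outputs in the bucket
s3 = boto3.client("s3")
s3.upload_file(
    Filename="model.joblib",
    Bucket="mds-s3-25",
    Key="output/model.joblib",
)
```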
``` import os import pandas as pd %matplotlib inline from newsapi.newsapi_client import NewsApiClient from dotenv import load_dotenv from nltk.sentiment.vader import SentimentIntensityAnalyzer analyzer = SentimentIntensityAnalyzer() ``` # News Headlines Sentiment Use the news api to pull the latest news articles for bitcoin and ethereum and create a DataFrame of sentiment scores for each coin. Use descriptive statistics to answer the following questions: 1. Which coin had the highest mean positive score? 2. Which coin had the highest negative score? 3. Which coin had the highest positive score? ``` # Read your api key environment variable load_dotenv() # Create a newsapi client newsapi=NewsApiClient(api_key=os.environ['news_api']) # Fetch the Bitcoin news articles bitcoin_articles=newsapi.get_everything(q='bitcoin', language='en', sort_by='relevancy') # Fetch the Ethereum news articles ethereum_articles=newsapi.get_everything(q='ethereum', language='en', sort_by='relevancy') # Create the Bitcoin sentiment scores DataFrame bitcoin_sentiments = [] for article in bitcoin_articles["articles"]: try: text = article["content"] sentiment = analyzer.polarity_scores(text) compound = sentiment["compound"] pos = sentiment["pos"] neu = sentiment["neu"] neg = sentiment["neg"] bitcoin_sentiments.append({ "text": text, "Compound": compound, "Positive": pos, "Negative": neg, "Neutral": neu }) except AttributeError: pass # Create DataFrame bitcoin_df = pd.DataFrame(bitcoin_sentiments) # Reorder DataFrame columns cols = ["Compound", "Negative", "Neutral", "Positive", "text"] bitcoin_df = bitcoin_df[cols] bitcoin_df.head() # Create the ethereum sentiment scores DataFrame ethereum_sentiments = [] for article in ethereum_articles["articles"]: try: text = article["content"] sentiment = analyzer.polarity_scores(text) compound = sentiment["compound"] pos = sentiment["pos"] neu = sentiment["neu"] neg = sentiment["neg"] ethereum_sentiments.append({ "text": text, "Compound": compound, "Positive": pos, "Negative": neg, "Neutral": neu }) except AttributeError: pass # Create DataFrame ethereum_df = pd.DataFrame(ethereum_sentiments) # Reorder DataFrame columns cols = ["Compound", "Negative", "Neutral", "Positive", "text"] ethereum_df = ethereum_df[cols] ethereum_df.head() # Describe the Bitcoin Sentiment bitcoin_df.describe() # Describe the Ethereum Sentiment ethereum_df.describe() ``` ### Questions: Q: Which coin had the highest mean positive score? A: Ethereum had the highest mean positive score. Q: Which coin had the highest compound score? A: Ethereum had the highest compound score. Q. Which coin had the highest positive score? A: Ethereum had the highest positive score. --- # Tokenizer In this section, you will use NLTK and Python to tokenize the text for each coin. Be sure to: 1. Lowercase each word 2. Remove Punctuation 3. 
Remove Stopwords ``` from nltk.tokenize import word_tokenize, sent_tokenize from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer, PorterStemmer from string import punctuation import re # Expand the default stopwords list if necessary lemmatizer = WordNetLemmatizer() # Complete the tokenizer function def tokenizer(text): sw = set(stopwords.words('english')) regex = re.compile("[^a-zA-Z ]") re_clean = regex.sub('', text) words = word_tokenize(re_clean) lem = [lemmatizer.lemmatize(word) for word in words] tokens = [word.lower() for word in lem if word.lower() not in sw] return tokens # Create a new tokens column for bitcoin bitcoin_tokens=[] for article_text in bitcoin_df['text']: article_tokens= tokenizer(article_text) bitcoin_tokens.append(article_tokens) bitcoin_df['tokens']=bitcoin_tokens bitcoin_df.head() # Create a new tokens column for ethereum ethereum_tokens=[] for article_text in ethereum_df['text']: article_tokens= tokenizer(article_text) ethereum_tokens.append(article_tokens) ethereum_df['tokens']=ethereum_tokens ethereum_df.head() ``` --- # NGrams and Frequency Analysis In this section you will look at the ngrams and word frequency for each coin. 1. Use NLTK to produce the n-grams for N = 2. 2. List the top 10 words for each coin. ``` from collections import Counter from nltk import ngrams # Generate the Bitcoin N-grams where N=2 btc_flat_list = [] for sublist in bitcoin_tokens: for item in sublist: btc_flat_list.append(item) bigram_counts_btc = Counter(ngrams(btc_flat_list, n=2)) dict(bigram_counts_btc.most_common(23)) # Generate the Ethereum N-grams where N=2 eth_flat_list = [] for sublist in ethereum_tokens: for item in sublist: eth_flat_list.append(item) bigram_counts_eth = Counter(ngrams(eth_flat_list, n=2)) dict(bigram_counts_eth.most_common(23)) # Use the token_count function to generate the top 10 words from each coin def token_count(tokens, N=10): """Returns the top N tokens from the frequency count""" return Counter(tokens).most_common(N) # Get the top 10 words for Bitcoin btc_counts=Counter(ngrams(btc_flat_list, n=1)) btc_counts.most_common(10) # Get the top 10 words for Ethereum eth_counts=Counter(ngrams(eth_flat_list, n=1)) eth_counts.most_common(10) ``` # Word Clouds In this section, you will generate word clouds for each coin to summarize the news for each coin ``` from wordcloud import WordCloud import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') import matplotlib as mpl mpl.rcParams['figure.figsize'] = [20.0, 10.0] # Generate the Bitcoin word cloud btc_string = ' '.join([str(elem) for elem in btc_flat_list]) wc = WordCloud().generate(btc_string) plt.imshow(wc) # Generate the Ethereum word cloud eth_string = ' '.join([str(elem) for elem in eth_flat_list]) wc = WordCloud().generate(eth_string) plt.imshow(wc) ``` # Named Entity Recognition In this section, you will build a named entity recognition model for both coins and visualize the tags using SpaCy. 
``` import spacy from spacy import displacy # Optional - download a language model for SpaCy # !python -m spacy download en_core_web_sm # Load the spaCy model nlp = spacy.load('en_core_web_sm') ``` ## Bitcoin NER ``` # Concatenate all of the bitcoin text together btc_concat = ' '.join(bitcoin_df["text"]) # Run the NER processor on all of the text doc = nlp(btc_concat) displacy.render(doc, style='ent') # Add a title to the document # Render the visualization # List all Entities for ent in doc.ents: print(ent.text, ent.label_) ``` --- ## Ethereum NER ``` # Concatenate all of the ethereum text together # Run the NER processor on all of the text # Add a title to the document # Render the visualization # List all Entities ```
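The Ethereum NER cell above is left as placeholder comments. A completion that simply mirrors the Bitcoin cell might look like the following sketch; it reuses the `nlp` pipeline and `ethereum_df` defined earlier, and sets the optional displaCy title via `user_data`:

```
# Concatenate all of the ethereum text together
eth_concat = ' '.join(ethereum_df["text"])

# Run the NER processor on all of the text
eth_doc = nlp(eth_concat)

# Add a title to the document and render the visualization
eth_doc.user_data["title"] = "Ethereum NER"
displacy.render(eth_doc, style='ent')

# List all Entities
for ent in eth_doc.ents:
    print(ent.text, ent.label_)
```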
# Module 2: Scraping with Selenium

## LATAM Airlines

<a href="https://www.latam.com/es_ar/"><img src="https://i.pinimg.com/originals/dd/52/74/dd5274702d1382d696caeb6e0f6980c5.png" width="420"></img></a>
<br>

We are going to scrape the LATAM site to obtain flight data as a function of origin and destination, date, and cabin. The information we expect to extract for each flight is:
- Available price(s)
- Departure and arrival times (duration)
- Stopover information

**Let's get started!**

Let's use what we have learned so far to reach the proposed goal.

```
import requests
from bs4 import BeautifulSoup

url = 'https://www.latam.com/es_ar/apps/personas/booking?fecha1_dia=18&fecha1_anomes=2019-12&auAvailability=1&ida_vuelta=ida&vuelos_origen=Buenos%20Aires&from_city1=BUE&vuelos_destino=Madrid&to_city1=MAD&flex=1&vuelos_fecha_salida_ddmmaaaa=18/12/2019&cabina=Y&nadults=1&nchildren=0&ninfants=0&cod_promo=#/'
r = requests.get(url)
r.status_code

s = BeautifulSoup(r.text, 'lxml')
print(s.prettify())
```

We can see that the page's response does not contain the information we are looking for, since it only appears after the JavaScript code included in the response is executed.

## Selenium

Selenium is a tool that lets us control a browser, so we can rely on the JavaScript engine to load the content that does not come in the page's HTML. For this we need the `webdriver` module.

```
from selenium import webdriver
```

Step 1: instantiate a browser **driver**

```
options = webdriver.ChromeOptions()
options.add_argument('--incognito')

driver = webdriver.Chrome(executable_path='../chromedriver', options=options)
```

Step 2: have the browser load the web page.

```
driver.get(url)
```

Step 3: extract the information from the page

```
vuelos = driver.find_elements_by_xpath('//li[@class="flight"]')
vuelos

vuelo = vuelos[0]
vuelo

# Departure time
vuelo.find_element_by_xpath('.//div[@class="departure"]/time').get_attribute('datetime')

# Arrival time
vuelo.find_element_by_xpath('.//div[@class="arrival"]/time').get_attribute('datetime')

# Flight duration
vuelo.find_element_by_xpath('.//span[@class="duration"]/time').get_attribute('datetime')

boton_escalas = vuelo.find_element_by_xpath('.//div[@class="flight-summary-stops-description"]/button')
boton_escalas.click()

segmentos = vuelo.find_elements_by_xpath('//div[@class="segments-graph"]/div[@class="segments-graph-segment"]')
segmentos

escalas = len(segmentos) - 1
escalas

segmento = segmentos[0]
segmento.find_element_by_xpath('.//div[@class="departure"]/span[@class="ground-point-name"]').text

segmento.find_element_by_xpath('.//div[@class="departure"]/time').get_attribute('datetime')

driver.find_element_by_xpath('//div[@class="modal-dialog"]//button[@class="close"]').click()

vuelo.click()

tarifas = vuelo.find_elements_by_xpath('.//div[@class="fares-table-container"]//tfoot//td[contains(@class, "fare-")]')
tarifas
```

Step 4: close the browser

```
driver.close()
```
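Because the flight results are rendered by JavaScript after the initial page load, grabbing the elements immediately after `driver.get` can fail if the script runs before the page finishes loading. An explicit wait is a common safeguard; a sketch using the same `url`, driver options, and flight XPath as above:

```
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

options = webdriver.ChromeOptions()
options.add_argument('--incognito')
driver = webdriver.Chrome(executable_path='../chromedriver', options=options)
driver.get(url)

try:
    # Wait up to 30 seconds for at least one flight entry to be present in the DOM
    vuelos = WebDriverWait(driver, 30).until(
        EC.presence_of_all_elements_located((By.XPATH, '//li[@class="flight"]'))
    )
    print(len(vuelos), 'flights found')
finally:
    driver.close()
```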
# Source-Free RL Circuits

Jupyter Notebook developed by [Gustavo S.S.](https://github.com/GSimas)

Consider the series connection of a resistor and an inductor, as shown in Figure 7.11. At t = 0, we assume that the inductor has an initial current I0.

\begin{align}
i(0) = I_0
\end{align}

The corresponding energy stored in the inductor is then:

\begin{align}
w(0) = \frac{1}{2} L I_0^2
\end{align}

Applying KVL around the loop gives L di/dt + Ri = 0, that is, di/i = -(R/L) dt; integrating from 0 to t and exponentiating, we obtain:

\begin{align}
i(t) = I_0 e^{-tR/L}
\end{align}

![](http://i.imgur.com/uSDKiS3.png)

This shows that the natural response of an RL circuit is an exponential decay of the initial current. The current response is shown in Figure 7.12. It is evident from the equation that the time constant of the RL circuit is:

\begin{align}
\tau = \frac{L}{R}
\end{align}

![](http://i.imgur.com/qmoBboA.png)

The voltage across the resistor is:

\begin{align}
v_R(t) = I_0 R e^{-t/\tau}
\end{align}

The power dissipated in the resistor is:

\begin{align}
p = v_R i = I_0^2 R e^{-2t/\tau}
\end{align}

The energy absorbed by the resistor is:

\begin{align}
w_R(t) = \int_{0}^{t} p(t)dt = \frac{1}{2} L I_0^2 (1 - e^{-2t/\tau})
\end{align}

**As t → ∞, w_R(∞) → ½ L I_0², which is the same as w_L(0), the energy initially stored in the inductor.**

Thus, the procedure is:

1. Determine the initial current i(0) = I0 through the inductor.
2. Determine the time constant τ = L/R.

**Example 7.3**

Assuming that i(0) = 10 A, calculate i(t) and ix(t) in the circuit of Figure 7.13.

![](https://i.imgur.com/VVqoaiM.png)

```
print("Example 7.3")

import numpy as np
from sympy import *

I0 = 10
L = 0.5
R1 = 2
R2 = 4
t = symbols('t')

# Determine Req = Rth
# Hypothetical Io = 1 A
# Mesh analysis
# 4i2 + 2(i2 - i0) = -3i0
# 6i2 = 5
# i2 = 5/6
# ix' = i2 - i1 = 5/6 - 1 = -1/6
# Vr1 = ix' * R1 = -1/6 * 2 = -1/3
# Rth = Vr1/i0 = (-1/3)/(-1) = 1/3
Rth = 1/3
tau = L/Rth
i = I0*exp(-t/tau)
print("Current i(t):",i,"A")

vl = L*diff(i,t)
ix = vl/R1
print("Current ix(t):",ix,"A")
```

**Practice Problem 7.3**

Determine i and vx in the circuit of Figure 7.15. Let i(0) = 12 A.

![](https://i.imgur.com/uwR39A9.png)

```
print("Practice Problem 7.3")

L = 2
I0 = 12
R1 = 1

# Determine Req = Rth
# Hypothetical i0 = 1 A
# vx = 4 V
# vx + 2(i0 - i1) + 2vx - v0 = 0
# -2i1 - v0 = -14
# -2vx + 2(i1 - i0) + 6i1 = 0
# 8i1 = 10
# i1 = 10/8 = 5/4
# v0 = vx + 2(i0 - i1) + 2vx
# v0 = 4 + 2 - 5/2 + 8 = 11.5
# Rth = v0/i0 = 11.5/1 = 11.5
Rth = 11.5
tau = L/Rth
i = I0*exp(-t/tau)
print("Current i(t):",i,"A")

vx = -R1*i
print("Voltage vx(t):",vx,"V")
```

**Example 7.4**

The switch in the circuit of Figure 7.16 has been closed for a long time. At t = 0, the switch is opened. Calculate i(t) for t > 0.

![](https://i.imgur.com/gF1mXJT.png)

```
print("Example 7.4")

Vs = 40
L = 2

def Req(x,y): # function to compute the equivalent parallel resistance
    res = (x*y)/(x + y)
    return res

Req1 = Req(4,12)
V1 = Vs*Req1/(Req1 + 2)
I0 = V1/4

Req2 = 12 + 4
Rth = Req(Req2, 16)
tau = L/Rth
i = I0*exp(-t/tau)
print("Current i(t):",i,"A")
```

**Practice Problem 7.4**

For the circuit of Figure 7.18, determine i(t) for t > 0.

![](https://i.imgur.com/7XTimt9.png)

```
print("Practice Problem 7.4")

L = 2
Cs = 15
R1 = 24

Req1 = Req(12,8)
i1 = Cs*R1/(R1 + Req1)
I0 = i1*8/(8 + 12)

Rth = Req(12+8,5)
tau = L/Rth
i = I0*exp(-t/tau)
print("Current i(t):",i,"A")
```

**Example 7.5**

In the circuit shown in Figure 7.19, find io, vo, and i for all time, assuming that the switch had been open for a long time.

![](https://i.imgur.com/SIls1dG.png)

```
print("Example 7.5")

Vs = 10
L = 2

print("For t < 0, i0:",0,"A")

I0 = Vs/(2 + 3)
v0 = 3*I0
print("For t < 0, i:",I0,"A")
print("For t < 0, v0:",v0,"V")

Rth = Req(3,6)
tau = L/Rth
i = I0*exp(-t/tau)
v0 = -L*diff(i,t)
i0 = -i*3/(3 + 6)
print("For t > 0, i0:",i0,"A")
print("For t > 0, v0:",v0,"V")
print("For t > 0, i:",i,"A")
```

**Practice Problem 7.5**

Determine i, io, and vo for all t in the circuit shown in Figure 7.22.

![](https://i.imgur.com/aBRyPkM.png)

```
print("Practice Problem 7.5")

Cs = 24
L = 1

# For t < 0
i = Cs*4/(4 + 2)
i0 = Cs*2/(2 + 4)
v0 = 2*i
print("For t < 0, i =",i,"A")
print("For t < 0, i0 =",i0,"A")
print("For t < 0, v0 =",v0,"V")

# For t > 0
R = Req(4 + 2,3)
tau = L/R
I0 = i
i = I0*exp(-t/tau)
i0 = -i*3/(3 + 4 + 2)
v0 = -i0*2
print("For t > 0, i =",i,"A")
print("For t > 0, i0 =",i0,"A")
print("For t > 0, v0 =",v0,"V")
```
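As a quick check of the energy relation stated above, SymPy can integrate the resistor power from 0 to ∞ and confirm that the total absorbed energy equals the initial inductor energy ½ L I0². A small verification sketch, independent of the numerical examples:

```
from sympy import symbols, exp, integrate, simplify, Rational, oo

t = symbols('t', positive=True)
I0, R, L = symbols('I_0 R L', positive=True)

tau = L / R
p = I0**2 * R * exp(-2*t/tau)       # power dissipated in the resistor
w_total = integrate(p, (t, 0, oo))  # energy absorbed as t -> infinity

# Prints 0, confirming w_R(oo) = (1/2) L I0^2
print(simplify(w_total - Rational(1, 2) * L * I0**2))
```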