<a href="https://colab.research.google.com/github/cxbxmxcx/EvolutionaryDeepLearning/blob/main/EDL_6_5_Keras_GA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Original source: https://github.com/zinsmatt/Neural-Network-Numpy/blob/master/neural-network.py (created on Thu Nov 15 20:42:52 2018 by matthieu).
```
#@title Install DEAP
!pip install deap --quiet
#@title Imports
import tensorflow as tf
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
import matplotlib.pyplot as plt
from IPython.display import clear_output
#DEAP
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import creator
from deap import tools
import random
#@title Dataset Parameters { run: "auto" }
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
X, Y = x_train / 255.0, y_train
plt.imshow(X[0])
print(Y[0])
#@title Define Keras Model
middle_layer = 128 #@param {type:"slider", min:16, max:128, step:2}
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(middle_layer, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
optimizer = tf.keras.optimizers.Adam(learning_rate=.001)
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.summary()
trainableParams = np.sum([np.prod(v.get_shape()) for v in model.trainable_weights])
print(f"Trainable parameters: {trainableParams}")
def score_model():
    # Accuracy of the current model weights on the test set
    y_hat = model.predict(x_test)
    acc = [np.argmax(y) == y_test[i] for i, y in enumerate(y_hat)]
    return sum(acc) / len(acc)

def print_parameters():
    for layer in model.layers:
        for na in layer.get_weights():
            print(na)

def set_parameters(individual):
    # Unpack a flat genome into the layer weight tensors of the Keras model
    idx = 0
    tensors = []
    for layer in model.layers:
        for na in layer.get_weights():
            size = na.size
            sh = na.shape
            t = individual[idx:idx + size]
            t = np.array(t)
            t = np.reshape(t, sh)
            idx += size
            tensors.append(t)
    model.set_weights(tensors)

individual = np.random.rand(trainableParams)
set_parameters(individual)
print(score_model())
print_parameters()
#@title Setting up the Creator
# Note: the fitness value is 1/accuracy (see evaluate below), so the weight is negative (minimization)
creator.create("FitnessMax", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
#@title Create Individual and Population
def uniform(low, up, size=None):
    try:
        return [random.uniform(a, b) for a, b in zip(low, up)]
    except TypeError:
        return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
toolbox = base.Toolbox()
toolbox.register("attr_float", uniform, -1, 1, trainableParams)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("select", tools.selTournament, tournsize=5)
def customBlend(ind1, ind2):
    # Average the two parents element-wise (both children become the midpoint)
    for i, (x1, x2) in enumerate(zip(ind1, ind2)):
        ind1[i] = (x1 + x2) / 2
        ind2[i] = (x1 + x2) / 2
    return ind1, ind2
#toolbox.register("mate", tools.cxBlend, alpha=.5)
toolbox.register("mate", customBlend)
toolbox.register("mutate", tools.mutGaussian, mu=0.0, sigma=.1, indpb=.25)
def evaluate(individual):
    # Load the genome into the network and return 1/accuracy (to be minimized)
    set_parameters(individual)
    print('.', end='')
    return 1. / score_model(),
toolbox.register("evaluate", evaluate)
#@title Optimize the Weights { run: "auto" }
MU = 25 #@param {type:"slider", min:5, max:1000, step:5}
NGEN = 1000 #@param {type:"slider", min:100, max:1000, step:10}
RGEN = 10 #@param {type:"slider", min:1, max:100, step:1}
CXPB = .6
MUTPB = .3
random.seed(64)
pop = toolbox.population(n=MU)
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
from sklearn.metrics import classification_report
best = None
history = []
for g in range(NGEN):
    pop, logbook = algorithms.eaSimple(pop, toolbox,
                                       cxpb=CXPB, mutpb=MUTPB, ngen=RGEN,
                                       stats=stats, halloffame=hof, verbose=False)
    best = hof[0]
    clear_output()
    print(f"Gen ({(g+1)*RGEN})")
    history.extend([1/l["min"] for l in logbook])
    plt.plot(history)
    plt.show()
    set_parameters(best)
    accuracy = score_model()
    print("Best Neural Network accuracy : ", accuracy)
    if accuracy > .99999:  # stop condition
        break
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
print(classification_report(y_test, y_pred))
```
# Predicting prices with a single-asset regression model
## Preparing the independent and target variables
```
from alpha_vantage.timeseries import TimeSeries
# Update your Alpha Vantage API key here...
ALPHA_VANTAGE_API_KEY = 'PZ2ISG9CYY379KLI'
ts = TimeSeries(key=ALPHA_VANTAGE_API_KEY, output_format='pandas')
df_jpm, meta_data = ts.get_daily_adjusted(
symbol='JPM', outputsize='full')
df_gs, meta_data = ts.get_daily_adjusted(
symbol='GS', outputsize='full')
import pandas as pd
df_x = pd.DataFrame({'GS': df_gs['5. adjusted close']})
jpm_prices = df_jpm['5. adjusted close']
```
## Writing the linear regression model
```
from sklearn.linear_model import LinearRegression
class LinearRegressionModel(object):
    def __init__(self):
        self.df_result = pd.DataFrame(columns=['Actual', 'Predicted'])

    def get_model(self):
        return LinearRegression(fit_intercept=False)

    def learn(self, df, ys, start_date, end_date, lookback_period=20):
        model = self.get_model()
        for date in df[start_date:end_date].index:
            # Fit the model
            x = self.get_prices_since(df, date, lookback_period)
            y = self.get_prices_since(ys, date, lookback_period)
            model.fit(x, y.ravel())

            # Predict the current period
            x_current = df.loc[date].values
            [y_pred] = model.predict([x_current])

            # Store predictions
            new_index = pd.to_datetime(date, format='%Y-%m-%d')
            y_actual = ys.loc[date]
            self.df_result.loc[new_index] = [y_actual, y_pred]

    def get_prices_since(self, df, date_since, lookback):
        index = df.index.get_loc(date_since)
        return df.iloc[index-lookback:index]
linear_reg_model = LinearRegressionModel()
linear_reg_model.learn(df_x, jpm_prices, start_date='2018',
end_date='2019', lookback_period=20)
%matplotlib inline
linear_reg_model.df_result.plot(
title='JPM prediction by OLS',
style=['-', '--'], figsize=(12,8));
```
## Risk metrics for measuring prediction performance
### Mean absolute error (MAE) as a risk metric
```
from sklearn.metrics import mean_absolute_error
actual = linear_reg_model.df_result['Actual']
predicted = linear_reg_model.df_result['Predicted']
mae = mean_absolute_error(actual, predicted)
print('mean absolute error:', mae)
```
### Mean squared error (MSE) as a risk metric
```
from sklearn.metrics import mean_squared_error
mse = mean_squared_error(actual, predicted)
print('mean squared error:', mse)
```
### Explained variance score as a risk metric
```
from sklearn.metrics import explained_variance_score
eva = explained_variance_score(actual, predicted)
print('explained variance score:', eva)
```
### R<sup>2</sup> as a risk metric
```
from sklearn.metrics import r2_score
r2 = r2_score(actual, predicted)
print('r2 score:', r2)
```
## Ridge regression
```
from sklearn.linear_model import Ridge
class RidgeRegressionModel(LinearRegressionModel):
    def get_model(self):
        return Ridge(alpha=.5)
ridge_reg_model = RidgeRegressionModel()
ridge_reg_model.learn(df_x, jpm_prices, start_date='2018',
end_date='2019', lookback_period=20)
from sklearn.metrics import (
    mean_absolute_error, mean_squared_error,
    explained_variance_score, r2_score
)

def print_regression_metrics(df_result):
    actual = list(df_result['Actual'])
    predicted = list(df_result['Predicted'])
    print('mean_absolute_error:',
          mean_absolute_error(actual, predicted))
    print('mean_squared_error:', mean_squared_error(actual, predicted))
    print('explained_variance_score:',
          explained_variance_score(actual, predicted))
    print('r2_score:', r2_score(actual, predicted))
print_regression_metrics(ridge_reg_model.df_result)
```
# Predicting returns with a cross-asset momentum model
## Preparing the independent variables
```
df_spx, meta_data = ts.get_daily_adjusted(
symbol='SPX', outputsize='full')
df_gld, meta_data = ts.get_daily_adjusted(
symbol='GLD', outputsize='full')
df_dxy, dxy_meta_data = ts.get_daily_adjusted(
symbol='UUP', outputsize='full')
df_ief, meta_data = ts.get_daily_adjusted(
symbol='IEF', outputsize='full')
import pandas as pd
df_assets = pd.DataFrame({
'SPX': df_spx['5. adjusted close'],
'GLD': df_gld['5. adjusted close'],
'UUP': df_dxy['5. adjusted close'],
'IEF': df_ief['5. adjusted close'],
}).dropna()
df_assets_1m = df_assets.pct_change(periods=20)
df_assets_1m.columns = ['%s_1m'%col for col in df_assets.columns]
df_assets_3m = df_assets.pct_change(periods=60)
df_assets_3m.columns = ['%s_3m'%col for col in df_assets.columns]
df_assets_6m = df_assets.pct_change(periods=120)
df_assets_6m.columns = ['%s_6m'%col for col in df_assets.columns]
df_assets_12m = df_assets.pct_change(periods=240)
df_assets_12m.columns = ['%s_12m'%col for col in df_assets.columns]
df_lagged = df_assets_1m.join(df_assets_3m)\
.join(df_assets_6m)\
.join(df_assets_12m)\
.dropna()
df_lagged.info()
```
## Preparing the target variables
```
y = jpm_prices.pct_change().dropna()
multi_linear_model = LinearRegressionModel()
multi_linear_model.learn(df_lagged, y, start_date='2018',
end_date='2019', lookback_period=10)
multi_linear_model.df_result.plot(
title='JPM actual versus predicted percentage returns',
style=['-', '--'], figsize=(12,8));
print_regression_metrics(multi_linear_model.df_result)
```
## An ensemble of decision trees
### Bagging regressor
```
from sklearn.ensemble import BaggingRegressor
class BaggingRegressorModel(LinearRegressionModel):
    def get_model(self):
        return BaggingRegressor(n_estimators=20, random_state=0)
bagging = BaggingRegressorModel()
bagging.learn(df_lagged, y, start_date='2018',
end_date='2019', lookback_period=10)
print_regression_metrics(bagging.df_result)
```
# Predicting trends with classification-based machine learning
## Preparing the target variables
```
import numpy as np
y_direction = y >= 0
y_direction.head(3)
flags = list(y_direction.unique())
flags.sort()
print(flags)
```
## Preparing the dataset of multiple assets as input variables
```
df_input = df_assets_1m.join(df_assets_3m).dropna()
df_input.info()
```
## Logistic regression
```
from sklearn.linear_model import LogisticRegression
class LogisticRegressionModel(LinearRegressionModel):
    def get_model(self):
        return LogisticRegression(solver='lbfgs')
logistic_reg_model = LogisticRegressionModel()
logistic_reg_model.learn(df_input, y_direction, start_date='2018',
end_date='2019', lookback_period=100)
logistic_reg_model.df_result.head()
```
### Risk metrics for measuring classification-based predictions
### Confusion matrix
```
from sklearn.metrics import confusion_matrix
df_result = logistic_reg_model.df_result
actual = list(df_result['Actual'])
predicted = list(df_result['Predicted'])
matrix = confusion_matrix(actual, predicted)
print(matrix)
%matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
plt.subplots(figsize=(12,8))
sns.heatmap(matrix.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=flags, yticklabels=flags)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('JPM percentage returns 2018');
```
### Accuracy score
```
from sklearn.metrics import accuracy_score
print('accuracy_score:', accuracy_score(actual, predicted))
```
### Precision score
```
from sklearn.metrics import precision_score
print('precision_score:', precision_score(actual, predicted))
```
### Recall score
```
from sklearn.metrics import recall_score
print('recall_score:', recall_score(actual, predicted))
```
### F1 Score
```
from sklearn.metrics import f1_score
print('f1_score:', f1_score(actual, predicted))
```
## Support Vector Classifier
```
from sklearn.svm import SVC
class SVCModel(LogisticRegressionModel):
    def get_model(self):
        return SVC(C=1000, gamma='auto')
svc_model = SVCModel()
svc_model.learn(df_input, y_direction, start_date='2018',
end_date='2019', lookback_period=100)
df_result = svc_model.df_result
actual = list(df_result['Actual'])
predicted = list(df_result['Predicted'])
print('accuracy_score:', accuracy_score(actual, predicted))
print('precision_score:', precision_score(actual, predicted))
print('recall_score:', recall_score(actual, predicted))
print('f1_score:', f1_score(actual, predicted))
```
# Analyzing the UncertaintyForest Class by Reproducing Posterior Estimates
This set of four tutorials (`uncertaintyforest_running_example.ipynb`, `uncertaintyforest_posteriorestimates.ipynb`, `uncertaintyforest_conditionalentropyestimates.ipynb`, and `uncertaintyforest_mutualinformationestimates.ipynb`) will explain the UncertaintyForest class. After following these tutorials, you should have the ability to run UncertaintyForest on your own machine and generate Figures 1, 2, and 3 from [this paper](https://arxiv.org/pdf/1907.00325.pdf), which help you to visualize a comparison of the estimated posteriors and conditional entropy values for several different algorithms.
If you haven't seen it already, take a look at the other tutorials to set up and install the ProgLearn package: `installation_guide.ipynb`.
*Goal: Run the UncertaintyForest class to produce a figure that compares estimated posteriors for the UncertaintyForest, CART, and IRF algorithms, as in Figure 1 from [this paper](https://arxiv.org/pdf/1907.00325.pdf)*
## Import Required Packages
```
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from proglearn.forest import UncertaintyForest
from functions.unc_forest_tutorials_functions import estimate_posterior, plot_fig1
```
## Specify Parameters
```
# The following are two sets of parameters.
# The first are those that were actually used to produce Figure 1.
# These take a long time to actually run since there are 6000 data points.
# Below those, you'll find some scaled-down parameters so that you can see the results more quickly.
# Here are the paper reproduction parameters
#n = 6000
#mean = 1
#var = 1
#num_trials = 100
#X_eval = np.linspace(-2, 2, num = 30).reshape(-1, 1)
#n_estimators = 300
#num_plotted_trials = 10
# Here are the scaled-down tutorial parameters
n = 300 # number of data points
mean = 1 # mean of the data
var = 1 # variance of the data
num_trials = 3 # number of trials to run
X_eval = np.linspace(-2, 2, num = 10).reshape(-1, 1) # the evaluation span (over X) for the plot
n_estimators = 200 # the number of estimators
num_plotted_trials = 2 # the number of "fainter" lines to be displayed on the figure
```
## Specify Learners
Now, we'll specify which learners we'll compare. Figure 1 uses three different learners.
```
# Algorithms used to produce Figure 1
algos = [
{
'instance': RandomForestClassifier(n_estimators = n_estimators),
'label': 'CART',
'title': 'CART Forest',
'color': "#1b9e77",
},
{
'instance': CalibratedClassifierCV(base_estimator=RandomForestClassifier(n_estimators = n_estimators // 5),
method='isotonic',
cv = 5),
'label': 'IRF',
'title': 'Isotonic Reg. Forest',
'color': "#fdae61",
},
{
'instance': UncertaintyForest(n_estimators = n_estimators, tree_construction_proportion = 0.4, kappa = 3.0),
'label': 'UF',
'title': 'Uncertainty Forest',
'color': "#F41711",
},
]
# Plotting parameters
parallel = False
```
## Generate predicted posteriors
Now, we'll run the code to obtain the results that will be displayed in Figure 1.
```
# This is the code that actually generates data and predictions.
for algo in algos:
algo['predicted_posterior'] = estimate_posterior(algo, n, mean, var, num_trials, X_eval, parallel = parallel)
```
## Create Figure 1
```
plot_fig1(algos, num_plotted_trials, X_eval, n, mean, var)
```
# Classification
*Supervised* machine learning techniques involve training a model to operate on a set of *features* and predict a *label* using a dataset that includes some already-known label values. You can think of this function like this, in which ***y*** represents the label we want to predict and ***X*** represents the vector of features the model uses to predict it.
$$y = f([x_1, x_2, x_3, ...])$$
*Classification* is a form of supervised machine learning in which you train a model to use the features (the ***x*** values in our function) to predict a label (***y***) that calculates the probability of the observed case belonging to each of a number of possible classes, and predicting an appropriate label. The simplest form of classification is *binary* classification, in which the label is 0 or 1, representing one of two classes; for example, "True" or "False"; "Internal" or "External"; "Profitable" or "Non-Profitable"; and so on.
## Binary Classification
Let's start by looking at an example of *binary classification*, where the model must predict a label that belongs to one of two classes. In this exercise, we'll train a binary classifier to predict whether or not a patient should be tested for diabetes based on some medical data.
### Explore the data
Run the following cell to load a CSV file of patient data into a **Pandas** dataframe:
> **Citation**: The diabetes dataset used in this exercise is based on data originally collected by the National Institute of Diabetes and Digestive and Kidney Diseases.
```
import pandas as pd
# load the training dataset
diabetes = pd.read_csv('data/diabetes.csv')
diabetes.head()
```
This data consists of diagnostic information about some patients who have been tested for diabetes. Scroll to the right if necessary, and note that the final column in the dataset (**Diabetic**) contains the value ***0*** for patients who tested negative for diabetes, and ***1*** for patients who tested positive. This is the label that we will train our model to predict; most of the other columns (**Pregnancies**, **PlasmaGlucose**, **DiastolicBloodPressure**, and so on) are the features we will use to predict the **Diabetic** label.
Let's separate the features from the labels - we'll call the features ***X*** and the label ***y***:
```
# Separate features and labels
features = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']
label = 'Diabetic'
X, y = diabetes[features].values, diabetes[label].values
for n in range(0,4):
    print("Patient", str(n+1), "\n Features:",list(X[n]), "\n Label:", y[n])
```
Now let's compare the feature distributions for each label value.
```
from matplotlib import pyplot as plt
%matplotlib inline
features = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']
for col in features:
    diabetes.boxplot(column=col, by='Diabetic', figsize=(6,6))
    plt.title(col)
    plt.show()
```
For some of the features, there's a noticeable difference in the distribution for each label value. In particular, **Pregnancies** and **Age** show markedly different distributions for diabetic patients than for non-diabetic patients. These features may help predict whether or not a patient is diabetic.
### Split the data
Our dataset includes known values for the label, so we can use this to train a classifier so that it finds a statistical relationship between the features and the label value; but how will we know if our model is any good? How do we know it will predict correctly when we use it with new data that it wasn't trained with? Well, we can take advantage of the fact we have a large dataset with known label values, use only some of it to train the model, and hold back some to test the trained model - enabling us to compare the predicted labels with the already known labels in the test set.
In Python, the **scikit-learn** package contains a large number of functions we can use to build a machine learning model - including a **train_test_split** function that ensures we get a statistically random split of training and test data. We'll use that to split the data into 70% for training and hold back 30% for testing.
```
from sklearn.model_selection import train_test_split
# Split data 70%-30% into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
print ('Training cases: %d\nTest cases: %d' % (X_train.shape[0], X_test.shape[0]))
```
### Train and Evaluate a Binary Classification Model
OK, now we're ready to train our model by fitting the training features (**X_train**) to the training labels (**y_train**). There are various algorithms we can use to train the model. In this example, we'll use *Logistic Regression*, which (despite its name) is a well-established algorithm for classification. In addition to the training features and labels, we'll need to set a *regularization* parameter. This is used to counteract any bias in the sample, and help the model generalize well by avoiding *overfitting* the model to the training data.
> **Note**: Parameters for machine learning algorithms are generally referred to as *hyperparameters* (to a data scientist, *parameters* are values in the data itself - *hyperparameters* are defined externally from the data!)
```
# Train the model
from sklearn.linear_model import LogisticRegression
# Set regularization rate
reg = 0.01
# train a logistic regression model on the training set
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
print (model)
```
Now we've trained the model using the training data, we can use the test data we held back to evaluate how well it predicts. Again, **scikit-learn** can help us do this. Let's start by using the model to predict labels for our test set, and compare the predicted labels to the known labels:
```
predictions = model.predict(X_test)
print('Predicted labels: ', predictions)
print('Actual labels: ' ,y_test)
```
The arrays of labels are too long to be displayed in the notebook output, so we can only compare a few values. Even if we printed out all of the predicted and actual labels, there are too many of them to make this a sensible way to evaluate the model. Fortunately, **scikit-learn** has a few more tricks up its sleeve, and it provides some metrics that we can use to evaluate the model.
The most obvious thing you might want to do is to check the *accuracy* of the predictions - in simple terms, what proportion of the labels did the model predict correctly?
```
from sklearn.metrics import accuracy_score
print('Accuracy: ', accuracy_score(y_test, predictions))
```
The accuracy is returned as a decimal value - a value of 1.0 would mean that the model got 100% of the predictions right; while an accuracy of 0.0 is, well, pretty useless!
Accuracy seems like a sensible metric to evaluate (and to a certain extent it is), but you need to be careful about drawing too many conclusions from the accuracy of a classifier. Remember that it's simply a measure of how many cases were predicted correctly. Suppose only 3% of the population is diabetic. You could create a classifier that always just predicts 0, and it would be 97% accurate - but not terribly helpful in identifying patients with diabetes!
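To make that concrete, here's a tiny sketch (using made-up labels, not this dataset) of how an always-predict-0 classifier scores on a sample where only about 3% of cases are positive:
```
# A minimal illustration with a hypothetical 3% positive rate
import numpy as np
from sklearn.metrics import accuracy_score

rng = np.random.RandomState(0)
y_imbalanced = (rng.rand(1000) < 0.03).astype(int)   # ~3% positive cases
always_zero = np.zeros_like(y_imbalanced)            # a "classifier" that always predicts 0

print('Accuracy of always predicting 0:', accuracy_score(y_imbalanced, always_zero))
```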
Fortunately, there are some other metrics that reveal a little more about how our model is performing. Scikit-Learn includes the ability to create a *classification report* that provides more insight than raw accuracy alone.
```
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
```
The classification report includes the following metrics for each class (0 and 1):
> note that the header row may not line up with the values!
* *Precision*: Of the predictions the model made for this class, what proportion were correct?
* *Recall*: Out of all of the instances of this class in the test dataset, how many did the model identify?
* *F1-Score*: An average metric that takes both precision and recall into account.
* *Support*: How many instances of this class are there in the test dataset?
The classification report also includes averages for these metrics, including a weighted average that allows for the imbalance in the number of cases of each class.
Because this is a *binary* classification problem, the ***1*** class is considered *positive* and its precision and recall are particularly interesting - these in effect answer the questions:
- Of all the patients the model predicted are diabetic, how many are actually diabetic?
- Of all the patients that are actually diabetic, how many did the model identify?
You can retrieve these values on their own by using the **precision_score** and **recall_score** metrics in scikit-learn (which by default assume a binary classification model).
```
from sklearn.metrics import precision_score, recall_score
print("Overall Precision:",precision_score(y_test, predictions))
print("Overall Recall:",recall_score(y_test, predictions))
```
The precision and recall metrics are derived from four possible prediction outcomes:
* *True Positives*: The predicted label and the actual label are both 1.
* *False Positives*: The predicted label is 1, but the actual label is 0.
* *False Negatives*: The predicted label is 0, but the actual label is 1.
* *True Negatives*: The predicted label and the actual label are both 0.
These metrics are generally tabulated for the test set and shown together as a *confusion matrix*, which takes the following form:
<table style="border: 1px solid black;">
<tr style="border: 1px solid black;">
<td style="border: 1px solid black;color: black;" bgcolor="lightgray">TN</td><td style="border: 1px solid black;color: black;" bgcolor="white">FP</td>
</tr>
<tr style="border: 1px solid black;">
<td style="border: 1px solid black;color: black;" bgcolor="white">FN</td><td style="border: 1px solid black;color: black;" bgcolor="lightgray">TP</td>
</tr>
</table>
Note that the correct (*true*) predictions form a diagonal line from top left to bottom right - these figures should be significantly higher than the *false* predictions if the model is any good.
In Python, you can use the **sklearn.metrics.confusion_matrix** function to find these values for a trained classifier:
```
from sklearn.metrics import confusion_matrix
# Print the confusion matrix
cm = confusion_matrix(y_test, predictions)
print (cm)
```
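As a sanity check, you can unpack the four cells of the matrix and recompute precision and recall by hand (a small sketch that assumes the `cm` variable from the cell above):
```
# For binary labels [0, 1], sklearn's confusion matrix flattens as [TN, FP, FN, TP]
tn, fp, fn, tp = cm.ravel()
print('TN:', tn, ' FP:', fp, ' FN:', fn, ' TP:', tp)

# Precision and recall derived directly from the counts
print('Precision (TP / (TP + FP)):', tp / (tp + fp))
print('Recall    (TP / (TP + FN)):', tp / (tp + fn))
```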
Until now, we've considered the predictions from the model as being either 1 or 0 class labels. Actually, things are a little more complex than that. Statistical machine learning algorithms, like logistic regression, are based on *probability*; so what actually gets predicted by a binary classifier is the probability that the label is true (**P(y)**) and the probability that the label is false (1 - **P(y)**). A threshold value of 0.5 is used to decide whether the predicted label is a 1 (*P(y) > 0.5*) or a 0 (*P(y) <= 0.5*). You can use the **predict_proba** method to see the probability pairs for each case:
```
y_scores = model.predict_proba(X_test)
print(y_scores)
```
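Before moving on, here's a short sketch (not part of the original exercise) showing how choosing a different threshold, say 0.3, changes which cases get labeled as positive:
```
# Apply a stricter probability threshold to the positive-class scores
threshold = 0.3
custom_predictions = (y_scores[:,1] > threshold).astype(int)

# Compare how many cases each threshold classifies as positive
print('Positives at the default 0.5 threshold:', (y_scores[:,1] > 0.5).sum())
print('Positives at the', threshold, 'threshold:', custom_predictions.sum())
```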
The decision to score a prediction as a 1 or a 0 depends on the threshold to which the predicted probabilities are compared. If we were to change the threshold, it would affect the predictions; and therefore change the metrics in the confusion matrix. A common way to evaluate a classifier is to examine the *true positive rate* (which is another name for recall) and the *false positive rate* for a range of possible thresholds. These rates are then plotted against all possible thresholds to form a chart known as a *receiver operating characteristic (ROC) chart*, like this:
```
from sklearn.metrics import roc_curve
from sklearn.metrics import confusion_matrix
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
# calculate ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])
# plot ROC curve
fig = plt.figure(figsize=(6, 6))
# Plot the diagonal 50% line
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
```
The ROC chart shows the curve of the true and false positive rates for different threshold values between 0 and 1. A perfect classifier would have a curve that goes straight up the left side and straight across the top. The diagonal line across the chart represents the probability of predicting correctly with a 50/50 random prediction; so you obviously want the curve to be higher than that (or your model is no better than simply guessing!).
The area under the curve (AUC) is a value between 0 and 1 that quantifies the overall performance of the model. The closer to 1 this value is, the better the model. Once again, scikit-Learn includes a function to calculate this metric.
```
from sklearn.metrics import roc_auc_score
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
```
### Perform preprocessing in a pipeline
In this case, the ROC curve and its AUC indicate that the model performs better than a random guess, which is not bad considering we performed very little preprocessing of the data.
In practice, it's common to perform some preprocessing of the data to make it easier for the algorithm to fit a model to it. There's a huge range of preprocessing transformations you can perform to get your data ready for modeling, but we'll limit ourselves to a few common techniques:
- Scaling numeric features so they're on the same scale. This prevents features with large values from producing coefficients that disproportionately affect the predictions.
- Encoding categorical variables. For example, by using a *one hot encoding* technique you can create individual binary (true/false) features for each possible category value (see the short sketch after the note below).
To apply these preprocessing transformations, we'll make use of a Scikit-Learn feature named *pipelines*. These enable us to define a set of preprocessing steps that end with an algorithm. You can then fit the entire pipeline to the data, so that the model encapsulates all of the preprocessing steps as well as the regression algorithm. This is useful, because when we want to use the model to predict values from new data, we need to apply the same transformations (based on the same statistical distributions and category encodings used with the training data).
>**Note**: The term *pipeline* is used extensively in machine learning, often to mean very different things! In this context, we're using it to refer to pipeline objects in Scikit-Learn, but you may see it used elsewhere to mean something else.
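If you haven't seen one hot encoding before, this tiny standalone sketch (with made-up values, unrelated to the diabetes data) shows what the transformation does to a single categorical column:
```
# A standalone example of one hot encoding a small categorical column
from sklearn.preprocessing import OneHotEncoder
import numpy as np

colors = np.array([['red'], ['green'], ['blue'], ['green']])
encoder = OneHotEncoder(handle_unknown='ignore')
encoded = encoder.fit_transform(colors)
print(encoder.categories_)    # the category values discovered during fitting
print(encoded.toarray())      # one binary column per category value
```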
```
# Train the model
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LogisticRegression
import numpy as np
# Define preprocessing for numeric columns (normalize them so they're on the same scale)
numeric_features = [0,1,2,3,4,5,6]
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
# Define preprocessing for categorical features (encode the Age column)
categorical_features = [7]
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
# Combine preprocessing steps
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
# Create preprocessing and training pipeline
pipeline = Pipeline(steps=[('preprocessor', preprocessor),
('logregressor', LogisticRegression(C=1/reg, solver="liblinear"))])
# fit the pipeline to train a logistic regression model on the training set
model = pipeline.fit(X_train, (y_train))
print (model)
```
The pipeline encapsulates the preprocessing steps as well as model training.
Let's use the model trained by this pipeline to predict labels for our test set, and compare the performance metrics with the basic model we created previously.
```
# Get predictions from test data
predictions = model.predict(X_test)
# Get evaluation metrics
cm = confusion_matrix(y_test, predictions)
print ('Confusion Matrix:\n',cm, '\n')
print('Accuracy:', accuracy_score(y_test, predictions))
print("Overall Precision:",precision_score(y_test, predictions))
print("Overall Recall:",recall_score(y_test, predictions))

# calculate ROC curve and AUC using this model's predicted probabilities
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])
# plot ROC curve
fig = plt.figure(figsize=(6, 6))
# Plot the diagonal 50% line
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
```
The results look a little better, so clearly preprocessing the data has made a difference.
### Try a different algorithm
Now let's try a different algorithm. Previously we used a logistic regression algorithm, which is a *linear* algorithm. There are many kinds of classification algorithm we could try, including:
- **Support Vector Machine algorithms**: Algorithms that define a *hyperplane* that separates classes.
- **Tree-based algorithms**: Algorithms that build a decision tree to reach a prediction
- **Ensemble algorithms**: Algorithms that combine the outputs of multiple base algorithms to improve generalizability.
This time, we'll use the same preprocessing steps as before, but we'll train the model using an *ensemble* algorithm named *Random Forest* that combines the outputs of multiple random decision trees (for more details, see the [Scikit-Learn documentation](https://scikit-learn.org/stable/modules/ensemble.html#forests-of-randomized-trees)).
```
from sklearn.ensemble import RandomForestClassifier
# Create preprocessing and training pipeline
pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                           ('randomforest', RandomForestClassifier(n_estimators=100))])
# fit the pipeline to train a random forest model on the training set
model = pipeline.fit(X_train, (y_train))
print (model)
```
Let's look at the performance metrics for the new model.
```
predictions = model.predict(X_test)
cm = confusion_matrix(y_test, predictions)
print ('Confusion Matrix:\n',cm, '\n')
print('Accuracy:', accuracy_score(y_test, predictions))
print("Overall Precision:",precision_score(y_test, predictions))
print("Overall Recall:",recall_score(y_test, predictions))
# calculate ROC curve and AUC using this model's predicted probabilities
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('\nAUC: ' + str(auc))
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])
# plot ROC curve
fig = plt.figure(figsize=(6, 6))
# Plot the diagonal 50% line
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
```
That looks better!
### Use the Model for Inferencing
Now that we have a reasonably useful trained model, we can save it for use later to predict labels for new data:
```
import joblib
# Save the model as a pickle file
filename = './models/diabetes_model.pkl'
joblib.dump(model, filename)
```
When we have some new observations for which the label is unknown, we can load the model and use it to predict values for the unknown label:
```
# Load the model from the file
model = joblib.load(filename)
# predict on a new sample
# The model accepts an array of feature arrays (so you can predict the classes of multiple patients in a single call)
# We'll create an array with a single array of features, representing one patient
X_new = np.array([[2,180,74,24,21,23.9091702,1.488172308,22]])
print ('New sample: {}'.format(list(X_new[0])))
# Get a prediction
pred = model.predict(X_new)
# The model returns an array of predictions - one for each set of features submitted
# In our case, we only submitted one patient, so our prediction is the first one in the resulting array.
print('Predicted class is {}'.format(pred[0]))
```
## Multiclass Classification
Binary classification techniques work well when the data observations belong to one of two classes or categories, such as "True" or "False". When the data can be categorized into more than two classes, you must use a multiclass classification algorithm.
Fortunately, in most machine learning frameworks, including scikit-learn, implementing a multiclass classifier is not significantly more complex than binary classification - and in many cases, the classification algorithm classes used for binary classification implicitly support multiclass classification.
### Explore the data
Let's start by examining a dataset that contains observations of multiple classes. We'll use a dataset that contains observations of three different species of penguin.
> **Citation**: The penguins dataset used in this exercise is a subset of data collected and made available by [Dr. Kristen
Gorman](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php)
and the [Palmer Station, Antarctica LTER](https://pal.lternet.edu/), a
member of the [Long Term Ecological Research
Network](https://lternet.edu/).
```
import pandas as pd
# load the training dataset
penguins = pd.read_csv('data/penguins.csv')
# Display a random sample of 10 observations
sample = penguins.sample(10)
sample
```
The dataset contains the following columns:
* **CulmenLength**: The length in mm of the penguin's culmen (bill).
* **CulmenDepth**: The depth in mm of the penguin's culmen.
* **FlipperLength**: The length in mm of the penguin's flipper.
* **BodyMass**: The body mass of the penguin in grams.
* **Species**: An integer value that represents the species of the penguin.
The **Species** column is the label we want to train a model to predict. The dataset includes three possible species, which are encoded as 0, 1, and 2. The actual species names are revealed by the code below:
```
penguin_classes = ['Adelie', 'Gentoo', 'Chinstrap']
print(sample.columns[0:5].values, 'SpeciesName')
for index, row in penguins.sample(10).iterrows():
    print('[',row[0], row[1], row[2], row[3], int(row[4]),']',penguin_classes[int(row[4])])
```
Now that we know what the features and labels in the data represent, let's explore the dataset. First, let's see if there are any missing (*null*) values.
```
# Count the number of null values for each column
penguins.isnull().sum()
```
It looks like there are some missing feature values, but no missing labels. Let's dig a little deeper and see the rows that contain nulls.
```
# Show rows containing nulls
penguins[penguins.isnull().any(axis=1)]
```
There are two rows that contain no feature values at all (*NaN* stands for "not a number"), so these won't be useful in training a model. Let's discard them from the dataset.
```
# Drop rows containing NaN values
penguins=penguins.dropna()
#Confirm there are now no nulls
penguins.isnull().sum()
```
Now that we've dealt with the missing values, let's explore how the features relate to the label by creating some box charts.
```
from matplotlib import pyplot as plt
%matplotlib inline
penguin_features = ['CulmenLength','CulmenDepth','FlipperLength','BodyMass']
penguin_label = 'Species'
for col in penguin_features:
    penguins.boxplot(column=col, by=penguin_label, figsize=(6,6))
    plt.title(col)
    plt.show()
```
From the box plots, it looks like species 0 and 2 (Adelie and Chinstrap) have similar data profiles for culmen depth, flipper length, and body mass, but Chinstraps tend to have longer culmens. Species 1 (Gentoo) tends to have fairly clearly differentiated features from the others, which should help us train a good classification model.
### Prepare the data
Just as for binary classification, before training the model, we need to separate the features and label, and then split the data into subsets for training and validation. We'll also apply a *stratification* technique when splitting the data to maintain the proportion of each label value in the training and validation datasets.
```
from sklearn.model_selection import train_test_split
# Separate features and labels
penguins_X, penguins_y = penguins[penguin_features].values, penguins[penguin_label].values
# Split data 70%-30% into training set and test set
x_penguin_train, x_penguin_test, y_penguin_train, y_penguin_test = train_test_split(penguins_X, penguins_y,
test_size=0.30,
random_state=0,
stratify=penguins_y)
print ('Training Set: %d, Test Set: %d \n' % (x_penguin_train.shape[0], x_penguin_test.shape[0]))
```
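To confirm that the stratified split preserved the class balance, here's a quick check (a sketch that reuses the arrays created above):
```
# Compare label proportions in the full, training, and test sets
import numpy as np
for name, labels in [('full', penguins_y), ('train', y_penguin_train), ('test', y_penguin_test)]:
    values, counts = np.unique(labels, return_counts=True)
    print(name, dict(zip(values, np.round(counts / counts.sum(), 3))))
```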
### Train and evaluate a multiclass classifier
Now that we have a set of training features and corresponding training labels, we can fit a multiclass classification algorithm to the data to create a model. Most scikit-learn classification algorithms inherently support multiclass classification. We'll try a logistic regression algorithm.
```
from sklearn.linear_model import LogisticRegression
# Set regularization rate
reg = 0.1
# train a logistic regression model on the training set
multi_model = LogisticRegression(C=1/reg, solver='lbfgs', multi_class='auto', max_iter=10000).fit(x_penguin_train, y_penguin_train)
print (multi_model)
```
Now we can use the trained model to predict the labels for the test features, and compare the predicted labels to the actual labels:
```
penguin_predictions = multi_model.predict(x_penguin_test)
print('Predicted labels: ', penguin_predictions[:15])
print('Actual labels : ' ,y_penguin_test[:15])
```
Let's look at a classification report.
```
from sklearn.metrics import classification_report
print(classification_report(y_penguin_test, penguin_predictions))
```
As with binary classification, the report includes *precision* and *recall* metrics for each class. However, while with binary classification we could focus on the scores for the *positive* class; in this case, there are multiple classes so we need to look at an overall metric (either the macro or weighted average) to get a sense of how well the model performs across all three classes.
You can get the overall metrics separately from the report using the scikit-learn metrics score classes, but with multiclass results you must specify which average metric you want to use for precision and recall.
```
from sklearn.metrics import accuracy_score, precision_score, recall_score
print("Overall Accuracy:",accuracy_score(y_penguin_test, penguin_predictions))
print("Overall Precision:",precision_score(y_penguin_test, penguin_predictions, average='macro'))
print("Overall Recall:",recall_score(y_penguin_test, penguin_predictions, average='macro'))
```
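To see how the averaging choice matters, this short sketch (reusing the predictions above) compares the macro average with the weighted average, which weights each class by its number of cases:
```
# Compare macro and weighted averaging for the same predictions
for avg in ['macro', 'weighted']:
    print(avg, 'precision:', precision_score(y_penguin_test, penguin_predictions, average=avg))
    print(avg, 'recall   :', recall_score(y_penguin_test, penguin_predictions, average=avg))
```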
Now let's look at the confusion matrix for our model:
```
from sklearn.metrics import confusion_matrix
# Print the confusion matrix
mcm = confusion_matrix(y_penguin_test, penguin_predictions)
print(mcm)
```
The confusion matrix shows the intersection of predicted and actual label values for each class - in simple terms, the diagonal intersections from top-left to bottom-right indicate the number of correct predictions.
When dealing with multiple classes, it's generally more intuitive to visualize this as a heat map, like this:
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(mcm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(penguin_classes))
plt.xticks(tick_marks, penguin_classes, rotation=45)
plt.yticks(tick_marks, penguin_classes)
plt.xlabel("Actual Species")
plt.ylabel("Predicted Species")
plt.show()
```
The darker squares in the confusion matrix plot indicate high numbers of cases, and you can hopefully see a diagonal line of darker squares indicating cases where the predicted and actual label are the same.
### Preprocess data in a pipeline
Again, just like with binary classification, you can use a pipeline to apply preprocessing steps to the data before fitting it to an algorithm to train a model. Let's see if we can improve the penguin predictor by scaling the numeric features in a transformation step before training. We'll also try a different algorithm (a support vector machine), just to show that we can!
```
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# Define preprocessing for numeric columns (scale them)
feature_columns = [0,1,2,3]
feature_transformer = Pipeline(steps=[
('scaler', StandardScaler())
])
# Create preprocessing steps
preprocessor = ColumnTransformer(
transformers=[
('preprocess', feature_transformer, feature_columns)])
# Create training pipeline
pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                           ('classifier', SVC())])
# fit the pipeline to train a support vector machine model on the training set
multi_model = pipeline.fit(x_penguin_train, y_penguin_train)
print (multi_model)
```
Now we can evaluate the new model.
```
# Get predictions from test data
penguin_predictions = multi_model.predict(x_penguin_test)
# Overall metrics
print("Overall Accuracy:",accuracy_score(y_penguin_test, penguin_predictions))
print("Overall Precision:",precision_score(y_penguin_test, penguin_predictions, average='macro'))
print("Overall Recall:",recall_score(y_penguin_test, penguin_predictions, average='macro'))
# Confusion matrix for the new model's predictions
mcm = confusion_matrix(y_penguin_test, penguin_predictions)
plt.imshow(mcm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(penguin_classes))
plt.xticks(tick_marks, penguin_classes, rotation=45)
plt.yticks(tick_marks, penguin_classes)
plt.xlabel("Predicted Species")
plt.ylabel("Actual Species")
plt.show()
```
### Use the model with new data observations
Now let's save our trained model so we can use it again later.
```
import joblib
# Save the model as a pickle file
filename = './models/penguin_model.pkl'
joblib.dump(multi_model, filename)
```
OK, so now we have a trained model. Let's use it to predict the class of a new penguin observation:
```
# Load the model from the file
multi_model = joblib.load(filename)
# The model accepts an array of feature arrays (so you can predict the classes of multiple penguin observations in a single call)
# We'll create an array with a single array of features, representing one penguin
x_new = np.array([[50.4,15.3,224,5550]])
print ('New sample: {}'.format(x_new[0]))
# The model returns an array of predictions - one for each set of features submitted
# In our case, we only submitted one penguin, so our prediction is the first one in the resulting array.
penguin_pred = multi_model.predict(x_new)[0]
print('Predicted class is', penguin_classes[penguin_pred])
```
You can also submit a batch of penguin observations to the model, and get back a prediction for each one.
```
# This time our input is an array of two feature arrays
x_new = np.array([[49.5,18.4,195, 3600],
[38.2,20.1,190,3900]])
print ('New samples:\n{}'.format(x_new))
# Call the web service, passing the input data
predictions = multi_model.predict(x_new)
# Get the predicted classes.
for prediction in predictions:
print(prediction, '(' + penguin_classes[prediction] +')')
```
## Learn More
Classification is one of the most common forms of machine learning, and by following the basic principles we've discussed in this notebook you should be able to train and evaluate classification models with scikit-learn. It's worth spending some time investigating classification algorithms in more depth, and a good starting point is the [Scikit-Learn documentation](https://scikit-learn.org/stable/user_guide.html).
# Preprocessing
This notebook does the preprocessing for the dataset.
1. The bounding boxes and labels are extracted from the annotation files
2. The image, bounding box and label are grouped and accumulated in a list
3. For training, a train-validation split of 80/20 is done by shuffling the extracted training data and splitting it
4. The split data are saved into CSV files for a `CSVGenerator` in the training section to consume (a quick sanity check of the CSV format follows the cell below)
```
import glob
import csv
import numpy as np
import os
import matplotlib.pyplot as plt
def extract_box(path):
    """extract_box
    Extract annotation box positions for each label from the VIVA hand dataset.
    Output is a list of tuples.
    :param path: text file path
    """
    with open(path) as temp:
        output = []
        for i, line in enumerate(temp):
            if i != 0 and line:
                label, x_1, y_1, x_off, y_off, *_ = line.split()
                pt_1 = (int(x_1), int(y_1))
                pt_2 = (pt_1[0] + int(x_off), (pt_1[1] + int(y_off)))
                output.append((label, pt_1, pt_2))
    return output

def create_csv(image_dir, annotation_dir, csv_out_path, val_out_path=None, val_split=None):
    image_paths = sorted(glob.glob(image_dir + '*'))
    annotations_paths = sorted(glob.glob(annotation_dir + '*'))
    # each image can have up to 4 hand bboxes
    rows = []
    for image_path, annotations_path in zip(image_paths, annotations_paths):
        # make the image path relative to the training notebook's location
        if not os.path.isabs(image_path):
            image_path = '../' + image_path
        annotations = extract_box(annotations_path)
        for annotation in annotations:
            # annotation is (label, (x1, y1), (x2, y2))
            # save as image,x1,y1,x2,y2,label
            rows.append([image_path,
                         annotation[1][0], annotation[1][1],
                         annotation[2][0], annotation[2][1],
                         annotation[0]])
    if val_split:
        # shuffle and split
        np.random.shuffle(rows)
        val_size = int(len(rows) * val_split)
        val_rows = rows[:val_size]
        with open('./data/validation.csv' if val_out_path is None else val_out_path, 'w') as csv_file:
            writer = csv.writer(csv_file)
            for row in val_rows:
                writer.writerow(row)
        rows = rows[val_size:]
    with open(csv_out_path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        for row in rows:
            writer.writerow(row)
# this is the root directory where the training data is extracted
data_dir = '../data/'
# training data path
train_dir = data_dir + 'detectiondata/train/'
train_image_dir = train_dir + 'pos/'
train_annotation_dir = train_dir + 'posGt/'
out_path = './data/train.csv'
create_csv(train_image_dir, train_annotation_dir, out_path, val_split=0.2)
# the test data images are in the same root dir as training
test_image_dir = data_dir + 'detectiondata/test/pos/'
# but the annotations are downloaded separately and extracted into data_dir/evaluation/
test_annotation_dir = data_dir + 'detectiondata/test/posGt/'
test_out_path = './data/test.csv'
create_csv(test_image_dir, test_annotation_dir, test_out_path)
```
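As a quick sanity check of the output format described above, you can read back the first few rows of the generated training CSV (a minimal sketch assuming the paths defined in the cell above and the `csv` module imported earlier):
```
# Peek at the first few rows of the generated training CSV
with open(out_path) as f:
    reader = csv.reader(f)
    for i, row in enumerate(reader):
        print(row)   # [image_path, x1, y1, x2, y2, label]
        if i >= 2:
            break
```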
## Visualization
Some visualization of the training data.
```
train_annotations_paths = sorted(glob.glob(train_annotation_dir + '*'))
#class names
lh_d_label = 'leftHand_driver'
rh_d_label = 'rightHand_driver'
lh_p_label = 'leftHand_passenger'
rh_p_label = 'rightHand_passenger'
# each image can have up to 4 hand bboxes
lh_d_count = 0
rh_d_count = 0
lh_p_count = 0
rh_p_count = 0
for annotations_path in train_annotations_paths:
    anns = extract_box(annotations_path)
    viewpoint = os.path.splitext(os.path.split(annotations_path)[1])[0].split('_')[-1]
    for ann in anns:
        if ann[0] == lh_d_label:
            lh_d_count += 1
        elif ann[0] == rh_d_label:
            rh_d_count += 1
        elif ann[0] == lh_p_label:
            lh_p_count += 1
        elif ann[0] == rh_p_label:
            rh_p_count += 1
print('Left Hand Driver Bounding Boxes: {}'.format(lh_d_count))
print('Right Hand Driver Bounding Boxes: {}'.format(rh_d_count))
print('Left Hand Passenger Bounding Boxes: {}'.format(lh_p_count))
print('Right Hand Passenger Bounding Boxes: {}'.format(rh_p_count))
#plot
plt.bar([0, 1, 2, 3],
[lh_d_count, rh_d_count, lh_p_count, rh_p_count],
tick_label=['Left Hand\nDriver', 'Right Hand\nDriver', 'Left Hand\nPassenger', 'Right Hand\nPassenger'])
plt.savefig('./charts/classes_count.png')
plt.show()
```
The challenge evaluates on two different levels:
Level-1 (L1): hand instances with a minimum height of 70 pixels, only in the over-the-shoulder (back) camera view.
- Check that the height from the annotation is >= 70
- From the image name format `videoID_framenumber_vehicletype_driversubjectID_passengersubjectID_viewpoint.png`, the viewpoint must be 3.
Level-2 (L2): hand instances with a minimum height of 25 pixels, all camera views.
- Check that the height from the annotation is >= 25
```
test_annotations_paths = sorted(glob.glob(test_annotation_dir + '*'))
L1 = 0
L2 = 0
for annotations_path in test_annotations_paths:
    anns = extract_box(annotations_path)
    viewpoint = os.path.splitext(os.path.split(annotations_path)[1])[0].split('_')[-1]
    for ann in anns:
        height = ann[2][1] - ann[1][1]
        if viewpoint == '3' and height >= 70:
            L1 += 1
        if height >= 25:
            L2 += 1
print('L1: {}'.format(L1))
print('L2: {}'.format(L2))
```
# Two modifications of mean-variance portfolio theory
#### Daniel Csaba, Thomas J. Sargent and Balint Szoke
#### December 2016
### Remarks about estimating means and variances
The famous **Black-Litterman** (1992) portfolio choice model that we describe in this notebook is motivated by the finding that with high or moderate frequency data, means are more difficult to estimate than variances.
A model of **robust portfolio choice** that we'll describe also begins from the same
starting point.
To begin, we'll take for granted that means are more difficult to estimate than covariances and will focus on how Black and Litterman, on the one hand, and robust control theorists, on the other, would recommend modifying the **mean-variance portfolio choice model** to take that into account.
At the end of this notebook, we shall use some rates of convergence results and some simulations to verify that means are indeed more difficult to estimate than variances.
Among the ideas in play in this notebook will be
* mean-variance portfolio theory
* Bayesian approaches to estimating linear regressions
* A risk-sensitivity operator and its connection to robust control theory
```
%matplotlib inline
import numpy as np
import scipy as sp
import scipy.stats as stat
import matplotlib.pyplot as plt
from ipywidgets import interact, FloatSlider
```
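As a quick preview of the claim that means are harder to estimate than variances, here is a small simulation sketch (separate from the verification at the end of the notebook): for i.i.d. normal excess returns the sample mean has standard error $\sigma/\sqrt{T}$, while the sample variance has standard error of roughly $\sigma^2 \sqrt{2/T}$, so when the mean is small relative to $\sigma$ the *relative* error of the mean estimate is much larger. The return and volatility numbers below are hypothetical.
```
# Simulate many samples of T observations with a small mean relative to the volatility
np.random.seed(0)
mu_true, sigma_true = 0.05, 0.20          # hypothetical risk premium and volatility
T_sim = 200
draws = mu_true + sigma_true * np.random.randn(10_000, T_sim)

mean_est = draws.mean(axis=1)             # sample means across simulations
var_est = draws.var(axis=1, ddof=1)       # sample variances across simulations

print('relative std of the mean estimate:    ', mean_est.std() / mu_true)
print('relative std of the variance estimate:', var_est.std() / sigma_true**2)
```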
## Adjusting mean-variance portfolio choice theory for distrust of mean excess returns
This notebook describes two lines of thought that modify the classic mean-variance
portfolio choice model in ways designed to make its recommendations more plausible.
As we mentioned above, the two approaches build on a common hunch -- that because it is much easier statistically to estimate covariances of excess returns than it is to estimate their means, it makes sense to contemplate the consequences of adjusting investors'
subjective beliefs about mean returns in order to render more sensible decisions.
Both of the adjustments that we describe are designed to confront a widely recognized embarrassment to mean-variance portfolio theory, namely, that it usually implies taking
very extreme long-short portfolio positions.
##### Mean-variance portfolio choice
A risk free security earns one-period net return $r_f$.
An $n \times 1$ vector of risky securities earns an $n \times 1$
vector $\vec r - r_f {\bf 1}$ of *excess returns*, where ${\bf 1}$ is
an $n \times 1$ vector of ones.
The excess return vector is multivariate normal with mean $\mu$ and covariance
matrix $\Sigma$, which we express either as
$$ \vec r - r_f {\bf 1} \sim {\mathcal N}(\mu, \Sigma) $$
or
$$ \vec r - r_f {\bf 1} = \mu + C \epsilon $$
where $\epsilon \sim {\mathcal N}(0, I)$ is an $n \times 1 $ random vector.
Let $w $ be an $n\times 1$ vector of portfolio weights.
A portfolio with weights $w$ earns excess returns
$$ w' (\vec r - r_f {\bf 1}) \sim {\mathcal N}(w' \mu, w' \Sigma w ) $$
The **mean-variance portfolio choice problem** is to choose $w$ to maximize
$$ U(\mu,\Sigma;w) = w'\mu - \frac{\delta}{2} w' \Sigma w , \quad (1) $$
where $\delta > 0 $ is a risk-aversion parameter. The first-order condition for
maximizing (1) with respect to the vector $w$ is
$$ \mu = \delta \Sigma w , \quad (2) $$
which implies the following design of a risky portfolio:
$$ w = (\delta \Sigma)^{-1} \mu , \quad (3) $$
##### Estimating $\mu$ and $\Sigma$
The key inputs into the portfolio choice model (3) are
* estimates of the parameters $\mu, \Sigma$ of the random excess return vector $(\vec r - r_f {\bf 1})$
* the risk-aversion parameter $\delta$
A standard way of estimating $\mu$ is maximum-likelihood or least squares; that
amounts to estimating $\mu$ by a sample mean of excess returns and estimating $\Sigma$
by a sample covariance matrix.
###### The Black-Litterman starting point
When estimates of $\mu$ and $\Sigma$ from historical sample means and covariances
have been combined with "reasonable" values of the risk-aversion parameter $\delta$
to compute an optimal portfolio from formula (3), a typical outcome has been
$w$'s with **extreme long and short positions**. A common reaction to these outcomes
is that they are so unreasonable that a portfolio manager cannot recommend them to a
customer.
```
#========================================
# Primitives of the laboratory
#========================================
np.random.seed(12)
N = 10 # number of assets
T = 200 # sample size
# random market portfolio (sum is normalized to 1)
w_m = np.random.rand(N)
w_m = w_m/(w_m.sum())
# True risk premia and variance of excess return
# (constructed so that the Sharpe ratio is 1)
Mu = (np.random.randn(N) + 5)/100 # mean excess return (risk premium)
S = np.random.randn(N, N) # random matrix for the covariance matrix
V = S @ S.T # turn the random matrix into symmetric psd
Sigma = V * (w_m @ Mu)**2 / (w_m @ V @ w_m) # make sure that the Sharpe ratio is one
# Risk aversion of market portfolio holder
delta = 1 / np.sqrt(w_m @ Sigma @ w_m)
# Generate a sample of excess returns
excess_return = stat.multivariate_normal(Mu, Sigma)
sample = excess_return.rvs(T)
#==================================
# Mean-variance portfolio
#==================================
# Estimate mu and sigma
Mu_est = sample.mean(0).reshape(N, 1)
Sigma_est = np.cov(sample.T)
# Solve the constrained problem for the weights (with iota @ w = 1)
#iota = np.ones((N, 1)) # column vector of ones
#def lamb(mu, sigma, delta):
# aux_vector = iota.T @ np.linalg.inv(delta * sigma) # save memory
# return ((1 - aux_vector @ mu) / (aux_vector @ iota))[0] # lagrange multiplier on the const
#w = np.linalg.solve(d_m * sigma_hat, mu_hat) + lamb(mu_hat, sigma_hat, delta) * iota)
w = np.linalg.solve(delta * Sigma_est, Mu_est)
fig, ax = plt.subplots(figsize = (8, 5))
ax.set_title('Mean-variance portfolio weights recommendation and the market portfolio ', fontsize = 12)
ax.plot(np.arange(N) + 1, w, 'o', color = 'k', label = '$w$ (mean-variance)')
ax.plot(np.arange(N) + 1, w_m, 'o', color = 'r', label = '$w_m$ (market portfolio)')
ax.vlines(np.arange(N) + 1, 0, w, lw = 1)
ax.vlines(np.arange(N) + 1, 0, w_m, lw = 1)
ax.axhline(0, color = 'k')
ax.axhline(-1, color = 'k', linestyle = '--')
ax.axhline(1, color = 'k', linestyle = '--')
ax.set_xlim([0, N+1])
ax.set_xlabel('Assets', fontsize = 11)
ax.xaxis.set_ticks(np.arange(1, N + 1, 1))
plt.legend(numpoints = 1, loc = 'best', fontsize = 11)
plt.show()
```
Black and Litterman responded to this situation in the following way:
* They continue to accept (3) as a good model for choosing an optimal portfolio $w$
* They want to continue to allow the customer to express his or her risk tolerance by
setting $\delta$
* Leaving $\Sigma$ at its maximum-likelihood value, they push $\mu$ away from its maximum-likelihood value in a way designed to make portfolio choices that are more plausible in terms of conforming to what most people actually do.
In particular, given $\Sigma$ and a reasonable value of $\delta$, Black and Litterman
reverse engineered a vector $\mu_{BL}$ of mean excess returns that makes the $w$ implied
by formula (3) equal the **actual** market portfolio $w_m$, so that
$$ w_m = (\delta \Sigma)^{-1} \mu_{BL} \quad (4) $$
##### Details
Let's define
$$ w_m' \mu \equiv ( r_m - r_f) $$
as the (scalar) excess return on the market portfolio $w_m$.
Define
$$ \sigma^2 = w_m' \Sigma w_m $$
as the variance of the excess return on the market portfolio $w_m$.
Define
$$ {\bf SR}_m = \frac{ r_m - r_f}{\sigma} $$
as the **Sharpe-ratio** on the market portfolio $w_m$.
Let $\delta_m$ be the value of the risk aversion parameter that induces an investor
to hold the market portfolio in light of the optimal portfolio choice rule (3).
Evidently, portfolio rule (3) then implies that $ r_m - r_f = \delta_m \sigma^2 $
or
$$ \delta_m = \frac{r_m - r_f}{\sigma^2} $$
or
$$ \delta_m = \frac{{\bf SR}_m}{\sigma}, \quad (5) $$
Following the Black-Litterman philosophy, our first step will be to back out a value
of $\delta_m$ from
* an estimate of the Sharpe-ratio, and
* our maximum likelihood estimate of $\sigma$ drawn from our estimates of $w_m$ and $\Sigma$
The second key Black-Litterman step is then to use this value of $\delta$ together
with the maximum likelihood estimate of $\Sigma$ to deduce a $\mu_{\bf BL}$ that
verifies portfolio rule (3) at the market portfolio $w = w_m$:
$$ \mu_m = \delta_m \Sigma w_m $$
The starting point of the Black-Litterman portfolio choice model is thus
a pair $(\delta_m, \mu_m) $ that tells the customer to hold the market portfolio.
```
# =====================================================
# Derivation of Black-Litterman pair (\delta_m, \mu_m)
# =====================================================
# Observed mean excess market return
r_m = w_m @ Mu_est
# Estimated variance of market portfolio
sigma_m = w_m @ Sigma_est @ w_m
# Sharpe-ratio
SR_m = r_m/np.sqrt(sigma_m)
# Risk aversion of market portfolio holder
d_m = r_m/sigma_m
# Derive "view" which would induce market portfolio
mu_m = (d_m * Sigma_est @ w_m).reshape(N, 1)
fig, ax = plt.subplots(figsize = (8, 5))
ax.set_title(r'Difference between $\hat{\mu}$ (estimate) and $\mu_{BL}$ (market implied)', fontsize = 12)
ax.plot(np.arange(N) + 1, Mu_est, 'o', color = 'k', label = '$\hat{\mu}$')
ax.plot(np.arange(N) + 1, mu_m, 'o', color = 'r', label = '$\mu_{BL}$')
ax.vlines(np.arange(N) + 1, mu_m, Mu_est, lw = 1)
ax.axhline(0, color = 'k', linestyle = '--')
ax.set_xlim([0, N + 1])
ax.set_xlabel('Assets', fontsize = 11)
ax.xaxis.set_ticks(np.arange(1, N + 1, 1))
plt.legend(numpoints = 1, loc = 'best', fontsize = 11)
plt.show()
```
##### Adding ``views''
Black and Litterman start with a baseline customer who asserts that he or she shares
the ``market's views'', which means that he or she believes that excess returns
are governed by
$$ \vec r - r_f {\bf 1} \sim {\mathcal N}( \mu_{BL}, \Sigma) , \quad (6) $$
Black and Litterman would advise that customer to hold the market portfolio of risky securities.
Black and Litterman then imagine a customer who would like to express a view that
differs from the market's. The customer wants to mix his or her view appropriately with
the market's before using (3) to choose a portfolio.
Suppose that the customer's view is expressed by a hunch that rather than (6),
excess returns are governed by
$$ \vec r - r_f {\bf 1} \sim {\mathcal N}( \hat \mu, \tau \Sigma) , \quad (7) $$
where $\tau > 0$ is a scalar parameter that determines how the decision maker
wants to mix his view $\hat \mu$ with the market's view $\mu_{\bf BL}$.
Black and Litterman would then use a formula like the following one to mix the
views $\hat \mu$ and $\mu_{\bf BL}$:
$$\tilde \mu = (\Sigma^{-1} + (\tau \Sigma)^{-1})^{-1} (\Sigma^{-1} \mu_{BL} + (\tau \Sigma)^{-1} \hat \mu) , \quad (8) $$
Black and Litterman would then advise the customer to hold the portfolio associated
with these views implied by rule (3):
$$ \tilde w = (\delta \Sigma)^{-1} \tilde \mu , \quad (9) $$
This portfolio $\tilde w$ will deviate from the market portfolio $w_m$ by amounts that
depend on the mixing parameter $\tau$.
If $\hat \mu$ is the maximum likelihood estimate and $\tau$ is chosen to weight this
view heavily (i.e., $\tau$ is small), then the customer's portfolio will involve big long-short positions.
```
def Black_Litterman(lamb, mu_1, mu_2, Sigma_1, Sigma_2):
"""
This function calculates the Black-Litterman mixture
mean excess return and covariance matrix
"""
sigma1_inv = np.linalg.inv(Sigma_1)
sigma2_inv = np.linalg.inv(Sigma_2)
mu_tilde = np.linalg.solve(sigma1_inv + lamb * sigma2_inv,
sigma1_inv @ mu_1 + lamb * sigma2_inv @ mu_2)
return mu_tilde
#===================================
# Example cont'
# mean : MLE mean
# cov : scaled MLE cov matrix
#===================================
tau = 1
mu_tilde = Black_Litterman(1, mu_m, Mu_est, Sigma_est, tau * Sigma_est)
# The Black-Litterman recommendation for the portfolio weights
w_tilde = np.linalg.solve(delta * Sigma_est, mu_tilde)
tau_slider = FloatSlider(min = 0.05, max = 10, step = 0.5, value = tau)
@interact(tau = tau_slider)
def BL_plot(tau):
mu_tilde = Black_Litterman(1, mu_m, Mu_est, Sigma_est, tau * Sigma_est)
w_tilde = np.linalg.solve(delta * Sigma_est, mu_tilde)
fig, ax = plt.subplots(1, 2, figsize = (16, 6))
ax[0].set_title(r'Relationship between $\hat{\mu}$, $\mu_{BL}$ and $\tilde{\mu}$', fontsize = 15)
ax[0].plot(np.arange(N) + 1, Mu_est, 'o', color = 'k', label = r'$\hat{\mu}$ (subj view)')
ax[0].plot(np.arange(N) + 1, mu_m, 'o', color = 'r', label = r'$\mu_{BL}$ (market)')
ax[0].plot(np.arange(N) + 1, mu_tilde, 'o', color = 'y', label = r'$\tilde{\mu}$ (mixture)')
ax[0].vlines(np.arange(N) + 1, mu_m, Mu_est, lw = 1)
ax[0].axhline(0, color = 'k', linestyle = '--')
ax[0].set_xlim([0, N + 1])
ax[0].xaxis.set_ticks(np.arange(1, N + 1, 1))
ax[0].set_xlabel('Assets', fontsize = 14)
ax[0].legend(numpoints = 1, loc = 'best', fontsize = 13)
ax[1].set_title('Black-Litterman portfolio weight recommendation', fontsize = 15)
ax[1].plot(np.arange(N) + 1, w, 'o', color = 'k', label = r'$w$ (mean-variance)')
ax[1].plot(np.arange(N) + 1, w_m, 'o', color = 'r', label = r'$w_{m}$ (market, BL)')
ax[1].plot(np.arange(N) + 1, w_tilde, 'o', color = 'y', label = r'$\tilde{w}$ (mixture)')
ax[1].vlines(np.arange(N) + 1, 0, w, lw = 1)
ax[1].vlines(np.arange(N) + 1, 0, w_m, lw = 1)
ax[1].axhline(0, color = 'k')
ax[1].axhline(-1, color = 'k', linestyle = '--')
ax[1].axhline(1, color = 'k', linestyle = '--')
ax[1].set_xlim([0, N + 1])
ax[1].set_xlabel('Assets', fontsize = 14)
ax[1].xaxis.set_ticks(np.arange(1, N + 1, 1))
ax[1].legend(numpoints = 1, loc = 'best', fontsize = 13)
plt.show()
```
### Bayes interpretation of the Black-Litterman recommendation
Consider the following Bayesian interpretation of the Black-Litterman recommendation.
The prior belief over the mean excess returns is consistent with the market portfolio and is given by
$$ \mu \sim \mathcal{N}(\mu_{BL}, \Sigma).$$
Given a particular realization of the mean excess returns $\mu$ one observes the average excess returns $\hat \mu$ on the market according to the distribution
$$\hat \mu \mid \mu, \Sigma \sim \mathcal{N}(\mu, \tau\Sigma),$$
where $\tau$ is typically small, capturing the idea that the variation in the mean is smaller than the variation of the individual random variable.
Given the realized excess returns one should then update the prior over the mean excess returns according to Bayes rule. The corresponding posterior over mean excess returns is normally distributed with mean
$$ (\Sigma^{-1} + (\tau \Sigma)^{-1})^{-1} (\Sigma^{-1}\mu_{BL} + (\tau \Sigma)^{-1} \hat \mu)$$
The covariance matrix is
$$ (\Sigma^{-1} + (\tau \Sigma)^{-1})^{-1}.$$
Hence, the Black-Litterman recommendation is consistent with the Bayes update of the prior over the mean excess returns in light of the realized average excess returns on the market.
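As a quick numerical check of this equivalence (the numbers below are synthetic and unrelated to the estimates above), we can verify that the Bayesian posterior mean coincides with the mixture formula (8):
```
# Synthetic check that the Bayes posterior mean equals the mixture formula (8).
# All numbers here are made up for illustration only.
import numpy as np

rng = np.random.default_rng(0)
n = 3
tau_chk = 0.5
A = rng.standard_normal((n, n))
Sigma_chk = A @ A.T + n * np.eye(n)          # a positive definite "covariance matrix"
mu_prior = rng.standard_normal((n, 1))       # plays the role of mu_BL
mu_data = rng.standard_normal((n, 1))        # plays the role of hat mu

P_prior = np.linalg.inv(Sigma_chk)           # prior precision
P_view = np.linalg.inv(tau_chk * Sigma_chk)  # precision of the observed average
post_mean = np.linalg.solve(P_prior + P_view, P_prior @ mu_prior + P_view @ mu_data)
post_cov = np.linalg.inv(P_prior + P_view)   # posterior covariance from the formula above

# Because the two covariance matrices are proportional, the mixture reduces to a
# simple weighted average, which the posterior mean reproduces:
mixture = (tau_chk * mu_prior + mu_data) / (1 + tau_chk)
print(np.allclose(post_mean, mixture))       # True
```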
### Curve Decolletage
Consider two independent "competing" views on the excess market returns.
$$ \vec r_e \sim {\mathcal N}( \mu_{BL}, \Sigma) $$
and
$$ \vec r_e \sim {\mathcal N}( \hat{\mu}, \tau\Sigma). $$
A special feature of the multivariate normal random variable $Z$ is that its density function depends only on the (Euclidean) length of its standardized realization. Formally, let the $k$-dimensional random vector be $Z\sim \mathcal{N}(\mu, \Sigma)$, then $\bar{Z} \equiv \Sigma^{-1/2}(Z-\mu)\sim \mathcal{N}(\mathbf{0}, I)$ and so the points where the density takes the same value can be described by the ellipse
$$\bar z \cdot \bar z = (z - \mu)'\Sigma^{-1}(z - \mu) = \bar d \quad \quad (10)$$
where $\bar d\in\mathbb{R}_+$ denotes a (transformation of a) particular density value. The curves defined by equation (10) can be labelled iso-likelihood ellipses.
> **Remark:** More generally there is a class of density functions that possesses this feature, i.e.
> $$\exists g: \mathbb{R}_+ \mapsto \mathbb{R}_+ \ \ \text{ and } \ \ c \geq 0, \ \ \text{s.t. the density } \ \ f \ \ \text{of} \ \ Z \ \ \text{ has the form } \quad f(z) = c g(z\cdot z)$$
> This property is called **spherical symmetry** (see p 81. in Leamer (1978)).
In our specific example, we can use the pair $(\bar d_1, \bar d_2)$ as being two "likelihood" values for which the corresponding isolikelihood ellipses in the excess return space are given by
\begin{align}
(\vec r_e - \mu_{BL})'\Sigma^{-1}(\vec r_e - \mu_{BL}) &= \bar d_1 \\
(\vec r_e - \hat \mu)'\left(\tau \Sigma\right)^{-1}(\vec r_e - \hat \mu) &= \bar d_2
\end{align}
Notice that for particular $\bar d_1$ and $\bar d_2$ values the two ellipses have a tangency point. These tangency points, indexed by the pairs $(\bar d_1, \bar d_2)$, characterize points $\vec r_e$ from which there exists no deviation where one can increase the likelihood of one view without decreasing the likelihood of the other view. The pairs $(\bar d_1, \bar d_2)$ for which there is such a point outlines a curve in the excess return space. This curve is reminiscent of the Pareto curve in an Edgeworth-box setting.
Leamer (1978) calls this curve *information contract curve* and describes it by the following program: maximize the likelihood of one view, say the Black-Litterman recommendation, while keeping the likelihood of the other view at least at a prespecified constant $\bar d_2$.
\begin{align*}
\bar d_1(\bar d_2) &\equiv \min_{\vec r_e} \ \ (\vec r_e - \mu_{BL})'\Sigma^{-1}(\vec r_e - \mu_{BL}) \\
\text{subject to } \quad &(\vec r_e - \hat\mu)'(\tau\Sigma)^{-1}(\vec r_e - \hat \mu) \leq \bar d_2
\end{align*}
Denoting the multiplier on the constraint by $\lambda$, the first-order condition is
$$ 2(\vec r_e - \mu_{BL} )'\Sigma^{-1} + \lambda 2(\vec r_e - \hat\mu)'(\tau\Sigma)^{-1} = \mathbf{0} $$
which defines the *information contract curve* between $\mu_{BL}$ and $\hat \mu$
$$ \vec r_e = (\Sigma^{-1} + \lambda (\tau \Sigma)^{-1})^{-1} (\Sigma^{-1} \mu_{BL} + \lambda (\tau \Sigma)^{-1}\hat \mu ) \quad \quad (11)$$
Note that if $\lambda = 1$, (11) is equivalent to (8) and it identifies one point on the information contract curve. Furthermore, because $\lambda$ is a function of the bound $\bar d_2$ on the RHS of the constraint, by varying $\bar d_2$ (or $\lambda$), we can trace out the whole curve as the figure below illustrates.
```
#========================================
# Draw a new sample for two assets
#========================================
np.random.seed(1987102)
N_new = 2 # number of assets
T_new = 200 # sample size
tau_new = .8
# random market portfolio (sum is normalized to 1)
w_m_new = np.random.rand(N_new)
w_m_new = w_m_new/(w_m_new.sum())
Mu_new = (np.random.randn(N_new) + 5)/100
S_new = np.random.randn(N_new, N_new)
V_new = S_new @ S_new.T
Sigma_new = V_new * (w_m_new @ Mu_new)**2 / (w_m_new @ V_new @ w_m_new)
excess_return_new = stat.multivariate_normal(Mu_new, Sigma_new)
sample_new = excess_return_new.rvs(T_new)
Mu_est_new = sample_new.mean(0).reshape(N_new, 1)
Sigma_est_new = np.cov(sample_new.T)
sigma_m_new = w_m_new @ Sigma_est_new @ w_m_new
d_m_new = (w_m_new @ Mu_est_new)/sigma_m_new
mu_m_new = (d_m_new * Sigma_est_new @ w_m_new).reshape(N_new, 1)
N_r1, N_r2 = 100, 100
r1 = np.linspace(-0.04, .1, N_r1)
r2 = np.linspace(-0.02, .15, N_r2)
lamb_grid = np.linspace(.001, 20, 100)
curve = np.asarray([Black_Litterman(l, mu_m_new, Mu_est_new, Sigma_est_new,
tau_new*Sigma_est_new).flatten() for l in lamb_grid])
lamb_slider = FloatSlider(min = .1, max = 7, step = .5, value = 1)
@interact(lamb = lamb_slider)
def decolletage(lamb):
dist_r_BL = stat.multivariate_normal(mu_m_new.squeeze(), Sigma_est_new)
dist_r_hat = stat.multivariate_normal(Mu_est_new.squeeze(), tau_new * Sigma_est_new)
X, Y = np.meshgrid(r1, r2)
Z_BL = np.zeros((N_r1, N_r2))
Z_hat = np.zeros((N_r1, N_r2))
for i in range(N_r1):
for j in range(N_r2):
Z_BL[i, j] = dist_r_BL.pdf(np.hstack([X[i, j], Y[i, j]]))
Z_hat[i, j] = dist_r_hat.pdf(np.hstack([X[i, j], Y[i, j]]))
mu_tilde_new = Black_Litterman(lamb, mu_m_new, Mu_est_new, Sigma_est_new,
tau_new * Sigma_est_new).flatten()
fig, ax = plt.subplots(figsize = (10, 6))
ax.contourf(X, Y, Z_hat, cmap = 'viridis', alpha =.4)
ax.contourf(X, Y, Z_BL, cmap = 'viridis', alpha =.4)
ax.contour(X, Y, Z_BL, [dist_r_BL.pdf(mu_tilde_new)], cmap = 'viridis', alpha = .9)
ax.contour(X, Y, Z_hat, [dist_r_hat.pdf(mu_tilde_new)], cmap = 'viridis', alpha = .9)
ax.scatter(Mu_est_new[0], Mu_est_new[1])
ax.scatter(mu_m_new[0], mu_m_new[1])
ax.scatter(mu_tilde_new[0], mu_tilde_new[1], color = 'k', s = 20*3)
ax.plot(curve[:, 0], curve[:, 1], color = 'k')
ax.axhline(0, color = 'k', alpha = .8)
ax.axvline(0, color = 'k', alpha = .8)
ax.set_xlabel(r'Excess return on the first asset, $r_{e, 1}$', fontsize = 12)
ax.set_ylabel(r'Excess return on the second asset, $r_{e, 2}$', fontsize = 12)
ax.text(Mu_est_new[0] + 0.003, Mu_est_new[1], r'$\hat{\mu}$', fontsize = 20)
ax.text(mu_m_new[0] + 0.003, mu_m_new[1] + 0.005, r'$\mu_{BL}$', fontsize = 20)
```
Note that the curve connecting the two points $\hat \mu$ and $\mu_{BL}$ is a straight line, which comes from the fact that the covariance matrices of the two competing distributions (views) are proportional to each other.
To illustrate that this is not necessarily the case, consider another example using the same parameter values, except that the "second view" constituting the constraint has covariance matrix $\tau I$ instead of $\tau \Sigma$. This leads to the following figure, in which the curve connecting $\hat \mu$ and $\mu_{BL}$ is bending.
```
lamb_grid2 = np.linspace(.001, 20000, 1000)
curve2 = np.asarray([Black_Litterman(l, mu_m_new, Mu_est_new, Sigma_est_new,
tau_new*np.eye(N_new)).flatten() for l in lamb_grid2])
lamb_slider2 = FloatSlider(min = 5, max = 1500, step = 100, value = 200)
@interact(lamb = lamb_slider2)
def decolletage(lamb):
dist_r_BL = stat.multivariate_normal(mu_m_new.squeeze(), Sigma_est_new)
dist_r_hat = stat.multivariate_normal(Mu_est_new.squeeze(), tau_new * np.eye(N_new))
X, Y = np.meshgrid(r1, r2)
Z_BL = np.zeros((N_r1, N_r2))
Z_hat = np.zeros((N_r1, N_r2))
for i in range(N_r1):
for j in range(N_r2):
Z_BL[i, j] = dist_r_BL.pdf(np.hstack([X[i, j], Y[i, j]]))
Z_hat[i, j] = dist_r_hat.pdf(np.hstack([X[i, j], Y[i, j]]))
mu_tilde_new = Black_Litterman(lamb, mu_m_new, Mu_est_new, Sigma_est_new,
tau_new * np.eye(N_new)).flatten()
fig, ax = plt.subplots(figsize = (10, 6))
ax.contourf(X, Y, Z_hat, cmap = 'viridis', alpha = .4)
ax.contourf(X, Y, Z_BL, cmap = 'viridis', alpha = .4)
ax.contour(X, Y, Z_BL, [dist_r_BL.pdf(mu_tilde_new)], cmap='viridis', alpha =.9)
ax.contour(X, Y, Z_hat, [dist_r_hat.pdf(mu_tilde_new)], cmap='viridis', alpha =.9)
ax.scatter(Mu_est_new[0], Mu_est_new[1])
ax.scatter(mu_m_new[0], mu_m_new[1])
ax.scatter(mu_tilde_new[0], mu_tilde_new[1], color = 'k', s = 20*3)
ax.plot(curve2[:, 0], curve2[:, 1], color = 'k')
ax.axhline(0, color = 'k', alpha = .8)
ax.axvline(0, color = 'k', alpha = .8)
ax.set_xlabel(r'Excess return on the first asset, $r_{e, 1}$', fontsize = 12)
ax.set_ylabel(r'Excess return on the second asset, $r_{e, 2}$', fontsize = 12)
ax.text(Mu_est_new[0] + 0.003, Mu_est_new[1], r'$\hat{\mu}$', fontsize = 20)
ax.text(mu_m_new[0] + 0.003, mu_m_new[1] + 0.005, r'$\mu_{BL}$', fontsize = 20)
```
### Black-Litterman recommendation as regularization
First, consider the OLS regression.
$$\min_{\beta} \Vert X\beta - y \Vert^2 $$
which yields the solution
$$ \hat{\beta}_{OLS} = (X'X)^{-1}X'y.$$
A common performance measure of estimators is the *mean squared error (MSE)*. An estimator is "good" if its MSE is relatively small. Suppose that $\beta_0$ is the "true" value of the coefficient, then the MSE of the OLS estimator is
$$\text{mse}(\hat \beta_{OLS}, \beta_0) := \mathbb E \Vert \hat \beta_{OLS} - \beta_0\Vert^2 = \underbrace{\mathbb E \Vert \hat \beta_{OLS} - \mathbb E \hat \beta_{OLS}\Vert^2}_{\text{variance}} + \underbrace{\Vert \mathbb E \hat\beta_{OLS} - \beta_0\Vert^2}_{\text{bias}}$$
From this decomposition one can see that in order for the MSE to be small, both the bias and the variance terms must be small. For example, consider the case when $X$ is a $T$-vector of ones (where $T$ is the sample size), so $\hat\beta_{OLS}$ is simply the sample average, while $\beta_0\in \mathbb{R}$ is defined by the true mean of $y$. In this example the MSE is
$$ \text{mse}(\hat \beta_{OLS}, \beta_0) = \underbrace{\frac{1}{T^2} \mathbb E \left(\sum_{t=1}^{T} (y_{t}- \beta_0)\right)^2 }_{\text{variance}} + \underbrace{0}_{\text{bias}}$$
However, because there is a trade-off between the estimator's bias and variance, there are cases when by permitting a small bias we can substantially reduce the variance so overall the MSE gets smaller. A typical scenario when this proves to be useful is when the number of coefficients to be estimated is large relative to the sample size.
In these cases one approach to handle the bias-variance trade-off is the so called *Tikhonov regularization*. A general form with regularization matrix $\Gamma$ can be written as
$$\min_{\beta} \Big\{ \Vert X\beta - y \Vert^2 + \Vert \Gamma (\beta - \tilde \beta) \Vert^2 \Big\}$$
which yields the solution
$$ \hat{\beta}_{Reg} = (X'X + \Gamma'\Gamma)^{-1}(X'y + \Gamma'\Gamma\tilde \beta).$$
Substituting the value of $\hat{\beta}_{OLS}$ yields
$$ \hat{\beta}_{Reg} = (X'X + \Gamma'\Gamma)^{-1}(X'X\hat{\beta}_{OLS} + \Gamma'\Gamma\tilde \beta).$$
Often, the regularization matrix takes the form $\Gamma = \lambda I$ with $\lambda>0$ and $\tilde \beta = \mathbf{0}$. Then the Tikhonov regularization is equivalent to what is called *ridge regression* in statistics.
To illustrate how this estimator addresses the bias-variance trade-off, we compute the MSE of the ridge estimator
$$ \text{mse}(\hat \beta_{\text{ridge}}, \beta_0) = \underbrace{\frac{1}{(T+\lambda)^2} \mathbb E \left(\sum_{t=1}^{T} (y_{t}- \beta_0)\right)^2 }_{\text{variance}} + \underbrace{\left(\frac{\lambda}{T+\lambda}\right)^2 \beta_0^2}_{\text{bias}}$$
The ridge regression shrinks the coefficients of the estimated vector towards zero relative to the OLS estimates thus reducing the variance term at the cost of introducing a "small" bias. However, there is nothing special about the zero vector. When $\tilde \beta \neq \mathbf{0}$ shrinkage occurs in the direction of $\tilde \beta$.
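The following cell gives a minimal illustration of this shrinkage effect on made-up data (none of the numbers below come from the analysis above): as the penalty weight grows, the regularized estimate moves from the OLS solution towards the target $\tilde \beta$.
```
# Minimal illustration of Tikhonov/ridge shrinkage towards a target vector.
# Data and parameter values are made up for illustration only.
import numpy as np

rng = np.random.default_rng(1)
T_obs, k = 50, 3
X = rng.standard_normal((T_obs, k))
beta_true = np.array([1.0, -2.0, 0.5])
y = X @ beta_true + rng.standard_normal(T_obs)

beta_tilde = np.array([0.0, 0.0, 2.0])          # shrinkage target
for lam in [0.0, 10.0, 100.0, 1000.0]:
    GtG = lam * np.eye(k)                       # Gamma'Gamma with penalty weight lam
    beta_reg = np.linalg.solve(X.T @ X + GtG, X.T @ y + GtG @ beta_tilde)
    print(f"penalty = {lam:6.1f} -> estimate = {np.round(beta_reg, 3)}")
# penalty = 0 reproduces OLS; a very large penalty pushes the estimate to beta_tilde.
```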
Now, we can give a regularization interpretation of the Black-Litterman portfolio recommendation. To this end, simplify first the equation (8) characterizing the Black-Litterman recommendation
\begin{align*}
\tilde \mu &= (\Sigma^{-1} + (\tau \Sigma)^{-1})^{-1} (\Sigma^{-1}\mu_{BL} + (\tau \Sigma)^{-1}\hat \mu) \\
&= (1 + \tau^{-1})^{-1}\Sigma \Sigma^{-1} (\mu_{BL} + \tau ^{-1}\hat \mu) \\
&= (1 + \tau^{-1})^{-1} ( \mu_{BL} + \tau ^{-1}\hat \mu)
\end{align*}
In our case, $\hat \mu$ is the vector of estimated mean excess returns of the securities. This can be written as a (stacked) regression on a constant, where
* $y$ is the stacked vector of observed excess returns of size $(N T\times 1)$ -- $N$ securities and $T$ observations
* $X = \sqrt{T^{-1}}(I_{N} \otimes \iota_T)$ where $I_N$ is the identity matrix and $\iota_T$ is a column vector of ones.
Correspondingly, the OLS regression of $y$ on $X$ would yield the mean excess returns as coefficients. With $\Gamma = \sqrt{\tau T^{-1}}(I_{N} \otimes \iota_T)$ we can write the regularized version of the mean excess return estimation.
\begin{align*}
\hat{\beta}_{Reg} &= (X'X + \Gamma'\Gamma)^{-1}(X'X\hat{\beta}_{OLS} + \Gamma'\Gamma\tilde \beta) \\
&= (1 + \tau)^{-1}X'X (X'X)^{-1} (\hat \beta_{OLS} + \tau \tilde \beta) \\
&= (1 + \tau)^{-1} (\hat \beta_{OLS} + \tau \tilde \beta) \\
&= (1 + \tau^{-1})^{-1} ( \tau^{-1}\hat \beta_{OLS} + \tilde \beta)
\end{align*}
Given that $\hat \beta_{OLS} = \hat \mu$ and $\tilde \beta = \mu_{BL}$ in the Black-Litterman model we have the following interpretation of the model's recommendation.
The estimated (personal) view of the mean excess returns, $\hat{\mu}$, which would lead to extreme long-short positions, is "shrunk" towards the market-implied view, $\mu_{BL}$,
which leads to the more conservative market portfolio.
So the Black-Litterman procedure results in a recommendation that is a compromise between the conservative market portfolio and the more extreme portfolio that is implied by estimated "personal" views.
### Digression on ${\sf T}$ operator
The Black-Litterman approach is partly inspired by the econometric insight that it
is easier to estimate covariances of excess returns than the means. That is what gave
Black and Litterman license to adjust investors' perception of mean excess returns while not tampering with the covariance matrix of excess returns.
Robust control theory is another approach that also hinges on adjusting mean excess
returns but not covariances.
Associated with a robust control problem is what Hansen and Sargent call
a ${\sf T}$ operator.
Let's define the ${\sf T}$ operator as it applies to the problem at hand.
Let $x$ be an $n \times 1$ Gaussian random vector with mean vector $\mu$ and
covariance matrix $\Sigma = C C'$. This means that $x$ can be represented
as
$$ x = \mu + C \epsilon $$
where $\epsilon \sim {\mathcal N}(0,I)$.
Let $\phi(\epsilon)$ denote the associated standardized Gaussian density.
Let $m(\epsilon,\mu)$ be a **likelihood ratio**, meaning that it satisfies
* $m(\epsilon, \mu) > 0 $
* $ \int m(\epsilon,\mu) \phi(\epsilon) d \epsilon =1 $
That is, $m(\epsilon, \mu)$ is a nonnegative random variable with mean 1.
Multiplying $\phi(\epsilon) $ by the likelihood ratio $m(\epsilon, \mu)$ produces
a distorted distribution for $\epsilon$, namely,
$$ \tilde \phi(\epsilon) = m(\epsilon,\mu) \phi(\epsilon) $$
The next concept that we need is the **entropy** of the distorted distribution
$\tilde \phi$ with respect to $\phi$.
**Entropy** is defined as
$$ {\rm ent} = \int \log m(\epsilon,\mu) m(\epsilon,\mu) \phi(\epsilon) d \epsilon $$
or
$$ {\rm ent} = \int \log m(\epsilon,\mu) \tilde \phi(\epsilon) d \epsilon $$
That is, relative entropy is the expected value of the likelihood ratio $m$ where the
expectation is taken with respect to the twisted density $\tilde \phi$.
Relative entropy is nonnegative. It is a measure of the discrepancy between two probability
distributions. As such, it plays an important role in governing the behavior of statistical
tests designed to discriminate one probability distribution from another.
We are ready to define the ${\sf T}$ operator.
Let $V(x)$ be a value function.
Define
$$ \begin{aligned} {\sf T}\left(V(x)\right) & = \min_{m(\epsilon,\mu)} \int m(\epsilon,\mu)[V(\mu + C \epsilon) + \theta \log m(\epsilon,\mu) ] \phi(\epsilon) d \epsilon \\
& = - \theta \log \int \exp \left( \frac{- V(\mu + C \epsilon)}{\theta} \right) \phi(\epsilon) d \epsilon \end{aligned} $$
This asserts that ${\sf T}$ is an indirect utility function for a minimization problem in
which an ``evil agent'' chooses a distorted probability distribution $\tilde \phi$ to
lower expected utility, subject to a penalty term that gets bigger the larger is relative
entropy.
Here the penalty parameter $\theta \in [\underline \theta, +\infty] $ is a robustness parameter; when it is $+\infty$, there is no scope for the minimizing agent to distort the
distribution, so no robustness to alternative distributions is acquired. As $\theta$ is lowered, more robustness is achieved.
**Note:** The ${\sf T}$ operator is sometimes called a *risk-sensitivity* operator.
We shall apply ${\sf T}$ to the special case of a linear value function $w'
(\vec r - r_f {\bf 1}) $ where $\vec r - r_f {\bf 1} \sim \mathcal N(\mu , \Sigma)$
or $\vec r - r_f {\bf 1} = \mu + C \epsilon$ and $\epsilon \sim {\mathcal N}(0,I)$.
The associated worst-case distribution of $\epsilon$ is Gaussian with mean $v =-\theta^{-1} C' w$ and covariance matrix $I$. (When the value function is
affine, the worst-case distribution distorts the mean vector of $\epsilon$ but not the covariance matrix of $\epsilon$.)
For utility function argument $w'
(\vec r - r_f {\bf 1}) $,
$$ {\sf T}\left[ w' (\vec r - r_f {\bf 1}) \right] = w' \mu - \frac{1}{2\theta} w' \Sigma w $$
and entropy is
$$ \frac{v'v}{2} = \frac{1}{2\theta^2} w' C C' w. $$
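Before applying ${\sf T}$ to the portfolio problem, here is a small Monte Carlo sketch (with arbitrary illustrative numbers) confirming the closed form above for a linear value function:
```
# Monte Carlo check of T(w'(r - r_f 1)) = w'mu - (1/(2 theta)) w'Sigma w
# for a linear value function. All numbers are illustrative only.
import numpy as np

rng = np.random.default_rng(2)
n, theta = 4, 2.0
C = 0.1 * rng.standard_normal((n, n))
Sigma_T = C @ C.T
mu_T = 0.05 * rng.standard_normal(n)
w_T = rng.random(n)

eps = rng.standard_normal((200_000, n))
V = (mu_T + eps @ C.T) @ w_T                  # draws of w'(mu + C eps)
T_mc = -theta * np.log(np.mean(np.exp(-V / theta)))
T_closed = w_T @ mu_T - (w_T @ Sigma_T @ w_T) / (2 * theta)
print(T_mc, T_closed)                         # equal up to Monte Carlo error
```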
#### A robust mean-variance portfolio model
According to criterion (1), the mean-variance portfolio choice problem chooses $w$ to maximize
$$ E [w' ( \vec r - r_f {\bf 1})] - \frac{\delta}{2} {\rm var} [ w' ( \vec r - r_f {\bf 1}) ] \quad (12) $$
which equals
$$ w'\mu - \frac{\delta}{2} w' \Sigma w $$
A robust decision maker can be modelled as replacing the mean return $ E [w' ( \vec r - r_f {\bf 1})]$
with the risk-sensitive criterion
$$ {\sf T} [w' ( \vec r - r_f {\bf 1})] = w' \mu - \frac{1}{2 \theta} w' \Sigma w $$
that comes from replacing the mean $\mu$ of $ \vec r - r_f {\bf 1} $ with the worst-case mean
$$ \mu - \theta^{-1} \Sigma w $$
Notice how the worst-case mean vector depends on the portfolio $w$.
The operator ${\sf T}$ is the indirect utility function that emerges from solving
a problem in which an agent who chooses probabilities does so in order to minimize
the expected utility of a maximizing agent (in our case, the maximizing agent chooses
portfolio weights $w$).
The robust version of the mean-variance portfolio choice problem is then
to choose a portfolio $w$ that maximizes
$$ {\sf T} [w' ( \vec r - r_f {\bf 1})] - \frac{\delta}{2} w' \Sigma w , \quad (13) $$
or
$$ w' \mu - \frac{1}{2\theta} w' \Sigma w - \frac{\delta}{2} w' \Sigma w, \quad (14) $$
The maximizer of (14) is
$$ w_{\rm rob} = \frac{1}{\delta + \gamma } \Sigma^{-1} \mu , \quad (15) $$
where $\gamma \equiv \theta^{-1}$ is sometimes called the risk-sensitivity parameter.
An increase in the risk-sensitivity parameter $\gamma$ shrinks the portfolio weights toward zero in the same way that an increase in risk aversion does.
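To see how this shrinkage works in practice, the cell below compares $w_{\rm rob}$ for a few values of $\gamma$ using toy inputs (the numbers are illustrative and not tied to the estimates above):
```
# Robust portfolio weights w_rob = (delta + gamma)^{-1} Sigma^{-1} mu for several
# values of the risk-sensitivity parameter gamma. Toy numbers for illustration only.
import numpy as np

rng = np.random.default_rng(3)
n = 5
A = rng.standard_normal((n, n))
Sigma_r = A @ A.T / n + 0.05 * np.eye(n)      # positive definite covariance
mu_r = 0.02 + 0.01 * rng.random(n)            # small positive risk premia
delta_r = 2.0

for gamma in [0.0, 1.0, 5.0, 20.0]:
    w_rob = np.linalg.solve((delta_r + gamma) * Sigma_r, mu_r)
    print(f"gamma = {gamma:5.1f} -> weights = {np.round(w_rob, 3)}")
# gamma = 0 reproduces the standard mean-variance weights; increasing gamma scales
# every position towards zero, just like an increase in the risk-aversion parameter.
```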
----------------------------------------
# Appendix
We want to illustrate the "folk theorem" that with high or moderate frequency data, it is more difficult to estimate means than variances.
In order to operationalize this statement, we take two analog estimators:
- sample average: $\bar X_N = \frac{1}{N}\sum_{i=1}^{N} X_i$
- sample variance: $S_N = \frac{1}{N-1}\sum_{i=1}^{N} (X_i - \bar X_N)^2$
to estimate the unconditional mean and unconditional variance of the random variable $X$, respectively.
To measure the "difficulty of estimation", we use *mean squared error* (MSE), that is the average squared difference between the estimator and the true value. Assuming that the process $\{X_i\}$ is ergodic, both analog estimators are known to converge to their true values as the sample size $N$ goes to infinity. More precisely, for all $\varepsilon > 0$,
\begin{align}
\lim_{N\to \infty} \ \ P\left\{ \left |\bar X_N - \mathbb E X \right| > \varepsilon \right\} = 0 \quad \quad \text{and}\quad \quad \lim_{N\to \infty} \ \ P \left\{ \left| S_N - \mathbb V X \right| > \varepsilon \right\} = 0
\end{align}
A necessary condition for these convergence results is that the associated MSEs vanish as $N$ goes to infinity, or in other words,
$$\text{MSE}(\bar X_N, \mathbb E X) = o(1) \quad \quad \text{and} \quad \quad \text{MSE}(S_N, \mathbb V X) = o(1)$$
Even if the MSEs converge to zero, the associated rates might be different. Looking at the limit of the *relative MSE* (as the sample size grows to infinity)
$$ \frac{\text{MSE}(S_N, \mathbb V X)}{\text{MSE}(\bar X_N, \mathbb E X)} = \frac{o(1)}{o(1)} \underset{N \to \infty}{\to} B $$
can inform us about the relative (asymptotic) rates.
We will show that in general, with dependent data, the limit $B$ depends on the sampling frequency. In particular, we find that the rate of convergence of the variance estimator is less sensitive to increased sampling frequency than the rate of convergence of the mean estimator. Hence, we can expect the relative asymptotic rate, $B$, to get smaller with higher frequency data, illustrating that "it is more difficult to estimate means than variances". That is, we need significantly more data to obtain a given precision of the mean estimate than for our variance estimate.
### A special case -- i.i.d. sample
We start our analysis with the benchmark case of iid data. Consider a sample of size $N$ generated by the following iid process,
$$X_i \sim \mathcal{N}(\mu, \sigma^2).$$
Taking $\bar X_N$ to estimate the mean, the MSE is
$$ \text{MSE}(\bar X_N, \mu) = \frac{\sigma^2}{N} .$$
Taking $S_N$ to estimate the variance, the MSE is
$$ \text{MSE}(S_N, \sigma^2) = \frac{2\sigma^4}{N-1} .$$
Both estimators are unbiased and hence the MSEs reflect the corresponding variances of the estimators. Furthermore, both MSEs are $o(1)$ with a (multiplicative) factor of difference in their rates of convergence:
$$ \frac{\text{MSE}(S_N, \sigma^2)}{\text{MSE}(\bar X_N, \mu)} = \frac{2N\sigma^2}{N-1} \quad \underset{N \to \infty}{\to} \quad 2\sigma^2$$
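A quick simulation (a sketch with arbitrary parameter values) confirms this limit:
```
# Simulation check of the iid relative MSE: MSE(S_N) / MSE(X_N bar) -> 2 sigma^2.
# Parameter values are arbitrary.
import numpy as np

rng = np.random.default_rng(4)
mu_chk, sigma_chk, N_chk, M_chk = 1.0, 2.0, 500, 20_000
draws = rng.normal(mu_chk, sigma_chk, size=(M_chk, N_chk))

mse_mean_chk = np.mean((draws.mean(axis=1) - mu_chk)**2)
mse_var_chk = np.mean((draws.var(axis=1, ddof=1) - sigma_chk**2)**2)
print(mse_var_chk / mse_mean_chk, 2 * sigma_chk**2)   # both close to 8
```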
We are interested in how this (asymptotic) relative rate of convergence changes as increasing sampling frequency puts dependence into the data.
### Dependence and sampling frequency
To investigate how sampling frequency affects relative rates of convergence, we assume that the data are generated by a mean-reverting continuous time process of the form
$$dX_t = -\kappa (X_t -\mu)dt + \sigma dW_t\quad\quad $$
where $\mu$ is the unconditional mean, $\kappa > 0$ is a persistence parameter,
and $\{W_t\}$ is a standardized Brownian motion.
Observations arising from this system in particular discrete periods $\mathcal T(h) \equiv \{nh : n \in \mathbb Z \}$ with $h>0$ can be described by the following process
$$X_{t+1} = (1 - \exp(-\kappa h))\mu + \exp(-\kappa h)X_t + \epsilon_{t, h}$$
where
$$\epsilon_{t, h} \sim \mathcal{N}(0, \Sigma_h) \quad \text{with}\quad \Sigma_h = \frac{\sigma^2(1-\exp(-2\kappa h))}{2\kappa}$$
We call $h$ the *frequency* parameter, whereas $n$ represents the number of *lags* between observations. Hence, the effective distance between two observations $X_t$ and $X_{t+n}$ in the discrete time notation is equal to $h\cdot n$ in terms of the underlying continuous time process.
Straightforward calculations show that the autocorrelation function for the stochastic process $\{X_{t}\}_{t\in \mathcal T(h)}$ is
$$\Gamma_h(n) \equiv \text{corr}(X_{t + h n}, X_t) = \exp(-\kappa h n)$$
and the autocovariance function is
$$\gamma_h(n) \equiv \text{cov}(X_{t + h n}, X_t) = \frac{\exp(-\kappa h n)\sigma^2}{2\kappa} .$$
It follows that if $n=0$, the unconditional variance is given by $\gamma_h(0) = \frac{\sigma^2}{2\kappa}$ irrespective of the sampling frequency.
The following figure illustrates how the dependence between the observations is related to sampling frequency.
- For any given $h$, the autocorrelation converges to zero as we increase the distance -- $n$ -- between the observations. This represents the "weak dependence" of the $X$ process.
- Moreover, for a fixed lag length, $n$, the dependence vanishes as the sampling frequency goes to infinity. In fact, letting $h$ go to $\infty$ gives back the case of i.i.d. data.
```
mu = .0
kappa = .1
sigma = .5
var_uncond = sigma**2 / (2 * kappa)
n_grid = np.linspace(0, 40, 100)
autocorr_h1 = np.exp(- kappa * n_grid * 1)
autocorr_h2 = np.exp(- kappa * n_grid * 2)
autocorr_h5 = np.exp(- kappa * n_grid * 5)
autocorr_h1000 = np.exp(- kappa * n_grid * 1e8)
fig, ax = plt.subplots(figsize = (8, 4))
ax.plot(n_grid, autocorr_h1, label = r'$h = 1$', color = 'darkblue', lw = 2)
ax.plot(n_grid, autocorr_h2, label = r'$h = 2$', color = 'darkred', lw = 2)
ax.plot(n_grid, autocorr_h5, label = r'$h = 5$', color = 'orange', lw = 2)
ax.plot(n_grid, autocorr_h1000, label = r'"$h = \infty$"', color = 'darkgreen', lw = 2)
ax.legend(loc = 'best', fontsize = 13)
ax.grid()
ax.set_title(r'Autocorrelation functions, $\Gamma_h(n)$', fontsize = 13)
ax.set_xlabel(r'Lags between observations, $n$', fontsize = 11)
plt.show()
```
### Frequency and the mean estimator
Consider again the AR(1) process generated by discrete sampling with frequency $h$. Assume that we have a sample of size $N$ and we would like to estimate the unconditional mean -- in our case the true mean is $\mu$.
Again, the sample average is an unbiased estimator of the unconditional mean.
$$ \mathbb{E}[\bar X_N] = \frac{1}{N}\sum_{i = 1}^N \mathbb{E}[X_i] = \mathbb{E}[X_0] = \mu $$
The variance of the sample mean is given by
\begin{align}
\mathbb{V}\left(\bar X_N\right) &= \mathbb{V}\left(\frac{1}{N}\sum_{i = 1}^N X_i\right) \\
&= \frac{1}{N^2} \left(\sum_{i = 1}^N \mathbb{V}(X_i) + 2 \sum_{i = 1}^{N-1} \sum_{s = i+1}^N \text{cov}(X_i, X_s) \right) \\
&= \frac{1}{N^2} \left( N \gamma_h(0) + 2 \sum_{i=1}^{N-1} i \cdot \gamma_h\left(N - i\right) \right) \\
&= \frac{1}{N^2} \left( N \frac{\sigma^2}{2\kappa} + 2 \sum_{i=1}^{N-1} i \cdot \exp(-\kappa h (N - i)) \frac{\sigma^2}{2\kappa} \right)
\end{align}
It is explicit in the above equation that time dependence in the data inflates the variance of the mean estimator through the covariance terms. Moreover, as we can see, a higher sampling frequency---smaller $h$---makes all the covariance terms larger everything else being fixed. This implies a relatively slower rate of convergence of the sample average for high frequency data. Intuitively, the stronger dependence across observations for high frequency data reduces the "information content" of each observation relative to the iid case.
We can upper bound the variance term in the following way.
\begin{align}
\mathbb{V}(\bar X_N) &= \frac{1}{N^2} \left( N \frac{\sigma^2}{2\kappa} + 2 \sum_{i=1}^{N-1} i \cdot \exp(-\kappa h (N - i)) \frac{\sigma^2}{2\kappa} \right) \\
&\leq \frac{\sigma^2}{2\kappa N} \left(1 + 2 \sum_{i=1}^{N-1} \exp(-\kappa h \cdot i) \right) \\
&\leq \underbrace{\frac{\sigma^2}{2\kappa N}}_{\text{i.i.d. case}} \left(1 + 2 \frac{1 - \exp(-\kappa h)^{N-1}}{1 - \exp(-\kappa h)} \right)
\end{align}
Asymptotically, the $\exp(-\kappa h)^{N-1}$ term vanishes and the dependence in the data inflates the benchmark iid variance by (at most) a factor of $\left(1 + 2 \frac{1}{1 - \exp(-\kappa h)} \right)$. This long-run factor is larger the higher is the frequency (the smaller is $h$).
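The cell below evaluates this inflation factor at a few sampling frequencies (reusing the value $\kappa = 0.1$ from the code above) to make the point concrete:
```
# Long-run inflation factor of the mean estimator's variance relative to the
# iid benchmark, evaluated at a few sampling frequencies.
import numpy as np

kappa_chk = 0.1                                # same persistence value as above
for h_chk in [0.1, 1.0, 5.0, 20.0]:
    factor = 1 + 2 / (1 - np.exp(-kappa_chk * h_chk))
    print(f"h = {h_chk:4.1f} -> variance inflated by at most a factor of {factor:7.1f}")
# Smaller h (higher sampling frequency) means stronger dependence and a larger factor.
```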
Therefore, we expect the asymptotic relative MSE, $B$, to change with time-dependent data. We just saw that the mean estimator's MSE is inflated roughly by a factor of $\left(1 + 2 \frac{1}{1 - \exp(-\kappa h)} \right)$. Unfortunately, the variance estimator's MSE is harder to derive. Nonetheless, we can approximate it by using (large sample) simulations, thus getting an idea about how the asymptotic relative MSE changes with the sampling frequency $h$ relative to the iid case that we computed in closed form.
```
def sample_generator(h, N, M):
phi = (1 - np.exp(- kappa * h)) * mu
rho = np.exp(- kappa * h)
s = sigma**2 * (1 - np.exp(-2 * kappa * h)) / (2 * kappa)
mean_uncond = mu
std_uncond = np.sqrt(sigma**2 / (2 * kappa))
eps_path = stat.norm(0, np.sqrt(s)).rvs((M, N))
y_path = np.zeros((M, N + 1))
y_path[:, 0] = stat.norm(mean_uncond, std_uncond).rvs(M)
for i in range(N):
y_path[:, i + 1] = phi + rho*y_path[:, i] + eps_path[:, i]
return y_path
# generate large sample for different frequencies
N_app, M_app = 1000, 30000 # sample size, number of simulations
h_grid = np.linspace(.1, 80, 30)
var_est_store = []
mean_est_store = []
labels = []
for h in h_grid:
labels.append(h)
sample = sample_generator(h, N_app, M_app)
mean_est_store.append(np.mean(sample, 1))
var_est_store.append(np.var(sample, 1))
var_est_store = np.array(var_est_store)
mean_est_store = np.array(mean_est_store)
# save mse of estimators
mse_mean = np.var(mean_est_store, 1) + (np.mean(mean_est_store, 1) - mu)**2
mse_var = np.var(var_est_store, 1) + (np.mean(var_est_store, 1) - var_uncond)**2
benchmark_rate = 2*var_uncond # iid case
#relative MSE for large samples
rate_h = mse_var/mse_mean
fig, ax = plt.subplots(figsize = (8, 5))
ax.plot(h_grid, rate_h, color = 'darkblue', lw = 2,
label = r'large sample relative MSE, $B(h)$')
ax.axhline(benchmark_rate, color = 'k', linestyle = '--', label = r'iid benchmark')
ax.set_title('Relative MSE for large samples as a function of sampling frequency \n MSE($S_N$) relative to MSE($\\bar X_N$)',
fontsize = 12)
ax.set_xlabel('Sampling frequency, $h$', fontsize = 11)
ax.set_ylim([1, 2.9])
ax.legend(loc = 'best', fontsize = 10)
plt.show()
```
The above figure illustrates the relationship between the asymptotic relative MSEs and the sampling frequency.
* We can see that with low frequency data -- large values of $h$ -- the ratio of asymptotic rates approaches the iid case.
* As $h$ gets smaller -- the higher the frequency -- the relative performance of the variance estimator is better in the sense that the ratio of asymptotic rates gets smaller. That is, as the time dependence gets more pronounced, the rate of convergence of the mean estimator's MSE deteriorates more than that of the variance estimator.
-----------------------------------------------------------
#### References
Black, F. and Litterman, R., 1992. "Global portfolio optimization". Financial analysts journal, 48(5), pp.28-43.
Dickey, J. 1975. "Bayesian alternatives to the F-test and least-squares estimate in the normal linear model", in: S.E. Fienberg and A. Zellner, eds., "Studies in Bayesian econometrics and statistics" (North-Holland, Amsterdam) 515-554.
Hansen, Lars Peter and Thomas J. Sargent. 2001. "Robust Control and Model Uncertainty." American Economic Review, 91(2): 60-66.
Leamer, E.E., 1978. **Specification searches: Ad hoc inference with nonexperimental data**, (Vol. 53). John Wiley & Sons Incorporated.
```
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation, rc
from IPython.display import HTML
%matplotlib inline
```
# Configurable parameters for pure pursuit
+ How fast do you want the robot to move? It is fixed at $v_{max}$ in this exercise
+ When can we declare the goal has been reached?
+ What is the lookahead distance? Determines the next position on the reference path that we want the vehicle to catch up to
```
vmax = 0.75
goal_threshold = 0.05
lookahead = 3.0
# Unicycle model
def simulate_unicycle(pose, v,w, dt=0.1):
x, y, t = pose
return x + v*np.cos(t)*dt, y + v*np.sin(t)*dt, t+w*dt
class PurePursuitTracker(object):
def __init__(self, x, y, v, lookahead = 3.0):
"""
Tracks the path defined by x, y at velocity v
x and y must be numpy arrays
v and lookahead are floats
"""
self.length = len(x)
self.ref_idx = 0 #index on the path that tracker is to track
self.lookahead = lookahead
self.x, self.y = x, y
self.v, self.w = v, 0 # For starting
def update(self, xc, yc, theta):
"""
Input: xc, yc, theta - current pose of the robot
Update v, w based on current pose
Returns True if trajectory is over.
"""
#Calculate ref_x, ref_y using current ref_idx
#Check if we reached the end of path, then return TRUE
#Two conditions must satisfy
#1. ref_idx exceeds length of traj
#2. ref_x, ref_y must be within goal_threshold
# Write your code to check end condition
ref_x, ref_y = self.x[self.ref_idx], self.y[self.ref_idx]
goal_x, goal_y = self.x[-1], self.y[-1]
if (self.ref_idx > self.length) and \
(np.linalg.norm([ref_x-goal_x, ref_y-goal_y])) < goal_threshold:
# Ended, within goal
return True
#End of path has not been reached
#update ref_idx using np.hypot([ref_x-xc, ref_y-yc]) < lookahead
while np.hypot(ref_x-xc, ref_y-yc) < lookahead:
self.ref_idx += 1 # Reached the current index, next
if self.ref_idx < self.length:
ref_x, ref_y = self.x[self.ref_idx], self.y[self.ref_idx]
else:
return True
#Find the anchor point
# this is the line we drew between (0, 0) and (x, y)
anchor = np.asarray([ref_x - xc, ref_y - yc])
#Remember right now this is drawn from current robot pose
#we have to rotate the anchor to (0, 0, pi/2)
#code is given below for this
theta = np.pi/2 - theta
rot = np.asarray([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
anchor = np.dot(rot, anchor)
Lsq = (anchor[0] ** 2 + anchor[1] **2) # dist to reference path^2
X = anchor[0] #cross-track error
#from the derivation in notes, plug in the formula for omega
self.w = -2*self.v*X/Lsq
return False
```
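Before running the full simulation, here is a quick sanity check of the steering law $\omega = -2 v X / L^2$ used in `update()` (the numbers are made up and not part of the assignment):
```
# Sanity check of the pure pursuit steering law w = -2*v*X / L^2 on made-up numbers.
v_chk = 0.75
X_chk, Y_chk = 0.5, 2.9        # target 0.5 m to the right and ~2.9 m ahead, robot frame
Lsq_chk = X_chk**2 + Y_chk**2
w_chk = -2 * v_chk * X_chk / Lsq_chk
print(f"cross-track error = {X_chk} m -> commanded turn rate = {w_chk:.3f} rad/s")
# A target straight ahead (X = 0) gives w = 0, so the robot simply drives straight.
```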
## Visualize given trajectory
```
x = np.arange(0, 50, 0.5)
y = [np.sin(idx / 5.0) * idx / 2.0 for idx in x]
#write code here
plt.figure()
plt.plot(x,y)
plt.show()
```
## Run the tracker simulation
1. Instantiate the tracker class
2. Initialize some starting pose
3. Simulate robot motion 1 step at a time - get $v$, $\omega$ from tracker, predict new pose using $v$, $\omega$, current pose in simulate_unicycle()
4. Stop simulation if tracker declares that end-of-path is reached
5. Record all parameters
```
#write code to instantiate the tracker class
tracker = PurePursuitTracker(x, y, vmax)
pose = -1, 0, np.pi/2 #arbitrary initial pose
x0,y0,t0 = pose # record it for plotting
traj =[]
while True:
#write the usual code to obtain successive poses
pose = simulate_unicycle(pose, tracker.v, tracker.w)
if tracker.update(*pose):
print("ARRIVED!!")
break
traj.append([*pose, tracker.w, tracker.ref_idx])
xs,ys,ts,ws,ids = zip(*traj)
plt.figure(figsize=(12,12))
plt.plot(x,y,label='Reference')
plt.quiver(x0,y0, np.cos(t0), np.sin(t0),scale=12)
plt.plot(xs,ys,label='Tracked')
xf,yf,tf = pose
plt.quiver(xf,yf, np.cos(tf), np.sin(tf),scale=12)
plt.title('Pure Pursuit trajectory')
plt.legend()
plt.grid()
```
# Visualize curvature
```
plt.figure(figsize=(12,12))
plt.title('Curvature')
plt.plot(np.abs(ws))
plt.grid()
```
## Animate
Make a video to plot the current pose of the robot and the reference pose it is trying to track. You can use `FuncAnimation` in Matplotlib. A reference can be found [here](http://louistiao.me/posts/notebooks/embedding-matplotlib-animations-in-jupyter-notebooks/)
```
# Convert trajectory to Numpy array
np_traj = np.array(traj) # N, 5 shape && float64
# Create figure and axis
fig, ax = plt.subplots()
fig.set_dpi(200)
ax.set_xlim((-5, 52))
ax.set_ylim((-17, 21))
# Get lines (to modify)
line_tracked, = ax.plot([], [], label="Tracked", lw=2.0)
line_ref = ax.plot(x,y,label='Reference')
ax.legend()
# Initialize everything
def init_func():
line_tracked.set_data([], []) # Clear tracking data
return (line_tracked, )
# Animation function
def animate(fnum):
line_tracked.set_data(np_traj[:fnum, 0], np_traj[:fnum, 1])
return (line_tracked, )
# Animation object
anim = animation.FuncAnimation(fig, animate, init_func=init_func,
frames=np_traj.shape[0], interval=20, blit=True)
HTML(anim.to_html5_video())
```
## Effect of noise in simulations
What happens if you add a bit of Gaussian noise to the simulate_unicycle() output? Is the tracker still robust?
The noise signifies that $v$, $\omega$ commands did not get realized exactly
```
# Unicycle model
def simulate_unicycle(pose, v,w, dt=0.1):
x, y, t = pose
t += np.random.normal(0.0, np.deg2rad(5)) # Localization orientation error
x += np.random.normal(0.0, 0.1)
y += np.random.normal(0.0, 0.1)
return x + v*np.cos(t)*dt, y + v*np.sin(t)*dt, t+w*dt
```
Everything else is the same as before
```
# Do the simulation
tracker = PurePursuitTracker(x, y, vmax)
pose_noisy = -1, 0, np.pi/2 #arbitrary initial pose
x0,y0,t0 = pose_noisy # record it for plotting
traj_noisy =[]
while True:
#write the usual code to obtain successive poses
pose_noisy = simulate_unicycle(pose_noisy, tracker.v, tracker.w)
if tracker.update(*pose_noisy):
print("ARRIVED!!")
break
traj_noisy.append([*pose_noisy, tracker.w, tracker.ref_idx])
# Visualize everything
xs,ys,ts,ws,ids = zip(*traj_noisy)
xsc, ysc, tsc, wsc, idsc = zip(*traj) # Clean trajectory
plt.figure(figsize=(12,12))
# Reference
plt.plot(x,y,label='Reference')
plt.plot(xs,ys,label='Tracked (noisy)', lw=2.0)
plt.plot(xsc, ysc, label='Tracked (clean)')
plt.quiver(x0,y0, np.cos(t0), np.sin(t0),scale=12)
xf,yf,tf = pose_noisy
plt.quiver(xf,yf, np.cos(tf), np.sin(tf),scale=12)
plt.title('Pure Pursuit trajectory')
plt.legend()
plt.grid()
```
# How to Understand and Manipulate the Periodogram of an Oscillating Star
---
## Learning Goals
By the end of this tutorial you will:
- Understand the key features of periodograms of oscillating stars.
- Understand how these features change depending on the type of star being studied.
- Be able to manipulate the periodogram to focus in on areas you're interested in.
- Be able to smooth a periodogram.
- Be able to remove features such as the convective background in solar-like oscillators.
## Introduction
The brightnesses of stars can oscillate — that is, vary over time — for many different reasons. For example, in the companion tutorials we explored light curves that oscillated due to an eclipsing binary pair transiting in front of one another, and we looked at a star that showed variability due to star spots on its surface rotating in and out of view.
In this tutorial, we will focus on *intrinsic* oscillators: stars that exhibit variability due to processes inside the stars. For example, one of these internal processes is the presence of standing waves trapped in the interior. When the light curve of a star is transformed into the frequency domain, such waves can be observed as distinct peaks in the frequency spectrum of the star. The branch of astronomy that focuses on studying these signals is called [*asteroseismology*](https://en.wikipedia.org/wiki/Asteroseismology).
Asteroseismology is an important tool because it allows intrinsic properties of a star, such as its mass and radius, to be estimated from the light curve alone. The only requirement is that the quality of the light curve — its duration, sampling, and precision — must be good enough to provide a high-resolution view of the star in the frequency domain. *Kepler* data is particularly well-suited for this purpose.
In this tutorial, we will explore two types of intrinsic oscillators that are commonly studied by asteroseismologists:
1. **$\delta$ Scuti stars**: a class of oscillating stars typically 1.5 to 2.5 times as massive as the Sun, which oscillate due to fluctuations in the opacity of the outer layers of the star.
2. **Solar-Like Oscillators**: a class that includes all stars that oscillate in the same manner as the Sun, namely due to turbulent motion in the convective outer layers of their atmospheres. This includes both main sequence stars as well as red giant stars.
## Imports
This tutorial only requires **[Lightkurve](https://docs.lightkurve.org)**, which in turn uses **[Matplotlib](https://matplotlib.org/)** for plotting.
```
import lightkurve as lk
%matplotlib inline
```
---
## 1. Exploring the Frequency Spectrum of a $\delta$ Scuti Oscillator
[$\delta$ Scuti stars](https://en.wikipedia.org/wiki/Delta_Scuti_variable) are stars roughly 1.5 to 2.5 times as massive as the Sun, and oscillate due to fluctuations in the opacity of the outer layers of the star ([known as the Kappa mechanism](https://en.wikipedia.org/wiki/Kappa%E2%80%93mechanism)), alternately appearing brighter and fainter.
An example star of this type is HD 42608, which was recently observed by the *TESS* space telescope. We can search for these data using Lightkurve:
```
lk.search_lightcurve('HD 42608', mission='TESS')
```
Success! A light curve for the object appears to be available in the data archive. Let's go ahead and download the data and convert it straight to a [`periodogram`](https://docs.lightkurve.org/api/lightkurve.periodogram.Periodogram.html) using the [`to_periodogram()`](https://docs.lightkurve.org/api/lightkurve.lightcurve.KeplerLightCurve.html#lightkurve.lightcurve.KeplerLightCurve.to_periodogram) function.
```
lc = lk.search_lightcurve('HD 42608', sector=6).download()
pg = lc.normalize().to_periodogram()
pg.plot();
```
We can see that there is a strong power excess around 50 cycles per day. These peaks indicate stellar oscillations.
To study these peaks in more detail, we can zoom in by recreating the periodogram using the [`minimum_frequency`](https://docs.lightkurve.org/api/lightkurve.periodogram.LombScarglePeriodogram.html#lightkurve.periodogram.LombScarglePeriodogram.from_lightcurve) and [`maximum_frequency`](https://docs.lightkurve.org/api/lightkurve.periodogram.LombScarglePeriodogram.html#lightkurve.periodogram.LombScarglePeriodogram.from_lightcurve) keywords:
```
pg = lc.normalize().to_periodogram(minimum_frequency=35,
maximum_frequency=60)
pg.plot();
```
This is much clearer!
Stars of this type are known to display multiple types of oscillation, including:
- **Radial Oscillations**: caused by the star shrinking and expanding radially. Also called a "breathing mode."
- **Dipole Oscillations**: caused by the star's hemispheres shrinking and expanding alternately.
Both types of oscillations are on display in the figure above. Identifying exactly what type of oscillation a given peak represents is challenging. Fortunately, this star (HD 42608) is part of a set of stars for which the oscillations have been analyzed in detail in a research paper by [Bedding et al. (2020)](https://arxiv.org/pdf/2005.06157.pdf), so you can consult that paper to learn more about the details.
Note that the modes of oscillation are very "sharp" in the figure above. This is because $\delta$ Scuti oscillations are *coherent*, which is a term astronomers in the field use for signals that have long lifetimes and are not heavily damped. Because of this, their exact oscillation frequencies can be observed in a fairly straightforward way. This sets $\delta$ Scuti stars apart from solar-like oscillators, which are damped. Let's look at an example of such a star next.
## 2. Exploring the Frequency Spectrum of a Solar-Like Oscillator
Solar-like oscillators exhibit variability driven by a different mechanism than $\delta$ Scuti stars. They encompass the class of stars that [oscillate in the same manner as the Sun](https://en.wikipedia.org/wiki/Helioseismology). Because they have lower masses than $\delta$ Scuti stars, solar-like oscillators have convective outer envelopes. The turbulent motion of these envelopes excites standing waves inside the stars which cause brightness changes on the surface. Unlike $\delta$ Scuti stars however, these waves are not coherent. Instead, these waves are stochastic and damped, which means that the lifetimes and amplitudes of the waves are limited and variable.
While the name might imply that only stars like the Sun are solar-like oscillators, this is not true. All stars with convective outer layers can exhibit solar-like oscillations, including red giant stars!
Let's have a look at the Sun-like star KIC 10963065 ([also known as Rudy](https://arxiv.org/pdf/1612.00436.pdf)), observed with *Kepler*. Because solar-like oscillation amplitudes are low, we will need to combine multiple quarters of data to improve our signal-to-noise.
We can list the available data sets as follows:
```
search_result = lk.search_lightcurve('KIC 10963065', mission='Kepler')
search_result
```
To create and plot this periodogram, we will apply a few common practices in the field:
- We will combine multiple quarters to improve the frequency resolution.
- We will [`normalize`](https://docs.lightkurve.org/api/lightkurve.lightcurve.LightCurve.html#lightkurve.lightcurve.LightCurve.normalize) the light curve to parts per million (`ppm`).
- We will use the `psd` normalization option when calling [`to_periodogram`](https://docs.lightkurve.org/api/lightkurve.lightcurve.KeplerLightCurve.html#lightkurve.lightcurve.KeplerLightCurve.to_periodogram), which sets the units of frequency to microhertz, and normalizes the power using the spacing between bins of frequency.
We'll also plot the resulting figure in log-log space.
```
lc = search_result[0:10].download_all().stitch()
pg = lc.normalize(unit='ppm').to_periodogram(normalization='psd')
pg.plot(scale='log');
```
This periodogram looks very different to that of the $\delta$ Scuti star above. There is a lot of power excess at low frequencies: this is what we call the *convective background*, which is additional noise contributed by the convective surface of the star constantly changing. We do not see any clear peaks like we did for the $\delta$ Scuti oscillator however.
There is a good reason for this: this main sequence star oscillates at frequencies too large to be seen on this periodogram, lying above the periodogram's [Nyquist frequency](https://en.wikipedia.org/wiki/Nyquist_frequency).
The Nyquist frequency is a property of a time series that describes the maximum frequency that can be reliably determined in a periodogram. It stems from the assumption that you need a minimum of two observations per oscillation period to observe a pattern (one observation on the "up," and one on the "down" oscillation). It is defined as follows:
$\nu_{\rm nyq} = \frac{1}{2\Delta t}$ ,
where $\Delta t$ is the observing cadence.
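For example (using approximate cadence values), we can estimate the Nyquist frequencies of the two *Kepler* cadences in microhertz:
```
# Approximate Nyquist frequencies for the two Kepler cadences.
dt_lc = 30 * 60    # ~30-minute Long Cadence, in seconds
dt_sc = 60         # ~1-minute Short Cadence, in seconds
nyq_lc = 1 / (2 * dt_lc) * 1e6   # convert Hz to microhertz
nyq_sc = 1 / (2 * dt_sc) * 1e6
print(f"Long Cadence Nyquist frequency:  ~{nyq_lc:.0f} microhertz")
print(f"Short Cadence Nyquist frequency: ~{nyq_sc:.0f} microhertz")
```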
The reason that we can't see Rudy's oscillations in the periodogram above is because we constructed this periodogram using the *Kepler* 30-minute Long Cadence data. Solar-like oscillators on the main sequence typically oscillate on the order of minutes (five minutes for the Sun), at frequencies much higher than will be visible on this periodogram. To see Rudy's oscillations, we will need to use the *Kepler* Short Cadence (SC) observations, which used a time sampling of one minute. We can obtain these data as follows:
```
search_result = lk.search_lightcurve('KIC 10963065',
mission='Kepler',
cadence='short')
lc = search_result[0:10].download_all().stitch()
pg = lc.normalize(unit='ppm').to_periodogram(normalization='psd')
pg.plot(scale='log');
```
Now we can see a power excess near $2000\, \mu\rm{Hz}$. This frequency is almost 10 times higher than we could view using the Long Cadence data alone. Let's zoom in on this region so we can look at the signals in more detail, like we did for the $\delta$ Scuti star.
```
zoom_pg = lc.normalize(unit='ppm').to_periodogram(normalization='psd',
minimum_frequency=1500,
maximum_frequency=2700)
zoom_pg.plot();
```
Compared to the $\delta$ Scuti star, the modes of oscillation in the figure above are less sharp, even though we used much more data to create the periodogram. This is because the modes in solar-like oscillators are damped due to the turbulent motion of the convective envelope. This lowers their amplitudes and also causes the lifetimes of the oscillations to be short. The short lifetimes create some uncertainty around the exact oscillation frequency, and so the peaks that appear in the periodogram are a little broader (usually Lorentzian-like in shape). This may not be immediately apparent from these figures, but it is much clearer if you zoom in on an individual mode.
## 3. How to Smooth and Detrend a Periodogram
### 3.1. The box kernel filter
To further explore the oscillation modes, we will demonstrate some of Lightkurve's smoothing tools. There are two types of smoothing functions we can call through the [`smooth()`](https://docs.lightkurve.org/api/lightkurve.periodogram.Periodogram.html#lightkurve.periodogram.Periodogram.smooth) function. Let's start with a basic "moving average," also known as a 1D box kernel.
```
smooth_pg = zoom_pg.smooth(method='boxkernel', filter_width=0.5)
ax = zoom_pg.plot(label='Original')
smooth_pg.plot(ax=ax, color='red', label='Smoothed');
```
In the figure above, the smoothed periodogram is plotted over the top of the original periodogram. In this case we have used the [Astropy `Box1DKernel`](https://docs.astropy.org/en/stable/api/astropy.convolution.Box1DKernel.html) filter, with a filter width of $0.5\, \mu \rm{Hz}$. The filter takes the mean value of power in a region $0.5\, \mu \rm{Hz}$ around a data point, and replaces that point with the mean value. It then moves on to the next data point. This creates a smoothed periodogram of the same length as the original. Because the power values are now correlated, these smoothed periodograms usually aren't used for computational analysis, but they can aid visual explorations of the location of the oscillation modes.
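For intuition, the same kind of box-kernel smoothing can be reproduced directly with Astropy's convolution tools. This is a minimal sketch on synthetic data, independent of Lightkurve; here the kernel width is given as a number of frequency bins rather than in microhertz.
```
import numpy as np
from astropy.convolution import Box1DKernel, convolve

# Synthetic "power spectrum" values, purely for illustration
power = np.random.chisquare(2, size=1000)

# Smooth each point using a window of 9 frequency bins around it
smoothed = convolve(power, Box1DKernel(9))
```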
### 3.2. The log median filter
While the [`Box1DKernel`](https://docs.astropy.org/en/stable/api/astropy.convolution.Box1DKernel.html) filter can be used to help identify modes of oscillation in the presence of noise, it is mostly good for smoothing on small scales. For large scales, we can instead use Lightkurve's log median filter.
As we saw above, solar-like oscillators exhibit a large power excess at low frequencies due to the turbulent convection visible near the stellar surface. When studying modes of oscillation, we typically aren't interested in the convective background, and prefer to remove it.
The log median filter performs a similar operation to the [`Box1DKernel`](https://docs.astropy.org/en/stable/api/astropy.convolution.Box1DKernel.html) filter, but does so in log space. This means that at low frequencies the median is taken over a small number of frequency bins, while at high frequencies many frequency bins are included in the median calculation. As a result, the log median filter smooths over the convective background but ignores the modes of oscillation at high frequencies.
The result of applying a log median filter is demonstrated using the red line in the figure below:
```
smooth_pg = pg.smooth(method='logmedian', filter_width=0.1)
ax = pg.plot(label='Original')
smooth_pg.plot(ax=ax, linewidth=2, color='red', label='Smoothed', scale='log');
```
### 3.3. Flattening
When studying modes of oscillation, it is typically preferred to remove the convective background. In a detailed analysis this would involve fitting a model to the background. As can be seen in the figure above, however, Lightkurve's log median [`smooth()`](https://docs.lightkurve.org/api/lightkurve.periodogram.LombScarglePeriodogram.html#lightkurve.periodogram.LombScarglePeriodogram.smooth) method provides a useful first-order approximation of the background without the need for a model.
To divide the power spectrum by the background, we can use Lightkurve's [`flatten()`](https://docs.lightkurve.org/api/lightkurve.periodogram.LombScarglePeriodogram.html#lightkurve.periodogram.LombScarglePeriodogram.flatten) method. This function uses the log median smoothing method to determine the background, and returns a new [`periodogram`](https://docs.lightkurve.org/api/lightkurve.periodogram.Periodogram.html) object in which the background has been divided out.
```
snrpg = pg.flatten()
snrpg
```
The periodogram obtained by dividing by the noise in this way is commonly called a Signal-to-Noise periodogram (`SNRPeriodogram`), because the noise, in the form of the convective background, has been removed. This is a little bit of a misnomer, because there is still noise present in the periodogram.
We plot the `SNRPeriodogram` below, and see that the modes of oscillation stick out from the noise much more clearly now that the convective background has been removed.
```
snrpg.plot();
```
## 4. Closing Comments
In this tutorial, we explored two common types of oscillating stars, and demonstrated how Lightkurve can be used to study their power spectra. In the accompanying tutorials, you can learn how to use these tools to extract more detailed information from them, including the radius and mass of a star!
For further reading on $\delta$ Scuti stars, solar-like oscillators, and Fourier Transforms, we recommend you consult the following papers:
- [Vanderplas (2017)](https://arxiv.org/pdf/1703.09824.pdf) – A detailed paper on Fourier Transforms and Lomb-Scargle Periodograms.
- [Bedding et al. (2020)](https://arxiv.org/pdf/2005.06157.pdf) – A demonstration of mode identification in $\delta$ Scuti stars.
- [Chaplin & Miglio (2013)](https://arxiv.org/pdf/1303.1957.pdf) – A review paper on asteroseismology of solar-like oscillators with *Kepler*.
- [Aerts (2019)](https://arxiv.org/pdf/1912.12300.pdf) – A comprehensive review that covers asteroseismology of a wide range of oscillating stars, including solar-like oscillators and $\delta$ Scutis.
## About this Notebook
**Authors**: Oliver Hall (oliver.hall@esa.int), Geert Barentsen
**Updated On**: 2020-09-29
# Citing Lightkurve and Astropy
If you use `lightkurve` or `astropy` for published research, please cite the authors. Click the buttons below to copy BibTeX entries to your clipboard.
```
lk.show_citation_instructions()
```
<img style="float: right;" src="https://raw.githubusercontent.com/spacetelescope/notebooks/master/assets/stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="Space Telescope Logo" width="200px"/>
# Iris classification in Keras
Author: Michał Słapek
Classification example for Iris dataset.
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import keras
from keras.models import Sequential
from keras.layers import Dense
from utils import get_iris_data
from plot_iris import plot_contours
data = get_iris_data()
type(data)
data.head(5)
sns.set()
plt.figure(dpi=100)
sns.scatterplot(
x='sepal length (cm)',
y='sepal width (cm)',
hue='family',
data=data
)
plt.show()
```
## Model plot
```
plot_x_lim = (4, 9)
plot_y_lim = (1.5, 5)
plot_X, plot_Y = np.meshgrid(
np.arange(*plot_x_lim, step=0.1),
np.arange(*plot_y_lim, step=0.1)
)
def format_loss(l):
return '[' + ', '.join(f'{v:0.2f}' for v in l) + ']'
def plot_model(model, name='model'):
fig, ax = plt.subplots(dpi=100)
a = plot_contours(ax, model, plot_X, plot_Y)
fig.colorbar(a)
sns.scatterplot(
x=X_train[:, 0],
y=X_train[:, 1],
hue='T' + pd.Series(y_train).astype(str),
hue_order=[f'T{i}' for i in range(3)],
marker='x',
legend=False
)
sns.scatterplot(
x=X_valid[:, 0],
y=X_valid[:, 1],
hue='V' + pd.Series(y_valid).astype(str),
hue_order=[f'V{i}' for i in range(3)],
legend=False
)
text = f'train: {format_loss(model.evaluate(X_train, y_train, verbose=False))}\n' \
f'valid: {format_loss(model.evaluate(X_valid, y_valid, verbose=False))}'
ax.text(4.5, 4.5, text, color='white')
```
## Train, test, validation split
```
from sklearn.model_selection import train_test_split
X = data[['sepal length (cm)', 'sepal width (cm)']].values
y = data['class'].values
X_train_valid, X_test, y_train_valid, y_test = train_test_split(
X, y,
test_size=0.25,
random_state=910797
)
X_train, X_valid, y_train, y_valid = train_test_split(
X_train_valid, y_train_valid,
test_size=1/3,
random_state=142385
)
for m in [X_train, X_valid, X_test]:
print(type(m))
print(m.shape)
```
## Model class ~ (sepal width) * (sepal length)
```
models = {}
```
### Flat model
```
model = Sequential()
model.add(Dense(units=3, input_dim=2, activation='softmax'))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True),
metrics=['accuracy']
)
model.summary()
# model.load_weights('weights/iris_flat.hdf5')
model.fit(
X_train, y_train,
epochs=1_000,
batch_size=len(y_train),
validation_data=(X_valid, y_valid)
)
# model.save_weights('weights/iris_flat.hdf5')
models['flat'] = model
plot_model(model, 'flat')
```
## Medium model
```
model = Sequential()
model.add(Dense(units=5, input_dim=2, activation='relu'))
model.add(Dense(units=3, activation='softmax'))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True),
metrics=['accuracy']
)
model.summary()
model.load_weights('weights/iris_medium.hdf5')
# model.fit(
# X_train, y_train,
# epochs=10_000,
# batch_size=len(y_train),
# validation_data=(X_valid, y_valid),
# verbose=False
# )
# model.save_weights('weights/iris_medium.hdf5')
models['medium'] = model
plot_model(model)
```
## Big model
```
model = Sequential()
model.add(Dense(units=100, input_dim=2, activation='relu'))
model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=3, activation='softmax'))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True),
metrics=['accuracy']
)
model.summary()
model.load_weights('weights/iris_big.hdf5')
# model.fit(
# X_train, y_train,
# epochs=10_000,
# batch_size=len(y_train),
# validation_data=(X_valid, y_valid),
# verbose=False
# )
# model.save_weights('weights/iris_big.hdf5')
models['big'] = model
plot_model(model)
for k, m in models.items():
score = m.evaluate(X_valid, y_valid, verbose=False)
print(f'Model {k:10}: {score}')
# Pick the model that performed best on the validation set and evaluate it once
# on the held-out test set (replace '????' with one of 'flat', 'medium', 'big')
models['????'].evaluate(X_test, y_test)
```
# Table of Contents
* [Intro](#Intro)
* [Generative Adversarial Networks (GANs)](#Generative-Adversarial-Networks-%28GANs%29)
* [Gaussian Distribution Approximation (Keras)](#Gaussian-Distribution-Approximation-%28Keras%29)
* [MNIST GAN (Keras)](#MNIST-GAN-%28Keras%29)
* [Generator Model](#Generator-Model)
* [Discriminator Model](#Discriminator-Model)
* [GAN Model](#GAN-Model)
* [Train](#Train)
* [Wasserstein GAN](#Wasserstein-GAN)
* [Generator Model](#Generator-Model)
* [Discriminator Model](#Discriminator-Model)
* [GAN Model](#GAN-Model)
* [Train](#Train)
* [Plot Losses](#Plot-Losses)
* [Generate gif](#Generate-gif)
# Intro
Exploratory notebook related to Generative Adversarial Networks (GANs). Includes toy examples implementation and testing of related techniques or subjects.
## Generative Adversarial Networks (GANs)
An architecture that learns by posing two networks in competition with each other. The goal is to learn parameters that produce a distribution close to our dataset distribution (the true distribution).
* Discriminator: detects whether an image belongs to the target dataset or was generated by the generator
* Generator: generates new examples that look like the training/target data, in order to fool the discriminator
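Formally, this competition is often written as the minimax game from the original GAN formulation (Goodfellow et al., 2014), with discriminator $D$, generator $G$, data distribution $p_{\rm data}$, and noise prior $p_z$:

$$\min_G \max_D \; \mathbb{E}_{x \sim p_{\rm data}}\big[\log D(x)\big] + \mathbb{E}_{z \sim p_z}\big[\log\big(1 - D(G(z))\big)\big]$$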
```
import time
from PIL import Image
import numpy as np
import pdb
import os
import sys
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
from keras.models import Sequential
from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D
from keras.layers.advanced_activations import LeakyReLU
from keras import backend as K
from keras import optimizers
from keras.layers.normalization import BatchNormalization
from keras.datasets import mnist
from tqdm import tqdm_notebook as tqdm
%matplotlib notebook
sns.set_style("dark")
sys.path.append(os.path.join(os.getcwd(), os.pardir))
from utils.plot_utils import plot_sample_imgs
from utils.generative_utils import NoiseDistribution, set_trainable
RES_DIR = os.path.join(*[os.pardir]*2, 'data', 'deep_learning')
%load_ext autoreload
%autoreload 2
```
# Gaussian Distribution Approximation (Keras)
Example adapted from [Aylien blog](http://blog.aylien.com/introduction-generative-adversarial-networks-code-tensorflow/).
Check also [here](https://medium.com/towards-data-science/gan-by-example-using-keras-on-tensorflow-backend-1a6d515a60d0) for Keras code
```
# target 1D gaussian distribution class
class GaussianDistribution:
def __init__(self, mu=4, sigma=0.5):
self.mu = mu
self.sigma = sigma
def sample(self, N):
samples = np.random.normal(self.mu, self.sigma, N)
samples.sort()
return samples
# generator input noise distribution class
class GeneratorNoiseDistribution:
def __init__(self, vrange):
self.vrange = vrange
def sample(self, N):
return np.linspace(-self.vrange, self.vrange, N) + \
np.random.random(N) * 0.01
def generator(input_dim, hidden_size):
g = Sequential()
g.add(Dense(hidden_size, input_dim=input_dim, activation=K.softplus))
g.add(Dense(input_dim))
return g
def discriminator(input_dim, hidden_size):
d = Sequential()
d.add(Dense(hidden_size*2, input_dim=input_dim, activation=K.tanh))
d.add(Dense(hidden_size*2, activation=K.tanh))
d.add(Dense(hidden_size*2, activation=K.tanh))
d.add(Dense(1, activation=K.sigmoid))
return d
# init distributions
gaussian_d = GaussianDistribution()
generator_d = GeneratorNoiseDistribution(8)
# init GAN components
d = discriminator(1, 128)
g = generator(1, 128)
# discriminator model
optimizer = optimizers.RMSprop(lr=0.0008, clipvalue=1.0, decay=6e-8)
discriminator_model = d
discriminator_model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# adversarial model
optimizer = optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=3e-8)
adversarial_model = Sequential()
adversarial_model.add(g)
adversarial_model.add(d)
adversarial_model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
batch_size = 64
fig, ax = plt.subplots(dpi=100, figsize=(5, 4))
true_dist = np.reshape(gaussian_d.sample(1000), (1000, 1))
plt.show()
def animate(step):
#for step in range(100):
# generate data
# first we sample from the true distribution, then we generate some
# "fake" data by feeding noise to the generator
true_sample = np.reshape(gaussian_d.sample(batch_size), (batch_size, 1))
noise = generator_d.sample(batch_size)
fake_sample = g.predict(noise)
#pdb.set_trace()
# train discriminator
# feed true and fake samples with respective labels (1, 0) to the discriminator
x = np.reshape(np.concatenate((true_sample, fake_sample)), (batch_size*2, 1))
y = np.ones([batch_size*2, 1])
y[batch_size:, :] = 0
d_loss = discriminator_model.train_on_batch(x, y)
# train GAN
# feed noise to the model and expect true (1) response from discriminator,
# which is in turn fed with data generated by the generator
noise = np.reshape(generator_d.sample(batch_size), (batch_size, 1))
y = np.ones([batch_size, 1])
a_loss = adversarial_model.train_on_batch(noise, y)
log_mesg = "%d: [D loss: %f, acc: %f]" % (step, d_loss[0], d_loss[1])
log_mesg = "%s [A loss: %f, acc: %f]" % (log_mesg, a_loss[0], a_loss[1])
# plot
fig.clf()
fake = sns.distplot(fake_sample)
fake.set_xlim([0,8])
fake.set_ylim([0,3])
sns.distplot(true_dist)
    plt.text(3, 2, "Epoch {}, a_loss {:.3f}".format(step, a_loss[0]))
anim = animation.FuncAnimation(fig, animate, 200, repeat=False)
noise = generator_d.sample(batch_size)
fake_sample = g.predict(noise)
sns.distplot(fake_sample)
plt.show()
```
# MNIST GAN (Keras)
Example adapted from [MNIST Generative Adversarial Model in Keras](http://www.kdnuggets.com/2016/07/mnist-generative-adversarial-model-keras.html)
```
noise_d = NoiseDistribution()
input_dim = 100
img_shape = (28,28,1)
```
## Generator Model
```
# model takes real values vector of size input_dim and via upsampling,
# reshaping, and various convolutional filters generates a 28x28 b/w image
def generator_model(input_dim, n_channels=128, init_side=7):
m = Sequential()
m.add(Dense(init_side*init_side*n_channels, input_dim=input_dim, activation=LeakyReLU()))
m.add(BatchNormalization(mode=2))
m.add(Reshape((init_side, init_side, n_channels)))
m.add(UpSampling2D())
m.add(Convolution2D(n_channels//2, 3, 3, border_mode='same', activation=LeakyReLU()))
m.add(BatchNormalization(mode=2))
m.add(UpSampling2D())
m.add(Convolution2D(n_channels//4, 3, 3, border_mode='same', activation=LeakyReLU()))
m.add(BatchNormalization(mode=2))
#?? Tanh
m.add(Convolution2D(1, 1, 1, border_mode='same', activation='sigmoid'))
return m
g = generator_model(input_dim=input_dim, n_channels=512)
g.summary()
# plot random generated image
plt.imshow(g.predict(noise_d.sample((1, input_dim)))[0]
.reshape(28, 28))
plt.show()
```
## Discriminator Model
```
# model takes image and after convolution and flattening
# outputs a probability value
def discriminator_model(input_shape, init_filters=64):
m = Sequential()
m.add(Convolution2D(init_filters, 5, 5, subsample=(2, 2), input_shape=input_shape, border_mode='same',
activation=LeakyReLU(0.2)))
#?? maxpooling and dropout? MaxPool2D(pool_size=2)
m.add(Convolution2D(init_filters*2, 5, 5, subsample=(2, 2), border_mode='same',
activation=LeakyReLU(0.2)))
#m.add(Convolution2D(init_filters*4, 3, 5, border_mode='same',
# activation=LeakyReLU(0.2)))
m.add(Flatten())
m.add(Dense(256, activation=LeakyReLU()))
m.add(Dense(1, activation='sigmoid'))
return m
d = discriminator_model(input_shape=(28,28,1), init_filters=256)
d.summary()
# print prediction for random image
d.predict(g.predict(noise_d.sample((1, input_dim))))
```
## GAN Model
```
# init GAN components
g = generator_model(input_dim)
d = discriminator_model(img_shape)
# compile generator
#g_optimizer = optimizers.Adam(lr=0.0001)
#g.compile(loss='binary_crossentropy', optimizer=g_optimizer)
# compile discriminator
d_optimizer = optimizers.Adam(lr=0.001)
d.compile(loss='binary_crossentropy', optimizer=d_optimizer)
# build adversarial model
gan = Sequential()
gan.add(g)
gan.add(d)
gan_optimizer = optimizers.Adam(lr=0.0001)
gan.compile(loss='binary_crossentropy', optimizer=gan_optimizer)
gan.summary()
```
## Train
```
generator_fun = lambda num_samples : g.predict(noise_d.sample((num_samples, input_dim)))
# load mnist data using Keras
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape and normalize train data
X_train = np.expand_dims(X_train, axis=-1)
X_train = X_train.astype('float32')/255
print(X_train.shape)
print(y_train.shape)
def train_discriminator(d, g, noise_d, input_dim, X_train, batch_size, epoch):
# generate data
# first we sample from the true distribution (mnist dataset), then we generate some
# "fake" images by feeding noise to the generator
# generate random indexes for train data
batch_idxs = np.random.randint(0, len(X_train), batch_size)
    # collect the images corresponding to the previously generated indexes
true_sample = X_train[batch_idxs,:,:,:]
# generate fake sample
fake_sample = g.predict(noise_d.sample((batch_size, input_dim)))
# prepare train batch data
    # concatenating true and fake samples and adjusting labels accordingly
x = np.concatenate((true_sample, fake_sample))
y = np.ones([batch_size*2, 1])
y[batch_size:,:] = 0
# train discriminator
# feed true and fake samples with respective labels (1, 0) to the discriminator
set_trainable(d, True, None, None)
d_loss = d.train_on_batch(x, y)
#print("Epoch {}: [D loss: {}]".format(epoch, d_loss))
return d_loss
def train(d, g, gan, noise_d, input_dim, X_train, batch_size=32, n_epochs=1, add_epoch=0):
losses = {'g':[], 'd':[]}
for epoch in range(n_epochs):
# train discriminator
d_loss = train_discriminator(d, g, noise_d, input_dim, X_train, batch_size, epoch)
losses['d'].append(d_loss)
set_trainable(d, False, None, None)
# train GAN
# feed noise to the model and expect true (1) response from discriminator,
# which is in turn fed with data generated by the generator
noise = noise_d.sample((batch_size, input_dim))
y = np.ones([batch_size, 1])
g_loss = gan.train_on_batch(noise, y)
losses['g'].append(g_loss)
#print("Epoch {}: [G loss: {}]".format(epoch, g_loss))
if (epoch%10)==0:
plot_sample_imgs(generator_fun, img_shape[:2], savepath=os.path.join('data', 'mnist_gan', 'mnist_gen{}.jpg'.format(epoch+add_epoch)))
return losses
# pretrain discriminator
batch_size = 128
n_epochs = 1
for epoch in range(n_epochs):
train_discriminator(d, g, noise_d, input_dim, X_train, batch_size, epoch)
#plt.ion()
plt.ioff()
K.set_value(d.optimizer.lr, 1e-3)
K.set_value(gan.optimizer.lr, 1e-3)
losses = train(d, g, gan, noise_d, input_dim, X_train,
batch_size=256, n_epochs=1000, add_epoch=120)
# plot random generated image
plt.imshow(g.predict(np.random.randn(input_dim).reshape(1, -1))
.reshape(28, 28))
plt.show()
plt.imshow(true_sample[2].reshape(28, 28))
plt.show()
gan.test_on_batch(noise, y)
gan.train_on_batch(noise, y)
```
# Wasserstein GAN
[Source](https://myurasov.github.io/2017/09/24/wasserstein-gan-keras.html)
```
from keras.layers import *
from keras.models import *
from keras.optimizers import *
from keras.initializers import *
from keras.callbacks import *
from keras.utils.generic_utils import Progbar
noise_d = NoiseDistribution()
input_dim = 100
img_shape = (28,28,1)
num_classes = 10
```
## Generator Model
```
# utility for the standard deconvolution block used in the generator
def generator_deconv_block(filters, block_input, kernel_size=(3, 3), strides=(1, 1)):
block = UpSampling2D()(block_input)
    block = Convolution2D(filters, kernel_size, strides=strides, padding='same')(block)
block = LeakyReLU()(block)
block = BatchNormalization()(block)
return block
# different from basic DCGAN, this WGAN model
# takes as input both the prior sample (noise) and the image class
def generator_model(input_dim, voc_size, init_filters=128, init_side=7, num_deconv_blocks=2):
# Input combination part
input_class = Input(shape=(1, ), dtype='int32')
e = Embedding(voc_size, input_dim)(input_class)
embedded_class = Flatten()(e)
# noise
noise = Input(shape=(input_dim, ))
# hadamard product
h = multiply([noise, embedded_class])
# CNN part
x = Dense(1024)(h)
x = LeakyReLU()(x)
x = Dense(init_side*init_side*init_filters)(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)
x = Reshape((init_side, init_side, init_filters))(x)
for i in range(num_deconv_blocks):
x = generator_deconv_block(init_filters//(2**(i+1)), block_input=x, kernel_size=(5, 5))
x = Convolution2D(1, (2, 2), padding='same', activation='tanh')(x)
return Model(inputs=[noise, input_class], outputs=x)
# instantiate generate model
gen = generator_model(input_dim=input_dim, voc_size=10, init_filters=128)
gen.summary()
# plot random generated image
plt.imshow(gen.predict([noise_d.sample((1, input_dim)), np.array([7])])[0]
.reshape(28, 28))
plt.show()
```
## Discriminator Model
```
# utility for the standard convolution block used in the discriminator
def discriminator_conv_block(filters, block_input, kernel_size=(3, 3), strides=(1, 1), pool_size=None):
block = Convolution2D(filters, kernel_size, strides=strides, padding='same')(block_input)
block = LeakyReLU()(block)
block = BatchNormalization()(block)
# if given, add max pooling
if pool_size:
block = MaxPool2D(pool_size=pool_size)(block)
return block
# different from basic DCGAN, this WGAN discriminator model
# takes an image as input, and outputs both a prediction of image authenticity
# as well as one for the image class
def discriminator_model(input_shape, num_classes, init_filters=32, num_conv_blocks=3):
input_image = Input(shape=input_shape)
x = input_image
for i in range(num_conv_blocks):
x = discriminator_conv_block(init_filters*(2**i), block_input=x, pool_size=None)
features = Flatten()(x)
out_autheticity = Dense(1, activation='linear')(features)
out_class = Dense(num_classes, activation='softmax')(features)
return Model(inputs=[input_image], outputs=[out_autheticity, out_class])
# instantiate discriminator model
dis = discriminator_model(input_shape=img_shape, num_classes=10, init_filters=32)
dis.summary()
# print prediction for random image
dis.predict(gen.predict([noise_d.sample((1, input_dim)), np.array([3])]))
```
## GAN Model
```
# loss function for discriminator
def d_loss(y_true, y_pred):
return K.mean(y_true * y_pred)
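# Note on this loss: in the training loop below the critic is fed real samples with
# label -1 and generated samples with label +1, so minimizing K.mean(y_true * y_pred)
# pushes the critic's (linear) output up for real data and down for fakes, which
# approximates the Wasserstein-1 distance between the two distributions. The weight
# clipping done in train_discriminator() is what (roughly) enforces the Lipschitz
# constraint this formulation requires.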
# init GAN components
gen = generator_model(input_dim=input_dim, voc_size=num_classes, init_filters=128)
dis = discriminator_model(input_shape=img_shape, num_classes=num_classes, init_filters=32)
# compile discriminator
dis.compile(loss=[d_loss, 'sparse_categorical_crossentropy'],
optimizer=RMSprop(lr=1e-4))
# Build adversarial model
noise = Input(shape=(input_dim, ))
input_class = Input(shape=(1, ), dtype='int32')
out_autheticity, out_class = dis(gen(inputs=[noise, input_class]))
gan = Model(inputs=[noise, input_class], outputs=[out_autheticity, out_class])
gan.compile(loss=[d_loss, 'sparse_categorical_crossentropy'],
optimizer=RMSprop(lr=1e-4))
gan.summary()
```
## Train
```
# given that generator uses tanh activation function,
# we need to process its output to make it a valid image
#deprocess = lambda img : np.transpose((img/2+0.5).clip(0,1), (1,2,0))
deprocess = lambda img : (img/2+0.5).clip(0,1)
generator_fun = lambda num_samples : deprocess(gen.predict([noise_d.sample((num_samples, input_dim)),
np.random.randint(0, num_classes, num_samples)]))
# load mnist data using Keras
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# normalize to -1..1 range and reshape
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=-1)
print(X_train.shape)
print(y_train.shape)
def train_discriminator(dis, gen, noise_d, input_dim, num_classes, X_train, Y_train, batch_size, epoch):
# clip weights
for l in dis.layers:
weights = l.get_weights()
weights = [np.clip(w, -0.01, 0.01) for w in weights]
l.set_weights(weights)
# generate data
# first we sample from the true distribution (mnist dataset), then we generate some
# "fake" images by feeding noise to the generator
# generate random indexes for train data
batch_idxs = np.random.randint(0, len(X_train), batch_size)
    # collect the images and classes corresponding to the previously generated indexes
true_sample = X_train[batch_idxs]
true_sample_classes = y_train[batch_idxs]
# train on true samples
dis_true_loss = dis.train_on_batch(true_sample,
[-np.ones(batch_size), true_sample_classes])
# generate fake sample
noise = noise_d.sample((batch_size, input_dim))
generated_classes = np.random.randint(0, num_classes, batch_size)
fake_sample = gen.predict([noise, generated_classes.reshape(-1, 1)])
# train on fake samples
dis_fake_loss = dis.train_on_batch(fake_sample,
[np.ones(batch_size), generated_classes])
#print("Epoch {}: [D loss: {}]".format(epoch, d_loss))
return dis_true_loss, dis_fake_loss
def train(dis, gen, gan, noise_d, input_dim, num_classes, X_train, Y_train,
batch_size=32, n_epochs=1, add_epochs=0):
losses = {'gan':[], 'dis_fake_loss':[], 'dis_true_loss':[]}
for epoch in tqdm(range(n_epochs), desc='Training GAN'):
        if (epoch + add_epochs) % 1000 < 15 or (epoch + add_epochs) % 500 == 0:  # extra critic iterations for the first 15 of every 1000 epochs, and on every 500th
d_iters = 40
else:
d_iters = 5#D_ITERS
# train discriminator
set_trainable(dis, True, None, None)
for d_epoch in range(d_iters):
dis_true_loss, dis_fake_loss = train_discriminator(dis, gen, noise_d, input_dim, num_classes,
X_train, Y_train, batch_size, epoch)
losses['dis_fake_loss'].append(dis_fake_loss)
losses['dis_true_loss'].append(dis_true_loss)
set_trainable(dis, False, None, None)
# train GAN
# feed noise to the model and expect true (1) response from discriminator,
# which is in turn fed with data generated by the generator
noise = noise_d.sample((batch_size, input_dim))
generated_classes = np.random.randint(0, num_classes, batch_size)
gan_loss = gan.train_on_batch(
[noise, generated_classes.reshape((-1, 1))],
[-np.ones(batch_size), generated_classes])
losses['gan'].append(gan_loss)
#print("Epoch {}: [G loss: {}]".format(epoch, g_loss))
if epoch%10 == 0:
plot_sample_imgs(generator_fun, img_shape[:2],
savepath=os.path.join(RES_DIR, 'data', 'mnist_wgan', 'mnist_gen{}.jpg'.format(epoch+add_epochs)))
return losses
add_epochs = 0
#plt.ion()
plt.ioff()
n_epochs = 500
losses = train(dis, gen, gan, noise_d, input_dim, num_classes,
X_train, y_train,
batch_size=64, n_epochs=n_epochs, add_epochs=add_epochs)
add_epochs += n_epochs
```
## Plot Losses
```
def plot_losses(losses):
f = plt.figure()
#plt.plot(losses['dis_fake_loss'], label='dis_fake_loss')
#plt.plot(losses['dis_fake_loss'], label='dis_true_loss')
plt.plot(np.array(losses['gan'])[:,2], label='gan loss')
plt.legend()
plt.show()
plot_losses(losses)
np.array(losses['gan'])[:,0]
```
# Generate gif
```
import imageio
import os
RES_DIR = os.path.join(os.pardir, os.pardir, 'data', 'deep_learning')
dir_path = os.path.join(RES_DIR, 'data', 'mnist_wgan', 'test_2')
filenames = [(os.path.join(dir_path, filename), int(filename[9:-4])) for filename in os.listdir(dir_path)]
images = []
for filename in sorted(filenames, key=lambda x:x[1]):
images.append(imageio.imread(filename[0]))
imageio.mimsave(os.path.join(RES_DIR, 'data', 'mnist_wgan', 'wgan.gif'), images)
```
### Deliverable 1: Preprocessing the Data for a Neural Network
```
# Import our dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler,OneHotEncoder
import pandas as pd
import tensorflow as tf
# Import and read the charity_data.csv.
import pandas as pd
application_df = pd.read_csv("C:/Users/hzlip/Documents/VU_DataAnalytics/Mod_19_Neural_Networks_Machine_Learning/Neural_Network_Charity_Analysis/Resources/charity_data.csv")
application_df.head()
# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.
application_df = application_df.drop(["EIN", "NAME"], axis=1)
application_df.head()
# Determine the number of unique values in each column.
counts = application_df.nunique()
counts
# Look at APPLICATION_TYPE value counts for binning
app_type_value_counts = application_df.APPLICATION_TYPE.value_counts()
app_type_value_counts
# Visualize the value counts of APPLICATION_TYPE
app_type_value_counts.plot.density()
# Determine which values to replace if counts are less than ...?
replace_application = list(app_type_value_counts[app_type_value_counts < 500].index)
# Replace in dataframe
for app in replace_application:
application_df.APPLICATION_TYPE = application_df.APPLICATION_TYPE.replace(app,"Other")
# Check to make sure binning was successful
application_df.APPLICATION_TYPE.value_counts()
# Look at CLASSIFICATION value counts for binning
classification_value_counts = application_df.CLASSIFICATION.value_counts()
classification_value_counts
# Visualize the value counts of CLASSIFICATION
classification_value_counts.plot.density()
# Determine which values to replace if counts are less than ..?
replace_class = list(classification_value_counts[classification_value_counts < 1800].index)
# Replace in dataframe
for cls in replace_class:
application_df.CLASSIFICATION = application_df.CLASSIFICATION.replace(cls,"Other")
# Check to make sure binning was successful
application_df.CLASSIFICATION.value_counts()
# Generate our categorical variable lists
application_cat = application_df.dtypes[application_df.dtypes == "object"].index.tolist()
application_cat
# Create a OneHotEncoder instance
enc = OneHotEncoder(sparse=False)
# Fit and transform the OneHotEncoder using the categorical variable list
encode_df = pd.DataFrame(enc.fit_transform(application_df[application_cat]))
# Add the encoded variable names to the dataframe
encode_df.columns = enc.get_feature_names(application_cat)
encode_df.head()
# Merge one-hot encoded features and drop the originals
application_df = application_df.merge(encode_df,left_index=True, right_index=True)
application_df = application_df.drop(application_cat,1)
application_df.head()
# Split our preprocessed data into our features and target arrays
y = application_df["IS_SUCCESSFUL"].values
X = application_df.drop(["IS_SUCCESSFUL"],1).values
# Split the preprocessed data into a training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
# Create a StandardScaler instances
scaler = StandardScaler()
# Fit the StandardScaler
X_scaler = scaler.fit(X_train)
# Scale the data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
```
### Deliverable 2: Compile, Train and Evaluate the Model
```
# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.
number_input_features = len(X_train[0])
hidden_nodes_layer1 = 180
hidden_nodes_layer2 = 90
hidden_nodes_layer3 = 60
nn = tf.keras.models.Sequential()
# First hidden layer
nn.add(tf.keras.layers.Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation="relu"))
# Second hidden layer
nn.add(tf.keras.layers.Dense(units=hidden_nodes_layer2, activation="relu"))
# Third hidden layer
nn.add(tf.keras.layers.Dense(units=hidden_nodes_layer3, activation="relu"))
# Output layer
nn.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
# Check the structure of the model
nn.summary()
# Compile the model
nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Import checkpoint dependencies
import os
from tensorflow.keras.callbacks import ModelCheckpoint
# Define the checkpoint path and filenames
os.makedirs("checkpoints/",exist_ok=True)
checkpoint_path = "checkpoints/weights.{epoch:02d}.hdf5"
# Create a callback that saves the model's weights every 5 epochs
cp_callback = ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
save_freq="epoch",
period=5)
# Train the model
fit_model = nn.fit(X_train_scaled,y_train,epochs=100,callbacks=[cp_callback])
# Evaluate the model using the test data
model_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# Export our model to HDF5 file
nn.save("AlphabetSoupCharity_Optimization2.h5")
```
# Factor Risk Exposure
By Evgenia "Jenny" Nitishinskaya, Delaney Granizo-Mackenzie, and Maxwell Margenot.
Part of the Quantopian Lecture Series:
* [www.quantopian.com/lectures](https://www.quantopian.com/lectures)
* [github.com/quantopian/research_public](https://github.com/quantopian/research_public)
---
##DISCLAIMER:
As always, this analysis is based on historical data, and risk exposures estimated on historical data may or may not hold going forward. As such, computing the risk exposure to a factor is not enough. You must put confidence bounds on that risk exposure, and determine whether the risk exposure can even be modeled reasonably. For more information on this, please see our other lectures, especially Instability of Parameter Estimates.
##Using Factor Models to Determine Risk Exposure
We can use factor models to analyze the sources of risks and returns in portfolios. Recall that a factor model expresses the returns as
$$R_i = a_i + b_{i1} F_1 + b_{i2} F_2 + \ldots + b_{iK} F_K + \epsilon_i$$
By modelling the historical returns, we can see how much of them is due to speculation on different factors and how much to asset-specific fluctuations ($\epsilon_p$). We can also examine what sources of risk the portfolio is exposed to.
In risk analysis, we often model active returns (returns relative to a benchmark) and active risk (standard deviation of active returns, also known as tracking error or tracking risk).
For instance, we can find a factor's marginal contribution to active risk squared (FMCAR). For factor $j$, this is
$$ \text{FMCAR}_j = \frac{b_j^a \sum_{i=1}^K b_i^a Cov(F_j, F_i)}{(\text{Active risk})^2} $$
where $b_i^a$ is the portfolio's active exposure to factor $i$. This tells us how much risk we incur by being exposed to factor $j$, given all the other factors we're already exposed to.
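As a minimal sketch of how this formula translates into code (hypothetical helper and argument names; the concrete two-factor computation for this notebook appears further below):
```
import numpy as np

def fmcar(j, b_active, factor_cov, active_risk):
    # b_active: vector of active exposures b^a, one per factor
    # factor_cov: covariance matrix of the factor returns
    # active_risk: standard deviation of the active returns
    return b_active[j] * np.dot(factor_cov[j], b_active) / active_risk**2
```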
Fundamental factor models are often used to evaluate portfolios because they correspond directly to investment choices (e.g. whether we invest in small-cap or large-cap stocks, etc.). Below, we construct a model to evaluate a single asset; for more information on the model construction, check out the fundamental factor models notebook.
We'll use the canonical Fama-French factors for this example, which are the returns of portfolios constructed based on fundamental factors.
##How many factors do you want?
In the Arbitrage Pricing Theory lecture we mention that for predictive models you want fewer parameters. However, this doesn't quite hold for risk exposure. Instead of trying to not overfit a predictive model, you are looking for any possible risk factor that could be influencing your returns. Therefore it's actually safer to estimate exposure to many many risk factors to see if any stick. Anything left over in our $\alpha$ is risk exposure that is currently unexplained by the selected factors. You want your strategy's return stream to be all alpha, and to be unexplained by as many parameters as possible. If you can show that your historical returns have little to no dependence on many factors, this is very positive. Certainly some unrelated risk factors might have spurious relationships over time in a large dataset, but those are not likely to be consistent.
## Setup
The first thing we do is compute a year's worth of factor returns.
### NOTE
The process for doing this is described in the Fundamental Factor Models lecture and uses pipeline. For more information please see that lecture.
```
import numpy as np
import statsmodels.api as sm
import scipy.stats as stats
from statsmodels import regression
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import CustomFactor, Returns
# Here's the raw data we need, everything else is derivative.
class MarketCap(CustomFactor):
# Here's the data we need for this factor
inputs = [Fundamentals.shares_outstanding, USEquityPricing.close]
# Only need the most recent values for both series
window_length = 1
def compute(self, today, assets, out, shares, close_price):
# Shares * price/share = total price = market cap
out[:] = shares * close_price
class BookToPrice(CustomFactor):
# pb = price to book, we'll need to take the reciprocal later
inputs = [Fundamentals.pb_ratio]
window_length = 1
def compute(self, today, assets, out, pb):
out[:] = 1 / pb
def make_pipeline():
"""
Create and return our pipeline.
We break this piece of logic out into its own function to make it easier to
test and modify in isolation.
In particular, this function can be copy/pasted into research and run by itself.
"""
pipe = Pipeline()
# Add our factors to the pipeline
market_cap = MarketCap()
# Raw market cap and book to price data gets fed in here
pipe.add(market_cap, "market_cap")
book_to_price = BookToPrice()
pipe.add(book_to_price, "book_to_price")
# We also get daily returns
returns = Returns(inputs=[USEquityPricing.close], window_length=2)
pipe.add(returns, "returns")
# We compute a daily rank of both factors, this is used in the next step,
# which is computing portfolio membership.
market_cap_rank = market_cap.rank()
pipe.add(market_cap_rank, 'market_cap_rank')
book_to_price_rank = book_to_price.rank()
pipe.add(book_to_price_rank, 'book_to_price_rank')
# Build Filters representing the top and bottom 1000 stocks by our combined ranking system.
biggest = market_cap_rank.top(1000)
smallest = market_cap_rank.bottom(1000)
highpb = book_to_price_rank.top(1000)
lowpb = book_to_price_rank.bottom(1000)
# Don't return anything not in this set, as we don't need it.
pipe.set_screen(biggest | smallest | highpb | lowpb)
# Add the boolean flags we computed to the output data
pipe.add(biggest, 'biggest')
pipe.add(smallest, 'smallest')
pipe.add(highpb, 'highpb')
pipe.add(lowpb, 'lowpb')
return pipe
pipe = make_pipeline()
start_date = '2014-1-1'
end_date = '2015-1-1'
from quantopian.research import run_pipeline
results = run_pipeline(pipe, start_date, end_date)
R_biggest = results[results.biggest]['returns'].groupby(level=0).mean()
R_smallest = results[results.smallest]['returns'].groupby(level=0).mean()
R_highpb = results[results.highpb]['returns'].groupby(level=0).mean()
R_lowpb = results[results.lowpb]['returns'].groupby(level=0).mean()
SMB = R_smallest - R_biggest
HML = R_highpb - R_lowpb
```
How did each factor do over 2014?
```
SMB_CUM = np.cumprod(SMB+1)
HML_CUM = np.cumprod(HML+1)
plt.plot(SMB_CUM.index, SMB_CUM.values)
plt.plot(HML_CUM.index, HML_CUM.values)
plt.ylabel('Cumulative Return')
plt.legend(['SMB Portfolio Returns', 'HML Portfolio Returns']);
```
## Computing Risk Exposure
Now we can determine how exposed another return stream is to each of these factors. We can do this by running static or rolling linear regressions between our return stream and the factor portfolio returns. First we'll compute the active returns (returns - benchmark) of some random asset and then model that asset as a linear combination of our two factors. The more a factor contributes to the active returns, the more exposed the active returns are to that factor.
```
# Get returns data for our portfolio
portfolio = get_pricing(['MSFT', 'AAPL', 'YHOO', 'FB', 'TSLA'],
fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]
R = np.mean(portfolio, axis=1)
bench = get_pricing('SPY', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]
# The excess returns of our active management, in this case just holding a portfolio of our one asset
active = R - bench
# Define a constant to compute intercept
constant = pd.TimeSeries(np.ones(len(active.index)), index=active.index)
df = pd.DataFrame({'R': active,
'F1': SMB,
'F2': HML,
'Constant': constant})
df = df.dropna()
# Perform linear regression to get the coefficients in the model
b1, b2 = regression.linear_model.OLS(df['R'], df[['F1', 'F2']]).fit().params
# Print the coefficients from the linear regression
print 'Sensitivities of active returns to factors:\nSMB: %f\nHML: %f' % (b1, b2)
```
Using the formula from the start of the notebook, we can compute the factors' marginal contributions to active risk squared:
```
F1 = df['F1']
F2 = df['F2']
cov = np.cov(F1, F2)
ar_squared = (active.std())**2
fmcar1 = (b1*(b2*cov[0,1] + b1*cov[0,0]))/ar_squared
fmcar2 = (b2*(b1*cov[0,1] + b2*cov[1,1]))/ar_squared
print 'SMB Risk Contribution:', fmcar1
print 'HML Risk Contribution:', fmcar2
```
The rest of the risk can be attributed to active specific risk, i.e. factors that we did not take into account or the asset's idiosyncratic risk.
However, as usual, we will look at how the exposure to these factors changes over time, since we lose a tremendous amount of information by just looking at one data point. Let's look at what happens if we run a rolling regression over time.
```
# Compute the rolling betas
model = pd.stats.ols.MovingOLS(y = df['R'], x=df[['F1', 'F2']],
window_type='rolling',
window=100)
rolling_parameter_estimates = model.beta
rolling_parameter_estimates.plot();
plt.title('Computed Betas');
plt.legend(['F1 Beta', 'F2 Beta', 'Intercept']);
```
Now we'll look at FMCAR as it changes over time.
```
# Remove the first 99, which are all NaN for each case
# Compute covariances
covariances = pd.rolling_cov(df[['F1', 'F2']], window=100)[99:]
# Compute active risk squared
active_risk_squared = pd.rolling_std(active, window = 100)[99:]**2
# Compute betas
betas = rolling_parameter_estimates[['F1', 'F2']]
# Set up empty dataframe
FMCAR = pd.DataFrame(index=betas.index, columns=betas.columns)
# For each factor
for factor in betas.columns:
# For each bar in our data
for t in betas.index:
# Compute the sum of the betas and covariances
s = np.sum(betas.loc[t] * covariances[t][factor])
# Get the beta
b = betas.loc[t][factor]
# Get active risk squared
AR = active_risk_squared.loc[t]
# Put them all together to estimate FMCAR on that date
FMCAR[factor][t] = b * s / AR
```
Let's plot this.
```
plt.plot(FMCAR['F1'].index, FMCAR['F1'].values)
plt.plot(FMCAR['F2'].index, FMCAR['F2'].values)
plt.ylabel('Marginal Contribution to Active Risk Squared')
plt.legend(['F1 FMCAR', 'F2 FMCAR']);
```
###Problems with using this data
Whereas it may be interesting to know how a portfolio was exposed to certain factors historically, it is really only useful if we can make predictions about how it will be exposed to risk factors in the future. It's not always a safe assumption to say that future exposure will be the current exposure. As you saw the exposure varies quite a bit, so taking the average is dangerous. We could put confidence intervals around that average, but that would only work if the distribution of exposures were normal or well behaved. Let's check using our old buddy, the Jarque-Bera test.
```
from statsmodels.stats.stattools import jarque_bera
_, pvalue1, _, _ = jarque_bera(FMCAR['F1'].dropna().values)
_, pvalue2, _, _ = jarque_bera(FMCAR['F2'].dropna().values)
print 'p-value F1_FMCAR is normally distributed', pvalue1
print 'p-value F2_FMCAR is normally distributed', pvalue2
```
The p-values are below our default cutoff of 0.05. We can't even put good confidence intervals on the risk exposure of the asset without extra effort, so making any statement about exposure in the future is very difficult right now. Any hedge we took out to cancel the exposure to one of the factors might be way over or under hedged.
We are trying to predict future exposure, and predicting the future is incredibly difficult. One must be very careful with statistical methods to ensure that false predictions are not made.
# Factor and tracking portfolios
We can use factor and tracking portfolios to tweak a portfolio's sensitivities to different sources of risk.
A <i>factor portfolio</i> has a sensitivity of 1 to a particular factor and 0 to all other factors. In other words, it represents the risk of that one factor. We can add a factor portfolio to a larger portfolio to adjust its exposure to that factor.
A similar concept is a <i>tracking portfolio</i>, which is constructed to have the same factor sensitivities as a benchmark or other portfolio. Like a factor portfolio, this allows us to either speculate on or hedge out the risks associated with that benchmark or portfolio. For instance, we regularly hedge out the market, because we care about how our portfolio performs relative to the market, and we don't want to be subject to the market's fluctuations.
To construct a factor or tracking portfolio, we need the factor sensitivities of what we want to track. We already know what these are in the former case, but we need to compute them in the latter using usual factor model methods. Then, we pick some $K+1$ assets (where $K$ is the number of factors we're considering) and solve for the weights of the assets in the portfolio.
##Portfolio Exposure
The portfolio exposure can be computed directly from the return stream, or as the weighted average of all the assets held.
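As a minimal sketch of the weighted-average route (all numbers below are hypothetical placeholders):
```
import numpy as np

weights = np.array([0.5, 0.3, 0.2])        # portfolio weights, summing to 1
asset_betas = np.array([[0.9, 0.4],        # factor sensitivities of asset 1
                        [1.2, 0.8],        # asset 2
                        [0.3, 1.5]])       # asset 3

portfolio_exposure = weights.dot(asset_betas)  # one exposure per factor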
## Example
Say we have two factors $F_1$ and $F_2$, and a benchmark with sensitivities of 1 and 1.1 to the factors, respectively. We identify 3 securities $x_1, x_2, x_3$ that we would like to use in composing a portfolio that tracks the benchmark, whose sensitivities are $b_{11} = 0.7$, $b_{12} = 1.1$, $b_{21} = 0.1$, $b_{22} = 0.5$, $b_{31} = 1.5$, $b_{32} = 1.3$. We would like to compute weights $w_1$, $w_2$, $w_3$ so that our tracking portfolio is
$$ P = w_1 x_1 + w_2 x_2 + w_3 x_3 $$
We want our portfolio sensitivities to match the benchmark:
$$ w_1 b_{11} + w_2 b_{21} + w_3 b_{31} = 1 $$
$$ w_1 b_{12} + w_2 b_{22} + w_3 b_{32} = 1.1 $$
Also, the weights need to sum to 1:
$$ w_1 + w_2 + w_3 = 1 $$
Solving this system of 3 linear equations, we find that $w_1 = 1/3$, $w_2 = 1/6$, and $w_3 = 1/2$. Putting the securities together into a portfolio using these weights, we obtain a portfolio with the same risk profile as the benchmark.
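As a quick numerical check of these weights, we can hand the same system to NumPy's linear solver (a minimal sketch using the sensitivities quoted above):
```
import numpy as np

# Rows: factor-1 sensitivities, factor-2 sensitivities, and the sum-to-one constraint
A = np.array([[0.7, 0.1, 1.5],
              [1.1, 0.5, 1.3],
              [1.0, 1.0, 1.0]])
b = np.array([1.0, 1.1, 1.0])

np.linalg.solve(A, b)  # approximately [0.3333, 0.1667, 0.5]
```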
##How to Use Risk Exposure Models
Once we know our risk exposures, we can do a few things. We can not enter into positions that have high exposures to certain factors, or we can hedge our positions to try to neutralize the exposure.
###Risk Management
Often times funds will have a layer of protection over their traders/algorithms. This layer of protection takes in the trades that the fund wants to make, then computes the exposure of the new portfolio, and checks to make sure they're within pre-defined ranges. If they are not, it does not place the trade and files a warning.
###Hedging
Another method of dealing with exposure is to take out hedges. You can determine, for example, your exposure to each sector of the market. You can then take out a hedge if a particular sector seems to affect your returns too much. For more information on hedging, please see our Beta Hedging lecture. Good algorithms will have built-in hedging logic that ensures they are never over-exposed.
*This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
# sell-short-in-may-and-go-away
see: https://en.wikipedia.org/wiki/Sell_in_May
The reason for this example is to demonstrate short selling (algo), and short selling using the adjust_percent function (algo2).
algo - Sell short in May and go away, buy to cover in Nov
algo2 - on the first trading day of the month, adjust the short position to 50%
(Select the one you want to call in the Strategy.run() function.)
```
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from talib.abstract import *
import pinkfish as pf
# Format price data
pd.options.display.float_format = '{:0.2f}'.format
%matplotlib inline
# Set size of inline plots
'''note: rcParams can't be in same cell as import matplotlib
or %matplotlib inline
%matplotlib notebook: will lead to interactive plots embedded within
the notebook, you can zoom and resize the figure
%matplotlib inline: only draw static images in the notebook
'''
plt.rcParams["figure.figsize"] = (10, 7)
pf.DEBUG = False
```
Some global data
```
#symbol = '^GSPC'
symbol = 'SPY'
capital = 10000
start = datetime.datetime(2015, 10, 30)
#start = datetime.datetime(*pf.SP500_BEGIN)
end = datetime.datetime.now()
```
Define Strategy Class
```
class Strategy:
def __init__(self, symbol, capital, start, end):
self.symbol = symbol
self.capital = capital
self.start = start
self.end = end
self.ts = None
self.tlog = None
self.dbal = None
self.stats = None
def _algo(self):
pf.TradeLog.cash = self.capital
for i, row in enumerate(self.ts.itertuples()):
date = row.Index.to_pydatetime()
high = row.high; low = row.low; close = row.close;
end_flag = pf.is_last_row(self.ts, i)
shares = 0
# Buy to cover (at the open on first trading day in Nov)
if self.tlog.shares > 0:
if (row.month == 11 and row.first_dotm) or end_flag:
shares = self.tlog.buy2cover(date, row.open)
# Sell short (at the open on first trading day in May)
else:
if row.month == 5 and row.first_dotm:
shares = self.tlog.sell_short(date, row.open)
if shares > 0:
pf.DBG("{0} SELL SHORT {1} {2} @ {3:.2f}".format(
date, shares, self.symbol, row.open))
elif shares < 0:
pf.DBG("{0} BUY TO COVER {1} {2} @ {3:.2f}".format(
date, -shares, self.symbol, row.open))
# Record daily balance
self.dbal.append(date, high, low, close)
def _algo2(self):
pf.TradeLog.cash = self.capital
for i, row in enumerate(self.ts.itertuples()):
date = row.Index.to_pydatetime()
high = row.high; low = row.low; close = row.close;
end_flag = pf.is_last_row(self.ts, i)
shares = 0
# On the first day of the month, adjust short position to 50%
if (row.first_dotm or end_flag):
weight = 0 if end_flag else 0.5
self.tlog.adjust_percent(date, close, weight, pf.Direction.SHORT)
# Record daily balance
self.dbal.append(date, high, low, close)
def run(self):
self.ts = pf.fetch_timeseries(self.symbol)
self.ts = pf.select_tradeperiod(self.ts, self.start, self.end,
use_adj=True)
# add calendar columns
self.ts = pf.calendar(self.ts)
self.tlog = pf.TradeLog(self.symbol)
self.dbal = pf.DailyBal()
self.ts, self.start = pf.finalize_timeseries(self.ts, self.start)
# Pick either algo or algo2
self._algo()
#self._algo2()
self._get_logs()
self._get_stats()
def _get_logs(self):
self.rlog = self.tlog.get_log_raw()
self.tlog = self.tlog.get_log()
self.dbal = self.dbal.get_log(self.tlog)
def _get_stats(self):
self.stats = pf.stats(self.ts, self.tlog, self.dbal, self.capital)
```
Run Strategy
```
s = Strategy(symbol, capital, start, end)
s.run()
s.rlog.head()
s.tlog.head()
s.dbal.tail()
```
Run Benchmark, Retrieve benchmark logs, and Generate benchmark stats
```
benchmark = pf.Benchmark(symbol, s.capital, s.start, s.end)
benchmark.run()
```
Plot Equity Curves: Strategy vs Benchmark
```
pf.plot_equity_curve(s.dbal, benchmark=benchmark.dbal)
```
Plot Trades
```
pf.plot_trades(s.dbal, benchmark=benchmark.dbal)
```
Bar Graph: Strategy vs Benchmark
```
df = pf.plot_bar_graph(s.stats, benchmark.stats)
df
```
```
from awesome_panel_extensions.awesome_panel.notebook import Header
Header(folder="examples/reference/frameworks/fast", notebook="FastTextInput.ipynb")
```
# Fast TextInput - Reference Guide
The `FastTextInput` widget is based on the [fast-text-field](https://explore.fast.design/components/fast-text-field) web component and extends the built in [Panel TextInput](https://panel.holoviz.org/reference/widgets/TextInput.html).
<table>
<tr>
<td><img src="https://raw.githubusercontent.com/MarcSkovMadsen/awesome-panel-extensions/master/assets/images/frameworks/fast/fast-text-input.png"></td>
</tr>
</table>
#### Parameters:
##### Core
* **``value``** (str): A string value.
###### Display
* **``name``** (str): The label of the TextInput.
* **``disabled``** (boolean): Whether or not the TextInput is disabled. Default is False.
* **``placeholder``** (string): A placeholder string displayed when no value is entered.
###### Fast
* **``appearance``** (string): Determines the appearance of the textinput. One of `outline` or `filled`. Defaults to `outline`.
* **``autofocus``** (bool): The autofocus attribute. Defaults to `False`.
* **``type_of_text``** (string): The type of text input. One of `email`, `password`, `tel`, `text`, `url`. Defaults to `text`.
* **``readonly``** (bool): Whether or not the TextInput is read only. Defaults to `False`.
The `FastTextInput` has the same layout and styling parameters as most other widgets. For example `width` and `sizing_mode`.
Please note that you can only use the Fast components inside a custom Panel template that
- Loads the [Fast `javascript` library](https://www.fast.design/docs/components/getting-started#from-cdn).
- Wraps the content of the `<body>` html tag inside the [fast-design-system-provider](https://www.fast.design/docs/components/getting-started#add-components) tag.
We provide the `FastTemplate` for easy usage.
You can also develop your own custom [Panel template](https://panel.holoviz.org/user_guide/Templates.html) if you need something special. For example combining it with more [fast.design](https://fast.design/) web components and the [Fluent Design System](https://www.microsoft.com/design/fluent/#/) to create **VS Code** and **Microsoft Office** like experiences.
Please note that Fast components will not work in older, legacy browsers like Internet Explorer.
___
Let's start by importing the dependencies
```
import param
import panel as pn
from awesome_panel_extensions.frameworks.fast import FastTemplate, FastTextInput
pn.config.sizing_mode = "stretch_width"
pn.extension()
```
## Parameters
Let's explore the parameters of the `FastTextInput`.
```
textinput = FastTextInput(name="The label", sizing_mode="fixed", width=300, appearance="outline", placeholder="write something")
textinput_parameters = ["name", "value", "disabled", "placeholder", "appearance", "autofocus", "type_of_text", "readonly", "height", "width", "sizing_mode"]
app=pn.Row(
textinput
)
template=FastTemplate(main=[app])
template
settings_pane = pn.WidgetBox(pn.Param(textinput, parameters=textinput_parameters, show_name=False))
settings_pane
```
## pn.Param
Let's verify that the `FastTextInput` can be used as a widget by `pn.Param`.
```
WIDGETS = {
"some_text": {
"type": FastTextInput, "sizing_mode": "fixed", "width": 400, "placeholder": "write some text!"
}
}
class ParameterizedApp(param.Parameterized):
some_text = param.String(default="This is some text", label="This is a label")
view = param.Parameter()
def __init__(self, **params):
super().__init__(**params)
self.view = pn.Param(self, parameters=["some_text"], widgets=WIDGETS)
parameterized_app = ParameterizedApp()
parameterized_template = FastTemplate(main=[parameterized_app.view])
parameterized_template
pn.Param(parameterized_app.param.some_text)
```
## Resources
- [fast.design](https://fast.design/)
- [fast-text-field](https://explore.fast.design/components/fast-text-field)
## Known Issues
- The `fast-text-field` web component has additional attributes (`max_length`, `min_length`, `pattern`, `size`, `spellcheck`, `required`) that do not seem to work. If you think they are important, please upvote [Fast Github Issue 3852](https://github.com/microsoft/fast/issues/3852).
<table>
<tr>
<td><img src="https://raw.githubusercontent.com/MarcSkovMadsen/awesome-panel-extensions/master/assets/images/frameworks/fast/fast-panel-logo.png"></td>
</tr>
</table>
If we have a smaller dataset, k-fold cross-validation can help us get the most out of it when evaluating the neural network's performance. This is possible in Keras because we can "wrap" any neural network so that it can use the evaluation features available in scikit-learn, including k-fold cross-validation. To accomplish this, we first create a function that returns a compiled neural network. Next we use `KerasClassifier` (or `KerasRegressor` for a regression model) to wrap the model so it can be used by scikit-learn. After this, we can use our neural network like any other scikit-learn learning algorithm (e.g. random forests, logistic regression). In our solution, we use `cross_val_score` to run a 3-fold cross-validation on our neural network.
## Preliminaries
```
# Load libraries
import numpy as np
from keras import models
from keras import layers
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_classification
# Set random seed
np.random.seed(0)
```
## Create Feature And Target Data
```
# Number of features
number_of_features = 100
# Generate features matrix and target vector
features, target = make_classification(n_samples = 10000,
n_features = number_of_features,
n_informative = 3,
n_redundant = 0,
n_classes = 2,
weights = [.5, .5],
random_state = 0)
```
## Create Function That Constructs Neural Network
```
# Create function returning a compiled network
def create_network():
# Start neural network
network = models.Sequential()
# Add fully connected layer with a ReLU activation function
network.add(layers.Dense(units=16, activation='relu', input_shape=(number_of_features,)))
# Add fully connected layer with a ReLU activation function
network.add(layers.Dense(units=16, activation='relu'))
# Add fully connected layer with a sigmoid activation function
network.add(layers.Dense(units=1, activation='sigmoid'))
# Compile neural network
network.compile(loss='binary_crossentropy', # Cross-entropy
optimizer='rmsprop', # Root Mean Square Propagation
metrics=['accuracy']) # Accuracy performance metric
# Return compiled network
return network
```
## Wrap Function In KerasClassifier
```
# Wrap Keras model so it can be used by scikit-learn
neural_network = KerasClassifier(build_fn=create_network,
epochs=10,
batch_size=100,
verbose=0)
```
## Conduct k-Fold Cross-Validation Using scikit-learn
```
# Evaluate neural network using three-fold cross-validation
cross_val_score(neural_network, features, target, cv=3)
```
## Simple neural network in plain Python
This notebook implements a simple neural network architecture that maps $2$-dimensional input vectors onto binary output values. Our network will have $2$ input neurons, one hidden layer with $6$ hidden neurons and an output layer with $1$ output neuron.
We will represent the architecture by means of the weight matrices between the layers. In our example, the weight matrix between the input and hidden layer will be denoted as $W_h$, the weight matrix between the hidden and output layer as $W_o$. In addition to the weights connecting the neurons, each hidden and output neuron will have a bias weight with a constant input of $+1$.
Our training set consists of $m = 750$ examples. Therefore, we will have the following matrix shapes:
- Training set shape: $X = (750, 2)$
- Targets shape: $Y = (750, 1)$
- $W_h$ shape: $(n_{features}, n_{hidden}) = (2, 6)$
- $b_h$ shape (bias vector): $(1, n_{hidden}) = (1, 6)$
- $W_o$ shape: $(n_{hidden}, n_{outputs}) = (6, 1)$
- $b_o$ shape (bias vector): $(1, n_{outputs}) = (1, 1)$

### Loss Function
We will use the same loss function as in logistic regression:
\begin{equation}
J(\boldsymbol{w},b) = - \frac{1}{m} \sum_{i=1}^m \Big[ y^{(i)} \log(\hat{y}^{(i)}) + (1 - y^{(i)}) \log\big(1 - \hat{y}^{(i)}\big) \Big]
\end{equation}
For a classification task with more than two classes, we would use a generalization of this function, namely the categorical cross-entropy.
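To make the formula concrete, here is a minimal NumPy sketch (independent of the network class implemented below) that evaluates this loss for a small, hypothetical batch of targets and predicted probabilities:
```
import numpy as np

# Hypothetical targets and predicted probabilities for m = 4 examples
y_true = np.array([1, 0, 1, 0])
y_hat = np.array([0.9, 0.2, 0.7, 0.4])

# Binary cross-entropy averaged over the batch
loss = -np.mean(y_true * np.log(y_hat) + (1 - y_true) * np.log(1 - y_hat))
print(loss)  # roughly 0.30
```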
### Training
We will train our network with gradient descent and we will use backpropagation to compute the required partial derivatives. The training procedure has the following steps:
1. Initialize the parameters (i.e. the weights and biases)
2. Repeat until convergence:
2.1. Propagate the current input batch forward through the network. To do so, compute the activations and outputs of all hidden and output units.
2.2 Compute the partial derivatives of the loss function with respect to each parameter
2.3 Update the parameters
### Forward Pass
We start by computing the activation and output of each unit in our network. To speed up the implementation, we won't do this for each input example individually but for all examples at once, using vectorization. We will use the following notation:
- $\boldsymbol{A}_h$: matrix with activations of all hidden units for all training examples
- $\boldsymbol{O}_h$: matrix with outputs of all hidden units for all training examples
The hidden neurons will have $\tanh$ as their activation function:
\begin{equation}
\tanh(x) = \frac{\sinh(x)}{\cosh(x)} = \frac{\exp(x) - \exp(-x)}{\exp(x) + \exp(-x)}
\end{equation}
\begin{equation}
\tanh'(x) = 1 - \tanh^2(x)
\end{equation}
The output neurons will have the $\textit{sigmoid}$ activation function:
\begin{equation}
\sigma(x) = \frac{1}{1 + \exp(-x)}
\end{equation}
\begin{equation}
\sigma'(x) = \sigma(x) \big(1 - \sigma(x)\big)
\end{equation}
The activations and outputs can then be computed as follows ($\cdot$ denotes the dot product):
\begin{equation}
\boldsymbol{A}_h = \boldsymbol{X} \cdot \boldsymbol{W}_h + \boldsymbol{b}_h, \text{shape: } (750, 6)
\end{equation}
\begin{equation}
\boldsymbol{O}_h = \tanh(\boldsymbol{A}_h), \text{shape: } (750, 6)
\end{equation}
\begin{equation}
\boldsymbol{A}_o = \boldsymbol{O}_h \cdot \boldsymbol{W}_o + b_o, \text{shape: } (750, 1)
\end{equation}
\begin{equation}
\boldsymbol{O}_o = \sigma(\boldsymbol{A}_o), \text{shape: } (750, 1)
\end{equation}
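As a quick sanity check of these shapes (a standalone sketch, separate from the `NeuralNet` class implemented below), we can run the forward pass once with randomly initialized matrices:
```
import numpy as np

# Random data and parameters with the shapes listed above (sketch only)
X = np.random.randn(750, 2)
W_h, b_h = np.random.randn(2, 6), np.zeros((1, 6))
W_o, b_o = np.random.randn(6, 1), np.zeros((1, 1))

A_h = X @ W_h + b_h            # (750, 6)
O_h = np.tanh(A_h)             # (750, 6)
A_o = O_h @ W_o + b_o          # (750, 1)
O_o = 1 / (1 + np.exp(-A_o))   # (750, 1)

print(A_h.shape, O_h.shape, A_o.shape, O_o.shape)
```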
### Backward pass
To compute the weight updates we need the partial derivatives of the loss function with respect to each unit. I won't give the derivation of these equations here, you will find plenty of good explanations on other websites (for example [here](https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/)).
For the output neurons, the gradients are given by (matrix notation):
$\frac{\partial L}{\partial \boldsymbol{A}_o} = d\boldsymbol{A}_o = (\boldsymbol{O}_o - \boldsymbol{Y})$
$\frac{\partial L}{\partial \boldsymbol{W}_o} = \frac{1}{m} (\boldsymbol{O}_h^T \cdot d\boldsymbol{A}_o)$
$\frac{\partial L}{\partial \boldsymbol{b}_o} = \frac{1}{m} \sum d\boldsymbol{A}_o$
For the weight matrix between input and hidden layer we have:
$\frac{\partial L}{\partial \boldsymbol{A}_h} = d\boldsymbol{A}_h = (d\boldsymbol{A}_o \cdot \boldsymbol{W}_o^T) * (1 - \tanh^2 (\boldsymbol{A}_h))$
$\frac{\partial L}{\partial \boldsymbol{W}_h} = \frac{1}{m} (\boldsymbol{X}^T \cdot d\boldsymbol{A}_h)$
$\frac{\partial L}{\partial \boldsymbol{b}_h} = \frac{1}{m} \sum d\boldsymbol{A}_h$
### Weight Update
$\boldsymbol{W}_h = \boldsymbol{W}_h - \eta * \frac{\partial L}{\partial \boldsymbol{W}_h}$
$\boldsymbol{b}_h = \boldsymbol{b}_h - \eta * \frac{\partial L}{\partial \boldsymbol{b}_h} $
$\boldsymbol{W}_o = \boldsymbol{W}_o - \eta * \frac{\partial L}{\partial \boldsymbol{W}_o} $
$\boldsymbol{b}_o = \boldsymbol{b}_o - \eta * \frac{\partial L}{\partial \boldsymbol{b}_o} $
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.model_selection import train_test_split
np.random.seed(123)
%matplotlib inline
```
## Dataset
```
X, y = make_circles(n_samples=1000, factor=0.5, noise=.1)
fig = plt.figure(figsize=(8,6))
plt.scatter(X[:,0], X[:,1], c=y)
plt.xlim([-1.5, 1.5])
plt.ylim([-1.5, 1.5])
plt.title("Dataset")
plt.xlabel("First feature")
plt.ylabel("Second feature")
plt.show()
# reshape targets to get column vector with shape (n_samples, 1)
y_true = y[:, np.newaxis]
# Split the data into a training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y_true)
print(f'Shape X_train: {X_train.shape}')
print(f'Shape y_train: {y_train.shape}')
print(f'Shape X_test: {X_test.shape}')
print(f'Shape y_test: {y_test.shape}')
```
## Neural Network Class
Some parts of this implementation are inspired by the exercises of Andrew Ng's [coursera course](https://www.coursera.org/learn/neural-networks-deep-learning)
```
class NeuralNet():
def __init__(self, n_inputs, n_outputs, n_hidden):
self.n_inputs = n_inputs
self.n_outputs = n_outputs
self.hidden = n_hidden
# Initialize weight matrices and bias vectors
self.W_h = np.random.randn(self.n_inputs, self.hidden)
self.b_h = np.zeros((1, self.hidden))
self.W_o = np.random.randn(self.hidden, self.n_outputs)
self.b_o = np.zeros((1, self.n_outputs))
def sigmoid(self, a):
return 1 / (1 + np.exp(-a))
def forward_pass(self, X):
"""
Propagates the given input X forward through the net.
Returns:
A_h: matrix with activations of all hidden neurons for all input examples
O_h: matrix with outputs of all hidden neurons for all input examples
A_o: matrix with activations of all output neurons for all input examples
O_o: matrix with outputs of all output neurons for all input examples
"""
# Compute activations and outputs of hidden units
A_h = np.dot(X, self.W_h) + self.b_h
O_h = np.tanh(A_h)
# Compute activations and outputs of output units
A_o = np.dot(O_h, self.W_o) + self.b_o
O_o = self.sigmoid(A_o)
outputs = {
"A_h": A_h,
"A_o": A_o,
"O_h": O_h,
"O_o": O_o,
}
return outputs
def cost(self, y_true, y_predict, n_samples):
"""
Computes and returns the cost over all examples
"""
# same cost function as in logistic regression
cost = (- 1 / n_samples) * np.sum(y_true * np.log(y_predict) + (1 - y_true) * (np.log(1 - y_predict)))
cost = np.squeeze(cost)
assert isinstance(cost, float)
return cost
def backward_pass(self, X, Y, n_samples, outputs):
"""
Propagates the errors backward through the net.
Returns:
dW_h: partial derivatives of loss function w.r.t hidden weights
db_h: partial derivatives of loss function w.r.t hidden bias
dW_o: partial derivatives of loss function w.r.t output weights
db_o: partial derivatives of loss function w.r.t output bias
"""
dA_o = (outputs["O_o"] - Y)
dW_o = (1 / n_samples) * np.dot(outputs["O_h"].T, dA_o)
db_o = (1 / n_samples) * np.sum(dA_o)
dA_h = (np.dot(dA_o, self.W_o.T)) * (1 - np.power(outputs["O_h"], 2))
dW_h = (1 / n_samples) * np.dot(X.T, dA_h)
db_h = (1 / n_samples) * np.sum(dA_h)
gradients = {
"dW_o": dW_o,
"db_o": db_o,
"dW_h": dW_h,
"db_h": db_h,
}
return gradients
def update_weights(self, gradients, eta):
"""
Updates the model parameters using a fixed learning rate
"""
self.W_o = self.W_o - eta * gradients["dW_o"]
self.W_h = self.W_h - eta * gradients["dW_h"]
self.b_o = self.b_o - eta * gradients["db_o"]
self.b_h = self.b_h - eta * gradients["db_h"]
def train(self, X, y, n_iters=500, eta=0.3):
"""
Trains the neural net on the given input data
"""
n_samples, _ = X.shape
for i in range(n_iters):
outputs = self.forward_pass(X)
cost = self.cost(y, outputs["O_o"], n_samples=n_samples)
gradients = self.backward_pass(X, y, n_samples, outputs)
if i % 100 == 0:
print(f'Cost at iteration {i}: {np.round(cost, 4)}')
self.update_weights(gradients, eta)
def predict(self, X):
"""
Computes and returns network predictions for given dataset
"""
outputs = self.forward_pass(X)
y_pred = [1 if elem >= 0.5 else 0 for elem in outputs["O_o"]]
return np.array(y_pred)[:, np.newaxis]
```
## Initializing and training the neural network
```
nn = NeuralNet(n_inputs=2, n_hidden=6, n_outputs=1)
print("Shape of weight matrices and bias vectors:")
print(f'W_h shape: {nn.W_h.shape}')
print(f'b_h shape: {nn.b_h.shape}')
print(f'W_o shape: {nn.W_o.shape}')
print(f'b_o shape: {nn.b_o.shape}')
print()
print("Training:")
nn.train(X_train, y_train, n_iters=2000, eta=0.7)
```
## Testing the neural network
```
n_test_samples, _ = X_test.shape
y_predict = nn.predict(X_test)
print(f"Classification accuracy on test set: {(np.sum(y_predict == y_test)/n_test_samples)*100} %")
```
## Visualizing the decision boundary
In the lowermost plot we can see which parts of the input space are classified as positive and which are classified as negative by the trained network.
```
X_temp, y_temp = make_circles(n_samples=60000, noise=.5)
y_predict_temp = nn.predict(X_temp)
y_predict_temp = np.ravel(y_predict_temp)
fig = plt.figure(figsize=(8,12))
ax = fig.add_subplot(2,1,1)
plt.scatter(X[:,0], X[:,1], c=y)
plt.xlim([-1.5, 1.5])
plt.ylim([-1.5, 1.5])
plt.xlabel("First feature")
plt.ylabel("Second feature")
plt.title("Training and test set")
ax = fig.add_subplot(2,1,2)
plt.scatter(X_temp[:,0], X_temp[:,1], c=y_predict_temp)
plt.xlim([-1.5, 1.5])
plt.ylim([-1.5, 1.5])
plt.xlabel("First feature")
plt.ylabel("Second feature")
plt.title("Decision boundary")
```
# Homework pandas
<table align="left">
<tr>
<td><a href="https://colab.research.google.com/github/airnandez/numpandas/blob/master/exam/2020-exam.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a></td>
<td><a href="https://mybinder.org/v2/gh/airnandez/numpandas/master?filepath=exam%2F2020-exam.ipynb">
<img src="https://mybinder.org/badge_logo.svg" alt="Launch Binder"/>
</a></td>
</tr>
</table>
*Author: Fabio Hernandez*
*Last updated: 2021-04-02*
*Location:* https://github.com/airnandez/numpandas/exam
--------------------
## Instructions
For this exercise we will use a public dataset curated and made available by [Our World in Data](https://ourworldindata.org), located in [this repository](https://github.com/owid/covid-19-data/tree/master/public/data). We will use a snapshot of the dataset as of 2021-04-02.
For your convenience, this notebook comes with code for downloading the snapshot dataset from its source and loading it into memory as a **pandas** dataframe, as well as some cleaning and helper functions. Your mission is to execute the provided cells and to write the code needed to answer the questions below.
You must not modify the code provided. You must provide code for answering the questions, following the instructions for each one of them.
When you have finished, please save your notebook in the form of a `.ipynb` file and send it to your instructor according to the instructions you received by e-mail.
---------------------
## Dependencies
```
import datetime
import os
import glob
import pandas as pd
pd.set_option('display.max_columns', None)
pd.__version__
import numpy as np
np.__version__
```
------
## Download the dataset
Define a helper function for downloading data to a local file:
```
import requests
def download(url, path):
"""Download file at url and save it locally at path"""
with requests.get(url, stream=True) as resp:
mode, data = 'wb', resp.content
if 'text/plain' in resp.headers['Content-Type']:
mode, data = 'wt', resp.text
with open(path, mode) as f:
f.write(data)
```
Download the data files. We store the downloaded data in the directory `../data` relative to the location of this notebook. If a file has already been downloaded, we don't download it again.
```
# Download files
data_sources = (
"https://raw.githubusercontent.com/airnandez/numpandas/master/data/2021-04-02-owid-covid-data.csv",
)
# Create destination directory
os.makedirs(os.path.join('..', 'data'), exist_ok=True)
for url in data_sources:
# Build the URL and the destination file path
path = os.path.join('..', 'data', os.path.basename(url))
# If file already exists don't download it again
if not os.path.isfile(path) :
print(f'downloading {url} to {path}')
download(url, path)
```
Check what files we have for our analysis:
```
file_paths = glob.glob(os.path.join('..', 'data', '2021-*-owid-*'))
print('\n'.join(f for f in file_paths))
```
---------------------
## Load the data
Load the file `2021-04-02-owid-covid-data.csv` to a **pandas** dataframe.
⚠️ **Make sure you get familiar with the contents of that file, by reading the [codebook](https://github.com/owid/covid-19-data/blob/master/public/data/owid-covid-codebook.csv), which describes the meaning of each column.**
```
path = os.path.join('..', 'data', '2021-04-02-owid-covid-data.csv')
df = pd.read_csv(path, parse_dates=['date'])
df.sample(5)
```
------------------------
## Question 1: number of cases, incidence and fatality ratio
We want to compute the total number of cases, deaths and fatality ratio in France and in the world as of 2021-04-01.
The fatality ratio is the fraction of deaths over the total number of confirmed COVID-19 cases. The incidence is the ratio of the total number of confirmed cases over the population.
### Question 1a (3 points)
Compute the total number of cases, deaths, incidence and fatality ratio for France. You must write code to extract the relevant information from the dataframe and assign the appropriate values to the variables defined in the cell below.
```
# Total confirmed cases of COVID-19 in France
...
total_cases_fr = ...
# Population in France
population_fr = ...
# Total number of deaths attributed to COVID-19 in France
total_deaths_fr = ...
# Incidence in France
incidence_fr = (total_cases_fr / population_fr) * 100
# Fatality ratio: deaths vs confirmed cases
fatality_fr = (total_deaths_fr / total_cases_fr) * 100
print(f'Population in France: {population_fr:>12,.0f}')
print(f'Total number of cases in France: {total_cases_fr:>12,.0f}')
print(f'Total number of deaths in France: {total_deaths_fr:>12,.0f}')
print(f'Incidence in France: {incidence_fr:>12,.2f}%')
print(f'Fatality ratio in France: {fatality_fr:>12.2f}%')
```
### Question 1b (3 points)
As done for France in the previous question, here you need to compute the total number of cases, deaths, incidence and fatality ratio for the entire world:
```
# Select data for the whole world
...
population_world = ...
total_cases_world = ...
total_deaths_world = ...
incidence_world = (total_cases_world / population_world) * 100
fatality_world = (total_deaths_world / total_cases_world) * 100
print(f'World population {population_world:>14,.0f}')
print(f'Total number of cases in the world: {total_cases_world:>14,.0f}')
print(f'Total number of deaths in the world: {total_deaths_world:>14,.0f}')
print(f'Incidence in the world: {incidence_world:>14,.2f}%')
print(f'Fatality ratio in the world: {fatality_world:>14.2f}%')
```
------------------
## Question 2 (7 points)
Compute and print a list with the name of the **countries** that have administered 80% of the global number of vaccination doses.
⚠️ Please note that in this dataframe there are rows that contain information about a region (e.g. Europe, Asia, World), in addition to information about individual countries.
```
# Select only the rows which contain information about a country (as opposed to a region)
# Regions are encoded with a 'iso_code' of the form 'OWID_XXXXX'
...
# Compute the vaccination doses administered by all countries
...
# Sort the countries by their value of vaccination doses administered
...
# Build the list of countries which have administered 80% of the
# doses administered around the world
....
```
_____
## Question 3 (7 points)
Compute an ordered list of the top 10 countries with a population of more than 1 million, ranked by the fraction of their population which has already taken **all the doses** prescribed by the vaccination protocol.
```
# Build a dataframe with one row per country and two columns: 'people_fully_vaccinated' and 'population'
....
# Extract the number of people fully vaccinated and the population for each country
...
# Among the countries with more than 1M people, select the top 10
# ranked by percentage of fully vaccinated population
....
```
----------------
## Bonus question (3 points)
The function `plot` below generates and displays a figure for visualizing a set of countries and the percentage of their population which is fully vaccinated. You need to provide the information to visualize the top 10 countries with populations of at least 1 million people which have the largest fraction of their population fully vaccinated (see Question 3).
To use this function, you must compute two Python lists:
* the list `countries` which contains the name of the top 10 countries with population of at least 1 million people, which have the largest fraction of their population fully vaccinated,
* the list `percents` which contains the percentage of the fully vaccinated population of those 10 countries
After computing those two lists call the function `plot` to visualize the figure, as shown below:
```python
countries = [ 'France', 'Germany', 'Italy', ... ]
percents = [ 0.3, 0.2, 0.1, ... ]
plot(countries, percents)
```
```
import bokeh
import bokeh.plotting
bokeh.plotting.output_notebook()
def plot(countries, percents):
"""Generates and displays a Bokeh plot with horizontal bars, one bar per country"""
figure = bokeh.plotting.figure(
title = 'Percentage of population fully vaccinated (countries with population ≥ 1M)',
x_axis_label = 'percentage',
x_range = (0, 1),
y_range = countries,
plot_width = 800,
plot_height = 400,
background_fill_color = 'whitesmoke',
background_fill_alpha = 0.8
)
figure.xaxis.formatter = bokeh.models.formatters.NumeralTickFormatter(format='0%')
figure.ygrid.grid_line_color = None
figure.hbar(right=percents, y=countries, height=0.5, color='coral')
bokeh.plotting.show(figure)
countries = ...
percents = ...
plot(countries, percents)
```
# Exp 110 analysis
See `./informercial/Makefile` for experimental
details.
```
import os
import numpy as np
from IPython.display import Image
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_style('ticks')
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)
from infomercial.exp import meta_bandit
from infomercial.exp import epsilon_bandit
from infomercial.exp import beta_bandit
from infomercial.exp import softbeta_bandit
from infomercial.local_gym import bandit
from infomercial.exp.meta_bandit import load_checkpoint
import gym
def plot_meta(env_name, result):
"""Plots!"""
# episodes, actions, scores_E, scores_R, values_E, values_R, ties, policies
episodes = result["episodes"]
actions =result["actions"]
bests =result["p_bests"]
scores_E = result["scores_E"]
scores_R = result["scores_R"]
values_R = result["values_R"]
values_E = result["values_E"]
ties = result["ties"]
policies = result["policies"]
# -
env = gym.make(env_name)
best = env.best
print(f"Best arm: {best}, last arm: {actions[-1]}")
# Plotz
fig = plt.figure(figsize=(6, 14))
grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)
# Arm
plt.subplot(grid[0, 0])
plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
plt.plot(episodes, np.repeat(best, np.max(episodes)+1),
color="red", alpha=0.8, ls='--', linewidth=2)
plt.ylim(-.1, np.max(actions)+1.1)
plt.ylabel("Arm choice")
plt.xlabel("Episode")
# Policy
policies = np.asarray(policies)
episodes = np.asarray(episodes)
plt.subplot(grid[1, 0])
m = policies == 0
plt.scatter(episodes[m], policies[m], alpha=.4, s=2, label="$\pi_E$", color="purple")
m = policies == 1
plt.scatter(episodes[m], policies[m], alpha=.4, s=2, label="$\pi_R$", color="grey")
plt.ylim(-.1, 1+.1)
plt.ylabel("Controlling\npolicy")
plt.xlabel("Episode")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# score
plt.subplot(grid[2, 0])
plt.scatter(episodes, scores_E, color="purple", alpha=0.4, s=2, label="E")
plt.plot(episodes, scores_E, color="purple", alpha=0.4)
plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R")
plt.plot(episodes, scores_R, color="grey", alpha=0.4)
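    # NOTE: tie_threshold is assumed to be defined globally (e.g., set alongside the experiment's best parameters) before plot_meta is called.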
plt.plot(episodes, np.repeat(tie_threshold, np.max(episodes)+1),
color="violet", alpha=0.8, ls='--', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Episode")
# plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# Q
plt.subplot(grid[3, 0])
plt.scatter(episodes, values_E, color="purple", alpha=0.4, s=2, label="$Q_E$")
plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="$Q_R$")
plt.plot(episodes, np.repeat(tie_threshold, np.max(episodes)+1),
color="violet", alpha=0.8, ls='--', linewidth=2)
plt.ylabel("Value")
plt.xlabel("Episode")
# plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# Ties
plt.subplot(grid[4, 0])
plt.scatter(episodes, bests, color="red", alpha=.5, s=2)
plt.ylabel("p(best)")
plt.xlabel("Episode")
plt.ylim(0, 1)
# Ties
plt.subplot(grid[5, 0])
plt.scatter(episodes, ties, color="black", alpha=.5, s=2, label="$\pi_{tie}$ : 1\n $\pi_\pi$ : 0")
plt.ylim(-.1, 1+.1)
plt.ylabel("Ties index")
plt.xlabel("Episode")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
def plot_epsilon(env_name, result):
"""Plots!"""
# episodes, actions, scores_E, scores_R, values_E, values_R, ties, policies
episodes = result["episodes"]
actions =result["actions"]
bests =result["p_bests"]
scores_R = result["scores_R"]
values_R = result["values_R"]
epsilons = result["epsilons"]
# -
env = gym.make(env_name)
best = env.best
print(f"Best arm: {best}, last arm: {actions[-1]}")
# Plotz
fig = plt.figure(figsize=(6, 14))
grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)
# Arm
plt.subplot(grid[0, 0])
plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
plt.plot(episodes, np.repeat(best, np.max(episodes)+1),
color="red", alpha=0.8, ls='--', linewidth=2)
plt.ylim(-.1, np.max(actions)+1.1)
plt.ylabel("Arm choice")
plt.xlabel("Episode")
# score
plt.subplot(grid[1, 0])
plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R")
plt.ylabel("Score")
plt.xlabel("Episode")
# plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# Q
plt.subplot(grid[2, 0])
plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="$Q_R$")
plt.ylabel("Value")
plt.xlabel("Episode")
# plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# best
plt.subplot(grid[3, 0])
plt.scatter(episodes, bests, color="red", alpha=.5, s=2)
plt.ylabel("p(best)")
plt.xlabel("Episode")
plt.ylim(0, 1)
# Decay
plt.subplot(grid[4, 0])
plt.scatter(episodes, epsilons, color="black", alpha=.5, s=2)
plt.ylabel("$\epsilon_R$")
plt.xlabel("Episode")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
def plot_beta(env_name, result):
"""Plots!"""
# episodes, actions, scores_E, scores_R, values_E, values_R, ties, policies
episodes = result["episodes"]
actions =result["actions"]
bests =result["p_bests"]
scores_R = result["scores_R"]
values_R = result["values_R"]
beta = result["beta"]
# -
env = gym.make(env_name)
best = env.best
print(f"Best arm: {best}, last arm: {actions[-1]}")
# Plotz
fig = plt.figure(figsize=(6, 14))
grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)
# Arm
plt.subplot(grid[0, 0])
plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
plt.plot(episodes, np.repeat(best, np.max(episodes)+1),
color="red", alpha=0.8, ls='--', linewidth=2)
plt.ylim(-.1, np.max(actions)+1.1)
plt.ylabel("Arm choice")
plt.xlabel("Episode")
# score
plt.subplot(grid[1, 0])
plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R")
plt.ylabel("Score")
plt.xlabel("Episode")
# plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# Q
plt.subplot(grid[2, 0])
plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="$Q_R$")
plt.ylabel("Value")
plt.xlabel("Episode")
# plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# best
plt.subplot(grid[3, 0])
plt.scatter(episodes, bests, color="red", alpha=.5, s=2)
plt.ylabel("p(best)")
plt.xlabel("Episode")
plt.ylim(0, 1)
def plot_critic(critic_name, env_name, result):
# -
env = gym.make(env_name)
best = env.best
# Data
critic = result[critic_name]
arms = list(critic.keys())
values = list(critic.values())
# Plotz
fig = plt.figure(figsize=(8, 3))
grid = plt.GridSpec(1, 1, wspace=0.3, hspace=0.8)
# Arm
plt.subplot(grid[0])
plt.scatter(arms, values, color="black", alpha=.5, s=30)
plt.plot([best]*10, np.linspace(min(values), max(values), 10), color="red", alpha=0.8, ls='--', linewidth=2)
plt.ylabel("Value")
plt.xlabel("Arm")
```
# Load and process data
```
data_path ="/Users/qualia/Code/infomercial/data/"
exp_name = "exp110"
sorted_params = load_checkpoint(os.path.join(data_path, f"{exp_name}_sorted.pkl"))
# print(sorted_params.keys())
best_params = sorted_params[0]
beta = best_params['beta']
sorted_params
```
# Performance
of best parameters
```
env_name = 'BanditHardAndSparse10-v0'
num_episodes = 50000
# Run w/ best params
result = beta_bandit(
env_name=env_name,
num_episodes=num_episodes,
lr_R=best_params["lr_R"],
beta=best_params["beta"],
seed_value=2,
)
print(best_params)
plot_beta(env_name, result=result)
plot_critic('critic', env_name, result)
```
# Sensitivity
to parameter choices
```
total_Rs = []
betas = []
lrs_R = []
lrs_E = []
trials = list(sorted_params.keys())
for t in trials:
total_Rs.append(sorted_params[t]['total_R'])
lrs_R.append(sorted_params[t]['lr_R'])
betas.append(sorted_params[t]['beta'])
# Init plot
fig = plt.figure(figsize=(5, 18))
grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)
# Do plots:
# Arm
plt.subplot(grid[0, 0])
plt.scatter(trials, total_Rs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("total R")
_ = sns.despine()
plt.subplot(grid[1, 0])
plt.scatter(trials, lrs_R, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("lr_R")
_ = sns.despine()
plt.subplot(grid[2, 0])
plt.scatter(lrs_R, total_Rs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("lrs_R")
plt.ylabel("total_Rs")
_ = sns.despine()
plt.subplot(grid[3, 0])
plt.scatter(betas, total_Rs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("beta")
plt.ylabel("total_Rs")
_ = sns.despine()
```
# Parameter correlations
```
from scipy.stats import spearmanr
spearmanr(total_Rs, lrs_R)
spearmanr(betas, total_Rs)
spearmanr(betas, lrs_R)
```
# Distributions
of parameters
```
# Init plot
fig = plt.figure(figsize=(5, 6))
grid = plt.GridSpec(3, 1, wspace=0.3, hspace=0.8)
plt.subplot(grid[0, 0])
plt.hist(betas, color="black")
plt.xlabel("beta")
plt.ylabel("Count")
_ = sns.despine()
plt.subplot(grid[1, 0])
plt.hist(lrs_R, color="black")
plt.xlabel("lr_R")
plt.ylabel("Count")
_ = sns.despine()
```
of total reward
```
# Init plot
fig = plt.figure(figsize=(5, 2))
grid = plt.GridSpec(1, 1, wspace=0.3, hspace=0.8)
plt.subplot(grid[0, 0])
plt.hist(total_Rs, color="black", bins=50)
plt.xlabel("Total reward")
plt.ylabel("Count")
# plt.xlim(0, 10)
_ = sns.despine()
```
# Regression with Amazon SageMaker XGBoost algorithm
_**Single machine training for regression with Amazon SageMaker XGBoost algorithm**_
---
---
## Contents
1. [Introduction](#Introduction)
2. [Setup](#Setup)
1. [Fetching the dataset](#Fetching-the-dataset)
2. [Data Ingestion](#Data-ingestion)
3. [Training the XGBoost model](#Training-the-XGBoost-model)
1. [Plotting evaluation metrics](#Plotting-evaluation-metrics)
4. [Set up hosting for the model](#Set-up-hosting-for-the-model)
1. [Import model into hosting](#Import-model-into-hosting)
2. [Create endpoint configuration](#Create-endpoint-configuration)
3. [Create endpoint](#Create-endpoint)
5. [Validate the model for use](#Validate-the-model-for-use)
---
## Introduction
This notebook demonstrates the use of Amazon SageMaker’s implementation of the XGBoost algorithm to train and host a regression model. We use the [Abalone data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html) originally from the UCI data repository [1]. More details about the original dataset can be found [here](https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.names). In the libsvm converted [version](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html), the nominal feature (Male/Female/Infant) has been converted into a real-valued feature. The age of an abalone is to be predicted from eight physical measurements. The dataset is already processed and stored on S3. Scripts used for processing the data can be found in the [Appendix](#Appendix). These include downloading the data, splitting it into train, validation and test sets, and uploading the files to an S3 bucket.
>[1] Dua, D. and Graff, C. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
## Setup
This notebook was tested in Amazon SageMaker Studio on a ml.t3.medium instance with Python 3 (Data Science) kernel.
Let's start by specifying:
1. The S3 buckets and prefixes that you want to use for saving the model and where training data is located. This should be within the same region as the Notebook Instance, training, and hosting.
1. The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp with the appropriate full IAM role arn string(s).
```
%%time
import os
import boto3
import re
import sagemaker
role = sagemaker.get_execution_role()
region = boto3.Session().region_name
# S3 bucket where the training data is located.
# Feel free to specify a different bucket and prefix
data_bucket = f"jumpstart-cache-prod-{region}"
data_prefix = "1p-notebooks-datasets/abalone/libsvm"
data_bucket_path = f"s3://{data_bucket}"
# S3 bucket for saving code and model artifacts.
# Feel free to specify a different bucket and prefix
output_bucket = sagemaker.Session().default_bucket()
output_prefix = "sagemaker/DEMO-xgboost-abalone-default"
output_bucket_path = f"s3://{output_bucket}"
```
## Training the XGBoost model
After setting the training parameters, we kick off training and poll for status until training is completed, which, in this example, takes between 5 and 6 minutes.
```
from sagemaker.amazon.amazon_estimator import get_image_uri
container = get_image_uri(region, "xgboost", "1.0-1")
%%time
import boto3
from time import gmtime, strftime
job_name = f"DEMO-xgboost-regression-{strftime('%Y-%m-%d-%H-%M-%S', gmtime())}"
print("Training job", job_name)
# Ensure that the training and validation data folders generated above are reflected in the "InputDataConfig" parameter below.
create_training_params = {
"AlgorithmSpecification": {"TrainingImage": container, "TrainingInputMode": "File"},
"RoleArn": role,
"OutputDataConfig": {"S3OutputPath": f"{output_bucket_path}/{output_prefix}/single-xgboost"},
"ResourceConfig": {"InstanceCount": 1, "InstanceType": "ml.m5.2xlarge", "VolumeSizeInGB": 5},
"TrainingJobName": job_name,
"HyperParameters": {
"max_depth": "5",
"eta": "0.2",
"gamma": "4",
"min_child_weight": "6",
"subsample": "0.7",
"silent": "0",
"objective": "reg:linear",
"num_round": "50",
},
"StoppingCondition": {"MaxRuntimeInSeconds": 3600},
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": f"{data_bucket_path}/{data_prefix}/train",
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "libsvm",
"CompressionType": "None",
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": f"{data_bucket_path}/{data_prefix}/validation",
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "libsvm",
"CompressionType": "None",
},
],
}
client = boto3.client("sagemaker", region_name=region)
client.create_training_job(**create_training_params)
import time
status = client.describe_training_job(TrainingJobName=job_name)["TrainingJobStatus"]
print(status)
while status != "Completed" and status != "Failed":
time.sleep(60)
status = client.describe_training_job(TrainingJobName=job_name)["TrainingJobStatus"]
print(status)
```
Note that the "validation" channel has been initialized too. The SageMaker XGBoost algorithm calculates RMSE on the data passed to the "validation" channel and writes it to the CloudWatch logs.
## Set up hosting for the model
In order to set up hosting, we have to import the model from training to hosting.
### Import model into hosting
Register the model with hosting. This allows the flexibility of importing models trained elsewhere.
```
%%time
import boto3
from time import gmtime, strftime
model_name = f"{job_name}-model"
print(model_name)
info = client.describe_training_job(TrainingJobName=job_name)
model_data = info["ModelArtifacts"]["S3ModelArtifacts"]
print(model_data)
primary_container = {"Image": container, "ModelDataUrl": model_data}
create_model_response = client.create_model(
ModelName=model_name, ExecutionRoleArn=role, PrimaryContainer=primary_container
)
print(create_model_response["ModelArn"])
```
### Create endpoint configuration
SageMaker supports configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way. In addition, the endpoint configuration describes the instance type required for model deployment.
```
from time import gmtime, strftime
endpoint_config_name = f"DEMO-XGBoostEndpointConfig-{strftime('%Y-%m-%d-%H-%M-%S', gmtime())}"
print(endpoint_config_name)
create_endpoint_config_response = client.create_endpoint_config(
EndpointConfigName=endpoint_config_name,
ProductionVariants=[
{
"InstanceType": "ml.m5.xlarge",
"InitialVariantWeight": 1,
"InitialInstanceCount": 1,
"ModelName": model_name,
"VariantName": "AllTraffic",
}
],
)
print(f"Endpoint Config Arn: {create_endpoint_config_response['EndpointConfigArn']}")
```
### Create endpoint
Lastly, the customer creates the endpoint that serves up the model by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
```
%%time
import time
endpoint_name = f'DEMO-XGBoostEndpoint-{strftime("%Y-%m-%d-%H-%M-%S", gmtime())}'
print(endpoint_name)
create_endpoint_response = client.create_endpoint(
EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name
)
print(create_endpoint_response["EndpointArn"])
resp = client.describe_endpoint(EndpointName=endpoint_name)
status = resp["EndpointStatus"]
while status == "Creating":
print(f"Status: {status}")
time.sleep(60)
resp = client.describe_endpoint(EndpointName=endpoint_name)
status = resp["EndpointStatus"]
print(f"Arn: {resp['EndpointArn']}")
print(f"Status: {status}")
```
## Validate the model for use
Finally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate predictions from the trained model using that endpoint.
```
runtime_client = boto3.client("runtime.sagemaker", region_name=region)
```
Download test data
```
FILE_TEST = "abalone.test"
s3 = boto3.client("s3")
s3.download_file(data_bucket, f"{data_prefix}/test/{FILE_TEST}", FILE_TEST)
```
Start with a single prediction.
```
!head -1 abalone.test > abalone.single.test
%%time
import json
from itertools import islice
import math
import struct
file_name = "abalone.single.test" # customize to your test file
with open(file_name, "r") as f:
payload = f.read().strip()
response = runtime_client.invoke_endpoint(
EndpointName=endpoint_name, ContentType="text/x-libsvm", Body=payload
)
result = response["Body"].read()
result = result.decode("utf-8")
result = result.split(",")
result = [math.ceil(float(i)) for i in result]
label = payload.strip(" ").split()[0]
print(f"Label: {label}\nPrediction: {result[0]}")
```
OK, a single prediction works. Let's run a whole batch to see how good the prediction accuracy is.
```
import sys
import math
def do_predict(data, endpoint_name, content_type):
payload = "\n".join(data)
response = runtime_client.invoke_endpoint(
EndpointName=endpoint_name, ContentType=content_type, Body=payload
)
result = response["Body"].read()
result = result.decode("utf-8")
result = result.split(",")
preds = [float((num)) for num in result]
preds = [math.ceil(num) for num in preds]
return preds
def batch_predict(data, batch_size, endpoint_name, content_type):
items = len(data)
arrs = []
for offset in range(0, items, batch_size):
if offset + batch_size < items:
results = do_predict(data[offset : (offset + batch_size)], endpoint_name, content_type)
arrs.extend(results)
else:
arrs.extend(do_predict(data[offset:items], endpoint_name, content_type))
sys.stdout.write(".")
return arrs
```
The following helps us calculate the Median Absolute Percent Error (MdAPE) on the batch dataset.
```
%%time
import json
import numpy as np
with open(FILE_TEST, "r") as f:
payload = f.read().strip()
labels = [int(line.split(" ")[0]) for line in payload.split("\n")]
test_data = [line for line in payload.split("\n")]
preds = batch_predict(test_data, 100, endpoint_name, "text/x-libsvm")
print(
"\n Median Absolute Percent Error (MdAPE) = ",
np.median(np.abs(np.array(labels) - np.array(preds)) / np.array(labels)),
)
```
### Delete Endpoint
Once you are done using the endpoint, you can use the following to delete it.
```
client.delete_endpoint(EndpointName=endpoint_name)
```
# Appendix
### Data split and upload
Following methods split the data into train/test/validation datasets and upload files to S3.
```
import io
import boto3
import random
def data_split(
FILE_DATA, FILE_TRAIN, FILE_VALIDATION, FILE_TEST, PERCENT_TRAIN, PERCENT_VALIDATION, PERCENT_TEST
):
data = [l for l in open(FILE_DATA, "r")]
train_file = open(FILE_TRAIN, "w")
valid_file = open(FILE_VALIDATION, "w")
tests_file = open(FILE_TEST, "w")
num_of_data = len(data)
num_train = int((PERCENT_TRAIN / 100.0) * num_of_data)
num_valid = int((PERCENT_VALIDATION / 100.0) * num_of_data)
num_tests = int((PERCENT_TEST / 100.0) * num_of_data)
data_fractions = [num_train, num_valid, num_tests]
split_data = [[], [], []]
rand_data_ind = 0
for split_ind, fraction in enumerate(data_fractions):
for i in range(fraction):
rand_data_ind = random.randint(0, len(data) - 1)
split_data[split_ind].append(data[rand_data_ind])
data.pop(rand_data_ind)
for l in split_data[0]:
train_file.write(l)
for l in split_data[1]:
valid_file.write(l)
for l in split_data[2]:
tests_file.write(l)
train_file.close()
valid_file.close()
tests_file.close()
def write_to_s3(fobj, bucket, key):
return (
boto3.Session(region_name=region).resource("s3").Bucket(bucket).Object(key).upload_fileobj(fobj)
)
def upload_to_s3(bucket, channel, filename):
fobj = open(filename, "rb")
key = f"{prefix}/{channel}"
url = f"s3://{bucket}/{key}/{filename}"
print(f"Writing to {url}")
write_to_s3(fobj, bucket, key)
```
### Data ingestion
Next, we read the dataset from the existing repository into memory, for preprocessing prior to training. This processing could be done *in situ* by Amazon Athena, Apache Spark in Amazon EMR, Amazon Redshift, etc., assuming the dataset is present in the appropriate location. Then, the next step would be to transfer the data to S3 for use in training. For small datasets, such as this one, reading into memory isn't onerous, though it would be for larger datasets.
```
%%time
import urllib.request
bucket = sagemaker.Session().default_bucket()
prefix = "sagemaker/DEMO-xgboost-abalone-default"
# Load the dataset
FILE_DATA = "abalone"
urllib.request.urlretrieve(
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/abalone", FILE_DATA
)
# split the downloaded data into train/test/validation files
FILE_TRAIN = "abalone.train"
FILE_VALIDATION = "abalone.validation"
FILE_TEST = "abalone.test"
PERCENT_TRAIN = 70
PERCENT_VALIDATION = 15
PERCENT_TEST = 15
data_split(
FILE_DATA, FILE_TRAIN, FILE_VALIDATION, FILE_TEST, PERCENT_TRAIN, PERCENT_VALIDATION, PERCENT_TEST
)
# upload the files to the S3 bucket
upload_to_s3(bucket, "train", FILE_TRAIN)
upload_to_s3(bucket, "validation", FILE_VALIDATION)
upload_to_s3(bucket, "test", FILE_TEST)
```
```
%matplotlib inline
```
# Quantization Quickstart
Here is a four-minute video to get you started with model quantization.
.. youtube:: MSfV7AyfiA4
:align: center
Quantization reduces model size and speeds up inference time by reducing the number of bits required to represent weights or activations.
In NNI, both post-training quantization algorithms and quantization-aware training algorithms are supported.
Here we use `QAT_Quantizer` as an example to show the usage of quantization in NNI.
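To build intuition for what "reducing the number of bits" means, here is a minimal NumPy sketch of 8-bit affine (scale and zero-point) quantization. It is only an illustration and is independent of NNI's own implementation:
```
import numpy as np

# A hypothetical float32 weight tensor
w = np.random.randn(4, 4).astype(np.float32)

# Affine quantization to unsigned 8-bit: q = round(w / scale) + zero_point
qmin, qmax = 0, 255
scale = (w.max() - w.min()) / (qmax - qmin)
zero_point = int(round(qmin - w.min() / scale))

q = np.clip(np.round(w / scale) + zero_point, qmin, qmax).astype(np.uint8)

# Dequantize to recover an approximation of the original weights
w_hat = (q.astype(np.float32) - zero_point) * scale
print("max abs error:", np.abs(w - w_hat).max())  # small, on the order of `scale`
```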
## Preparation
In this tutorial, we use a simple model and pre-train on MNIST dataset.
If you are familiar with defining a model and training in pytorch, you can skip directly to `Quantizing Model`_.
```
import torch
import torch.nn.functional as F
from torch.optim import SGD
from scripts.compression_mnist_model import TorchModel, trainer, evaluator, device, test_trt
# define the model
model = TorchModel().to(device)
# define the optimizer and criterion for pre-training
optimizer = SGD(model.parameters(), 1e-2)
criterion = F.nll_loss
# pre-train and evaluate the model on MNIST dataset
for epoch in range(3):
trainer(model, optimizer, criterion)
evaluator(model)
```
## Quantizing Model
Initialize a `config_list`.
For details on how to write a ``config_list``, please refer to the :doc:`compression config specification <../compression/compression_config_list>`.
```
config_list = [{
'quant_types': ['input', 'weight'],
'quant_bits': {'input': 8, 'weight': 8},
'op_types': ['Conv2d']
}, {
'quant_types': ['output'],
'quant_bits': {'output': 8},
'op_types': ['ReLU']
}, {
'quant_types': ['input', 'weight'],
'quant_bits': {'input': 8, 'weight': 8},
'op_names': ['fc1', 'fc2']
}]
```
Fine-tune the model using QAT.
```
from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer
dummy_input = torch.rand(32, 1, 28, 28).to(device)
quantizer = QAT_Quantizer(model, config_list, optimizer, dummy_input)
quantizer.compress()
```
The model has now been wrapped, and the quantization targets (the 'quant_types' setting in `config_list`)
will be quantized and dequantized in the wrapped layers to simulate quantization.
QAT is a training-aware quantizer; it updates the scale and zero point during training.
```
for epoch in range(3):
trainer(model, optimizer, criterion)
evaluator(model)
```
Export the model and get the calibration_config.
```
model_path = "./log/mnist_model.pth"
calibration_path = "./log/mnist_calibration.pth"
calibration_config = quantizer.export_model(model_path, calibration_path)
print("calibration_config: ", calibration_config)
```
Build a TensorRT engine to get a real speedup. For more information about speedup, please refer to :doc:`quantization_speedup`.
```
from nni.compression.pytorch.quantization_speedup import ModelSpeedupTensorRT
input_shape = (32, 1, 28, 28)
engine = ModelSpeedupTensorRT(model, input_shape, config=calibration_config, batchsize=32)
engine.compress()
test_trt(engine)
```
```
# Some basic imports and setup
import numpy as np, numpy.random as nr, gym
import matplotlib.pyplot as plt
import time
np.set_printoptions(precision=3)
from robolearn.envs.frozen_lake import FrozenLakeEnv
class MDP(object):
"""
mdp.P is a two-level dict where the first key is the state and the second key is the action.
The 2D grid cells are associated with indices [0, 1, 2, ..., 15] from left to right and top to down, as in
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]
[12 13 14 15]]
Action indices [0, 1, 2, 3] correspond to West, South, East and North.
mdp.P[state][action] is a list of tuples (probability, nextstate, reward).
For example, state 0 is the initial state, and the transition information for s=0, a=0 is P[0][0] = [(1.0, 0, 0.0)].
As another example, state 5 corresponds to a hole in the ice, in which all actions lead to the same
state with probability 1 and reward 0:
P[5][0] = [(1.0, 5, 0.0)]
P[5][1] = [(1.0, 5, 0.0)]
P[5][2] = [(1.0, 5, 0.0)]
P[5][3] = [(1.0, 5, 0.0)]
"""
def __init__(self, P, nS, nA, desc=None):
self.P = P # state transition and reward probabilities, explained below
self.nS = nS # number of states
self.nA = nA # number of actions
self.desc = desc # 2D array specifying what each grid cell means (used for plotting)
def value_iteration(mdp, gamma, nIt, show_log=False):
"""
Inputs:
mdp: MDP
gamma: discount factor
nIt: number of iterations, corresponding to n above
Outputs:
(value_functions, policies)
len(value_functions) == nIt+1 and len(policies) == nIt
"""
if show_log:
print("Iteration | max|V-Vprev| | # chg actions | V[0]")
print("----------+--------------+---------------+---------")
Vs = [np.zeros(mdp.nS)] # list of value functions contains the initial value function V^{(0)}, which is zero
pis = []
for it in range(nIt):
oldpi = pis[-1] if len(pis) > 0 else None # \pi^{(it)} = Greedy[V^{(it-1)}]. Just used for printout
Vprev = Vs[-1] # V^{(it)}
# Your code should fill in meaningful values for the following two variables
# pi: greedy policy for Vprev (not V),
# corresponding to the math above: \pi^{(it)} = Greedy[V^{(it)}]
# ** it needs to be numpy array of ints **
# V: bellman backup on Vprev
# corresponding to the math above: V^{(it+1)} = T[V^{(it)}]
# ** numpy array of floats **
# V = Vprev # REPLACE THIS LINE WITH YOUR CODE
# pi = oldpi # REPLACE THIS LINE WITH YOUR CODE
V = np.zeros_like(Vprev)
pi = np.zeros_like(V)
for s in range(mdp.nS):
q = np.zeros(mdp.nA)
for a in range(mdp.nA):
for s_prime_prob, s_prime, reward in mdp.P[s][a]:
q[a] += s_prime_prob*(reward + gamma * Vprev[s_prime])
V[s] = np.max(q)
pi[s] = np.argmax(q)
if show_log:
max_diff = np.abs(V - Vprev).max()
nChgActions="N/A" if oldpi is None else (pi != oldpi).sum()
print("%4i | %6.5f | %4s | %5.3f"%(it, max_diff, nChgActions, V[0]))
Vs.append(V)
pis.append(pi)
return Vs, pis
def plot_value_iteration(Vs, pis, iters_to_plot=None, ncol=4, nrow=4):
square_size = .5
if iters_to_plot is None:
iters_to_plot = [-1]
Vs_to_plot = [Vs[iteration] for iteration in iters_to_plot]
pis_to_plot = [pis[iteration] for iteration in iters_to_plot]
for pp, (_V, _pi) in enumerate(zip(Vs_to_plot, pis_to_plot)):
plt.figure()#figsize=(3,3))
plt.imshow(_V.reshape(ncol,nrow), cmap='gray', interpolation='none', clim=(0,1))
ax = plt.gca()
ax.set_xticks(np.arange(ncol)-square_size)
ax.set_yticks(np.arange(nrow)-square_size)
ax.set_xticklabels([])
ax.set_yticklabels([])
Y, X = np.mgrid[0:4, 0:4]
a2uv = {0: (-1, 0), 1:(0, -1), 2:(1,0), 3:(0, 1)}
Pi = _pi.reshape(ncol,nrow)
for y in range(ncol):
for x in range(nrow):
a = Pi[y, x]
# Arrow
u, v = a2uv[a]
plt.arrow(x, y,u*.3, -v*.3, color='m', head_width=0.1, head_length=0.1)
# Text
plt.text(x, y, str(env.desc[y,x].item().decode()),
color='g', size=12, verticalalignment='center',
horizontalalignment='center', fontweight='bold')
plt.text(x-square_size/3, y-square_size/2, str(np.around(_V[4*y+x], decimals=2)),
color='r', size=8, verticalalignment='center',
horizontalalignment='center')
plt.grid(color='b', lw=2, ls='-')
print('Iter %d' % pp)
plt.show()
print('-'*20)
class Agent(object):
def __init__(self, policy):
self.policy = policy
def act(self, state):
return self.policy[state]
GAMMA = 0.95 # we'll be using this same value in subsequent problems
nIt = 300
env = FrozenLakeEnv(map_name='4x4', is_slippery=False, reward_dict={'G':1, 'S':0, 'H':-1, 'F':-0.00})
# Create a MDP from the environment
mdp = MDP( {s : {a : [tup[:3] for tup in tups] for (a, tups) in a2d.items()} for (s, a2d) in env.P.items()}, env.nS, env.nA, env.desc)
# Learn with Value Iteration a policy
Vs_VI, pis_VI = value_iteration(mdp, gamma=GAMMA, nIt=nIt, show_log=False)
# Visualize Policy
%matplotlib inline
iters_to_plot = [-1] # Plot only last VI values
plot_value_iteration(Vs_VI, pis_VI, iters_to_plot, env.ncol, env.nrow)
%matplotlib notebook
# Seed RNGs
env.seed(0); from gym.spaces import prng; prng.seed(10)
# Notebook figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# Agent from previous VI
agent = Agent(pis_VI[-1])
# Generate the episode
ob = env.reset()
render = ax.imshow(env.render(mode='rgb_array'))
ret = 0
for t in range(50):
fig.canvas.set_window_title('Frozen Lake -- Iteration: %02d' % t)
time.sleep(0.5)
render.set_data(env.render(mode='rgb_array'))
fig.canvas.draw()
# a = env.action_space.sample()
a = agent.act(ob)
ob, rew, done, _ = env.step(a)
ret += rew
print('iter', t, '| ob',ob,'| action',a,'| reward',rew)
if done:
break
assert done
fig.canvas.set_window_title('Frozen Lake -- Iteration: %02d' % t)
time.sleep(0.5)
render.set_data(env.render(mode='rgb_array'))
fig.canvas.draw()
# env.render(close=True)
print('Finished at t=%d'%(t+1),'| Acum reward:', ret)
```
# CHEM 1000 - Spring 2022
Prof. Geoffrey Hutchison, University of Pittsburgh
## 5 Scalar and Vector Operators
Chapter 5 in [*Mathematical Methods for Chemists*](http://sites.bu.edu/straub/mathematical-methods-for-molecular-science/)
By the end of this session, you should be able to:
- Understand the concept of vector-valued functions and vector fields
- Identify sinks, sources, and saddle points of vector fields
- Understand the concept of vector operators
- Understand the gradient and applications to chemistry (e.g., forces)
### Scalars vs. Vectors
Reminder...
**Scalars** are just numbers - they have a magnitude, a size. The mass of a molecule would be an example, e.g., 120 amu.
**Vectors** have both a magnitude and a direction:
- velocity $\mathbf{v}$
- acceleration $\mathbf{a}$
- force $\mathbf{F}$
- electric field $\mathbf{E}$
### Scalar Functions and Vector Functions
A **function** takes in a number, a vector, etc. and returns a number:
$$
\sin 0 = 0
$$
Notice that $\sin x$ is a scalar function. You give it something, and it returns a **scalar**.
By extension, there must be **vector functions** too - one that returns a vector for every point.
Right now, you're experiencing force due to gravity. If you stand up, the forces acting on your body change over time. (Consider if you go on a roller coaster or fly in an airplane.)
$$
\overrightarrow{\boldsymbol{F}}(t)
$$
Notice that time is a scalar - it's just a number. So a vector function returns a vector regardless of what the input is. It might be one-dimensional (e.g., the force we feel at a given time $t$) or 2D or 3D, etc. (e.g., the forces on a satellite in space .. we probably care about the position of the satellite but maybe also time.)
### Vector Fields
When we have a vector function in 2D or 3D, we usually call these **vector fields**.
<div class="alert alert-block alert-success">
A **vector field** is a function that returns a vector for every (x,y) or (x, y, z) point.
</div>
It sounds abstract, but we're actually already familiar with the concept. Consider a weather map showing wind:
<img src='../images/wind-vectors.png' width="540" />
Depending on our location, the wind speed and vector will differ. Let's look at a [tropical cyclone (Hurricane Sally)](https://en.wikipedia.org/wiki/Tropical_Storm_Sally_(2020)) in the Gulf of Mexico.
<img src='../images/hurricane.png' width="505" />
Obviously, the wind speed (magnitude) decreases the farther you get from the hurricane, and there's an *eye* in the center where there's no wind at all. Also, the vector direction differs depending on where you are.
In chemistry, we encounter a range of vector functions and vector fields. For example:
- The force acting on a charge in an electric field. At each point in space, the force acting on the charge will have a specific magnitude and a direction.
- Fluid flow for which each element of the fluid at some point in space has a given speed and direction of flow.
There are a few key terms with vector fields:
- a **sink** is a point in which all vectors flow inward
- a **source** is a point at which all vectors flow outward
- a **saddle-point** has no net inward or outward flow (i.e., they balance exactly)
<img src='../images/vector-field.png' />
(Image from [*Mathematical Methods for Chemists*](http://sites.bu.edu/straub/mathematical-methods-for-molecular-science/))
Here, the red point is a **source** and all the arrows point away from it (imagine the forces from a negative charge towards an electron).
The black point is a **sink** and all the arrows point towards it (e.g., a positive charge that will attract an electron).
The two "X" points are **saddle points**. Notice that they come inward along one direction and outward along another direction. (Think about a horse saddle that rises up to the neck and head and also to the tail, and slopes down along the sides.)
### Example Vector Field
Let's try plotting
$$
\vec{F}=\frac{x}{5} \hat{\mathbf{x}}+\frac{y}{5} \hat{\mathbf{y}}
$$
We'll use `numpy` and `matplotlib`. The code is a bit different because we're creating a "quiverplot".
```
# Let's plot some vector fields with numpy and matplotlib
# this is just our normal 'import numpy and matplotlib' code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
plt.style.use('./chem1000.mplstyle')
# Example adapted from:
# https://pythonforundergradengineers.com/quiver-plot-with-matplotlib-and-jupyter-notebooks.html
# we're going to create a set of points from -5 to +5
# along x and y axes
x = np.arange(-5,6,1) # remember that np.arange() doesn't include the end value
y = np.arange(-5,6,1)
# remember, this takes the numpy arrays above and makes a mesh
X, Y = np.meshgrid(x, y)
# here's how we express the function
F_x = X/5 # x component
F_y = Y/5 # y component
# here's how we create a "quiver plot" with matplotlib
# if you want to know more, please ask
fig, ax = plt.subplots() # create a figure
ax.quiver(X,Y,F_x,F_y) # axis.quiver( X, Y mesh, Func X, Func Y )
# We'll go from -6 to +6 on each axis
# to see the arrows
ax.axis([-6, 6, -6, 6])
ax.set_aspect('equal') # make sure it's exactly square
plt.show()
```
<div class="alert alert-block alert-info">
**In the plot above, what kind of a point is at the origin?**
</div>
Let's try another example. This time, we'll plot:
$$
\vec{F}=\frac{x}{5} \hat{\mathbf{x}} - \frac{y}{5} \hat{\mathbf{y}}
$$
```
# here's how we express the function
F_x = +X/5 # x component
F_y = -Y/5 # y component
fig, ax = plt.subplots() # create a figure
ax.quiver(X,Y,F_x,F_y) # axis.quiver( X, Y mesh, Func X, Func Y )
# We'll go from -6 to +6 on each axis
# to see the arrows
ax.axis([-6, 6, -6, 6])
ax.set_aspect('equal') # make sure it's exactly square
plt.show()
```
<div class="alert alert-block alert-info">
**Now what kind of point is at the origin?**
</div>
**Gradients**
Perhaps one of the most useful vector functions / vector fields in chemistry comes from the **gradient** of a scalar function.
<div class="alert alert-block alert-success">
The **gradient** operator in 2D Cartesian coordinates (x, y) is
$$
\boldsymbol{\nabla} \equiv \hat{\mathbf{x}} \frac{\partial}{\partial x}+\hat{\mathbf{y}} \frac{\partial}{\partial y}
$$
</div>
We use $\boldsymbol{\nabla}$ as a short-hand for the gradient operator, no matter what coordinate system we use. Particularly in polar or spherical coordinates, the expression can get complicated.
So what does it do?
$$
\boldsymbol{\nabla} V(x, y) = \left(\hat{\mathbf{x}} \frac{\partial}{\partial x}+\hat{\mathbf{y}} \frac{\partial}{\partial y}\right) V(x, y)
$$
At every point (x,y) we take the partial derivative of $V(x,y)$ (a scalar function) with respect to x and y, and use those as the x-component and y-component of a vector.
In other words, the gradient operator returns a **vector** from a **scalar** function.
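As a quick numerical illustration (a sketch, reusing the `x`, `y`, `X`, `Y` grids from the quiver-plot cells above; the choice of $V$ is arbitrary), we can apply the gradient to the scalar function $V(x,y) = x^2 + y^2$ with `np.gradient` and plot the resulting vector field:
```
# apply the gradient operator numerically to the scalar function V(x,y) = x^2 + y^2
V = X**2 + Y**2
# np.gradient differentiates along each array axis:
# axis 0 is y (rows) and axis 1 is x (columns) for our meshgrid
dV_dy, dV_dx = np.gradient(V, y, x)
fig, ax = plt.subplots()
ax.quiver(X, Y, dV_dx, dV_dy) # the gradient of a scalar function is a vector field
ax.axis([-6, 6, -6, 6])
ax.set_aspect('equal')
plt.show()
```
The arrows point radially outward and grow with distance from the origin, which (up to an overall factor) matches the first quiver plot above.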
**Laplace Operator**
<div class="alert alert-block alert-success">
The **Laplace** operator (sometimes called the Laplacian) in 2D Cartesian coordinates (x, y) is
$$
\nabla^{2} \equiv \frac{\partial^{2}}{\partial x^{2}}+\frac{\partial^{2}}{\partial y^{2}}
$$
</div>
We use $\nabla^2$ as a short-hand for the Laplace operator, no matter what coordinate system we use. At every point, we take the second partial derivatives of a function, and add the components.
The Laplace operator takes a scalar function and returns a new scalar function.
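For example, here is a small `sympy` sketch (the function $V$ below is arbitrary, chosen only for illustration) showing that applying the Laplace operator to a scalar function gives back another scalar function:
```
from sympy import symbols, diff, simplify

# use fresh symbols so we don't clash with the numpy arrays x, y defined above
xs, ys = symbols('x y')
V = xs**2 * ys + ys**3                      # an arbitrary scalar function
laplacian_V = diff(V, xs, 2) + diff(V, ys, 2)
simplify(laplacian_V)                       # 2*y + 6*y = 8*y, again a scalar function
```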
**Potential Energy and Forces**
Consider the interaction of two atoms according to the [Lennard-Jones potential](https://en.wikipedia.org/wiki/Lennard-Jones_potential):
$$
V(r)=4 \varepsilon\left[\left(\frac{\sigma}{r}\right)^{12}-\left(\frac{\sigma}{r}\right)^{6}\right]
$$
where $r$ is the distance between the two atoms, $\sigma$ represents the atomic diameter (or the sum of the two atomic radii), and $\epsilon$ represents the binding energy. In this case, we know that there will be a minimum energy at:
$$
r_{\min }=\sqrt[6]{2} \sigma
$$
Image from [*Mathematical Methods for Chemists*](http://sites.bu.edu/straub/mathematical-methods-for-molecular-science/):
<img src="../images/lennard-jones.png" />
Now the force will depend on the potential energy:
$$
\mathbf{F}=-\left[\hat{\mathbf{x}} \frac{\partial}{\partial x}+\hat{\mathbf{y}} \frac{\partial}{\partial y}+\hat{\mathbf{z}} \frac{\partial}{\partial z}\right] V(x, y, z)=-\nabla V(x, y, z)
$$
Put simply, the forces are the gradient of the potential energy!
$$
\mathbf{F}=-\boldsymbol{\nabla} V
$$
We can even do the work in this case - it's one-dimensional in $\hat{\mathbf{r}}$:
$$
\mathbf{F}(r)=-\frac{d V(r)}{d r} \hat{\mathbf{r}}=4 \varepsilon\left[12\left(\frac{\sigma^{12}}{r^{13}}\right)-6\left(\frac{\sigma^6}{r^7}\right)\right] \hat{\mathbf{r}}
$$
So when $r < r_{min}$, the repulsive term of the force (arising from the $1/r^{12}$ part of the potential) dominates the total force and pushes in a positive direction, while at distances $r > r_{min}$ the attractive term (from the $1/r^{6}$ part of the potential) dominates and pulls in a negative direction.
<div class="alert alert-block alert-info">
Notice that the forces on the atom always aim to minimize the energy:
- If the two atoms are close together, the repulsive force pushes in a direction towards $r_{min}$.
- If the two atoms are far apart, the attractive force pulls in a direction towards $r_{min}$.
</div>
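As a quick numerical check (a sketch in reduced units with $\sigma = \varepsilon = 1$, an assumption made only for plotting), we can graph $V(r)$ and $F(r) = -dV/dr$ and see that the force crosses zero exactly at $r_{min} = \sqrt[6]{2}\,\sigma$:
```
# Numerical check of the Lennard-Jones force (illustrative units: sigma = epsilon = 1)
import numpy as np
import matplotlib.pyplot as plt

sigma, epsilon = 1.0, 1.0
r = np.linspace(0.9, 2.5, 400)
V = 4 * epsilon * ((sigma / r)**12 - (sigma / r)**6)
F = 4 * epsilon * (12 * sigma**12 / r**13 - 6 * sigma**6 / r**7)   # F = -dV/dr

r_min = 2**(1/6) * sigma
fig, ax = plt.subplots()
ax.plot(r, V, label='V(r)')
ax.plot(r, F, label='F(r) = -dV/dr')
ax.axvline(r_min, color='gray', ls='--', label='$r_{min} = 2^{1/6}\\sigma$')
ax.axhline(0, color='k', lw=0.5)
ax.set_ylim(-2, 2)
ax.set_xlabel('r / $\\sigma$')
ax.legend()
plt.show()
```
The force is positive (repulsive) for $r < r_{min}$, negative (attractive) for $r > r_{min}$, and exactly zero at the minimum of the potential.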
While the Lennard-Jones potential has a very simple one-dimensional form (e.g., one atom is at the origin and the other is some distance $r$ away), the concept of the gradient and forces applies regardless of the potential energy.
If we have some method to calculate the potential energy (e.g., quantum chemistry, etc.) we can take the gradient, get the forces on the atoms, and move them accordingly to get a minimum energy.
<img src="../images/atom-forces.png" width="341" />
In this case, we can see that the central carbon-carbon bond is too short and the carbon atoms are pulling apart, resulting in the hydrogen atoms moving in different directions.
Eventually as we repeat the process (find the gradient and forces, move the atoms a bit, re-calculate), we can minimize the potential energy and find an optimized geometry.
**Example problem:**
For a scalar potential energy, $V(x, y, z)=x^{2}+y^{2}+z^{2}$, derive the force defined as the negative gradient of the potential.
```
from sympy import init_session
init_session()
V = x**2 + y**2 + z**2
# get the x-component
diff(V, x)
# the y-component
diff(V, y)
# the z-component
diff(V, z)
```
Okay, we probably could have done that by inspection.
$$
\boldsymbol{\nabla} V(x,y,z) = 2x \hat{\mathbf{x}} + 2y \hat{\mathbf{y}} + 2z \hat{\mathbf{z}}
$$
<div class="alert alert-block alert-info">
**If that's the gradient, what are the forces?**
</div>
### Gradient in Spherical Coordinates
One last thing.. so far, we've expressed the gradient in 2D or 3D Cartesian coordinates, and it's not too bad.
In spherical coordinates, it's a little messier:
$$
\boldsymbol{\nabla} V=\hat{\mathbf{r}} \frac{\partial V}{\partial r}+\hat{\boldsymbol{\theta}} \frac{1}{r} \frac{\partial V}{\partial \theta}+\hat{\boldsymbol{\varphi}} \frac{1}{r \sin \theta} \frac{\partial V}{\partial \varphi}
$$
The Laplace operator is similarly messy:
$$
\nabla^{2} V=\frac{1}{r^{2}} \frac{\partial}{\partial r}\left(r^{2} \frac{\partial V}{\partial r}\right)+\frac{1}{r^{2} \sin \theta} \frac{\partial}{\partial \theta}\left(\sin \theta \frac{\partial V}{\partial \theta}\right)+\frac{1}{r^{2} \sin ^{2} \theta} \frac{\partial^{2} V}{\partial \varphi^{2}}
$$
We'll come back to these as we need them, but it's a nice illustration of why we use operators. We can write a short symbol and it represents a longer, messier operator.
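As a small consistency check (a sketch, not part of the original text), we can feed $V = r^2$, which is the spherical-coordinate version of $x^2 + y^2 + z^2$, into the spherical Laplace operator with `sympy` and recover the same answer, 6, that the Cartesian form gives:
```
from sympy import symbols, sin, diff, simplify

r, theta, phi = symbols('r theta phi', positive=True)
V = r**2    # the same potential as x**2 + y**2 + z**2, written in spherical coordinates

laplacian = (diff(r**2 * diff(V, r), r) / r**2
             + diff(sin(theta) * diff(V, theta), theta) / (r**2 * sin(theta))
             + diff(V, phi, 2) / (r**2 * sin(theta)**2))
simplify(laplacian)   # gives 6, matching the Cartesian Laplacian of x**2 + y**2 + z**2
```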
-------
This notebook is from Prof. Geoffrey Hutchison, University of Pittsburgh
https://github.com/ghutchis/chem1000
<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a>
# Pandas Tips & Tricks & More
### Hello Kaggler!
### <span style="color:PURPLE">The objective of this kernel is to demonstrate the most commonly used</span> <span style="color:red">Pandas Tips & Tricks and More</span>.
# Contents
Note: Please use the links below to navigate the notebook
1. [Check Package Version](#CheckPackageVersion)
1. [Ignore Warnings](#IgnoreWarnings)
1. Pandas Basics
1. [Data Read and Peak](#ReadandPeak)
1. [Shape, Columns](#ShapeColumns)
1. [pandas series to pandas dataframe](#series2df)
1. [Query Data Type](#QueryDataType)
1. [Columns With Missing Values as a List](#ColumnsWithMissingValues)
1. [Columns of object Data Type (Categorical Columns) as a List](#CatColumns)
1. [Columns that Contain Numeric Values as a List](#NumColumns)
1. [Categorical Columns with Cardinality less than N](#CatColsCar)
1. [Count of Unique Values in a Column](#UniqueCount)
1. [Select Columns Based on Data Types](#DTypeColSelect)
1. [Check whether data is ready for training](#checkDataBeforeTraining)
1. [Subplotting in Notebooks](#SublottinginNotebooks)
1. Tools to Deal with Missing Values
1. [Get Missing Values Info](#GetMissingValuesInfo)
1. [Fill missing values](#FillMissingValues)
1. [Drop columns where data is missing more than x%](#dropDataMissingColumns)
1. [Columns With Missing Values as a List](#ColumnsWithMissingValues)
1. Feature Engineering
1. [Drop rows where target is missing](#dropTargetMissingRows)
1. [OneHot Encode the Dataframe](#OneHotEncode)
1. [Convert categorical columns in numerical dtype to object type](#Convertnumericalcategoricalobject)
1. [Select Columns Based on Data Types](#DTypeColSelect)
1. [Get feature columns df and target column from training data](#getTrainX_TrainY)
1. Modelling
1. [Logistic Regression](#LogisticRegression)
```
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from pprint import pprint
print(os.listdir("../input"))
```
#### Check Package Version[^](#CheckPackageVersion)<a id="CheckPackageVersion" ></a><br>
```
print(pd.__version__)
print(np.__version__)
```
#### Ignore Warnings[^](#IgnoreWarnings)<a id="IgnoreWarnings" ></a><br>
```
import warnings
warnings.filterwarnings("ignore")
```
### Data Read and Peak [^](#ReadandPeak)<a id="ReadandPeak" ></a><br>
```
# Read & peek top
data = pd.read_csv("../input/titanic/train.csv")
data.head()
# Reading with Index column & peek tail
data2 = pd.read_csv("../input/titanic/train.csv",index_col='PassengerId')
data2.tail()
```
#### Shape, Columns[^](#ShapeColumns)<a id="ShapeColumns" ></a><br>
```
# Shape, Row Count, Column Count & Column Names
print('Shape of dataframe \t:', data.shape)
print('# of Rows \t\t:', data.shape[0])
print('# of Columns \t\t:', data.shape[1])
print('Columns in dataframe \t:', data.columns)
```
#### Query Data Type[^](#QueryDataType)<a id="QueryDataType" ></a><br>
```
values = {}
arr = []
print('values is a ' ,type(values))
type(arr)
```
#### Columns With Missing Values as a List[^](#ColumnsWithMissingValues)<a id="ColumnsWithMissingValues" ></a><br>
```
def getColumnsWithMissingValuesList(df):
return [col for col in df.columns if df[col].isnull().any()]
getColumnsWithMissingValuesList(data)
```
#### Columns of object Data Type (Categorical Columns) as a List[^](#CatColumns)<a id="CatColumns" ></a><br>
```
def getObjectColumnsList(df):
return [cname for cname in df.columns if df[cname].dtype == "object"]
cat_cols = getObjectColumnsList(data)
cat_cols
```
#### Columns that Contain Numeric Values as a List[^](#NumColumns)<a id="NumColumns" ></a><br>
```
def getNumericColumnsList(df):
return [cname for cname in df.columns if df[cname].dtype in ['int64', 'float64']]
num_cols = getNumericColumnsList(data)
num_cols
```
#### Categorical Columns with Cardinality less than N[^](#CatColsCar)<a id="CatColsCar" ></a><br>
```
def getLowCardinalityColumnsList(df,cardinality):
return [cname for cname in df.columns if df[cname].nunique() < cardinality and df[cname].dtype == "object"]
LowCardinalityColumns = getLowCardinalityColumnsList(data,10)
LowCardinalityColumns
```
#### Count of Unique Values in a Column[^](#UniqueCount)<a id="UniqueCount" ></a><br>
```
data['Embarked'].nunique()
```
#### OneHot Encode the Dataframe[^](#OneHotEncode)<a id="OneHotEncode" ></a><br>
```
def PerformOneHotEncoding(df,columnsToEncode):
return pd.get_dummies(df,columns = columnsToEncode)
oneHotEncoded_df = PerformOneHotEncoding(data,getLowCardinalityColumnsList(data,10))
oneHotEncoded_df.head()
```
#### Select Columns Based on Data Types[^](#DTypeColSelect)<a id="DTypeColSelect" ></a><br>
```
# select only int64 & float64 columns
numeric_data = data.select_dtypes(include=['int64','float64'])
# select only object columns
categorical_data = data.select_dtypes(include='object')
numeric_data.head()
categorical_data.head()
```
#### Get Missing Values Info[^](#GetMissingValuesInfo)<a id="GetMissingValuesInfo" ></a><br>
```
def missingValuesInfo(df):
total = df.isnull().sum().sort_values(ascending = False)
percent = round(df.isnull().sum().sort_values(ascending = False)/len(df)*100, 2)
temp = pd.concat([total, percent], axis = 1,keys= ['Total', 'Percentage'])
return temp.loc[(temp['Total'] > 0)]
missingValuesInfo(data)
```
#### Fill missing values[^](#FillMissingValues)<a id="FillMissingValues" ></a><br>
```
# for Object columns fill using 'UNKNOWN'
# for Numeric columns fill using median
def fillMissingValues(df):
    num_cols = [cname for cname in df.columns if df[cname].dtype in ['int64', 'float64']]
    cat_cols = [cname for cname in df.columns if df[cname].dtype == "object"]
    values = {}
    for a in cat_cols:
        values[a] = 'UNKNOWN'
    for a in num_cols:
        values[a] = df[a].median()
    df.fillna(value=values, inplace=True)

fillMissingValues(data)
data.head()
#check for NaN values
data.isnull().sum().sum()
```
#### Drop columns where data is missing more than x%[^](#dropDataMissingColumns)<a id="dropDataMissingColumns" ></a><br>
```
# pass the DataFrame and percentage
def dropDataMissingColumns(df,percentage):
print("Dropping columns where more than {}% values are Missing..".format(percentage))
nan_percentage = df.isnull().sum().sort_values(ascending=False) / df.shape[0]
missing_val = nan_percentage[nan_percentage > 0]
to_drop = missing_val[missing_val > percentage/100].index.values
df.drop(to_drop, axis=1, inplace=True)
```
#### Drop rows where target is missing[^](#dropTargetMissingRows)<a id="dropTargetMissingRows" ></a><br>
```
def dropTargetMissingRows(df,target):
print("Dropping Rows where Target is Missing..")
df.dropna(axis=0, subset=[target], inplace=True)
```
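A hypothetical usage example for the two helpers above, applied to a fresh copy of the Titanic data ('Survived' as the target and 50% as the threshold are illustrative choices):
```
# re-read a fresh copy so the helpers have some missing values to act on
df = pd.read_csv("../input/titanic/train.csv")
dropTargetMissingRows(df, 'Survived')
dropDataMissingColumns(df, 50)   # e.g. 'Cabin' (~77% missing) gets dropped
df.shape
```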
#### Logistic Regression[^](#LogisticRegression)<a id="LogisticRegression" ></a><br>
```
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

def logistic(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2)
    lr = LogisticRegression()
    lr.fit(X_train, y_train)
    y_pre = lr.predict(X_test)
    print('Accuracy : ', accuracy_score(y_test, y_pre))
```
#### pandas series to pandas dataframe[^](#series2df)<a id="series2df" ></a><br>
```
series = data['Fare']
d = {series.name : series}
df = pd.DataFrame(d)
df.head()
```
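The same conversion can also be done with the built-in `Series.to_frame()` method:
```
# equivalent one-liner using Series.to_frame()
data['Fare'].to_frame().head()
```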
#### Convert categorical columns in numerical dtype to object type[^](#Convertnumericalcategoricalobject)<a id="Convertnumericalcategoricalobject" ></a><br>
Sometimes categorical columns come in numerical data types. This is the case for almost all ordinal columns. If they are not converted to 'category', the descriptive statistics summary does not make sense.
```
PassengerClass = data['Pclass'].astype('category')
PassengerClass.describe()
```
#### Check whether data is ready for training[^](#checkDataBeforeTraining)<a id="checkDataBeforeTraining" ></a>
```
# checks whether df contains null values or object columns
def checkDataBeforeTraining(df):
    if df.isnull().sum().sum() != 0:
        print("Error : Null Values Exist in Data")
        return False
    if len([cname for cname in df.columns if df[cname].dtype == "object"]) > 0:
        print("Error : Object Columns Exist in Data")
        return False
    print("Data is Ready for Training")
    return True
```
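A quick, hypothetical sanity check of the helper: the working dataframe is not ready (it still contains object columns such as 'Name' and 'Ticket', and possibly missing values), while a purely numeric, fully filled slice passes:
```
checkDataBeforeTraining(data)
checkDataBeforeTraining(data.select_dtypes(include=['int64', 'float64']).fillna(0))
```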
#### Get feature columns df and target column from training data[^](#getTrainX_TrainY)<a id="getTrainX_TrainY" ></a>
```
def getTrainX_TrainY(train_df,target):
trainY = train_df.loc[:,target]
trainX = train_df.drop(target, axis=1)
return trainX,trainY
```
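For example, with the Titanic data and 'Survived' as the target:
```
trainX, trainY = getTrainX_TrainY(data, 'Survived')
print(trainX.shape, trainY.shape)
```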
#### Subplotting in Notebooks[^](#SublottinginNotebooks)<a id="SublottinginNotebooks" ></a>
```
# importing required libraries for demo
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_regression
X, y = make_regression(n_samples=500, n_features=4, n_informative=2,random_state=0, shuffle=False)
# Subplotting: a 2x2 grid of axes with figure size 14x14
f,ax=plt.subplots(2,2,figsize=(14,14))
#first plot
sns.scatterplot(x=X[:,0], y=y, ax=ax[0,0])
ax[0,0].set_xlabel('Feature 1 Values')
ax[0,0].set_ylabel('Y Values')
ax[0,0].set_title('Scatter Plot : Feature 1 vs Y')
#second plot
sns.scatterplot(x=X[:,1], y=y,ax=ax[0,1])
ax[0,1].set_xlabel('Feature 2 Values')
ax[0,1].set_ylabel('Y Values')
ax[0,1].set_title('Scatter Plot : Feature 2 vs Y')
#Third plot
sns.scatterplot(x=X[:,2], y=y,ax=ax[1,0])
ax[1,0].set_xlabel('Feature 3 Values')
ax[1,0].set_ylabel('Y Values')
ax[1,0].set_title('Scatter Plot : Feature 3 vs Y')
#Fourth plot
sns.scatterplot(x=X[:,3], y=y,ax=ax[1,1])
ax[1,1].set_xlabel('Feature 4 Values')
ax[1,1].set_ylabel('Y Values')
ax[1,1].set_title('Scatter Plot : Feature 4 vs Y')
plt.show()
```
# The surface energy balance
____________
<a id='section1'></a>
## 1. Energy exchange mechanisms at the Earth's surface
____________
The surface of the Earth is the boundary between the atmosphere and the land, ocean, or ice. Understanding the energy fluxes across the surface is very important for three main reasons:
1. We are most interested in the climate at the surface because we live at the surface.
2. The surface energy budget determines how much energy is available to evaporate water and moisten the atmosphere.
3. Air-sea energy fluxes set the thermal structure of the oceans, which in turn act to redistribute energy around the planet, with many important consequences for climate.
The energy budget at the surface is more complex than the budget at the top of the atmosphere. At the TOA the only energy transfer mechanisms are radiative (shortwave and longwave). At the surface, in addition to radiation we need to consider fluxes of energy by conduction and by convection of heat and moisture through turbulent fluid motion.
### Major terms in the surface energy budget
We will denote the **net upward energy flux at the surface** as $F_S$.
As we mentioned back in the lecture on [heat transport](heat-transport.ipynb), there are four principal contributions to $F_S$:
1. Shortwave radiation
2. Longwave radiation
3. Sensible heat flux
4. Evaporation or latent heat flux
Wherever $F_S \ne 0$, there is a net flux of energy between the atmosphere and the surface below. This implies either that there is heat storage or release occurring below the surface (e.g. warming or cooling of water, melting of snow and ice), and/or there is horizontal heat transport by fluid motions occurring below the surface (ocean circulation, groundwater flow).
### Minor terms in the surface energy budget
All of these terms are small globally but can be significant locally or seasonally.
- Latent heat of fusion required for melting ice and snow
- Conversion of the kinetic energy of winds and waves to thermal energy
- Heat transport by precipitation, if precipitation is at a different temperature than the surface
- Biological uptake of solar energy through photosynthesis
- Biological release of energy through oxidation (respiration, decay, fires)
- Geothermal heat sources (hot springs, volcanoes, etc.)
- Anthropogenic heat released through fossil fuel burning and nuclear power generation.
____________
<a id='section2'></a>
## 2. The surface energy budget in CESM simulations
____________
We will examine the surface budget in the CESM slab ocean simulations. The advantage of looking at surface fluxes in a model rather than observations is that the model fluxes are completely consistent with the model climate, so that the net flux $F_S$ will be a meaningful measure of the heat storage in the system.
The model also gives us an opportunity to look at how the surface budget responds to global warming under a doubling of CO$_2$.
### First, load the data
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
from climlab import constants as const
# The path to the THREDDS server, should work from anywhere
basepath = 'http://thredds.atmos.albany.edu:8080/thredds/dodsC/CESMA/'
#basepath = '../Data/CESMA/'
# First get topography
cesm_input_path = basepath + 'som_input/'
topo = xr.open_dataset(cesm_input_path + 'USGS-gtopo30_1.9x2.5_remap_c050602.nc')
# Then control and 2xCO2 simulations
casenames = {'control': 'som_1850_f19',
'2xCO2': 'som_1850_2xCO2',
}
casepaths = {}
for model in casenames:
casepaths[model] = basepath + casenames[model] + '/concatenated/'
# make a dictionary of all the CAM atmosphere output
atm = {}
for model in casenames:
path = casepaths[model] + casenames[model] + '.cam.h0.nc'
print('Attempting to open the dataset ', path)
atm[model] = xr.open_dataset(path, decode_times=False)
lat = atm['control'].lat
lon = atm['control'].lon
lev = atm['control'].lev
```
### Annual mean surface energy budget
```
# Surface energy budget terms, all defined as positive up (from ocean to atmosphere)
surface_budget = {}
for (name, run) in atm.items():
budget = xr.Dataset()
budget['LHF'] = run.LHFLX
budget['SHF'] = run.SHFLX
budget['LWsfc'] = run.FLNS
budget['LWsfc_clr'] = run.FLNSC
budget['SWsfc'] = -run.FSNS
budget['SWsfc_clr'] = -run.FSNSC
budget['SnowFlux'] = ((run.PRECSC+run.PRECSL)
*const.rho_w*const.Lhfus)
# net upward radiation from surface
budget['NetRad'] = budget['LWsfc'] + budget['SWsfc']
budget['NetRad_clr'] = budget['LWsfc_clr'] + budget['SWsfc_clr']
# net upward surface heat flux
budget['Net'] = (budget['NetRad'] + budget['LHF'] +
budget['SHF'] + budget['SnowFlux'])
surface_budget[name] = budget
```
### Compute anomalies for all terms
```
# Here we take advantage of xarray!
# We can simply subtract the two xarray.Dataset objects
# to get anomalies for every term
surface_budget['anom'] = surface_budget['2xCO2'] - surface_budget['control']
# Also compute zonal averages
zonal_budget = {}
for run, budget in surface_budget.items():
zonal_budget[run] = budget.mean(dim='lon')
```
### Plot the annual mean net upward flux $F_S$ (control and anomaly after warming)
```
fig, axes = plt.subplots(1,2, figsize=(16,5))
cax1 = axes[0].pcolormesh(lon, lat, surface_budget['control'].Net.mean(dim='time'),
cmap=plt.cm.seismic, vmin=-200., vmax=200. )
axes[0].set_title('Annual mean net surface heat flux (+ up) - CESM control')
cax2 = axes[1].pcolormesh(lon, lat, surface_budget['anom'].Net.mean(dim='time'),
cmap=plt.cm.seismic, vmin=-20., vmax=20. )
fig.colorbar(cax1, ax=axes[0]); fig.colorbar(cax2, ax=axes[1])
axes[1].set_title('Anomaly after CO2 doubling')
for ax in axes:
ax.set_xlim(0, 360); ax.set_ylim(-90, 90); ax.contour( lon, lat, topo.LANDFRAC, [0.5], colors='k');
```
Some notable points about the control state:
- The net flux over all land surfaces is very close to zero!
- In the long-term annual mean, a non-zero $F_S$ must be balanced by heat transport.
- The spatial pattern of $F_S$ over the oceans is essentially just the prescribed q-flux that we have imposed on the slab ocean to represent ocean heat transport.
- Net heat uptake by the oceans occurs mostly along the equator and the cold tongues on the eastern sides of the tropical basins.
- Net heat release from oceans to atmosphere occurs mostly in mid- to high latitudes. Hot spots include the Gulf Stream and Kuroshio regions on the western sides of the mid-latitude basins, as well as the subpolar North Atlantic. These features are largely determined by ocean dynamics.
**After greenhouse warming**:
- The net change in $F_S$ is very small in most locations.
- This indicates that the model has reached quasi-equilibrium. Non-zero changes in $F_S$ would indicate either
- heat storage below the surface
- changes in ocean heat transport (not permitted in a slab ocean model).
- Non-zero changes are found in areas where the sea ice cover is changing in the model.
### Variation of energy balance components with latitude
```
fieldlist = ['SWsfc', 'LWsfc', 'LHF', 'SHF', 'Net']
fig, axes = plt.subplots(1,2, figsize=(16,5))
for ax, run in zip(axes, ['control', 'anom']):
for field in fieldlist:
ax.plot(lat, zonal_budget[run][field].mean(dim='time'), label=field)
ax.set_xlim(-90, 90); ax.grid(); ax.legend()
axes[0].set_title('Components of ANNUAL surface energy budget (+ up) - CESM control')
axes[1].set_title('Anomaly after CO2 doubling');
```
In these graphs, the curve labeled "Net" is the net flux $F_S$. It is just the zonal average of the maps from the previous figure, and shows the ocean heat uptake at the equator and release in mid- to high latitudes.
More interestingly, these graphs show the contribution of the various terms to $F_S$. They are all plotted as positive up. A **negative** value thus indicates **heating of the surface**, and a **positive** value indicates a **cooling of the surface**.
Key points about the control simulation:
- Solar radiation acts to warm the surface everywhere.
- Note that this is a net shortwave flux, so it is the amount that is actually absorbed by the surface after accounting for the reflected fraction.
- All other mechanisms act to cool the surface.
- The dominant balance across the **tropics** is between **warming by solar radiation** and **cooling by evaporation** (latent heat flux or LHF).
- The latent heat flux decreases poleward.
- Latent heat flux is dominant over sensible heat flux at most latitudes except close to the poles.
- The net longwave radiation also acts to cool the surface.
- This is the residual between the surface emissions (essentially $\sigma~T_s^4$) and the back-radiation from the atmosphere.
### Exercise: Discuss the surface energy budget anomalies to greenhouse warming
For **each term** on the right panel of the plot identify the following:
- The sign of the change
- The physical mechanism responsible for the change
- The consequence for surface temperature change
### Seasonal variations
We will compute the budgets for the months of January and July, and plot their differences.
```
# July minus January
julminusjan_budget = {}
for name, budget in surface_budget.items():
# xarray.Dataset objects let you "select" a subset in various ways
# Here we are using the integer time index (0-11)
julminusjan_budget[name] = budget.isel(time=6) - budget.isel(time=0)
fieldlist = ['SWsfc', 'LWsfc', 'LHF', 'SHF', 'Net']
fig,axes = plt.subplots(1,2,figsize=(16,5))
for field in fieldlist:
axes[0].plot(lat, julminusjan_budget['control'][field].mean(dim='lon'), label=field)
axes[0].set_title('Components of JUL-JAN surface energy budget (+ up) - CESM control')
for field in fieldlist:
axes[1].plot(lat, julminusjan_budget['anom'][field].mean(dim='lon'), label=field)
axes[1].set_title('Anomaly after CO2 doubling')
for ax in axes:
ax.set_xlim(-90, 90)
ax.grid()
ax.legend()
```
Seasonally, the dominant balance by far is between solar radiation and heat storage!
____________
<a id='section3'></a>
## 3. Sensible and Latent Heat Fluxes in the boundary layer
____________
These notes largely follow Chapter 4 of Hartmann (1994) "Global Physical Climatology", Academic Press.
Turbulent fluxes of heat: eddy fluxes of heat and moisture at some level in the atmospheric boundary layer
$$ \text{SH} = c_p ~\rho ~ \overline{w^\prime T^\prime} $$
$$ \text{LE} = L ~\rho ~\overline{w^\prime q^\prime} $$
where $c_p$ is the specific heat of air at constant pressure, $L$ is the latent heat of vaporization, $\text{SH}$ is the sensible heat flux and $\text{LE}$ is the latent heat flux.
### Bulk aerodynamic formulas
From theory of boundary layer turbulence, we suppose that the eddy heat flux is related to boundary layer temperature gradients, as well as the mean wind speed:
$$ \text{SH} = c_p ~\rho ~ C_D ~ U \left( T_s - T_a \right) $$
where $T_s$ is the surface temperature and $T_a$ is the air temperature at some reference height above the surface. $U$ is the wind speed at the reference height, and $C_D$ is a dimensionless aerodynamic drag coefficient.
$C_D$ will depend, among other things, on the roughness of the surface.
Similarly, we assume that the latent heat flux is related to boundary layer moisture gradients:
$$ \text{LE} = L ~\rho ~ C_D ~ U \left( q_s - q_a \right) $$
where $q_s$ is the specific humidity of air immediately above the surface, and $q_a$ is the specific humidity at the reference height.
In general the transfer coefficients $C_D$ could be different for sensible and latent heat flux, but empirically they are found to be very similar to each other. We will assume they are equal here.
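As a minimal sketch of these bulk formulas (the numerical values for air density, the transfer coefficient and the wind speed below are illustrative assumptions, not taken from the text; `const.cp` and `const.Lhvap` are the climlab constants imported above):
```
rho = 1.2      # near-surface air density [kg m-3] (assumed)
C_D = 1.3e-3   # bulk transfer coefficient [dimensionless] (assumed)
U = 5.0        # wind speed at the reference height [m s-1] (assumed)

def bulk_SH(Ts, Ta):
    """Sensible heat flux [W m-2], positive upward."""
    return const.cp * rho * C_D * U * (Ts - Ta)

def bulk_LE(qs, qa):
    """Latent heat flux [W m-2], positive upward."""
    return const.Lhvap * rho * C_D * U * (qs - qa)

# e.g. a surface 2 K warmer and 2 g/kg moister than the air at the reference height
print(bulk_SH(Ts=302., Ta=300.), bulk_LE(qs=0.022, qa=0.020))
```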
### The Bowen ratio
The **Bowen ratio** is a dimensionless number defined as
$$ B_o = \frac{\text{SH}}{\text{LE}} $$
i.e. the ratio of **sensible heat loss** to **evaporative cooling**.
From the above plots, the Bowen ratio tends to be small in the low latitudes.
### The Bowen ratio for wet surfaces
Over a water surface or a very wet land surface, we may assume that the mixing ratio of water vapor at the surface is equal to the saturation mixing ratio $q^*$ at the temperature of the surface:
$$ q_s = q^*(T_s) $$
Recall that the saturation mixing ratio $q^*$ is a sensitive function of temperature through the Clausius-Clapeyron relation. (It also depends on pressure.)
Let's approximate the mixing ratio for **saturated air** at the reference height through a first-order Taylor series expansion:
$$ q_a^* \approx q_s^*(T_s) + \frac{\partial q^*}{\partial T} \left( T_a - T_s \right) $$
The actual mixing ratio at the reference height can be expressed as
$$ q_a = r ~ q_a^* $$
where $r$ is the relative humidity at that level.
Then we have an approximation for $q_a$ in terms of temperature gradients:
$$ q_a \approx r \left( q_s^*(T_s) + \frac{\partial q^*}{\partial T} \left( T_a - T_s \right) \right) $$
Substituting this into the bulk formula for latent heat flux, we get
$$ \text{LE} \approx L ~\rho ~ C_D ~ U \left( q_s^* - r \left( q_s^* + \frac{\partial q^*}{\partial T} \left( T_a - T_s \right) \right) \right) $$
or, rearranging a bit,
$$ \text{LE} \approx L ~\rho ~ C_D ~ U \left( (1-r) ~ q_s^* + r \frac{\partial q^*}{\partial T} \left( T_s - T_a \right) \right) $$
The Bowen ratio is thus
$$ B_o = \frac{c_p}{ L \left( \frac{(1-r)}{\left( T_s - T_a \right)} q_s^* + r \frac{\partial q^*}{\partial T} \right)} $$
### The equilibrium Bowen ratio (for saturated air)
Notice that **if the boundary layer air is saturated**, then $r=1$ and the Bowen ratio takes on a special value
$$ B_e = \frac{c_p}{ L \frac{\partial q^*}{\partial T} } $$
When the surface and the air at the reference level are saturated, the Bowen ratio approaches the value $B_e$, which is called the equilibrium Bowen ratio. We presume that the flux of moisture from the boundary layer to the free atmosphere is sufficient to just balance the upward flux of moisture from the surface so that the humidity at the reference height is in equilibrium at the saturation value.
Recall that from the Clausius-Clapeyron relation, the rate of change of the saturation mixing ratio is itself a strong function of temperature:
$$ \frac{\partial q^*}{\partial T} = q^*(T) \frac{L}{R_v ~ T^2} $$
Here the quasi-exponential dependence of $q^*$ on $T$ far outweighs the inverse square dependence, so the **equilibrium Bowen ratio decreases roughly exponentially with temperature**.
The following code reproduces Figure 4.10 of Hartmann (1994).
```
from climlab.utils.thermo import qsat
T = np.linspace(-40, 40) + const.tempCtoK
qstar = qsat(T, const.ps) # in kg / kg
def Be(T):
qstar = qsat(T, const.ps) # in kg / kg
dqstardT = qstar * const.Lhvap / const.Rv / T**2
return const.cp / const.Lhvap / dqstardT
fig, ax = plt.subplots()
ax.semilogy(T + const.tempKtoC, qstar*1000, label='$q^*$')
ax.semilogy(T + const.tempKtoC, Be(T), label='$B_e$')
ax.grid()
ax.set_xlabel('Temperature (degC)')
ax.legend(loc='upper center')
ax.set_title('Saturation specific humidity (g/kg) and equilibrium Bowen ratio');
```
- Equilibrium Bowen ratio is near 1 at 0ºC, and decreases to about 0.2 at 30ºC.
- As relative humidity is decreased from 1 to smaller values, **evaporative cooling increases**.
- The equilibrium Bowen ratio is the **maximum possible Bowen ratio for a wet surface**.
- Actual Bowen ratio over a wet surface will generally be smaller than $B_e$, because the air is usually not saturated.
- Because of the strong temperature dependence of the saturation specific humidity:
- Evaporative cooling (latent heat flux) dominates over sensible cooling of wet surfaces at **tropical** temperatures.
- Sensible heat flux becomes important wherever the surface is either **cold** or **dry**.
____________
<a id='section4'></a>
## 4. Bowen ratio in CESM simulations
____________
```
Bo_control = (surface_budget['control'].SHF.mean(dim='time') /
surface_budget['control'].LHF.mean(dim='time'))
Be_control = Be(atm['control'].TS.mean(dim='time'))
fig,axes = plt.subplots(1,3,figsize=(16,4))
cax1 = axes[0].pcolormesh(lon, lat, Bo_control,
vmin=0., vmax=5. )
fig.colorbar(cax1, ax=axes[0])
axes[0].set_title('$B_o$ (CESM control)', fontsize=20)
cax2 = axes[1].pcolormesh(lon, lat, Be_control,
vmin=0., vmax=5. )
fig.colorbar(cax2, ax=axes[1])
axes[1].set_title('$B_e$ (CESM control)', fontsize=20)
cax3 = axes[2].pcolormesh(lon, lat, (Bo_control - Be_control),
cmap='seismic', vmin=-10., vmax=10. )
fig.colorbar(cax3, ax=axes[2])
axes[2].set_title('$B_o - B_e$ (CESM control)', fontsize=20)
for ax in axes:
ax.set_xlim(0, 360)
ax.set_ylim(-90, 90)
ax.contour( lon, lat, topo.variables['LANDFRAC'][:], [0.5], colors='k');
```
On the difference plot, the blue colors indicate where the actual Bowen ratio is smaller than the equilibrium Bowen ratio. This will typically occur for **wet surfaces** with **undersaturated air**.
The red colors indicate where the actual Bowen ratio is larger than the equilibrium Bowen ratio. This typically occurs for **dry surfaces** where there is not enough water available to satisfy the energetic demand for evaporation.
____________
## Credits
This notebook is part of [The Climate Laboratory](https://brian-rose.github.io/ClimateLaboratoryBook), an open-source textbook developed and maintained by [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany. It has been modified by [Nicole Feldl](http://nicolefeldl.com), UC Santa Cruz.
It is licensed for free and open consumption under the
[Creative Commons Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) license.
Development of these notes and the [climlab software](https://github.com/brian-rose/climlab) is partially supported by the National Science Foundation under award AGS-1455071 to Brian Rose. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation.
____________
If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets as well as other dependencies. Right now this requires the current master branch of both. Uncomment the following cell and run it.
```
#! pip install git+https://github.com/huggingface/transformers.git
#! pip install git+https://github.com/huggingface/datasets.git
#! pip install rouge-score nltk
```
If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.
To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.
First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then uncomment the following cell and input your username and password (this only works on Colab, in a regular notebook, you need to do this in a terminal):
```
from huggingface_hub import notebook_login
notebook_login()
```
Then you need to install Git-LFS and setup Git if you haven't already. Uncomment the following instructions and adapt with your name and email:
```
# !apt install git-lfs
# !git config --global user.email "you@example.com"
# !git config --global user.name "Your Name"
```
Make sure your version of Transformers is at least 4.8.1 since the functionality was introduced in that version:
```
import transformers
print(transformers.__version__)
```
You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/seq2seq).
# Fine-tuning a model on a summarization task
In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model for a summarization task. We will use the [XSum dataset](https://arxiv.org/pdf/1808.08745.pdf) (for extreme summarization) which contains BBC articles accompanied with single-sentence summaries.

We will see how to easily load the dataset for this task using 🤗 Datasets and how to fine-tune a model on it using the `Trainer` API.
```
model_checkpoint = "t5-small"
```
This notebook is built to run with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a sequence-to-sequence version in the Transformers library. Here we picked the [`t5-small`](https://huggingface.co/t5-small) checkpoint.
## Loading the dataset
We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`.
```
from datasets import load_dataset, load_metric
raw_datasets = load_dataset("xsum")
metric = load_metric("rouge")
```
The `dataset` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key for the training, validation and test set:
```
raw_datasets
```
To access an actual element, you need to select a split first, then give an index:
```
raw_datasets["train"][0]
```
To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.
```
import datasets
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=5):
assert num_examples <= len(
dataset
), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset) - 1)
while pick in picks:
pick = random.randint(0, len(dataset) - 1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, datasets.ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
show_random_elements(raw_datasets["train"])
```
The metric is an instance of [`datasets.Metric`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Metric):
```
metric
```
You can call its `compute` method with your predictions and labels, which need to be list of decoded strings:
```
fake_preds = ["hello there", "general kenobi"]
fake_labels = ["hello there", "general kenobi"]
metric.compute(predictions=fake_preds, references=fake_labels)
```
## Preprocessing the data
Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that the model requires.
To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:
- we get a tokenizer that corresponds to the model architecture we want to use,
- we download the vocabulary used when pretraining this specific checkpoint.
That vocabulary will be cached, so it's not downloaded again the next time we run the cell.
```
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
```
By default, the call above will use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library.
You can directly call this tokenizer on one sentence or a pair of sentences:
```
tokenizer("Hello, this one sentence!")
```
Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.
Instead of one sentence, we can pass along a list of sentences:
```
tokenizer(["Hello, this one sentence!", "This is another sentence."])
```
To prepare the targets for our model, we need to tokenize them inside the `as_target_tokenizer` context manager. This will make sure the tokenizer uses the special tokens corresponding to the targets:
```
with tokenizer.as_target_tokenizer():
print(tokenizer(["Hello, this one sentence!", "This is another sentence."]))
```
If you are using one of the five T5 checkpoints we have to prefix the inputs with "summarize:" (the model can also translate and it needs the prefix to know which task it has to perform).
```
if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:
prefix = "summarize: "
else:
prefix = ""
```
We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This will ensure that an input longer than what the selected model can handle will be truncated to the maximum length accepted by the model. The padding will be dealt with later on (in a data collator) so we pad examples to the longest length in the batch and not the whole dataset.
```
max_input_length = 1024
max_target_length = 128
def preprocess_function(examples):
inputs = [prefix + doc for doc in examples["document"]]
model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(
examples["summary"], max_length=max_target_length, truncation=True
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
```
This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:
```
preprocess_function(raw_datasets["train"][:2])
```
To apply this function on all the pairs of sentences in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.
```
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
```
Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.
Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently.
## Fine-tuning the model
Now that our data is ready, we can download the pretrained model and fine-tune it. Since our task is of the sequence-to-sequence kind, we use the `AutoModelForSeq2SeqLM` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us.
```
from transformers import TFAutoModelForSeq2SeqLM, DataCollatorForSeq2Seq
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
```
Note that we don't get a warning like in our classification example. This means we used all the weights of the pretrained model and there is no randomly initialized head in this case.
Next we set some parameters like the learning rate and the `batch_size` and customize the weight decay.
The last two arguments are there to set up everything so we can push the model to the [Hub](https://huggingface.co/models) at the end of training. Remove both of them if you didn't follow the installation steps at the top of the notebook; otherwise you can change the value of `push_to_hub_model_id` to something you would prefer.
```
batch_size = 8
learning_rate = 2e-5
weight_decay = 0.01
num_train_epochs = 1
model_name = model_checkpoint.split("/")[-1]
push_to_hub_model_id = f"{model_name}-finetuned-xsum"
```
Then, we need a special kind of data collator, which will not only pad the inputs to the maximum length in the batch, but also the labels. Note that our data collators are multi-framework, so make sure you set `return_tensors='tf'` so you get `tf.Tensor` objects back and not something else!
```
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="tf")
tokenized_datasets["train"]
```
Now we convert our input datasets to TF datasets using this collator. There's a built-in method for this: `to_tf_dataset()`. Make sure to specify the collator we just created as our `collate_fn`!
```
train_dataset = tokenized_datasets["train"].to_tf_dataset(
batch_size=batch_size,
columns=["input_ids", "attention_mask", "labels"],
shuffle=True,
collate_fn=data_collator,
)
validation_dataset = tokenized_datasets["validation"].to_tf_dataset(
batch_size=8,
columns=["input_ids", "attention_mask", "labels"],
shuffle=False,
collate_fn=data_collator,
)
```
Now we initialize our loss and optimizer and compile the model. Note that most Transformers models compute loss internally - we can train on this as our loss value simply by not specifying a loss when we `compile()`.
```
from transformers import AdamWeightDecay
import tensorflow as tf
optimizer = AdamWeightDecay(learning_rate=learning_rate, weight_decay_rate=weight_decay)
model.compile(optimizer=optimizer)
```
Now we can train our model. We can also add a callback to sync up our model with the Hub - this allows us to resume training from other machines and even test the model's inference quality midway through training! Make sure to change the `username` if you do. If you don't want to do this, simply remove the callbacks argument in the call to `fit()`.
```
from transformers.keras_callbacks import PushToHubCallback
callback = PushToHubCallback(
output_dir="./summarization_model_save",
tokenizer=tokenizer,
hub_model_id=push_to_hub_model_id,
)
model.fit(train_dataset, validation_data=validation_dataset, epochs=1, callbacks=[callback])
```
Hopefully you saw your loss value declining as training continued, but that doesn't really tell us much about the quality of the model. Let's use the ROUGE metric we loaded earlier to quantify our model's ability in more detail. First we need to get the model's predictions for the validation set.
```
import numpy as np
decoded_predictions = []
decoded_labels = []
for batch in validation_dataset:
labels = batch["labels"]
predictions = model.predict_on_batch(batch)["logits"]
predicted_tokens = np.argmax(predictions, axis=-1)
decoded_predictions.extend(
tokenizer.batch_decode(predicted_tokens, skip_special_tokens=True)
)
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels.extend(tokenizer.batch_decode(labels, skip_special_tokens=True))
```
Now we need to prepare the data as the metric expects, with one sentence per line.
```
import nltk
import numpy as np
# Rouge expects a newline after each sentence
decoded_predictions = [
"\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_predictions
]
decoded_labels = [
"\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels
]
result = metric.compute(
predictions=decoded_predictions, references=decoded_labels, use_stemmer=True
)
# Extract a few results
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
# Add mean generated length (computed from the token IDs of the last predicted batch,
# which is only a rough estimate)
prediction_lens = [
    np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predicted_tokens
]
result["gen_len"] = np.mean(prediction_lens)
print({k: round(v, 4) for k, v in result.items()})
```
If you used the callback above, you can now share this model with all your friends, family, favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance:
```python
from transformers import TFAutoModelForSeq2SeqLM
model = TFAutoModelForSeq2SeqLM.from_pretrained("your-username/my-awesome-model")
```
# BOSS Calibration Tutorial
The purpose of this tutorial is to reconstruct and document the calibration steps from detected electrons to calibrated flux, as described [here](https://trac.sdss3.org/wiki/BOSS/pipeline/FluxToPhotons) (requires SDSS3 login).
```
%pylab inline
import astropy.io.fits as fits
import bossdata
print(bossdata.__version__)
finder = bossdata.path.Finder()
mirror = bossdata.remote.Manager()
```
Define a utility function to take the inverse of an array that might be masked or contain zeros. The result is always an unmasked array with any invalid entries set to zero.
```
def inverse(data):
if isinstance(data, ma.core.MaskedArray):
# Add any zero entries to the original mask.
mask = data.mask
mask[~data.mask] = (data[~data.mask] == 0)
else:
mask = (data == 0)
inv = np.zeros(data.shape)
inv[~mask] = 1 / data[~mask]
return inv
```
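A quick check of the helper on a small masked array (purely illustrative):
```
demo = ma.masked_array([2.0, 0.0, 4.0], mask=[False, False, True])
inverse(demo)   # -> array([0.5, 0. , 0. ]): zero and masked entries map to zero
```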
Catch any warnings since there shouldn't be any:
```
#import warnings
#warnings.simplefilter('error')
```
With the default plate 6641, all files are mirrored from https://dr12.sdss.org/sas/dr12/boss/spectro/redux/v5_7_0/6641/.
```
def plot_calib(plate=6641, mjd=None, fiber=1, expidx=0, band='blue',
mask=None, save=None):
"""
"""
assert band in ('blue', 'red')
# Infer the MJD if possible, when none is specified.
if mjd is None:
mjds = bossdata.meta.get_plate_mjd_list(plate, finder, mirror)
if len(mjds) == 0:
            print('Plate {} has never been observed with good quality.'.format(plate))
elif len(mjds) > 1:
            print('Plate {} observed on multiple MJDs (pick one): {}.'
                  .format(plate, ', '.join(map(str, mjds))))
else:
mjd = mjds[0]
print('Using MJD {}.'.format(mjd))
if not mjd:
return
# Which spectrograph does this fiber belong to?
num_fibers = bossdata.plate.get_num_fibers(plate)
spec_num = 1 if fiber <= num_fibers // 2 else 2
camera = band[0] + str(spec_num)
print('Fiber {} read out by {} camera {}.'.format(fiber, band, camera))
# Load the list of exposures used for the science coadd of PLATE-MJD
# and the associated calibration exposures.
spec_name = finder.get_spec_path(plate, mjd, fiber, lite=True)
exposures = bossdata.spec.SpecFile(mirror.get(spec_name)).exposures
nexp = len(exposures.table)
if expidx >= nexp:
print('Invalid exposure index {} (should be 0-{}).'
.format(expidx, nexp - 1))
return
expnum = exposures.table[expidx]['science']
print('Analyzing exposure[{}] = #{} of {} used in coadd.'
.format(expidx, expnum, nexp))
# Load the calibrated flux and wavelength solution and ivars from the spCFrame file.
name = exposures.get_exposure_name(expidx, camera, 'spCFrame')
path = mirror.get(finder.get_plate_path(plate, name))
spCFrame = bossdata.plate.FrameFile(path, calibrated=True)
data = spCFrame.get_valid_data(
[fiber], include_sky=True,use_ivar=True, pixel_quality_mask=mask)[0]
wave, flux, sky, ivar = data['wavelength'], data['flux'], data['sky'], data['ivar']
# Lookup the metadata for this fiber.
fiber_index = spCFrame.get_fiber_offsets([fiber])[0]
info = spCFrame.plug_map[fiber_index]
objtype = info['OBJTYPE'].rstrip()
print('Fiber {} objtype is {}.'.format(fiber, objtype))
# Load the uncalibrated flux in flat-fielded electrons from the spFrame file.
name = exposures.get_exposure_name(expidx, camera, 'spFrame')
path = mirror.get(finder.get_plate_path(plate, name))
spFrame = bossdata.plate.FrameFile(path, calibrated=False)
data = spFrame.get_valid_data(
[fiber], include_sky=True, use_ivar=True, pixel_quality_mask=mask)[0]
ewave, eflux, esky, eivar = data['wavelength'], data['flux'], data['sky'], data['ivar']
# Look up the trace position on the CCD.
tracex = spFrame.hdulist[7].read()[fiber_index]
# Load the fluxcorr for this fiber.
name = exposures.get_exposure_name(expidx, camera, 'spFluxcorr')
path = mirror.get(finder.get_plate_path(plate, name))
with fits.open(path) as spFluxcorr:
corr = spFluxcorr[0].data[fiber_index]
# Load the fluxcalib for this fiber.
name = exposures.get_exposure_name(expidx, camera, 'spFluxcalib')
path = mirror.get(finder.get_plate_path(plate, name))
with fits.open(path) as spFluxcalib:
spcalib = spFluxcalib[0].data[fiber_index]
# The spFrame uses a TraceSet instead of tabulated log(lambda) values.
# The b-camera spCFrame, spFluxcorr arrays have 16 extra entries compared
# with spFrame, so trim those now.
n = len(ewave)
wave = wave[:n]
assert np.allclose(wave, ewave)
flux = flux[:n]
sky = sky[:n]
ivar = ivar[:n]
corr = corr[:n]
# Load the superflat from the spFrame file.
superflat = spFrame.get_superflat([fiber])[0]
# Load the fiberflat and neff from the spFlat file.
name = exposures.get_exposure_name(expidx, camera, 'spFlat')
path = mirror.get(finder.get_plate_path(plate, name))
with fits.open(path) as spFlat:
fiberflat = spFlat[0].data[fiber_index]
neff = bossdata.plate.TraceSet(spFlat[3]).get_y()[fiber_index]
# Get the flux distortion map for this plate's coadd.
path = mirror.get(finder.get_fluxdistort_path(plate, mjd))
with fits.open(path) as spFluxdistort:
distort_coadd = spFluxdistort[0].data[fiber_index]
# Build the coadded loglam grid.
hdr = spFluxdistort[0].header
loglam0, idx0, dloglam = hdr['CRVAL1'], hdr['CRPIX1'], hdr['CD1_1']
loglam = loglam0 + (np.arange(len(distort_coadd)) - idx0) * dloglam
wave_coadd = 10 ** loglam
# Linearly interpolate the distortion to our wavelength grid.
distort = np.interp(wave, wave_coadd, distort_coadd)
    # Calculate ratio of dloglam=1e-4 coadd bin sizes to native pixel bin sizes.
R = dloglam / np.gradient(np.log10(wave))
# Combine the flat-field corrections.
flat = superflat * fiberflat
# Calculate the raw electron counts, including the sky. Note that this
# can be negative due to read noise.
electrons = flat * (eflux + esky)
    # Look up the readnoise measured in each amplifier quadrant.
readnoise_per_quad = np.empty(4)
for quadrant in range(4):
readnoise_per_quad[quadrant] = spFrame.header['RDNOISE{}'.format(quadrant)]
print('Readnoise is {:.2f}/{:.2f}/{:.2f}/{:.2f} electrons'
.format(*readnoise_per_quad))
# Get the quadrant of each wavelength pixel along this trace.
ampsizes = {'blue': (2056, 2048), 'red': (2064, 2057)}
ysize, xsize = ampsizes[band]
yamp = 1 * (np.arange(2 * ysize) >= ysize)
xamp = 2 * (tracex >= xsize)
quad = xamp + yamp
# Lookup the read noise for each wavelength pixel along this trace.
readnoise_per_pixel = readnoise_per_quad[quad]
mean_readnoise = np.mean(readnoise_per_pixel)
# Estimate the readnoise per wavelength pixel.
# Why is scale~2.35 necessary to reproduce the pipeline noise??
##scale = np.sqrt(8 * np.log(2))
scale = (4 * np.pi) ** 0.25
readnoise = readnoise_per_pixel * neff * scale
# Calculate the pipeline variance in detected electrons.
evar = flat ** 2 * inverse(eivar)
# Predict what the variance in detected electrons should be.
# Clip bins with electrons < 0 (due to read noise), to match what
# the pipeline does (in sdssproc).
evar_pred = np.clip(electrons, a_min=0, a_max=None) + readnoise ** 2
# Calculate the actual flux / eflux calibration used by the pipeline.
ecalib1 = flux * inverse(eflux)
# Calculate the flux / eflux calibration from the components described at
# https://trac.sdss3.org/wiki/BOSS/pipeline/FluxToPhotons
ecalib2 = corr * distort * R * inverse(spcalib)
# Compare the actual and predicted calibrations.
nonzero = (ecalib1 > 0)
absdiff = np.abs(ecalib1[nonzero] - ecalib2[nonzero])
reldiff = absdiff / np.abs(ecalib1[nonzero] + ecalib2[nonzero])
print('calibration check: max(absdiff) = {:.5f}, max(reldiff) = {:.5f}'
.format(np.max(absdiff), np.max(reldiff)))
# Calculate the flux variance.
var = inverse(ivar)
# Predict the flux variance by scaling the eflux variance.
var_pred = ecalib1 ** 2 * inverse(eivar)
# Limit plots to wavelengths where the flat is nonzero.
nonzero = np.where(flat > 0)[0]
wmin, wmax = wave[nonzero[[0,-1]]]
# Truncate tails.
evar_max = np.percentile(evar, 99)
var_max = np.percentile(var, 99)
# Initialize plots.
fig, ax = plt.subplots(3, 2, figsize=(8.5, 11))
ax = ax.flatten()
ax[0].plot(wave, flux + sky, 'k.', ms=1, label='flux+sky')
ax[0].plot(wave, var, 'r.', ms=1, label='var')
ax[0].plot(wave, var_pred, 'b.', ms=1, label='pred')
ax[0].set_xlim(wmin, wmax)
ax[0].set_ylim(0, np.percentile(flux + sky, 99))
ax[0].set_xlabel('Wavelength [A]')
ax[0].set_ylabel('Flux, Variance [flux]')
ax[0].legend(ncol=3)
ax[1].plot(wave, eflux + esky, 'k.', ms=1, label='flux+sky')
ax[1].plot(wave, evar, 'r.', ms=1, label='var')
ax[1].plot(wave, evar_pred, 'b.', ms=1, label='pred')
ax[1].plot(wave, readnoise, 'g-', label='readnoise')
ax[1].set_xlim(wmin, wmax)
ax[1].set_ylim(0, evar_max)
ax[1].set_xlabel('Wavelength [A]')
ax[1].set_ylabel('Flux, Variance [elec]')
ax[1].legend(ncol=2)
ax[2].plot(wave, flat, 'k-', label='both')
ax[2].plot(wave, superflat, 'r-', label='super')
ax[2].plot(wave, fiberflat, 'b-', label='fiber')
ax[2].set_xlim(wmin, wmax)
ax[2].set_xlabel('Wavelength [A]')
ax[2].set_ylabel('Flat Field Correction')
ax[2].legend(ncol=3)
ax[3].plot(wave, ecalib1, 'k-', label='All')
ax[3].plot(wave, corr, 'b-', label='corr')
ax[3].plot(wave, 5 * inverse(spcalib), 'r-', label='5/spcalib')
ax[3].plot(wave, distort, '-', c='magenta', label='distort')
ax[3].plot(wave, R, 'g-', label='R')
ax[3].set_xlim(wmin, wmax)
ax[3].set_xlabel('Wavelength [A]')
ax[3].set_ylabel('Flux Calibration [flux/elec]')
ax[3].legend(ncol=3)
'''
ax[4].plot(var_pred, var, 'k.', ms=1)
ax[4].set_xlim(0, var_max)
ax[4].set_ylim(0, var_max)
ax[4].set_xlabel('Predicted Variance [flux]')
ax[4].set_ylabel('Pipeline Variance [flux]')
'''
excess_rms_per_pix = np.sqrt(evar - electrons)
ax[4].plot(wave, excess_rms_per_pix, 'k.', ms=1)
ax[4].plot(wave, readnoise, 'r-')
ax[4].set_xlim(wmin, wmax)
ax[4].set_ylim(0., np.percentile(excess_rms_per_pix, 95))
ax[4].set_xlabel('Wavelength [A]')
ax[4].set_ylabel('(Pipeline Var - Shot Noise)$^{1/2}$ [det elec]')
ax[5].plot(evar_pred, evar, 'k.', ms=1)
ax[5].plot([0, evar_max], [0, evar_max], 'r--')
ax[5].set_xlim(0, evar_max)
ax[5].set_ylim(0, evar_max)
ax[5].set_xlabel('Predicted Variance [det elec$^2$]')
ax[5].set_ylabel('Pipeline Variance [det elec$^2$]')
title = '{}-{}-{} {}[{}]={} OBJ={} RDNOISE={:.1f}e'.format(
plate, mjd, fiber, camera, expidx, expnum, objtype, mean_readnoise)
plt.suptitle(title)
plt.subplots_adjust(top=0.95, right=0.99)
if save:
plt.savefig(save)
plot_calib(fiber=1, band='blue')
plot_calib(fiber=1, band='red')
plot_calib(fiber=486, band='blue')
plot_calib(fiber=486, band='red')
plot_calib(fiber=12, band='red')
```
# Current SARS-CoV-2 Viral Diversity Supports Transmission Rule-Out by Genomic Sequencing
When community transmission levels are high, there will be many coincidences in which individuals in the same workplace, classroom, nursing home, or other institution test positive for SARS-CoV-2 purely by chance. Genomic sequencing can separate such coincidences from true transmission clusters.
Demonstrating that an epidemiologically-linked cluster does not have genomic links provides reassurance to stakeholders that infection control practices are working. If an epidemiologically-linked cluster does have genomic links, transmission in the identified setting is more likely and decision-makers can focus on revising infection control practices or policies to prevent future transmission.
This is possible because the SARS-CoV-2 virus mutates, on average, once every 2 weeks. (See [nextstrain](https://nextstrain.org/ncov/gisaid/global?l=clock) for an up-to-date estimate. As of 10-01-21, the rate estimate was 23.87 substitutions per year, or one every 2.18 weeks.) If two people are part of the same transmission event (A infected B, or some C infected both A and B), then the genome sequences of the virus from each case involved will differ by at most 1 or 2 mutations.
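As a quick back-of-the-envelope check of that conversion (the 23.87 substitutions/year figure quoted above is the only input):
```
subs_per_year = 23.87                # nextstrain clock estimate as of 10-01-21
weeks_per_year = 365 / 7             # ~52.1
weeks_per_mutation = weeks_per_year / subs_per_year
print(round(weeks_per_mutation, 2))  # ~2.18 weeks between mutations, i.e. roughly one every 2 weeks
```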

The converse is not necessarily true: it is possible for the genomes of virus to match even when the cases are epidemiologically quite distant, especially when superspreader events are involved. (Early in the pandemic we [documented](https://twitter.com/thebasepoint/status/1278057767983448064) instances where a viral genomic sequence was observed identically across dozens of countries, and persisted for months.)
Inspired by a potential program doing sequencing in schools, we asked: at this point in the pandemic, how sensitive is genomic sequencing for ruling out transmission? If two cases are unrelated, if the epi link is a coincidence, will genomic sequencing tell you that?
The more diverse the circulating population of SARS-CoV-2, the more powerful sequencing will be.
### Analysis
To answer this question, we picked an American city with a very high level of genomic sequencing being done: San Diego, California. The [SEARCH Alliance](https://searchcovid.info/) has been sequencing SARS-CoV-2 for almost a year and a half, and over the summer, they regularly sequenced 10-20% of the reported daily cases. We chose to analyze samples from August 2021, because it is after the Delta sweep, and so represents the current pandemic phase, and has a large number of genomes already sequenced and deposited into GISAID.
We downloaded the 2429 high-quality genomes from August 2021 from GISAID, and computed their pairwise SNP distances using `snp-dists` on a `mafft` alignment.

We define a *potential coincidental epi link* to be a pair of samples from the time period, with collection dates less than 2 weeks apart. We say that potential epi link would be ruled out if the genomes are more than 2 SNPs away from each other.
**We find that 99.5% of potential spurious epi links would be ruled out by sequencing!**
One way to think about this: a given person A was infected by one person B. Just 0.5% of the cases in the area have genotypes close enough to that of A to plausibly be the infector B, so the chance that a coincidentally epi-linked case is also a genetic link is very low.
Analysis below:
```
# Inputs
fasta_file = 'data/1633204764832.sequences.fasta'
meta_file = 'data/1633204764832.metadata.tsv'
reference_file = 'data/ref.fasta'
# Intermediates
working_dir = 'scratch/'
aligned_file = working_dir + 'aligned.aln'
aligned_masked_file = working_dir + 'aligned_masked.aln'
dists_file = working_dir + 'snp-dists.tsv'
dists_masked_file = working_dir + 'snp-dists_masked.tsv'
```
Align whole genomes to reference.
```
# Flags just align to reference
!mafft \
--6merpair --thread 10 --keeplength --addfragments \
{fasta_file} {reference_file} > {aligned_file}
```
Mask sites
```
import pandas as pd
from Bio import AlignIO
import copy
algn = AlignIO.read(aligned_file, "fasta")
masked_algn = copy.deepcopy(algn) # Create copy to test differences
masked_vcf_url = "https://raw.githubusercontent.com/W-L/ProblematicSites_SARS-CoV2/master/problematic_sites_sarsCov2.vcf"
masked_vcf = pd.read_csv(masked_vcf_url, sep="\t", comment="#", names=["region", "pos", "ref", "alt", "x", "y", "mask", "comment"])
masked_sites = masked_vcf[masked_vcf["mask"] == "mask"]["pos"].tolist()
for i in masked_sites:
pos = i-1
for rec in masked_algn:
rec.seq = rec.seq[:pos] + "N" + rec.seq[pos+1:]
AlignIO.write(masked_algn, aligned_masked_file, "fasta")
```
Compute SNP distances between samples.
```
!/home/gk/code/snp-dists/snp-dists -j 20 \
-m {aligned_file} > {dists_file}
!/home/gk/code/snp-dists/snp-dists -j 20 \
-m {aligned_masked_file} > {dists_masked_file}
import datetime
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import tqdm
import seaborn as sns
%matplotlib inline
# Load metadata
meta = pd.read_csv(meta_file, sep='\t')
date_lookup = dict()
for sample, date in zip(meta['strain'], meta['date']):
if pd.isna(date):
print(f"Warning: {sample} is missing collection date.")
date_lookup[sample] = np.nan
else:
date_lookup[sample] = datetime.datetime.strptime(date, '%Y-%m-%d').toordinal()
# Close = SNP dist <= 2, Far = SNP dist > 2, for samples collected
# within 2 weeks of each other
def compute_pairs(dist_matrix_file):
close_pairs = 0
far_pairs = 0
distances = []
with open(dist_matrix_file, 'r') as infile:
for line in tqdm.tqdm(infile):
(sample1, sample2, distance) = line.split()
distance = int(distance)
if sample1 not in date_lookup or sample2 not in date_lookup:
continue
if abs(date_lookup[sample1] - date_lookup[sample2]) > 14:
continue
if sample1 == sample2:
continue
if distance <= 2:
close_pairs += 1
if distance > 2:
far_pairs += 1
distances.append(distance)
return close_pairs, far_pairs, distances
close_pairs, far_pairs, distances = compute_pairs(dists_file)
close_pairs, far_pairs
np.round(100*far_pairs/(close_pairs + far_pairs), 1)
masked_close_pairs, masked_far_pairs, masked_distances = compute_pairs(dists_masked_file)
masked_close_pairs, masked_far_pairs
np.round(100*masked_far_pairs/(masked_close_pairs + masked_far_pairs), 1)
masked_close_pairs, masked_far_pairs
close_pairs, far_pairs
```
## Measuring Diversity Within and Between Lineages
A more detailed histogram of all pairwise SNP distances between samples collected within 2 weeks of one another shows a tri-modal distribution.
Only pairs of samples on the far left (to the left of the 2-SNP red line) could have direct transmission links.
```
plt.subplots(figsize=(10,7.5))
plt.hist(distances, bins = 40)
plt.title("SNP distances between random pairs of samples\n(San Diego, August 2021, n=3830)")
plt.xlabel('SNP distance')
plt.ylabel('count')
plt.axvline(2, color='red')
plt.tight_layout()
plt.savefig('SNP-dist-san-diego.png')
plt.subplots(figsize=(10,7.5))
plt.hist(masked_distances, bins = 40)
plt.title("SNP masked distances between random pairs of samples\n(San Diego, August 2021, n=3830)")
plt.xlabel('SNP distance')
plt.ylabel('count')
plt.axvline(2, color='red')
plt.tight_layout()
plt.savefig('SNP-dist-masked-san-diego.png')
```
We then investigated the source of the three peaks for pairwise SNP distances: around 15, 35, and 70.
Presumably, these are generated by the typical distances within or between certain lineages. For example, the founding genotypes of B.1.617.2 and P.1 (Delta and Gamma) are some distance apart on the tree, and the distance from any Delta to any Gamma should be approximately the same. (With a mutation rate of ~1 mut/2 weeks, the distance should be approximately the number of weeks since Delta and Gamma diverged.)
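A rough check of this expectation for the Delta/Gamma comparison worked out below, using the ~86-week divergence time quoted there and the clock rate from above (an order-of-magnitude sketch, not part of the pipeline):
```
# Two lineages that split T weeks ago each accumulate about T / 2.18 mutations,
# so the expected pairwise distance is roughly 2 * T / 2.18.
weeks_since_divergence = 86          # Delta vs Gamma, ~Jan 2020 to Aug 2021
expected_snp_distance = 2 * weeks_since_divergence / 2.18
print(round(expected_snp_distance))  # ~79, the same order as the observed 71 SNPs
```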
```
meta.groupby('pangolin_lineage')['date'].count().sort_values(ascending=False)
lineages = meta['pangolin_lineage'].unique()
n_lineages = len(lineages)
lin_to_row = dict(zip(lineages, range(n_lineages)))
sample_to_lin = dict(zip(meta['strain'], meta['pangolin_lineage']))
counts = np.zeros((n_lineages, n_lineages))
distances = np.zeros((n_lineages, n_lineages))
with open(dists_file, 'r') as infile:
for line in tqdm.tqdm(infile):
(sample1, sample2, distance) = line.split()
if sample1 == sample2:
continue
distance = int(distance)
if sample1 not in date_lookup or sample2 not in date_lookup:
continue
if abs(date_lookup[sample1] - date_lookup[sample2]) > 14:
continue
lin1 = sample_to_lin[sample1]
lin2 = sample_to_lin[sample2]
# distances.loc[lin1, lin2] += distance
# counts.loc[lin1, lin2] += 1
idx1 = lin_to_row[lin1]
idx2 = lin_to_row[lin2]
distances[idx1, idx2] += distance
counts[idx1, idx2] += 1
distances = pd.DataFrame(index = lineages,
columns = lineages,
data = distances)
counts = pd.DataFrame(index = lineages,
columns = lineages,
data = counts)
mean_distances = distances/counts
```
Remove None (uncalled lineages) and AY.10 (few pairs within the 2-week window).
```
md = mean_distances.drop(['None'], axis = 0).drop(['None'], axis = 1)
within_lineage_dist = (
md
.stack()
.reset_index()
.query('level_0 == level_1')
.sort_values(0)
)
plt.subplots(figsize=(10,7.5))
plt.barh(within_lineage_dist["level_0"].tolist(), within_lineage_dist[0].tolist())
plt.title("Within lineage distance in San Diego from Aug (n=3830)")
plt.tight_layout()
plt.savefig("within_lineage_dist.png", dpi = 300)
sns.clustermap(md.fillna(0), vmin=-5, mask=md.isna())
```
The distance between a typical Delta lineage (say, AY.25) and Gamma (P.1) is 71 SNPs. The lineages diverged in Jan 2020, or 86 weeks before these samples were collected.
```
mean_distances.loc['AY.25', 'P.1']
```
In contrast, the average distance between AY.3 and AY.25 samples is 16.6; these lineages have a most recent common ancestor in April 2021, roughly 16 weeks before the samples were collected.
```
mean_distances.loc['AY.25', 'AY.3']
```
Even within PANGO lineages, there is significant diversity. The average distance within AY.25 is 8, meaning that most pairs could still be ruled out.
```
mean_distances.loc['AY.25', 'AY.25']
```
Inside Delta more broadly, those still categorized as `B.1.617.2`, the diversity is even greater.
```
mean_distances.loc['B.1.617.2', 'B.1.617.2']
within_lineage_data = []
within_lineage_distances = []
lineage_distribution = {}
with open(dists_file, 'r') as infile:
for line in tqdm.tqdm(infile):
(sample1, sample2, distance) = line.split()
if sample1 == sample2:
continue
distance = int(distance)
if sample1 not in date_lookup or sample2 not in date_lookup:
continue
if abs(date_lookup[sample1] - date_lookup[sample2]) > 14:
continue
lin1 = sample_to_lin[sample1]
lin2 = sample_to_lin[sample2]
row = [sample1, sample2, lin1, lin2, distance]
within_lineage_data.append(row)
if lin1 == lin2:
within_lineage_distances.append(distance)
if lin1 in lineage_distribution:
lineage_distribution[lin1].append(distance)
else:
lineage_distribution[lin1] = [distance]
within_lineage_df = pd.DataFrame(within_lineage_data, columns = ["sample1", "sample2", "sample1_lineage", "sample2_lineage", "snp_distance"])
(np.array(within_lineage_distances) > 2).sum()/len(within_lineage_distances)
```
Even if we restrict ourselves to pairs of samples within the same PANGO lineage, 98.5% of pairs are still more than 2 SNPs apart.
This means that **for transmission cluster rule-out, it is essential to use actual SNP distances, and not just PANGO lineage assignments**. Relying on lineage assignments alone gives up substantial power.
```
lineage_match = 0
lineage_mismatch = 0
with open(dists_file, 'r') as infile:
for line in tqdm.tqdm(infile):
(sample1, sample2, distance) = line.split()
if sample1 == sample2:
continue
distance = int(distance)
if sample1 not in date_lookup or sample2 not in date_lookup:
continue
if abs(date_lookup[sample1] - date_lookup[sample2]) > 14:
continue
lin1 = sample_to_lin[sample1]
lin2 = sample_to_lin[sample2]
if lin1 == lin2:
lineage_match += 1
else:
lineage_mismatch += 1
100 - np.round(lineage_mismatch/(lineage_match + lineage_mismatch)*100,2)
# Within lineage distances
plt.hist(lineage_distribution['B.1.617.2'], bins=20)
plt.title("SNP Distances within B.1.617.2")
plt.axvline(2, color='red')
plt.hist(lineage_distribution['P.1'], bins=20)
plt.title("SNP Distances within P.1")
plt.axvline(2, color='red')
# Within lineage distances
plt.hist(lineage_distribution['B.1.621'], bins=20)
plt.title("SNP Distances within B.1.621")
plt.axvline(2, color='red')
plt.hist(lineage_distribution['B.1.617.2'], bins=20)
```
## Rule-in?
How reliable is genetic confirmation of an epi link?
We can think of this in a Bayesian way:
$$P(\textrm{transmission}| \textrm{epi}, \textrm{genomics}) = \frac{P(\textrm{genomics}| \textrm{epi}, \textrm{transmission})*P(\textrm{transmission} | \textrm{epi})}{P(\textrm{genomics}|\textrm{epi})}.$$
The probability of seeing a genomic link (<= 2 SNPs) given transmission is very high, say 99% (the 1% accounts for a burst of mutations, as in a long latent infection, or sample mixups in the sequencing lab).
The prior probability of seeing transmission given the epi link alone, $P(\textrm{transmission} | \textrm{epi})$ depends on the circumstance, but might reasonably range from 1% (same school but no shared classes) to 90% (same household).
The denominator is a sum of two terms: the probability of seeing a genomic link given transmission, weighted by the prior probability of transmission given the epi data, plus the probability of seeing a genomic link given no transmission, weighted by the prior probability of no transmission given the epi data.
$$P(\textrm{genomics}|\textrm{epi}) = P(\textrm{genomics}| \textrm{epi}, \textrm{transmission})*P(\textrm{transmission} | \textrm{epi}) \\+ P(\textrm{genomics}| \textrm{epi}, \textrm{no transmission})*P(\textrm{no transmission} | \textrm{epi})$$
The key variable that we approximated above is $P(\textrm{genomics}| \textrm{epi}, \textrm{no transmission})$, the probability of seeing a genomic link given the epi circumstance *if transmission did not take place*. That is a measure of the genomic diversity of the community from which these individuals were drawn. We estimated this to be 0.5% in San Diego at this time, but if a smaller community is considered (eg, a neighborhood, or a socio-economic/demographic group), then data from within that community should be used.
If we say that the probability of a genomic link given no transmission were 2% (accounting for a smaller community with less genomic diversity), then we would have:
$$P(\textrm{transmission}| \textrm{epi}, \textrm{genomics}) = \frac{0.99*p}{0.99*p + 0.02*(1-p)},$$
where $p$ was the prior probability of transmission given the epi evidence alone.
```
def posterior_probability(prior, background=0.02, sequencing_accuracy=0.99):
return sequencing_accuracy*prior/(sequencing_accuracy*prior + background*(1-prior))
```
If the epi evidence were reasonably strong, say, p = 75% (say, in the case of household transmission), the genomics would boost it to 99.3%.
```
posterior_probability(0.75)
```
If the epi evidence were very weak, say, p = 0.1% (say, if the two cases are just in the same neighborhood), then genomics would boost it to 4.7%.
```
posterior_probability(0.001)
```
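To see how the genomic evidence shifts the full range of possible priors (same assumptions as above: 99% sensitivity and a 2% background match rate), a small sweep can be plotted:
```
priors = np.linspace(0.001, 0.99, 200)
posteriors = [posterior_probability(p) for p in priors]
plt.plot(priors, posteriors, label='posterior with genomic link')
plt.plot(priors, priors, '--', label='prior (no genomic evidence)')
plt.xlabel('Prior P(transmission | epi)')
plt.ylabel('Posterior P(transmission | epi, genomics)')
plt.legend()
```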
# Import required libraries
```
import pandas as pd # for manipulating data
import numpy as np # Manipulating arrays
import keras # High level neural network API
import tensorflow as tf # Framework use for dataflow
from sklearn.model_selection import train_test_split # To split the data into train and validation
from tensorflow.keras.models import Sequential # To build neural network
from tensorflow.keras.layers import Dense # To add the dense layer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score # To calculate the matrix
from sklearn.metrics import classification_report # To print the report
from sklearn.metrics import precision_recall_curve # To print the precision-recall curve
from tensorflow.keras.optimizers import Adam # optimizer
import matplotlib.pyplot as plt # To plot the graph
from tensorflow.keras.models import load_model # To load the model
from keras.utils import CustomObjectScope #Provides a scope that changes to _GLOBAL_CUSTOM_OBJECTS cannot escape.
from keras.initializers import glorot_uniform #Initializations define the way to set the initial random weights of Keras layers.
# Read the file
df = pd.read_csv('musk_csv.csv')
df.head()
```
# Pre-processing
```
df.describe()
print("Length of Musk is :",len(df[df['class']== 1]))
print("Length of Non-Musk is :",len(df[df['class']== 0]))
# Check whether there is any null value
df.isnull().sum()
# Drop unnecessary columns(ID ,molecule_name,conformation_name)
df = df.drop(columns=['ID','molecule_name','conformation_name'])
df.head()
```
### Split the data into train and test
```
train, test= train_test_split(df,test_size=0.20,random_state=6)
print(f"Row in training set:{len(train)}\nRow in testing set:{len(test)} ")
train_X = train[train.columns[:-1]]
train_Y = train[train.columns[-1]]
test_X = test[test.columns[:-1]]
test_Y = test[test.columns[-1]]
# Check the shape of the data
train_X.shape
train_Y.shape
```
# Small model
### Named the small model because only one hidden layer (60 nodes, ReLU activation) is used, followed by an output layer with a single node and a sigmoid activation function
### The model will predict whether the taken compound is Non-Musk(0) or Musk(1)
```
model = Sequential()
model.add(Dense(60, input_dim=166, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
```
### The model will be fit using the binary cross entropy loss function and we will use the efficient Adam version of stochastic gradient descent. The model will also monitor the classification accuracy metric
```
# Compile model
model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=['accuracy'])
```
### Fitting the model for 9 epochs with a batch size of 100
```
history = model.fit(train_X,train_Y, epochs = 9, batch_size=100, validation_data=(test_X, test_Y) )
```
### Visualize the model accuracy and model loss
```
def plot_learningCurve(history, epoch):
# Plot training & validation accuracy values
epoch_range = range(1, epoch+1)
plt.plot(epoch_range, history.history['acc'])
plt.plot(epoch_range, history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(epoch_range, history.history['loss'])
plt.plot(epoch_range, history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
plot_learningCurve(history, 9)
```
# Make predictions
```
preds= model.predict(test_X)
preds_classes = model.predict_classes(test_X)
```
# Calculate metrics
### Three metrics, in addition to classification accuracy, are commonly required for a neural network model on a binary classification problem:
### Accuracy : (TP + TN) / (TP + TN + FP + FN)
### Precision : TP / (TP + FP)
### Recall : TP / (TP + FN)
### F1 Score : 2 TP / (2 TP + FP + FN)
```
# reduce to 1d arrays before calculating the metrics
preds = preds[:, 0]
preds_classes =preds_classes[:, 0]
print(classification_report(test_Y, preds_classes))
precision, recall, thresholds = precision_recall_curve(test_Y, preds_classes)
# create plot
plt.plot(precision, recall, label='Precision-recall curve')
_ = plt.xlabel('Precision')
_ = plt.ylabel('Recall')
_ = plt.title('Precision-recall curve')
_ = plt.legend(loc="lower left")
```
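The same scores can also be obtained directly with the scikit-learn helpers imported at the top of the notebook; a minimal sketch, assuming `test_Y` and `preds_classes` as computed above:
```
print("Accuracy :", accuracy_score(test_Y, preds_classes))
print("Precision:", precision_score(test_Y, preds_classes))
print("Recall   :", recall_score(test_Y, preds_classes))
print("F1 Score :", f1_score(test_Y, preds_classes))
```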
# Large model
### Named the large model because more than one hidden layer is used: 60 nodes in the 1st layer and 30 nodes in the 2nd layer, followed by an output layer with a single node
```
model = Sequential()
model.add(Dense(60, input_dim=166, activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
# Compile model
model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=['accuracy'])
history = model.fit(train_X,train_Y, epochs = 9, batch_size=100, validation_data=(test_X, test_Y) )
```
### Visualize the model accuracy and model loss
```
def plot_learningCurve(history, epoch):
# Plot training & validation accuracy values
epoch_range = range(1, epoch+1)
plt.plot(epoch_range, history.history['acc'])
plt.plot(epoch_range, history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(epoch_range, history.history['loss'])
plt.plot(epoch_range, history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
plot_learningCurve(history, 9)
```
# Make predictions
```
preds= model.predict(test_X)
preds_classes = model.predict_classes(test_X)
```
# Calculate metrics
```
# reduce to 1d arrays before calculating the metrics
preds = preds[:, 0]
preds_classes =preds_classes[:, 0]
print(classification_report(test_Y, preds_classes))
precision, recall, thresholds = precision_recall_curve(test_Y, preds_classes)
# create plot
plt.plot(precision, recall, label='Precision-recall curve')
_ = plt.xlabel('Precision')
_ = plt.ylabel('Recall')
_ = plt.title('Precision-recall curve')
_ = plt.legend(loc="lower left")
```
# Model save
### This function saves:
- The architecture of the model, allowing it to be re-created
- The weights of the model
- The training configuration (loss, optimizer)
- The state of the optimizer, allowing training to resume exactly where you left off
```
model.save('model.h5')
```
# Load model
```
with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
new_model = load_model('model.h5')
```
<h1 align="center">ML For Defect Analysis</h1>
## 1. Building the Model
```
import warnings
warnings.filterwarnings('ignore')
import os
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#Split the data into train, validation & test
import splitfolders
input_folder = 'Data'
# Split with a ratio.
# To only split into training and validation set, set a tuple to `ratio`, i.e, `(.8, .2)`.
#Train, val, test
splitfolders.ratio(input_folder, output="Data_split",
seed=42, ratio=(.75, .2, .05),
group_prefix=None)
train_dir = os.path.join(os.getcwd(), 'Data_split\\train')
validation_dir = os.path.join(os.getcwd(), 'Data_split\\val')
# Directory with our training 'proper' pictures
train_proper_dir = os.path.join(train_dir, 'proper')
# Directory with our training 'defective' pictures
train_defective_dir = os.path.join(train_dir, 'defective')
# Directory with our validation 'proper' pictures
validation_proper_dir = os.path.join(validation_dir, 'proper')
# Directory with our validation 'defective' pictures
validation_defective_dir = os.path.join(validation_dir, 'defective')
# The model has already been trained. Run this codeblock only to re-train the model.
'''
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
'''
# The model has already been trained. Run this codeblock only to re-train the model.
'''
#Compiling the model
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=1e-4),
metrics=['accuracy'])
'''
# The model has already been trained. Run this codeblock only to re-train the model.
'''
# Image Augmentation
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
'''
# The model has already been trained. Run this codeblock only to re-train the model.
'''
# Flow training images in batches of 16 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # This is the source directory for training images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=16,
class_mode='binary')
# Flow validation images in batches of 16 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=16,
class_mode='binary')
'''
# The model has already been trained. Run this codeblock only to re-train the model.
'''
history = model.fit(
train_generator,
      steps_per_epoch=50,  # 800 images per epoch = batch_size (16) * steps (50)
epochs=20,
validation_data=validation_generator,
      validation_steps=30,  # 480 images = batch_size (16) * steps (30)
verbose=2)
'''
#This will work only if you have run the previous codeblocks
'''
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
'''
#Saving the model
# model.save('model.h5')  # Current best model has train_acc = 82.75 and val_acc = 84.93
```
## 2. Loading The Model
```
import warnings
warnings.filterwarnings("ignore")
import os
import tensorflow as tf
from IPython.display import Image, display
from tensorflow.keras.models import load_model
new_model = load_model('model_tacc8275_val8493.h5')
new_model.summary()
img_path = os.path.join(os.getcwd(), 'Data\\Defective\\Defective (5).jpg')
display(Image(filename=img_path))
listOfImageNames = [img_path]
#For multiple images
'''
from IPython.display import Image, display
listOfImageNames = [os.path.join(os.getcwd(), 'Data\\Defective\\Defective (1).jpg'),
os.path.join(os.getcwd(), 'Data\\Defective\\Defective (2).jpg'),
os.path.join(os.getcwd(), 'Data\\Defective\\Defective (3).jpg'),
os.path.join(os.getcwd(), 'Data\\Defective\\Defective (4).jpg'),
os.path.join(os.getcwd(), 'Data\\Defective\\Defective (5).jpg'),
os.path.join(os.getcwd(), 'Data\\Defective\\Defective (6).jpg'),
os.path.join(os.getcwd(), 'Data\\Defective\\Defective (7).jpg'),
]
for imageName in listOfImageNames:
display(Image(filename=imageName))
'''
import numpy as np
from keras.preprocessing import image
#for single image
path = img_path
img = image.load_img(path, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = new_model.predict(images, batch_size=10)
print(classes[0])
if classes[0]<0.5:
print("Bottle is Defective")
else:
print("Bottle is Proper")
#for multiple images
'''
for fn in listOfImageNames:
path = fn
img = image.load_img(path, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = new_model.predict(images, batch_size=10)
print(classes[0])
if classes[0]>0.5:
print("Bottle is proper")
else:
print("Bottle is defective")
'''
```
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
#export
from exp.nb_02_callbacks import *
```
# Initial Setup
```
x_train, y_train, x_valid, y_valid = get_data(url=MNIST_URL)
train_ds = Dataset(x=x_train, y=y_train)
valid_ds = Dataset(x=x_valid, y=y_valid)
nh = 50
bs = 16
c = y_train.max().item() + 1
loss_func = F.cross_entropy
data = DataBunch(*get_dls(train_ds, valid_ds, bs=bs), c=c)
#export
def create_learner(model_func, loss_func, data):
return Learner(*model_func(data), loss_func, data)
learner = create_learner(get_model, loss_func, data)
run = Runner(cbs=[AvgStatsCallback(metrics=[accuracy])])
run.fit(epochs=3, learner=learner)
learner = create_learner(partial(get_model, lr=0.3), loss_func, data)
run = Runner(cbs=[AvgStatsCallback(metrics=[accuracy])])
run.fit(epochs=3, learner=learner)
#export
def get_model_func(lr=0.5):
return partial(get_model, lr=lr)
```
# Annealing
We define two new callbacks:
1. a Recorder: to keep track of the loss and our scheduled learning rate
2. a ParamScheduler: that can schedule any hyperparameter as long as it's registered in the state_dict of the optimizer
```
#export
class Recorder(Callback):
def begin_fit(self):
self.lrs = []
self.losses = []
def after_batch(self):
if not self.in_train:
return
self.lrs.append(self.opt.param_groups[-1]["lr"])
self.losses.append(self.loss.detach().cpu())
def plot_lr(self):
plt.plot(self.lrs)
def plot_loss(self):
plt.plot(self.losses)
class ParamScheduler(Callback):
_order = 1
def __init__(self, pname, sched_func):
self.pname = pname
self.sched_func = sched_func
def set_param(self):
for pg in self.opt.param_groups:
### print(self.sched_func, self.n_epochs, self.epochs)
pg[self.pname] = self.sched_func(self.n_epochs/self.epochs)
def begin_batch(self):
if self.in_train:
self.set_param()
```
Let's start with a simple linear schedule going from start to end.
It returns a function that takes a "pos" argument (going from 0 to 1) such that this function goes from "start" (at pos=0) to "end" (at pos=1) in a linear fashion.
```
def sched_linear(start, end):
def _inner(start, end, pos):
return start + (end-start)*pos
return partial(_inner, start, end)
```
We can refactor the above sched_linear function using a decorator so that we do not need to repeat the inner-function/partial boilerplate for every new schedule.
```
#export
def annealer(f):
def _inner(start, end):
return partial(f, start, end)
return _inner
@annealer
def sched_linear(start, end, pos):
return start + (end-start)*pos
f = sched_linear(1,2)
f
f(pos=0.3)
f(0.3)
f(0.5)
```
Some more important scheduler functions:
```
#export
@annealer
def sched_cos(start, end, pos):
return start + (end-start) * (1 + math.cos(math.pi*(1-pos))) / 2.
@annealer
def sched_no(start, end, pos):
return start
@annealer
def sched_exp(start, end, pos):
return start * ((end/start) ** pos)
annealings = "NO LINEAR COS EXP".split(" ")
a = torch.arange(start=0, end=100)
p = torch.linspace(start=0.01, end=1, steps=100)
fns = [sched_no, sched_linear, sched_cos, sched_exp]
for fn, t in zip(fns, annealings):
f = fn(start=2, end=1e-2)
plt.plot(a, [f(i) for i in p], label=t)
plt.legend();
### in earlier versions of PyTorch, a Tensor object did not have an "ndim" attribute
### we can add an attribute to any Python object using the property() function.
### here we are adding an "ndim" attribute to the Tensor object using the monkey-patching below
# torch.Tensor.ndim = property(lambda x: len(x.shape))
```
In practice we will want to use multiple schedulers, and the function below helps us do that.
```
#export
def combine_scheds(pcts, scheds):
"""
pcts : list of %ages of each scheduler
scheds: list of all schedulers
"""
assert sum(pcts) == 1
pcts = torch.tensor([0] + listify(pcts))
assert torch.all(pcts >= 0)
pcts = torch.cumsum(input=pcts, dim=0)
def _inner(pos):
"""pos is a value b/w (0,1)"""
idx = (pos >= pcts).nonzero().max()
actual_pos = (pos-pcts[idx]) / (pcts[idx+1]-pcts[idx])
return scheds[idx](pos=actual_pos)
return _inner
### Example of a learning rate scheduler annealing:
### using 30% of training budget to go from 0.3 to 0.6 using cosine scheduler
### using the remaining 70% of the training budget to go from 0.6 to 0.2 using another cosine scheduler
sched = combine_scheds(pcts=[0.3, 0.7], scheds=[sched_cos(start=0.3, end=0.6), sched_cos(start=0.6, end=0.2)])
plt.plot(a, [sched(i) for i in p])
```
We can use it for training quite easily.
```
cbfs = [Recorder,
partial(AvgStatsCallback, metrics=accuracy),
partial(ParamScheduler, pname="lr", sched_func=sched)]
cbfs
bs=512
data = DataBunch(*get_dls(train_ds, valid_ds, bs), c=c)
learner = create_learner(model_func=get_model_func(lr=0.3), loss_func=loss_func, data=data)
run = Runner(cb_funcs=cbfs)
run.fit(epochs=2, learner=learner)
run.recorder.plot_lr()
run.recorder.plot_loss()
```
# Export
```
!python notebook_to_script.py imflash217__02_anneal.ipynb
pct = [0.3, 0.7]
pct = torch.tensor([0] + listify(pct))
pct = torch.cumsum(pct, 0)
pos = 2
(pos >= pct).nonzero().max()
```
# SageMaker/DeepAR demo on electricity dataset
This notebook complements the [DeepAR introduction notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/deepar_synthetic/deepar_synthetic.ipynb).
Here, we will consider a real use case and show how to use DeepAR on SageMaker for predicting energy consumption of 370 customers over time, based on a [dataset](https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014) that was used in the academic papers [[1](https://media.nips.cc/nipsbooks/nipspapers/paper_files/nips29/reviews/526.html)] and [[2](https://arxiv.org/abs/1704.04110)].
In particular, we will see how to:
* Prepare the dataset
* Use the SageMaker Python SDK to train a DeepAR model and deploy it
* Make requests to the deployed model to obtain forecasts interactively
* Illustrate advanced features of DeepAR: missing values, additional time features, non-regular frequencies and category information
For more information see the DeepAR [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html) or [paper](https://arxiv.org/abs/1704.04110).
### Lab time
Running this notebook takes around 35 to 40 minutes on a ml.c4.2xlarge for the training, and inference is done on a ml.m4.xlarge (the usage time will depend on how long you leave your served model running).
```
import timeit
start_time = timeit.default_timer()
%matplotlib inline
import sys
from urllib.request import urlretrieve
import zipfile
from dateutil.parser import parse
import json
from random import shuffle
import random
import datetime
import os
import boto3
import s3fs
import sagemaker
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from ipywidgets import IntSlider, FloatSlider, Checkbox
# set random seeds for reproducibility
np.random.seed(42)
random.seed(42)
sagemaker_session = sagemaker.Session()
```
Before starting, we can override the default values for the following:
- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.
- The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these.
```
# s3_bucket = sagemaker.Session().default_bucket() # replace with an existing bucket if needed
s3_bucket='<put your S3 bucket name>' # customize to your bucket
s3_prefix = 'deepar-electricity-demo-notebook' # prefix used for all data stored within the bucket
role = sagemaker.get_execution_role() # IAM role to use by SageMaker
region = sagemaker_session.boto_region_name
s3_data_path = "s3://{}/{}/data".format(s3_bucket, s3_prefix)
s3_output_path = "s3://{}/{}/output".format(s3_bucket, s3_prefix)
```
Next, we configure the container image to be used for the region that we are running in.
```
image_name = sagemaker.amazon.amazon_estimator.get_image_uri(region, "forecasting-deepar", "latest")
```
### Import electricity dataset and upload it to S3 to make it available for Sagemaker
As a first step, we need to download the original data set from the UCI data set repository.
```
DATA_HOST = "https://archive.ics.uci.edu"
DATA_PATH = "/ml/machine-learning-databases/00321/"
ARCHIVE_NAME = "LD2011_2014.txt.zip"
FILE_NAME = '/tmp/' + ARCHIVE_NAME[:-4] # Modified to use '/tmp' directory, Pil 8thOct2018
def progress_report_hook(count, block_size, total_size):
mb = int(count * block_size // 1e6)
if count % 500 == 0:
sys.stdout.write("\r{} MB downloaded".format(mb))
sys.stdout.flush()
if not os.path.isfile(FILE_NAME):
print("downloading dataset (258MB), can take a few minutes depending on your connection")
urlretrieve(DATA_HOST + DATA_PATH + ARCHIVE_NAME, '/tmp/' + ARCHIVE_NAME, reporthook=progress_report_hook)
print("\nextracting data archive")
zip_ref = zipfile.ZipFile('/tmp/' + ARCHIVE_NAME, 'r')
zip_ref.extractall("/tmp")
zip_ref.close()
else:
print("File found skipping download")
```
Then, we load and parse the dataset and convert it to a collection of Pandas time series, which makes common time series operations such as indexing by time periods or resampling much easier. The data is originally recorded at a 15-minute interval, which we could use directly. Here we want to forecast longer periods (one week), so we resample the data to a granularity of 2 hours.
```
data = pd.read_csv(FILE_NAME, sep=";", index_col=0, parse_dates=True, decimal=',')
num_timeseries = data.shape[1]
data_kw = data.resample('2H').sum() / 8
timeseries = []
for i in range(num_timeseries):
timeseries.append(np.trim_zeros(data_kw.iloc[:,i], trim='f'))
```
Let us plot the resulting time series for the first ten customers for the time period spanning the first two weeks of 2014.
```
fig, axs = plt.subplots(5, 2, figsize=(20, 20), sharex=True)
axx = axs.ravel()
for i in range(0, 10):
timeseries[i].loc["2014-01-01":"2014-01-14"].plot(ax=axx[i])
axx[i].set_xlabel("date")
axx[i].set_ylabel("kW consumption")
axx[i].grid(which='minor', axis='x')
```
### Train and Test splits
Oftentimes one is interested in evaluating the model or tuning its hyperparameters by looking at error metrics on a hold-out test set. Here we split the available data into train and test sets for evaluating the trained model. For standard machine learning tasks such as classification and regression, one typically obtains this split by randomly separating examples into train and test sets. However, in forecasting it is important to do this train/test split based on time rather than by time series.
In this example, we will reserve the last section of each of the time series for evaluation purposes and use only the first part as training data.
```
# we use 2 hour frequency for the time series
freq = '2H'
# we predict for 7 days
prediction_length = 7 * 12
# we also use 7 days as context length, this is the number of state updates accomplished before making predictions
context_length = 7 * 12
```
We specify here the portion of the data that is used for training: the model sees data from 2014-01-01 to 2014-09-01 for training.
```
start_dataset = pd.Timestamp("2014-01-01 00:00:00", freq=freq)
end_training = pd.Timestamp("2014-09-01 00:00:00", freq=freq)
```
The DeepAR JSON input format represents each time series as a JSON object. In the simplest case each time series just consists of a start time stamp (``start``) and a list of values (``target``). For more complex cases, DeepAR also supports the fields ``dynamic_feat`` for time-series features and ``cat`` for categorical features, which we will use later.
```
training_data = [
{
"start": str(start_dataset),
"target": ts[start_dataset:end_training - 1].tolist() # We use -1, because pandas indexing includes the upper bound
}
for ts in timeseries
]
print(len(training_data))
```
As test data, we will consider time series extending beyond the training range: these will be used for computing test scores, by using the trained model to forecast their trailing 7 days, and comparing predictions with actual values.
To evaluate our model performance on more than one week, we generate test data that extends to 1, 2, 3, 4 weeks beyond the training range. This way we perform *rolling evaluation* of our model.
```
num_test_windows = 4
test_data = [
{
"start": str(start_dataset),
"target": ts[start_dataset:end_training + k * prediction_length].tolist()
}
for k in range(1, num_test_windows + 1)
for ts in timeseries
]
print(len(test_data))
```
Let's now write the dictionary to the `jsonlines` file format that DeepAR understands (it also supports gzipped jsonlines and parquet).
```
def write_dicts_to_file(path, data):
with open(path, 'wb') as fp:
for d in data:
fp.write(json.dumps(d).encode("utf-8"))
fp.write("\n".encode('utf-8'))
%%time
write_dicts_to_file("/tmp/train.json", training_data)
write_dicts_to_file("/tmp/test.json", test_data)
```
Now that we have the data files locally, let us copy them to S3 where DeepAR can access them. Depending on your connection, this may take a couple of minutes.
```
s3 = boto3.resource('s3')
def copy_to_s3(local_file, s3_path, override=False):
assert s3_path.startswith('s3://')
split = s3_path.split('/')
bucket = split[2]
path = '/'.join(split[3:])
buk = s3.Bucket(bucket)
if len(list(buk.objects.filter(Prefix=path))) > 0:
if not override:
print('File s3://{}/{} already exists.\nSet override to upload anyway.\n'.format(s3_bucket, s3_path))
return
else:
print('Overwriting existing file')
with open(local_file, 'rb') as data:
print('Uploading file to {}'.format(s3_path))
buk.put_object(Key=path, Body=data)
%%time
copy_to_s3("/tmp/train.json", s3_data_path + "/train/train.json")
copy_to_s3("/tmp/test.json", s3_data_path + "/test/test.json")
```
Let's have a look to what we just wrote to S3.
```
s3filesystem = s3fs.S3FileSystem()
with s3filesystem.open(s3_data_path + "/train/train.json", 'rb') as fp:
print(fp.readline().decode("utf-8")[:100] + "...")
```
We are all set with our dataset processing, we can now call DeepAR to train a model and generate predictions.
### Train a model
Here we define the estimator that will launch the training job.
```
estimator = sagemaker.estimator.Estimator(
sagemaker_session=sagemaker_session,
image_name=image_name,
role=role,
train_instance_count=1,
train_instance_type='ml.c4.2xlarge',
base_job_name='deepar-electricity-demo',
output_path=s3_output_path
)
```
Next we need to set the hyperparameters for the training job, for example the frequency of the time series, the number of past data points the model will look at, and the number of predicted data points. The other hyperparameters concern the model to train (number of layers, number of cells per layer, likelihood function) and the training options (number of epochs, batch size, learning rate...). We use default parameters for every optional parameter in this case (you can always use [Sagemaker Automated Model Tuning](https://aws.amazon.com/blogs/aws/sagemaker-automatic-model-tuning/) to tune them).
```
hyperparameters = {
"time_freq": freq,
"epochs": "400",
"early_stopping_patience": "40",
"mini_batch_size": "64",
"learning_rate": "5E-4",
"context_length": str(context_length),
"prediction_length": str(prediction_length)
}
estimator.set_hyperparameters(**hyperparameters)
```
We are ready to launch the training job. SageMaker will start an EC2 instance, download the data from S3, start training the model and save the trained model.
If you provide the `test` data channel as we do in this example, DeepAR will also calculate accuracy metrics for the trained model on this test set. This is done by predicting the last `prediction_length` points of each time series in the test set and comparing them to the actual values.
**Note:** the next cell may take a few minutes to complete, depending on data size, model complexity, training options.
```
%%time
data_channels = {
"train": "{}/train/".format(s3_data_path),
"test": "{}/test/".format(s3_data_path)
}
estimator.fit(inputs=data_channels, wait=True)
```
Since you pass a test set in this example, accuracy metrics for the forecast are computed and logged (see bottom of the log).
You can find the definition of these metrics from [our documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html). You can use these to optimize the parameters and tune your model or use SageMaker's [Automated Model Tuning service](https://aws.amazon.com/blogs/aws/sagemaker-automatic-model-tuning/) to tune the model for you.
### Create endpoint and predictor
Now that we have a trained model, we can use it to perform predictions by deploying it to an endpoint.
**Note: Remember to delete the endpoint after running this experiment. A cell at the very bottom of this notebook will do that: make sure you run it at the end.**
To query the endpoint and perform predictions, we can define the following utility class: this allows making requests using `pandas.Series` objects rather than raw JSON strings.
```
class DeepARPredictor(sagemaker.predictor.RealTimePredictor):
def __init__(self, *args, **kwargs):
super().__init__(*args, content_type=sagemaker.content_types.CONTENT_TYPE_JSON, **kwargs)
def predict(self, ts, cat=None, dynamic_feat=None,
num_samples=100, return_samples=False, quantiles=["0.1", "0.5", "0.9"]):
"""Requests the prediction of for the time series listed in `ts`, each with the (optional)
corresponding category listed in `cat`.
ts -- `pandas.Series` object, the time series to predict
cat -- integer, the group associated to the time series (default: None)
num_samples -- integer, number of samples to compute at prediction time (default: 100)
return_samples -- boolean indicating whether to include samples in the response (default: False)
quantiles -- list of strings specifying the quantiles to compute (default: ["0.1", "0.5", "0.9"])
Return value: list of `pandas.DataFrame` objects, each containing the predictions
"""
prediction_time = ts.index[-1] + 1
quantiles = [str(q) for q in quantiles]
req = self.__encode_request(ts, cat, dynamic_feat, num_samples, return_samples, quantiles)
res = super(DeepARPredictor, self).predict(req)
return self.__decode_response(res, ts.index.freq, prediction_time, return_samples)
def __encode_request(self, ts, cat, dynamic_feat, num_samples, return_samples, quantiles):
instance = series_to_dict(ts, cat if cat is not None else None, dynamic_feat if dynamic_feat else None)
configuration = {
"num_samples": num_samples,
"output_types": ["quantiles", "samples"] if return_samples else ["quantiles"],
"quantiles": quantiles
}
http_request_data = {
"instances": [instance],
"configuration": configuration
}
return json.dumps(http_request_data).encode('utf-8')
def __decode_response(self, response, freq, prediction_time, return_samples):
# we only sent one time series so we only receive one in return
# however, if possible one will pass multiple time series as predictions will then be faster
predictions = json.loads(response.decode('utf-8'))['predictions'][0]
prediction_length = len(next(iter(predictions['quantiles'].values())))
prediction_index = pd.DatetimeIndex(start=prediction_time, freq=freq, periods=prediction_length)
if return_samples:
dict_of_samples = {'sample_' + str(i): s for i, s in enumerate(predictions['samples'])}
else:
dict_of_samples = {}
return pd.DataFrame(data={**predictions['quantiles'], **dict_of_samples}, index=prediction_index)
def set_frequency(self, freq):
self.freq = freq
def encode_target(ts):
return [x if np.isfinite(x) else "NaN" for x in ts]
def series_to_dict(ts, cat=None, dynamic_feat=None):
"""Given a pandas.Series object, returns a dictionary encoding the time series.
    ts -- a pandas.Series object with the target time series
cat -- an integer indicating the time series category
Return value: a dictionary
"""
obj = {"start": str(ts.index[0]), "target": encode_target(ts)}
if cat is not None:
obj["cat"] = cat
if dynamic_feat is not None:
obj["dynamic_feat"] = dynamic_feat
return obj
```
Now we can deploy the model and create and endpoint that can be queried using our custom DeepARPredictor class.
```
predictor = estimator.deploy(
initial_instance_count=1,
instance_type='ml.m4.xlarge',
predictor_cls=DeepARPredictor)
```
### Make predictions and plot results
Now we can use the `predictor` object to generate predictions.
```
predictor.predict(ts=timeseries[120], quantiles=[0.10, 0.5, 0.90]).head()
```
Below we define a plotting function that queries the model and displays the forecast.
```
def plot(
predictor,
target_ts,
cat=None,
dynamic_feat=None,
forecast_date=end_training,
show_samples=False,
plot_history=7 * 12,
confidence=80
):
print("calling served model to generate predictions starting from {}".format(str(forecast_date)))
assert(confidence > 50 and confidence < 100)
low_quantile = 0.5 - confidence * 0.005
up_quantile = confidence * 0.005 + 0.5
# we first construct the argument to call our model
args = {
"ts": target_ts[:forecast_date],
"return_samples": show_samples,
"quantiles": [low_quantile, 0.5, up_quantile],
"num_samples": 100
}
if dynamic_feat is not None:
args["dynamic_feat"] = dynamic_feat
fig = plt.figure(figsize=(20, 6))
ax = plt.subplot(2, 1, 1)
else:
fig = plt.figure(figsize=(20, 3))
ax = plt.subplot(1,1,1)
if cat is not None:
args["cat"] = cat
ax.text(0.9, 0.9, 'cat = {}'.format(cat), transform=ax.transAxes)
# call the end point to get the prediction
prediction = predictor.predict(**args)
# plot the samples
if show_samples:
for key in prediction.keys():
if "sample" in key:
prediction[key].plot(color='lightskyblue', alpha=0.2, label='_nolegend_')
# plot the target
target_section = target_ts[forecast_date-plot_history:forecast_date+prediction_length]
target_section.plot(color="black", label='target')
# plot the confidence interval and the median predicted
ax.fill_between(
prediction[str(low_quantile)].index,
prediction[str(low_quantile)].values,
prediction[str(up_quantile)].values,
color="b", alpha=0.3, label='{}% confidence interval'.format(confidence)
)
prediction["0.5"].plot(color="b", label='P50')
ax.legend(loc=2)
# fix the scale as the samples may change it
ax.set_ylim(target_section.min() * 0.5, target_section.max() * 1.5)
if dynamic_feat is not None:
for i, f in enumerate(dynamic_feat, start=1):
ax = plt.subplot(len(dynamic_feat) * 2, 1, len(dynamic_feat) + i, sharex=ax)
feat_ts = pd.Series(
index=pd.DatetimeIndex(start=target_ts.index[0], freq=target_ts.index.freq, periods=len(f)),
data=f
)
feat_ts[forecast_date-plot_history:forecast_date+prediction_length].plot(ax=ax, color='g')
```
We can interact with the function defined above to look at the forecast of any customer at any point in (future) time.
For each request, the predictions are obtained by calling our served model on the fly.
Here we forecast the consumption of an office after the weekend (note the lower weekend consumption).
You can select any time series and any forecast date; just click on `Run Interact` to generate the predictions from our served endpoint and see the plot.
```
style = {'description_width': 'initial'}
@interact_manual(
customer_id=IntSlider(min=0, max=369, value=91, style=style),
forecast_day=IntSlider(min=0, max=100, value=51, style=style),
confidence=IntSlider(min=60, max=95, value=80, step=5, style=style),
history_weeks_plot=IntSlider(min=1, max=20, value=1, style=style),
show_samples=Checkbox(value=False),
continuous_update=False
)
def plot_interact(customer_id, forecast_day, confidence, history_weeks_plot, show_samples):
plot(
predictor,
target_ts=timeseries[customer_id],
forecast_date=end_training + datetime.timedelta(days=forecast_day),
show_samples=show_samples,
plot_history=history_weeks_plot * 12 * 7,
confidence=confidence
)
```
# Additional features
We have seen how to prepare a dataset and run DeepAR for a simple example.
In addition, DeepAR supports the following features:
* Missing values: DeepAR can handle missing values in the time series during training as well as for inference.
* Additional time features: DeepAR provides a set of default time-series features such as hour of day. However, you can provide additional feature time series via the `dynamic_feat` field.
* Generalized frequencies: any integer multiple of the previously supported base frequencies (minutes `min`, hours `H`, days `D`, weeks `W`, months `M`) is allowed; e.g., `15min`. We already demonstrated this above by using the `2H` frequency.
* Categories: if your time series belong to different groups (e.g. types of product, regions, etc.), this information can be encoded as one or more categorical features using the `cat` field (see the sketch after this list).
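For illustration, here is a minimal, hypothetical sketch of how a category could be attached to a training record, reusing the `series_to_dict` helper defined earlier. The category value `0` is made up, and the rest of this notebook does not otherwise use the `cat` field.
```
# Hypothetical example: attach an arbitrary category to one customer's series
# using the series_to_dict helper defined earlier in this notebook.
example_with_cat = series_to_dict(timeseries[0][start_dataset:end_training], cat=0)
print(example_with_cat["start"], example_with_cat["cat"])
```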
We will now demonstrate the missing values and time features support. For this part we will reuse the electricity dataset but will do some artificial changes to demonstrate the new features:
* We will randomly mask parts of the time series to demonstrate the missing values support.
* We will include a "special-day" feature that occurs on different days for different time series; during this day we introduce a strong up-lift.
* We will train the model on this dataset, giving "special-day" as a custom time series feature.
## Prepare dataset
As discussed above, we will create a "special-day" feature and create an up-lift for the time series during this day. This simulates real-world applications where you may have things like a promotion of a product for a certain time, or a special event that influences your time series.
```
def create_special_day_feature(ts, fraction=0.05):
# First select random day indices (plus the forecast day)
num_days = (ts.index[-1] - ts.index[0]).days
    rand_indices = list(np.random.randint(0, num_days, int(num_days * fraction))) + [num_days]
feature_value = np.zeros_like(ts)
for i in rand_indices:
feature_value[i * 12: (i + 1) * 12] = 1.0
feature = pd.Series(index=ts.index, data=feature_value)
return feature
def drop_at_random(ts, drop_probability=0.1):
assert(0 <= drop_probability < 1)
random_mask = np.random.random(len(ts)) < drop_probability
return ts.mask(random_mask)
special_day_features = [create_special_day_feature(ts) for ts in timeseries]
```
We now create the up-lifted time series and randomly remove time points.
The figures below show some example time series and the `special_day` feature value in green.
```
timeseries_uplift = [ts * (1.0 + feat) for ts, feat in zip(timeseries, special_day_features)]
time_series_processed = [drop_at_random(ts) for ts in timeseries_uplift]
fig, axs = plt.subplots(5, 2, figsize=(20, 20), sharex=True)
axx = axs.ravel()
for i in range(0, 10):
ax = axx[i]
ts = time_series_processed[i][:400]
ts.plot(ax=ax)
ax.set_ylim(-0.1 * ts.max(), ts.max())
ax2 = ax.twinx()
special_day_features[i][:400].plot(ax=ax2, color='g')
ax2.set_ylim(-0.2, 7)
%%time
training_data_new_features = [
{
"start": str(start_dataset),
"target": encode_target(ts[start_dataset:end_training]),
"dynamic_feat": [special_day_features[i][start_dataset:end_training].tolist()]
}
for i, ts in enumerate(time_series_processed)
]
print(len(training_data_new_features))
# as in our previous example, we do a rolling evaluation over the next 7 days
num_test_windows = 7
test_data_new_features = [
{
"start": str(start_dataset),
"target": encode_target(ts[start_dataset:end_training + 2*k*prediction_length]),
"dynamic_feat": [special_day_features[i][start_dataset:end_training + 2*k*prediction_length].tolist()]
}
for k in range(1, num_test_windows + 1)
for i, ts in enumerate(timeseries_uplift)
]
def check_dataset_consistency(train_dataset, test_dataset=None):
d = train_dataset[0]
has_dynamic_feat = 'dynamic_feat' in d
if has_dynamic_feat:
num_dynamic_feat = len(d['dynamic_feat'])
has_cat = 'cat' in d
if has_cat:
num_cat = len(d['cat'])
def check_ds(ds):
for i, d in enumerate(ds):
if has_dynamic_feat:
assert 'dynamic_feat' in d
assert num_dynamic_feat == len(d['dynamic_feat'])
for f in d['dynamic_feat']:
assert len(d['target']) == len(f)
if has_cat:
assert 'cat' in d
assert len(d['cat']) == num_cat
check_ds(train_dataset)
if test_dataset is not None:
check_ds(test_dataset)
check_dataset_consistency(training_data_new_features, test_data_new_features)
%%time
write_dicts_to_file("/tmp/train_new_features.json", training_data_new_features)
write_dicts_to_file("/tmp/test_new_features.json", test_data_new_features)
%%time
s3_data_path_new_features = "s3://{}/{}-new-features/data".format(s3_bucket, s3_prefix)
s3_output_path_new_features = "s3://{}/{}-new-features/output".format(s3_bucket, s3_prefix)
print('Uploading to S3 this may take a few minutes depending on your connection.')
copy_to_s3("/tmp/train_new_features.json", s3_data_path_new_features + "/train/train_new_features.json", override=True)
copy_to_s3("/tmp/test_new_features.json", s3_data_path_new_features + "/test/test_new_features.json", override=True)
%%time
estimator_new_features = sagemaker.estimator.Estimator(
sagemaker_session=sagemaker_session,
image_name=image_name,
role=role,
train_instance_count=1,
train_instance_type='ml.c4.2xlarge',
base_job_name='deepar-electricity-demo-new-features',
output_path=s3_output_path_new_features
)
hyperparameters = {
"time_freq": freq,
"context_length": str(context_length),
"prediction_length": str(prediction_length),
"epochs": "400",
"learning_rate": "5E-4",
"mini_batch_size": "64",
"early_stopping_patience": "40",
"num_dynamic_feat": "auto", # this will use the `dynamic_feat` field if it's present in the data
}
estimator_new_features.set_hyperparameters(**hyperparameters)
estimator_new_features.fit(
inputs={
"train": "{}/train/".format(s3_data_path_new_features),
"test": "{}/test/".format(s3_data_path_new_features)
},
wait=True
)
```
As before, we spawn an endpoint to visualize our forecasts on examples we send on the fly.
```
%%time
predictor_new_features = estimator_new_features.deploy(
initial_instance_count=1,
instance_type='ml.m4.xlarge',
predictor_cls=DeepARPredictor)
customer_id = 120
predictor_new_features.predict(
ts=time_series_processed[customer_id][:-prediction_length],
dynamic_feat=[special_day_features[customer_id].tolist()],
quantiles=[0.1, 0.5, 0.9]
).head()
```
As before, we can query the endpoint to see predictions for arbitrary time series and time points.
```
@interact_manual(
customer_id=IntSlider(min=0, max=369, value=13, style=style),
forecast_day=IntSlider(min=0, max=100, value=21, style=style),
confidence=IntSlider(min=60, max=95, value=80, step=5, style=style),
missing_ratio=FloatSlider(min=0.0, max=0.95, value=0.2, step=0.05, style=style),
show_samples=Checkbox(value=False),
continuous_update=False
)
def plot_interact(customer_id, forecast_day, confidence, missing_ratio, show_samples):
forecast_date = end_training + datetime.timedelta(days=forecast_day)
target = time_series_processed[customer_id][start_dataset:forecast_date + prediction_length]
target = drop_at_random(target, missing_ratio)
dynamic_feat = [special_day_features[customer_id][start_dataset:forecast_date + prediction_length].tolist()]
plot(
predictor_new_features,
target_ts=target,
dynamic_feat=dynamic_feat,
forecast_date=forecast_date,
show_samples=show_samples,
plot_history=7*12,
confidence=confidence
)
```
### Delete endpoints
```
predictor.delete_endpoint()
predictor_new_features.delete_endpoint()
```
## 1. Bitcoin and Cryptocurrencies: Full dataset, filtering, and reproducibility
<p>Since the <a href="https://newfronttest.bitcoin.com/bitcoin.pdf">launch of Bitcoin in 2008</a>, hundreds of similar projects based on the blockchain technology have emerged. We call these cryptocurrencies (also coins or cryptos in Internet slang). Some are extremely valuable nowadays, and others may have the potential to become extremely valuable in the future<sup>1</sup>. In fact, on the 6th of December of 2017, Bitcoin had a <a href="https://en.wikipedia.org/wiki/Market_capitalization">market capitalization</a> above $200 billion.</p>
<p><center>
<img src="https://assets.datacamp.com/production/project_82/img/bitcoint_market_cap_2017.png" style="width:500px"> <br>
<em>The astonishing increase of Bitcoin market capitalization in 2017.</em></center></p>
<p><em><sup>1</sup> <strong>WARNING</strong>: The cryptocurrency market is exceptionally volatile<sup>2</sup> and any money you put in might disappear into thin air. Cryptocurrencies mentioned here <strong>might be scams</strong> similar to <a href="https://en.wikipedia.org/wiki/Ponzi_scheme">Ponzi Schemes</a> or have many other issues (overvaluation, technical, etc.). <strong>Please do not mistake this for investment advice</strong>.</em></p>
<p><em><sup>2</sup> <strong>Update on March 2020</strong>: Well, it turned out to be volatile indeed :D</em></p>
<p>That said, let's get to business. We will start with a CSV we conveniently downloaded on the 6th of December of 2017 using the coinmarketcap API (NOTE: The public API went private in 2020 and is no longer available) named <code>datasets/coinmarketcap_06122017.csv</code>. </p>
```
# Importing pandas
import pandas as pd
# Importing matplotlib and setting aesthetics for plotting later.
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
plt.style.use('fivethirtyeight')
# Reading datasets/coinmarketcap_06122017.csv into pandas
dec6 = pd.read_csv('datasets/coinmarketcap_06122017.csv')
# Selecting the 'id' and the 'market_cap_usd' columns
market_cap_raw = dec6[['id','market_cap_usd']]
# Counting the number of values
print(market_cap_raw.count())
```
## 2. Discard the cryptocurrencies without a market capitalization
<p>Why do the <code>count()</code> for <code>id</code> and <code>market_cap_usd</code> differ above? It is because some cryptocurrencies listed in coinmarketcap.com have no known market capitalization, this is represented by <code>NaN</code> in the data, and <code>NaN</code>s are not counted by <code>count()</code>. These cryptocurrencies are of little interest to us in this analysis, so they are safe to remove.</p>
```
# Filtering out rows without a market capitalization
cap = market_cap_raw.query('market_cap_usd > 0')
# Counting the number of values again
print(cap.count())
```
## 3. How big is Bitcoin compared with the rest of the cryptocurrencies?
<p>At the time of writing, Bitcoin is under serious competition from other projects, but it is still dominant in market capitalization. Let's plot the market capitalization for the top 10 coins as a barplot to better visualize this.</p>
```
#Declaring these now for later use in the plots
TOP_CAP_TITLE = 'Top 10 market capitalization'
TOP_CAP_YLABEL = '% of total cap'
# Selecting the first 10 rows and setting the index
cap10 = cap.head(10).set_index('id')
# Calculating market_cap_perc
cap10 = cap10.assign(market_cap_perc =
lambda x: (x.market_cap_usd / cap.market_cap_usd.sum()) * 100)
# Plotting the barplot with the title defined above
ax = cap10.market_cap_perc.plot.bar(title=TOP_CAP_TITLE)
# Annotating the y axis with the label defined above
ax.set_ylabel(TOP_CAP_YLABEL)
```
## 4. Making the plot easier to read and more informative
<p>While the plot above is informative enough, it can be improved. Bitcoin is too big, and the other coins are hard to distinguish because of this. Instead of the percentage, let's use a log<sup>10</sup> scale of the "raw" capitalization. Plus, let's use color to group similar coins and make the plot more informative<sup>1</sup>. </p>
<p>For the colors rationale: bitcoin-cash and bitcoin-gold are forks of the bitcoin <a href="https://en.wikipedia.org/wiki/Blockchain">blockchain</a><sup>2</sup>. Ethereum and Cardano both offer Turing Complete <a href="https://en.wikipedia.org/wiki/Smart_contract">smart contracts</a>. Iota and Ripple are not minable. Dash, Litecoin, and Monero get their own color.</p>
<p><sup>1</sup> <em>This coloring is a simplification. There are more differences and similarities that are not being represented here.</em></p>
<p><sup>2</sup> <em>The bitcoin forks are actually <strong>very</strong> different, but it is out of scope to talk about them here. Please see the warning above and do your own research.</em></p>
```
# Colors for the bar plot
COLORS = ['orange', 'green', 'orange', 'cyan', 'cyan', 'blue', 'silver', 'orange', 'red', 'green']
# Plotting market_cap_usd as before but adding the colors and scaling the y-axis
ax = cap10.market_cap_usd.plot.bar(title=TOP_CAP_TITLE, color=COLORS, logy=True)
# Annotating the y axis with 'USD'
ax.set_ylabel('USD')
# Final touch! Removing the xlabel as it is not very informative
ax.set_xlabel('')
```
## 5. What is going on?! Volatility in cryptocurrencies
<p>The cryptocurrencies market has been spectacularly volatile since the first exchange opened. This notebook didn't start with a big, bold warning for nothing. Let's explore this volatility a bit more! We will begin by selecting and plotting the 24 hours and 7 days percentage change, which we already have available.</p>
```
# Selecting the id, percent_change_24h and percent_change_7d columns
volatility = dec6[['id','percent_change_24h','percent_change_7d']]
# Setting the index to 'id' and dropping all NaN rows
volatility = volatility.set_index('id').dropna()
# Sorting the DataFrame by percent_change_24h in ascending order
volatility = volatility.sort_values(by=['percent_change_24h'])
# Checking the first few rows
print(volatility.head())
```
## 6. Well, we can already see that things are *a bit* crazy
<p>It seems you can lose a lot of money quickly on cryptocurrencies. Let's plot the top 10 biggest gainers and top 10 losers in market capitalization.</p>
```
#Defining a function with 2 parameters, the series to plot and the title
def top10_subplot(volatility_series, title):
# Making the subplot and the figure for two side by side plots
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 6))
# Plotting with pandas the barchart for the top 10 losers
ax = volatility_series[:10].plot.bar(ax=axes[0], color='darkred')
    # Setting the figure's main title to the text passed as parameter
    fig.suptitle(title)
    # Setting the ylabel to '% change'
    ax.set_ylabel('% change')
# Same as above, but for the top 10 winners
ax = volatility_series[-10:].plot.bar(ax=axes[1], color='darkblue')
# Returning this for good practice, might use later
return fig, ax
DTITLE = "24 hours top losers and winners"
# Calling the function above with the 24 hours period series and title DTITLE
fig, ax = top10_subplot(volatility.percent_change_24h, DTITLE)
```
## 7. Ok, those are... interesting. Let's check the weekly Series too.
<p>800% daily increase?! Why are we doing this tutorial and not buying random coins?<sup>1</sup></p>
<p>After calming down, let's reuse the function defined above to see what is going on weekly instead of daily.</p>
<p><em><sup>1</sup> Please take a moment to understand the implications of the red plots on how much value some cryptocurrencies lose in such short periods of time</em></p>
```
# Sorting in ascending order
volatility7d = volatility.percent_change_7d.sort_values(ascending=True)
WTITLE = "Weekly top losers and winners"
# Calling the top10_subplot function
fig, ax = top10_subplot(volatility7d, WTITLE)
```
## 8. How small is small?
<p>The names of the cryptocurrencies above are quite unknown, and there is a considerable fluctuation between the 1 and 7 days percentage changes. As with stocks, and many other financial products, the smaller the capitalization, the bigger the risk and reward. Smaller cryptocurrencies are less stable projects in general, and therefore even riskier investments than the bigger ones<sup>1</sup>. Let's classify our dataset based on Investopedia's capitalization <a href="https://www.investopedia.com/video/play/large-cap/">definitions</a> for company stocks. </p>
<p><sup>1</sup> <em>Cryptocurrencies are a new asset class, so they are not directly comparable to stocks. Furthermore, there are no limits set in stone for what a "small" or "large" stock is. Finally, some investors argue that bitcoin is similar to gold, this would make them more comparable to a <a href="https://www.investopedia.com/terms/c/commodity.asp">commodity</a> instead.</em></p>
```
# Selecting everything bigger than 10 billion
largecaps = cap.query('market_cap_usd>10000000000')
# Printing out largecaps
print(largecaps)
```
## 9. Most coins are tiny
<p>Note that many coins are not comparable to large companies in market cap, so let's deviate from the original Investopedia definition by merging categories.</p>
<p><em>This is all for now. Thanks for completing this project!</em></p>
```
# Making a nice function for counting different marketcaps from the
# "cap" DataFrame. Returns an int.
# INSTRUCTORS NOTE: Since you made it to the end, consider it a gift :D
def capcount(query_string):
return cap.query(query_string).count().id
# Labels for the plot
LABELS = ["biggish", "micro", "nano"]
# Using capcount count the biggish cryptos
biggish = capcount('(market_cap_usd>10000000000) or (market_cap_usd>2000000000 and market_cap_usd<10000000000) or (market_cap_usd>300000000 and market_cap_usd<2000000000)')
# Same as above for micro ...
micro = capcount('market_cap_usd>50000000 and market_cap_usd<300000000')
# ... and for nano
nano = capcount('market_cap_usd<50000000')
# Making a list with the 3 counts
values = [biggish,micro,nano]
# Plotting them with matplotlib
plt.bar(x=LABELS, height=values)
```
# Scalar uniform quantisation of random variables
This tutorial considers scalar quantisation implemented using a uniform quantiser and applied to random variables with different Probability Mass Functions (PMFs). In particular, we will consider uniform- and Gaussian-distributed random variables so as to comment on the optimality of such a simple quantiser.
## Preliminary remarks
Quantisation is an irreversible operation which reduces the precision used to represent our data to be encoded. Such a precision reduction translates into fewer bits used to transmit the information. Accordingly, the whole dynamic range associated with the input data ($X$) is divided into intervals denoted as *quantisation bins*, each having a given width. Each quantisation bin $b_i$ is also associated with its reproduction level $l_i$, which corresponds to the value used to represent all original data values belonging to $b_i$. From this description, it is easy to realise why quantisation is an irreversible process: it is indeed a *many-to-one* mapping, hence after a value $x$ is quantised it cannot be recovered. Usually a quantiser is associated with its number of bits $qb$, which determines the number of reproduction levels, given as $2^{qb}$. If scalar (1D) quantities are presented to the quantiser as input, then we talk about *scalar quantisation* (i.e. the subject of this tutorial); if groups of samples are considered together as input to the quantiser, we talk about *vector quantisation*.
During encoding the quantiser will output the index $i$ of the quantisation bin $b_i$ to which each input sample belongs. The decoder will receive these indexes and write to the output the corresponding reproduction level $l_i$. The mapping $\{b_i \leftrightarrow l_i\}$ must be known at the decoder side. Working out the optimal partitioning of the input data range (i.e. the width of each $b_i$) and the associated set of $\{l_i\}$ can be a computationally intensive process, although it can provide significant gains in the overall rate distortion performance of our coding system.
A widely used and well-known quantiser is the so-called *uniform quantiser*, characterised by having each $b_i$ with the same width and the reproduction level $l_i$ placed at the mid-value of the quantisation bin, that is:
$$
\large
l_i = \frac{b_i + b_{i+1}}{2}.
$$
The width of each quantisation bin is usually denoted as the quantisation step $\Delta$, given as:
$$
\large
\Delta = \frac{\max(X) - \min(X)}{2^{qb}}.
$$
Using the Mean Square Error (MSE) as distortion measure for the quantisation error ($e$) and considering the input data ($X$) to have a uniform PMF, the variance of $e$, $\sigma^2_e$ is given by:
$$
\large
\sigma^2_e = \frac{\Delta^2}{12}.
$$
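As a quick illustrative sketch (not part of the original material), the snippet below quantises uniformly distributed samples with a mid-bin uniform quantiser and compares the measured MSE against $\frac{\Delta^2}{12}$; all names and values are arbitrary.
```
import numpy as np

rng = np.random.default_rng(0)
x_min, x_max, qb = 0.0, 8.0, 4                  # dynamic range and quantiser bits
delta = (x_max - x_min) / 2**qb                 # quantisation step
x = rng.uniform(x_min, x_max, 100_000)          # uniformly distributed input
idx = np.clip(np.floor((x - x_min) / delta), 0, 2**qb - 1)  # encoder: bin indexes
x_hat = x_min + (idx + 0.5) * delta             # decoder: mid-bin reproduction levels
print(np.mean((x - x_hat)**2), delta**2 / 12)   # empirical MSE vs analytical value
```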
If $X$ ranges in $\left[-\frac{M\Delta}{2},\frac{M\Delta}{2}\right]$ and if we consider the Signal-to-Noise-Ratio (SNR) as alternative measure to express the reproduction quality, then we have the so-called ***six dB rule***:
$$
SNR = 6 \cdot qb\quad[dB],
$$
That is, each bit added to increase the number of reproduction levels will provide a 6 dB improvement to our reconstructed quality. More details about rate distortion theory and quantisation are provided in these two good references:
* Allen Gersho and Robert M. Gray. Vector Quantization and Signal Compression. Kluwer Academic Press, 732 pages, 1992.
* David S. Taubman and Micheal W. Marcellin, "JPEG2000: Image compression fundamentals, standards and practice", Kluwer Academic Press, 773 pages, 2002.
## Rate distortion performance of a uniform quantiser
We demonstrate now the rate-distortion performance of a uniform quantiser and see how this resembles the six dB rule. We also note from the remarks above that a uniform quantiser is a sort of low-complexity solution to quantisation. In fact, encoding results in a simple integer-precision division by $\Delta$ rather than a comparison of each input sample with the different quantisation bins' extrema. Moreover, at the decoder side, the only additional information one would require is $\Delta$. Accordingly, it is interesting to verify whether a uniform quantiser is able to attain the six dB rule also for other PMFs, most notably knowing that when a transformation is used in our coding scheme, the distribution of coefficients tends to be more Laplacian or Gaussian.
The following Python code cell will generate two inputs: one with a uniform PMF and another with a Gaussian PMF (mean 128 and variance $\sigma^2$ equal to four). Uniform quantisation is applied to both inputs and the SNR is computed on the reconstructed values. A plot of the rate-distortion performance is shown along with the straight line associated with the six dB rule.
```
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
# Total samples
N = 1000
# Quantiser's bits
qb = np.arange(0, 8, 1)
# Generate a random variable uniformly distributed in [0, 255]
X = np.round(255*rnd.rand(N, 1)).astype(np.int32)
var_X = np.var(X)
# Generate a random Gaussian variable with mean 128 and variance 4
Xg = 2.0*rnd.randn(N, 1) + 128
var_Xg = np.var(Xg)
B = np.max(Xg) - np.min(Xg)
snr_data = np.zeros(len(qb))
snr_data_g = np.zeros(len(qb))
for i, b in enumerate(qb):
levels = 2**b
Q = 256.0 / float(levels)
Qg = B / float( levels)
Y = Q * np.round(X / Q)
Yg = Qg*np.round(Xg / Qg)
mse = np.mean(np.square(X - Y))
mse_g = np.mean(np.square(Xg - Yg))
snr_data[i] = 10*np.log10(var_X / mse)
snr_data_g[i] = 10*np.log10(var_Xg / mse_g)
six_dB_rule = 6.0 * qb
# Plot the results and verify the 6dB rule
plt.figure(figsize=(8,8))
plt.plot(qb, snr_data, 'b-o', label='Uniform quantiser with uniform variable')
plt.xlabel('Quantiser bits', fontsize=16)
plt.ylabel('Signal-to-Noise-Ratio SNR [dB]', fontsize=16)
plt.grid()
plt.plot(qb, snr_data_g, 'k-+', label='Uniform quantiser with Gaussian variable')
plt.plot(qb, six_dB_rule, 'r-*', label='Six dB rule')
plt.legend();
```
As expected, the uniform quantiser applied over a uniformly distributed input provides a rate-distortion performance which follows the six dB rule. Conversely, when the input is Gaussian, then the performance is offset by approximately 4 dB. Such a suboptimal performance is due to the fact that the reproduction levels are placed at the mid-point of each interval, which for a uniform PMF is absolutely fine since each value in a given bin $b_i$ has equal chance to appear. This is not the case for a Gaussian PMF where in each bin some values have higher chance to appear than others. Accordingly, it would make sense to place the reproduction levels around those values which are more likely to appear. The procedure which does this automatically is the subject of the next section of our tutorial.
## Towards an optimal quantiser: The Lloyd-Max algorithm
As mentioned above, we want to find a procedure which adjusts the reproduction levels to fit the underlying PMF of the data. In particular, by using again the MSE as distortion measure, one can show that the reproduction level which minimises the MSE in each quantisation bin is given by:
$$
\large
l_i = E[X|X\in b_i]= \frac{\sum_{x_i\in b_i}x_i\cdot P_X(x_i)}{\sum_{x_i\in b_i}P_X(x_i)},
$$
where $P_X$ denotes the PMF of the input $X$. The condition above is usually denoted as the *centroid condition* and, for a continuous variable, becomes:
$$
\large
l_i = E[X|X\in b_i]= \frac{\int_{x\in b_i}x\cdot f_X(x)\,dx}{\int_{x\in b_i}f_X(x)\,dx},
$$
where now $f_X$ denotes the Probability Density Function (PDF). We note that the centroid condition above requires to know the partitioning of the input data into quantisation bins $b_i$. Given that we do not know beforehand what such a partitioning would look like, we could assume an initial partitioning with equal width (as for a uniform quantiser) and then compute the reproduction levels according to the centroid condition above. Once all reproduction levels have been computed, we can derive a new set of quantisation bins whereby the extrema of each bin are given by the mid point of the reproduction levels computed previously. We then compute a new set of reproduction levels and continue to iterate until convergence is reached. More precisely, the following pseudo code represents the workflow we just described in plain text:
* k = 0
* set $b_i^k$ equal to the bins associated with a uniform quantiser with $qb$ bits
* apply uniform quantisation over the input data, compute the associated MSE and set it to $MSE_{old}$
* set $\gamma$ = $\infty$
* while $\gamma > \epsilon$:
* compute $l_i^k$ using the centroid condition for each bin $b_i^k$
* derive the new quantisation bins as $b_i^{k+1} = \frac{l_{i}^k + l_{i+1}^k}{2}$
* apply the quantiser derived by these new bins and reproduction levels and compute the MSE
* compute $\gamma = \frac{MSE_{old} - MSE}{MSE_{old}}$
* set $k = k + 1$
Here $\epsilon$ denotes a given tolerance threshold. The iterative procedure described above is also known as the [Lloyd-Max algorithm](https://en.wikipedia.org/wiki/Lloyd%27s_algorithm). The next code cell will provide you with an implementation of the Lloyd-Max algorithm, which is conveniently wrapped up as a function so we can then use it to compare its rate-distortion performance with the uniform quantiser analysed before.
```
from typing import Any, List, Tuple
from nptyping import NDArray
def lloydmax(X: NDArray[(Any), np.float64], qb: int) -> Tuple[List[float], List[float], NDArray[(Any), np.float64]]:
levels = 1 << qb
delta = (np.max(X) - np.min(X)) / levels
pmf = np.zeros(X.shape)
# Quantisation bins
bins = np.array([np.min(X) + float(i * delta) for i in range(levels + 1)], np.float64)
# Add a small quantity to the last bin to avoid empty cells
bins[-1] += 0.1
# pmf calculation
for i in range(levels):
index = (bins[i] <= X) & (X < bins[i + 1])
pmf[index] = np.sum(index) / X.size
# Reproduction levels
rl = (bins[:levels] + bins[1:levels + 1]) / 2
# Codebook initialization with a uniform scalar quantiser
XQ = np.zeros(X.shape)
for i in range(rl.size):
index = (bins[i] <= X) & (X < bins[i + 1])
XQ[index] = rl[i]
error = np.square(X - XQ)
MSE_old = np.average(error)
epsilon, variation, step = 1e-5, 1, 1
# Lloyd-Max Iteration over all decision thresholds and reproduction levels
bins_next, rl_next = np.zeros(bins.shape), np.zeros(rl.shape)
while variation > epsilon:
# Loop over all reproduction levels in order to adjust them wrt
# centroid condition
for i in range(levels):
index = (bins[i] <= X) & (X < bins[i + 1])
            if np.all(~index): # empty decision threshold, relative reproduction level will be the same for the next step
rl_next[i] = rl[i]
else: # centroid condition
rl_next[i] = np.sum(np.multiply(X[index], pmf[index])) / np.sum(pmf[index])
# New decision threshold: they are at the mid point of two
# reproduction levels
bins_next[1:levels] = (rl_next[:levels - 1] + rl_next[1:levels]) / 2
bins_next[0], bins_next[-1] = bins[0], bins[-1]
# New MSE calculation
XQ[:] = 0
for i in range(rl_next.size):
index = (bins_next[i] <= X) & (X < bins_next[i + 1])
XQ[index] = rl_next[i]
MSE = np.average(np.square(X - XQ))
variation = (MSE_old - MSE) / (MSE_old)
# Recompute the pmf weights
for i in range(levels):
index = (bins_next[i] <= X) & (X < bins_next[i + 1])
pmf[index] = np.sum(index) / X.size
# Swap the old variables with the new ones
bins, rl, MSE_old = bins_next, rl_next, MSE
step += 1
return bins, rl, XQ
```
The code above contains some comments to help the reader understand the flow. We are now ready to try this non-uniform quantiser and measure its performance. The following Python code cell will run the Lloyd-Max quantiser for each of the tested quantiser bit values and compute its associated SNR.
```
snr_data_lm = np.zeros((len(qb)))
for i, b in enumerate(qb):
_, _, xq_lm = lloydmax(Xg, b)
mse = np.average(np.square(Xg - xq_lm))
snr_data_lm[i] = 10 * np.log10(var_Xg / mse)
# Plot the results, including the 6dB rule
plt.figure(figsize=(8,8))
plt.plot(qb, snr_data, 'b-o', label='Uniform quantiser applied to uniform PMF')
plt.xlabel('Quantiser bits', fontsize=16)
plt.ylabel('Signal-to-Noise-Ratio SNR [dB]', fontsize=16)
plt.grid()
plt.plot(qb, snr_data_g, 'k-+', label='Uniform quantiser applied to Gaussian PMF')
plt.plot(qb, snr_data_lm, 'g-x', label='Lloyd-Max quantiser applied to Gaussian PMF')
plt.plot(qb, six_dB_rule, 'r-*', label='Six dB rule')
plt.legend();
```
We can observe from the graph above that the Lloyd-Max algorithm starts by providing a better SNR performance at low bitrates and then tends to settle at the same performance as the uniform quantiser applied to a Gaussian variable. This result might be surprising at first sight but it is actually not. In fact, as the number of quantiser bits gets higher, the quantisation step of the Lloyd-Max quantiser gets smaller and the PMF enclosed in each quantisation bin resembles a uniform one. In that case, the best the Lloyd-Max algorithm can do is to place all reproduction levels at approximately the mid-point of the quantisation bin, which is exactly what a uniform quantiser would do. Finally, we also note that the Lloyd-Max algorithm would require sending the reproduction levels and quantisation bins, thus some additional rate needs to be added to the bits used by the quantiser.
## Concluding remarks
In this short tutorial we have investigated the rate-distortion performance of two types of scalar quantiser when applied to random variables with a given probability mass function. We showed how a uniform quantiser follows the six dB rule when it is applied to a uniformly distributed random variable but is sub-optimal in the case of a Gaussian PMF. We then introduced the Lloyd-Max algorithm, which provides a better rate-distortion performance, most notably when the number of bits allocated to the quantiser is small. The price to pay for this improved rate-distortion tradeoff is the additional complexity associated with the iterative Lloyd-Max procedure.
## Part 3 - Deploy the model
In the second notebook we created a basic model and exported it to a file. In this notebook we'll use that same model file to create a REST API with Microsoft ML Server. The Ubuntu DSVM has an installation of ML Server for testing deployments. We'll create a REST API with our model and test it with the same truck image we used in notebook 2 to evaluate the model.
There are two variables you must set before running this notebook. The first is the password for your ML Server instance. At MLADS we've already set this for you. If you're following this tutorial on your own, you should configure your ML Server instance for [one-box deployment](https://docs.microsoft.com/en-us/machine-learning-server/operationalize/configure-machine-learning-server-one-box). The second variable is the name of the deployed web service. This needs to be unique on the VM. We recommend that you use your username and a number, like *username5*.
```
# choose a unique service name. We recommend you use your username and a number, like alias3
service_name = ____SET_ME_TO_A_UNIQUE_VALUE_____
# set the ML Server admin password. This is NOT your login password; it is the admin password for Machine Learning Server. If you deployed this tutorial using the ARM template in GitHub, this password is Dsvm@123
ml_server_password = ____SET_ME_____
```
## Microsoft ML Server Operationalization
ML Server Operationalization provides the ability to easily convert a model into a REST API and call it from many languages.
```
from IPython.display import Image as ShowImage
ShowImage(url="https://docs.microsoft.com/en-us/machine-learning-server/media/what-is-operationalization/data-scientist-easy-deploy.png", width=800, height=800)
```
ML Server runs one or more web nodes as the front end for REST API calls and one or more compute nodes to perform the calculations for the deployed services. This VM was configured for ML Server Operationalization when it was created. Here we run a single web node and a single compute node on this VM in a *one-box* configuration.
ML Server provides the azureml.deploy Python package to deploy new REST API endpoints and call them.
```
from IPython.display import Image as ShowImage
ShowImage(url="https://docs.microsoft.com/en-us/machine-learning-server/operationalize/media/configure-machine-learning-server-one-box/setup-onebox.png", width=800, height=800)
```
More details are available in [the ML Server documentation](https://docs.microsoft.com/en-us/machine-learning-server/operationalize/configure-machine-learning-server-one-box-9-2).
```
from azureml.deploy import DeployClient
from azureml.deploy.server import MLServer
HOST = 'http://localhost:12800'
context = ('admin', ml_server_password)
client = DeployClient(HOST, use=MLServer, auth=context)
```
Retrieve the truck image for testing our deployed service.
```
from PIL import Image
import pandas as pd
import numpy as np
from matplotlib.pyplot import imshow
from IPython.display import Image as ImageShow
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
url = "https://cntk.ai/jup/201/00014.png"
myimg = np.array(Image.open(urlopen(url)), dtype=np.float32)
flattened = myimg.ravel()
ImageShow(url=url, width=64, height=64)
```
## Deploy the model
We need two functions to deploy a model in ML Server. The *init* function handles service initialization. The *eval* function evaluates a single input value and returns the result. *eval* will be called by the server when we call the REST API.
Our *eval* function accepts a single input: a 1D numpy array with the image to evaluate. It needs to (1) reshape the input data from a 1D array to a 3D image, (2) subtract the image mean, to mimic the inputs to the model during training, (3) evaluate the model on the image, and (4) return the results as a pandas DataFrame. Alternatively we could return just the top result or the top three results.
```
import cntk
with open('model.cntk', mode='rb') as file: # b is important -> binary
binary_model = file.read()
# --Define an `init` function to handle service initialization --
def init():
import cntk
global loaded_model
loaded_model = cntk.ops.functions.load_model(binary_model)
# define an eval function to handle scoring
def eval(image_data):
import numpy as np
import cntk
from pandas import DataFrame
image_data = image_data.copy().reshape((32, 32, 3))
image_mean = 133.0
image_data -= image_mean
image_data = np.ascontiguousarray(np.transpose(image_data, (2, 0, 1)))
results = loaded_model.eval({loaded_model.arguments[0]:[image_data]})
return DataFrame(results)
# create the API
service = client.service(service_name)\
.version('1.0')\
.code_fn(eval, init)\
.inputs(image_data=np.array)\
.outputs(results=pd.DataFrame)\
.models(binary_model=binary_model)\
.description('My CNTK model')\
.deploy()
print(help(service))
service.capabilities()
```
Now call our newly created API with our truck image.
```
res = service.eval(flattened)
# -- Pluck out the named output `results` as defined during publishing and print --
print(res.output('results'))
# get the top 3 predictions
result = res.output('results')
result = result.values[0]
top_count = 3
result_indices = (-np.array(result)).argsort()[:top_count]
label_lookup = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
print("Top 3 predictions:")
for i in range(top_count):
print("\tLabel: {:10s}, confidence: {:.2f}%".format(label_lookup[result_indices[i]], result[result_indices[i]] * 100))
# -- Retrieve the URL of the swagger file for this service.
cap = service.capabilities()
swagger_URL = cap['swagger']
print(swagger_URL)
print(service.swagger())
```
# Table of Contents
1. [Name](#Name)
2. [Search](#Search)
    1. [Load Cached Results](#Load-Cached-Results)
    2. [Run From Scratch](#Run-From-Scratch)
3. [Analysis](#Analysis)
    1. [Gender Breakdown](#Gender-Breakdown)
    2. [Face Sizes](#Face-Sizes)
    3. [Appearances on a Single Show](#Appearances-on-a-Single-Show)
    4. [Screen Time Across All Shows](#Screen-Time-Across-All-Shows)
4. [Persist to Cloud](#Persist-to-Cloud)
    1. [Save Model to GCS](#Save-Model-to-GCS)
        1. [Make sure the GCS file is valid](#Make-sure-the-GCS-file-is-valid)
    2. [Save Labels to DB](#Save-Labels-to-DB)
        1. [Commit the person and labeler](#Commit-the-person-and-labeler)
        2. [Commit the FaceIdentity labels](#Commit-the-FaceIdentity-labels)
```
from esper.prelude import *
from esper.identity import *
from esper import embed_google_images
```
# Name
```
name = 'Chris Matthews'
```
# Search
## Load Cached Results
```
assert name != ''
results = FaceIdentityModel.load(name=name)
imshow(np.hstack([cv2.resize(x[1][0], (200, 200)) for x in results.model_params['images']]))
plt.show()
plot_precision_and_cdf(results)
```
## Run From Scratch
Run this section if you do not have a cached model and precision curve estimates.
```
assert name != ''
img_dir = embed_google_images.fetch_images(name)
face_imgs = load_and_select_faces_from_images(img_dir)
face_embs = embed_google_images.embed_images(face_imgs)
assert(len(face_embs) == len(face_imgs))
imshow(np.hstack([cv2.resize(x[0], (200, 200)) for x in face_imgs if x]))
plt.show()
face_ids_by_bucket, face_ids_to_score = face_search_by_embeddings(face_embs)
precision_model = PrecisionModel(face_ids_by_bucket)
print('Select all MISTAKES. Ordered by DESCENDING score. Expecting {} frames'.format(precision_model.get_lower_count()))
lower_widget = precision_model.get_lower_widget()
lower_widget
print('Select all NON-MISTAKES. Ordered by ASCENDING distance. Expecting {} frames'.format(precision_model.get_upper_count()))
upper_widget = precision_model.get_upper_widget()
upper_widget
```
Run the following cell after labelling.
```
lower_precision = precision_model.compute_precision_for_lower_buckets(lower_widget.selected)
upper_precision = precision_model.compute_precision_for_upper_buckets(upper_widget.selected)
precision_by_bucket = {**lower_precision, **upper_precision}
results = FaceIdentityModel(
name=name,
face_ids_by_bucket=face_ids_by_bucket,
face_ids_to_score=face_ids_to_score,
precision_by_bucket=precision_by_bucket,
model_params={
'images': list(zip(face_embs, face_imgs))
}
)
plot_precision_and_cdf(results)
# Save the model
results.save()
```
# Analysis
## Gender Breakdown
```
gender_breakdown = compute_gender_breakdown(results)
print('Raw counts:')
for k, v in gender_breakdown.items():
print(' ', k, ':', v)
print()
print('Proportions:')
denominator = sum(v for v in gender_breakdown.values())
for k, v in gender_breakdown.items():
print(' ', k, ':', v / denominator)
print()
print('Showing examples:')
show_gender_examples(results)
```
## Face Sizes
```
plot_histogram_of_face_sizes(results)
```
## Appearances on a Single Show
```
show_name = 'Hardball'
screen_time_by_video_id = compute_screen_time_by_video(results, show_name)
plot_histogram_of_screen_times_by_video(name, show_name, screen_time_by_video_id)
plot_screentime_over_time(name, show_name, screen_time_by_video_id)
plot_distribution_of_appearance_times_by_video(results, show_name)
```
## Screen Time Across All Shows
```
screen_time_by_show = get_screen_time_by_show(results)
plot_screen_time_by_show(name, screen_time_by_show)
```
# Persist to Cloud
## Save Model to GCS
```
gcs_model_path = results.save_to_gcs()
```
### Make sure the GCS file is valid
```
gcs_results = FaceIdentityModel.load_from_gcs(name=name)
plot_precision_and_cdf(gcs_results)
```
## Save Labels to DB
```
from django.core.exceptions import ObjectDoesNotExist
def standardize_name(name):
return name.lower()
person_type = ThingType.objects.get(name='person')
try:
person = Thing.objects.get(name=standardize_name(name), type=person_type)
print('Found person:', person.name)
except ObjectDoesNotExist:
person = Thing(name=standardize_name(name), type=person_type)
print('Creating person:', person.name)
labeler = Labeler(name='face-identity-{}'.format(person.name), data_path=gcs_model_path)
```
### Commit the person and labeler
```
person.save()
labeler.save()
```
### Commit the FaceIdentity labels
```
commit_face_identities_to_db(results, person, labeler)
print('Committed {} labels to the db'.format(FaceIdentity.objects.filter(labeler=labeler).count()))
```
# An introduction to geocoding
Geocoders are tools to which you pass an address or place of interest, and which return the coordinates of that place.
The **`arcgis.geocoding`** module provides types and functions for geocoding, batch geocoding and reverse geocoding.
```
from arcgis.gis import GIS
from arcgis import geocoding
from getpass import getpass
password = getpass()
gis = GIS("http://www.arcgis.com", "arcgis_python", password)
```
## Geocoding addresses
All geocoding operations are handled by the `geocode()` function. It can geocode
1. single line address
2. multi field address
3. points of interest
4. administrative place names
5. postal codes
```
results = geocoding.geocode('San Diego, CA')
results
len(results)
results[0]
map1 = gis.map('San Diego, CA')
map1
map1.draw(results[0]['location'])
```
### Geocode single line addresses
```
r2 = geocoding.geocode('San Diego Convention center')
len(r2)
[str(r['score']) +" : "+ r['attributes']['Match_addr'] for r in r2]
```
### Geocode multi field address
```
multi_field_address = {
"Address" : "111 Harbor Dr",
"City" : "San Diego",
"Region" : "CA",
"Subregion":"San Diego",
"Postal" : 92103,
"Country":"USA"
}
r_multi_fields = geocoding.geocode(multi_field_address)
len(r_multi_fields)
```
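### Geocode postal codes and place names
As listed above, `geocode()` also accepts postal codes and administrative place names directly. A minimal sketch (the postal code below is just an example value):
```
# Geocode a postal code (example value) and inspect the best match
r_postal = geocoding.geocode('92101', max_locations=1)
if r_postal:
    print(r_postal[0]['attributes']['Match_addr'], r_postal[0]['location'])
```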
### Searching within an extent
Find grocery stores within walking distance of the San Diego Convention Center.
```
# first create an extent.
conv_center = geocoding.geocode('San Diego Convention Center, California')[0]
map3 = gis.map('San Diego Convention Center', zoomlevel=17)
map3
restaurants = geocoding.geocode('grocery',search_extent=conv_center['extent'],
max_locations=15)
len(restaurants)
map3.clear_graphics()
for shop in restaurants:
popup = {
"title" : shop['attributes']['PlaceName'],
"content" : shop['attributes']['Place_addr']
}
map3.draw(shop['location'],popup)
```
## Batch geocoding
```
addresses = ["380 New York St, Redlands, CA",
"1 World Way, Los Angeles, CA",
"1200 Getty Center Drive, Los Angeles, CA",
"5905 Wilshire Boulevard, Los Angeles, CA",
"100 Universal City Plaza, Universal City, CA 91608",
"4800 Oak Grove Dr, Pasadena, CA 91109"]
results = geocoding.batch_geocode(addresses)
len(results)
map5 = gis.map('Los Angeles, CA')
map5
for address in results:
map5.draw(address['location'])
```
## Reverse geocoding
```
reverse_geocode_results = geocoding.reverse_geocode([2.2945, 48.8583])
reverse_geocode_results
map6 = gis.map('San Diego convention center, San Diego, CA', 16)
map6
def find_addr(map6, g):
try:
map6.draw(g)
geocoded = geocoding.reverse_geocode(g)
print(geocoded['address']['Match_addr'])
except:
print("Couldn't match address. Try another place...")
map6.on_click(find_addr)
```
This is a simple NLP project which predicts the sentiment of movie reviews from the IMDB dataset.
```
import numpy as np
import pandas as pd
import os
import glob
import csv
import random
```
Gathering the Datasets and converting them to a single csv file
```
# Since all the reviews and sentiments are in txt files, I have used this function to combine them and put them in an array
def txt_tocsv(path_of_files, arr, dir_list):
for filename in dir_list:
fpath = os.path.join(path_of_files, filename)
with open(fpath, 'r', encoding="utf8") as file:
arr.append(file.read())
# Converting the combined text file dataset to a dataframe using pandas
def arr_todf(dic, arr, data_f, rating):
dic = {"review": arr,
"rating": [rating]*len(arr)}
data_f = pd.DataFrame(dic)
print(data_f.head())
return(data_f)
```
Converting the Positive training dataset to a csv file
```
pos_arr = []
path = r"datasets\train_pos.csv"
dir_list_pos = os.listdir(path)
txt_tocsv(path, pos_arr, dir_list_pos)
pos_dic = {}
pos_df = pd.DataFrame()
pos_df = arr_todf(pos_dic, pos_arr, pos_df, 1)
pos_df.to_csv("train_pos.csv", index=False)
```
Converting the negative training datasets to csv files
```
neg_arr = []
path = r"datasets\train_neg.csv"
dir_list_neg = os.listdir(path)
txt_tocsv(path, neg_arr, dir_list_neg)
neg_dic = {}
neg_df = pd.DataFrame()
neg_df = arr_todf(neg_dic, neg_arr, neg_df, 0)
neg_df.to_csv("train_neg.csv", index=False)
```
Merging both training datasets
```
pos_rev = pd.read_csv("train_pos.csv")
neg_rev = pd.read_csv("train_neg.csv")
training_data = pd.concat([pos_rev, neg_rev])
training_df = pd.DataFrame(training_data)
training_df = training_df.sample(frac=1)
training_df.head(10)
training_df.to_csv("training_data.csv", index=False)
```
Test data
Converting all the Positive Test Data to csv file
```
path = r'datasets\test_pos.csv'
dir_list_pos_test = os.listdir(path)
pos_arr_test = []
txt_tocsv(path, pos_arr_test, dir_list_pos_test)
pos_dic_test = {}
pos_df_test = pd.DataFrame()
pos_df_test = arr_todf(pos_dic_test, pos_arr_test, pos_df_test, 1)
pos_df_test.to_csv("test_pos.csv", index=False)
```
Converting all the negative test data to csv file
```
path = r"datasets\test_neg.csv"
dir_list_neg_test = os.listdir(path)
neg_arr_test = []
txt_tocsv(path, neg_arr_test, dir_list_neg_test)
neg_dic_test = {}
neg_df_test = pd.DataFrame()
neg_df_test = arr_todf(neg_dic_test, neg_arr_test, neg_df_test, 0)
neg_df_test.to_csv("test_neg.csv", index=False)
pos_rev_test = pd.read_csv("test_pos.csv")
neg_rev_test = pd.read_csv("test_neg.csv")
test_data = pd.concat([pos_rev_test, neg_rev_test])
testing_df = pd.DataFrame(test_data)
testing_df = testing_df.sample(frac=1)
testing_df.head(10)
testing_df.to_csv("testing_data.csv", index=False)
```
Merging both the training and testing datasets to get the full dataset
```
train_csv = pd.read_csv("training_data.csv")
test_csv = pd.read_csv("testing_data.csv")
dataset = pd.concat([train_csv, test_csv])
dataset_df = pd.DataFrame(dataset)
dataset_df = dataset_df.sample(frac=1)
dataset_df.head()
dataset_df.to_csv("imdb_dataset.csv", index=False)
import seaborn as sns
import nltk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelBinarizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from wordcloud import WordCloud, STOPWORDS
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, sent_tokenize
from bs4 import BeautifulSoup
import spacy
import re
import string
import unicodedata
from nltk.tokenize.toktok import ToktokTokenizer
from nltk.stem import LancasterStemmer, WordNetLemmatizer
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from textblob import TextBlob
from textblob import Word
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import warnings
```
This part deals with data cleaning and model training
```
# importing the csv file to a dataframe
df_imdb_train = pd.read_csv('imdb_dataset.csv')
df_imdb_train = df_imdb_train.sample(frac=1)
# df_imdb_train.head(20)
df_imdb_train.shape
df_imdb_train.describe()
df_imdb_train['rating'].value_counts()
# tokenization
tokenizer = ToktokTokenizer()
stopword_list = nltk.corpus.stopwords.words('english')
```
Data cleaning
```
# removing the html strips
def html_rem(text):
soup = BeautifulSoup(text, "html.parser")
return soup.get_text()
# removing the brackets
def brac_rem(text):
    return re.sub(r'\[[^]]*\]', '', text)
def denoise_txt(text):
'''removing the noisy text'''
text = html_rem(text)
text = brac_rem(text)
return text
df_imdb_train['review'] = df_imdb_train['review'].apply(denoise_txt)
def rem_spe_char(text, rem_dig=True):
'''removing the special character'''
pattern = r'[^a-zA-Z0-9\s]'
text = re.sub(pattern, '', text)
return text
df_imdb_train['review'] = df_imdb_train['review'].apply(rem_spe_char)
def simple_stemmer(text):
'''eliminates the affixes from words in order to retrieve the base form'''
ps = nltk.porter.PorterStemmer()
text = ' '.join([ps.stem(word) for word in text.split()])
return text
df_imdb_train['review'] = df_imdb_train['review'].apply(simple_stemmer)
stop_words = set(stopwords.words('english'))
print(stop_words)
def rem_stop_words(text, is_lower_case=False):
'''removes all the words that have little or no meaning'''
tokens = tokenizer.tokenize(text)
tokens = [token.strip()for token in tokens]
if is_lower_case:
filter_tokens = [
token for token in tokens if token not in stopword_list]
else:
filter_tokens = [
token for token in tokens if token.lower() not in stopword_list]
filtered_text = ' '.join(filter_tokens)
return filtered_text
df_imdb_train['review'] = df_imdb_train['review'].apply(rem_stop_words)
norm_train_rev = df_imdb_train.review[:40000]
norm_train_rev[0]
norm_test_rev = df_imdb_train.review[40000:]
# norm_test_rev.count()
```
Bag of word model
```
cv = CountVectorizer(min_df=0, max_df=1, binary=False, ngram_range=(1, 3))
cv_train_rev = cv.fit_transform(norm_train_rev)
cv_test_rev = cv.transform(norm_test_rev)
print("Bag of words for training dataset:", cv_train_rev.shape)
print("Bag of words of test dataset:", cv_test_rev.shape)
```
TFIDF model
```
tf = TfidfVectorizer(min_df=0, max_df=1, use_idf=True, ngram_range=(1, 3))
tf_train_rev = tf.fit_transform(norm_train_rev)
tf_test_rev = tf.transform(norm_test_rev)
print("TFIDF of training dataset:", tf_train_rev.shape)
print("TFIDF test data:", tf_test_rev.shape)
lb = LabelBinarizer()
rating_data = lb.fit_transform(df_imdb_train['rating'])
train_rating = rating_data[:40000]
test_rating = rating_data[40000:]
print(train_rating)
print(test_rating)
```
Modelling the data(multinomial naive bayes)
```
mnb = MultinomialNB()
# bag of words
mnb_bow = mnb.fit(cv_train_rev, np.ravel(train_rating))
print(mnb_bow)
# tfidf (use a separate estimator so the bag-of-words model is not overwritten)
mnb_tfidf = MultinomialNB().fit(tf_train_rev, np.ravel(train_rating))
print(mnb_tfidf)
mb_bow_predict = mnb_bow.predict(cv_test_rev)
print(mb_bow_predict)
mb_tfidf_predict = mnb_tfidf.predict(tf_test_rev)
print(mb_tfidf_predict)
mb_bow_acc = accuracy_score(test_rating, mb_bow_predict)
print(mb_bow_acc)
mb_tfidf_acc = accuracy_score(test_rating, mb_tfidf_predict)
print(mb_tfidf_acc)
mb_bow_report = classification_report(
    test_rating, mb_bow_predict, target_names=['Negative', 'Positive'])  # label 0 = Negative, 1 = Positive
print(mb_bow_report)
mb_tfidf_report = classification_report(
    test_rating, mb_tfidf_predict, target_names=['Negative', 'Positive'])
print(mb_tfidf_report)
cm_bow = confusion_matrix(test_rating, mb_bow_predict, labels=[1, 0])
print(cm_bow)
cm = pd.DataFrame(cm_bow)
sns.heatmap(cm, annot=True, fmt="d")
cm_tfidf = confusion_matrix(test_rating, mb_tfidf_predict, labels=[1, 0])
print(cm_tfidf)
cm_t=pd.DataFrame(cm_tfidf)
sns.heatmap(cm_t,annot=True,fmt="d")
predict = mnb.predict(cv.transform(["When you make a film with a killer-kids premise, there are two effective ways to approach it; you can either make it as realistic as possible, creating believable characters and situations, or you can make it as fun as possible by playing it for laughs (something which the makers of Silent Night, Deadly Night did, for example, on an equally controversial subject: a killer Santa). The people who made Bloody Birthday, however, do neither of those things; they simply rely on the shock value of the image of a kid with a gun (or a knife, or a noose, or an arrow) in his/her hand. The result is both offensive and stupid. The whole film looks like a bad idea that was rushed through production (and then kept from release for several years). It's redeemed a tiny bit by good performances from the kids, but it's VERY sloppily made. (*1/2)"]))
if predict == 1:
print("The sentiment is positive")
else:
print("The sentiment is negative")
predict = mnb.predict(cv.transform(["I went and saw this movie last night after being coaxed to by a few friends of mine. I'll admit that I was reluctant to see it because from what I knew of Ashton Kutcher he was only able to do comedy. I was wrong. Kutcher played the character of Jake Fischer very well, and Kevin Costner played Ben Randall with such professionalism. The sign of a good movie is that it can toy with our emotions. This one did exactly that. The entire theater(which was sold out) was overcome by laughter during the first half of the movie, and were moved to tears during the second half. While exiting the theater I not only saw many women in tears, but many full grown men as well, trying desperately not to let anyone see them crying. This movie was great, and I suggest that you go see it before you judge."]))
if predict == 1:
print("The sentiment is positive")
else:
print("The sentiment is negative")
```
# Explainable fraud detection model
In this example we develop a small fraud detection model for credit card transactions based on XGBoost, export it to TorchScript using Hummingbird (https://github.com/microsoft/hummingbird) and run Shapley Value Sampling explanations (see https://captum.ai/api/shapley_value_sampling.html for reference) on it, via torch script.
We load both the original model and the explainability script in RedisAI and trigger them in a DAG.
## Data
For this example we use a dataset of transactions made by credit cards in September 2013 by European cardholders.
The dataset presents transactions that occurred in two days, with 492 frauds out of 284,807 transactions.
The dataset is available at https://www.kaggle.com/mlg-ulb/creditcardfraud. For anonymity purposes, the features are 28 PCA features (V1 to V28), along with transaction Time and Amount.
__In order to run this notebook please download the `creditcard.csv` file from Kaggle and place it in the `data/` directory.__
Once the file is in place, we start by importing Pandas and reading the data. We create a dataframe of covariates and a dataframe of targets.
```
import pandas as pd
import numpy as np
df = pd.read_csv('data/creditcard.csv')
X = df.drop(['Class'], axis=1)
Y = df['Class']
```
## Model
We start off by randomly splitting train and test datasets.
```
from sklearn.model_selection import train_test_split
seed = 7
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
```
Next we use XGBoost to classify the transactions. Note that we convert the arguments to `fit` to NumPy arrays.
```
from xgboost import XGBClassifier
model = XGBClassifier(use_label_encoder=False)
model.fit(X_train.to_numpy(), y_train.to_numpy())
```
We now obtain predictions on the test dataset and binarize the output probabilities to get a target.
```
y_pred = model.predict(X_test.to_numpy())
predictions = [round(value) for value in y_pred]
```
We evaluate the accuracy of our model on the test set (this is just an example: the dataset is heavily unbalanced so accuracy is not a fair characterization in this case).
```
from sklearn.metrics import accuracy_score, confusion_matrix
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
```
Looking at the confusion matrix gives a clearer representation.
```
confusion_matrix(y_test, predictions)
```
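Since the classes are heavily imbalanced, per-class precision and recall on the fraud class are more informative than overall accuracy. A minimal sketch (not part of the original notebook; class 1 is the fraud class in this dataset):
```
from sklearn.metrics import classification_report

# Per-class precision/recall/F1 on the held-out test set
print(classification_report(y_test, predictions, target_names=["legit", "fraud"]))
```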
We are interested in exploring the cases of fraud, so we extract them from the test set.
```
X_test_fraud = X_test[y_test == 1].to_numpy()
```
We verify how many times we are getting it right.
```
model.predict(X_test_fraud) == 1
```
## Exporting to TorchScript with Hummingbird
From the project page (https://github.com/microsoft/hummingbird):
> Hummingbird is a library for compiling trained traditional ML models into tensor computations. Hummingbird allows users to seamlessly leverage neural network frameworks (such as PyTorch) to accelerate traditional ML models.
Hummingbird can take scikit-learn, XGBoost or LightGBM models and export them to PyTorch, TorchScript, ONNX and TVM. This works very well for running ML models on RedisAI while taking advantage of vectorized CPU instructions or the GPU.
We choose to convert the boosted tree to tensor computations using the `gemm` implementation.
```
from hummingbird.ml import convert, load
extra_config={
"tree_implementation": "gemm"
}
hummingbird_model = convert(model, 'torchscript', test_input=X_test_fraud, extra_config=extra_config)
```
At this point, `hummingbird_model` is an object containing a TorchScript model that is ready to be exported.
```
import torch
torch.jit.save(hummingbird_model.model, "models/fraud_detection_model.pt")
```
We can verify everything works by loading the model and running a prediction. The model outputs a tuple containing the predicted classes and the output probabilities.
```
loaded_model = torch.jit.load("models/fraud_detection_model.pt")
X_test_fraud_tensor = torch.from_numpy(X_test_fraud)
loaded_output_classes, loaded_output_probs = loaded_model(X_test_fraud_tensor)
```
We can now compare against the original output from the XGBoost model.
```
xgboost_output_classes = torch.from_numpy(model.predict(X_test_fraud))
torch.equal(loaded_output_classes, xgboost_output_classes)
```
## Explainer Script
The script `torch_shapley.py` is a TorchScript script designed specifically to run on RedisAI. It uses the RedisAI extension for TorchScript, which allows any model stored in RedisAI to be executed from within the script. Let's go over the details:
In RedisAI, each entry point (function in script) should have the signature:
`function_name(tensors: List[Tensor], keys: List[str], args: List[str]):`
In our case our entry point is `shapley_sample(tensors: List[Tensor], keys: List[str], args: List[str]):` and the parameters are:
```
Tensors:
tensors[0] - x : Input tensor to the model
tensors[1] - baselines : Optional - reference values which replace each feature when
ablated; if no baselines are provided, baselines are set
to all zeros
Keys:
keys[0] - model_key: Redis key name where the model is stored as RedisAI model.
Args:
args[0] - n_samples: number of random feature permutations performed
args[1] - number_of_outputs - number of model outputs
args[2] - output_tensor_index - index of the tested output tensor
args[3] - Optional - target: output indices for which Shapley Value Sampling is
computed; if model returns a single scalar, target can be
None
```
The script creates `n_samples` permutations of the input features. For each permutation, it checks each feature's contribution to the result by running the model repeatedly on a new subset of the input features.
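As a rough illustration of that structure, a heavily simplified skeleton could look as follows. This is only a hedged sketch, not the actual contents of `torch_shapley.py`: in particular, the call back into RedisAI that executes the model stored at `model_key` is left as a comment, since it relies on the RedisAI TorchScript extension.
```
from typing import List
import torch
from torch import Tensor

def shapley_sample(tensors: List[Tensor], keys: List[str], args: List[str]):
    x = tensors[0]                                   # batch of inputs to explain
    baselines = tensors[1] if len(tensors) > 1 else torch.zeros_like(x)
    model_key = keys[0]                              # RedisAI key of the stored model
    n_samples = int(args[0])                         # number of feature permutations
    attributions = torch.zeros_like(x)
    # For each of the n_samples random feature permutations:
    #   reveal the features one by one (the others stay at `baselines`),
    #   execute the model stored at `model_key` on each perturbed input,
    #   and accumulate every feature's marginal change of the chosen output.
    return attributions
```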
## Serving model and explainer in RedisAI
At this point we can load the model we exported into RedisAI and serve it from there. We will also load the `torch_shapley.py` script, which allows calculating Shapley values of a model from within RedisAI. After making sure RedisAI is running, we initialize the client.
```
import redisai
rai = redisai.Client()
```
We read the model and the script.
```
with open("models/fraud_detection_model.pt", "rb") as f:
fraud_detection_model_blob = f.read()
with open("torch_shapley.py", "rb") as f:
shapely_script = f.read()
```
We load both model and script into RedisAI.
```
rai.modelstore("fraud_detection_model", "TORCH", "CPU", fraud_detection_model_blob)
rai.scriptstore("shapley_script", device='CPU', script=shapely_script, entry_points=["shapley_sample"] )
```
All set, it's now test time. We reuse the `X_test_fraud` NumPy array we created previously: we set it as a tensor, run the Shapley script, and read the explanations back as arrays.
```
rai.tensorset("fraud_input", X_test_fraud, dtype="float")
rai.scriptexecute("shapley_script", "shapley_sample", inputs = ["fraud_input"], keys = ["fraud_detection_model"], args = ["20", "2", "0"], outputs=["fraud_explanations"])
rai_expl = rai.tensorget("fraud_explanations")
winning_feature_redisai = np.argmax(rai_expl[0], axis=0)
print("Winning feature: %d" % winning_feature_redisai)
```
Alternatively we can set up a RedisAI DAG and run it in one swoop.
```
dag = rai.dag(routing ="fraud_detection_model")
dag.tensorset("fraud_input", X_test_fraud, dtype="float")
dag.modelexecute("fraud_detection_model", "fraud_input", ["fraud_pred", "fraud_prob"])
dag.scriptexecute("shapely_script", "shapely_sample", inputs = ["fraud_input"], keys = ["fraud_detection_model"], args = ["20", "2", "0"], outputs=["fraud_explanations"])
dag.tensorget("fraud_pred")
dag.tensorget("fraud_explanations")
```
We now request the DAG execution, which sets the input as part of the DAG and produces the desired outputs.
```
# rai.tensorset("fraud_input", X_test_fraud, dtype="float")
_, _, _, dag_pred, dag_expl = dag.execute()
dag_pred
```
We can now check that the winning feature matches with what we computed earlier on the first sample in the test batch.
```
winning_feature_redisai_dag = np.argmax(dag_expl[0])
print("Winning feature: %d" % winning_feature_redisai_dag)
dag_expl[1]
```
```
#hide
#skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#export
from fastai.data.all import *
from fastai.text.core import *
#hide
from nbdev.showdoc import *
#default_exp text.models.awdlstm
#default_cls_lvl 3
```
# AWD-LSTM
> AWD LSTM from [Smerity et al.](https://arxiv.org/pdf/1708.02182.pdf)
## Basic NLP modules
On top of the pytorch or the fastai [`layers`](/layers.html#layers), the language models use some custom layers specific to NLP.
```
#export
def dropout_mask(x, sz, p):
"Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element."
return x.new_empty(*sz).bernoulli_(1-p).div_(1-p)
t = dropout_mask(torch.randn(3,4), [4,3], 0.25)
test_eq(t.shape, [4,3])
assert ((t == 4/3) + (t==0)).all()
#export
class RNNDropout(Module):
"Dropout with probability `p` that is consistent on the seq_len dimension."
def __init__(self, p=0.5): self.p=p
def forward(self, x):
if not self.training or self.p == 0.: return x
return x * dropout_mask(x.data, (x.size(0), 1, *x.shape[2:]), self.p)
dp = RNNDropout(0.3)
tst_inp = torch.randn(4,3,7)
tst_out = dp(tst_inp)
for i in range(4):
for j in range(7):
if tst_out[i,0,j] == 0: assert (tst_out[i,:,j] == 0).all()
else: test_close(tst_out[i,:,j], tst_inp[i,:,j]/(1-0.3))
```
It also supports doing dropout over a sequence of images where the time dimension is the 1st axis, e.g. 10 images of 3 channels and 32 by 32.
```
_ = dp(torch.rand(4,10,3,32,32))
#export
class WeightDropout(Module):
"A module that wraps another layer in which some weights will be replaced by 0 during training."
def __init__(self, module, weight_p, layer_names='weight_hh_l0'):
self.module,self.weight_p,self.layer_names = module,weight_p,L(layer_names)
for layer in self.layer_names:
#Makes a copy of the weights of the selected layers.
w = getattr(self.module, layer)
delattr(self.module, layer)
self.register_parameter(f'{layer}_raw', nn.Parameter(w.data))
setattr(self.module, layer, w.clone())
if isinstance(self.module, (nn.RNNBase, nn.modules.rnn.RNNBase)):
self.module.flatten_parameters = self._do_nothing
def _setweights(self):
"Apply dropout to the raw weights."
for layer in self.layer_names:
raw_w = getattr(self, f'{layer}_raw')
if self.training: w = F.dropout(raw_w, p=self.weight_p)
else: w = raw_w.clone()
setattr(self.module, layer, w)
def forward(self, *args):
self._setweights()
with warnings.catch_warnings():
# To avoid the warning that comes because the weights aren't flattened.
warnings.simplefilter("ignore", category=UserWarning)
return self.module(*args)
def reset(self):
for layer in self.layer_names:
raw_w = getattr(self, f'{layer}_raw')
setattr(self.module, layer, raw_w.clone())
if hasattr(self.module, 'reset'): self.module.reset()
def _do_nothing(self): pass
module = nn.LSTM(5,7)
dp_module = WeightDropout(module, 0.4)
wgts = dp_module.module.weight_hh_l0
tst_inp = torch.randn(10,20,5)
h = torch.zeros(1,20,7), torch.zeros(1,20,7)
dp_module.reset()
x,h = dp_module(tst_inp,h)
loss = x.sum()
loss.backward()
new_wgts = getattr(dp_module.module, 'weight_hh_l0')
test_eq(wgts, getattr(dp_module, 'weight_hh_l0_raw'))
assert 0.2 <= (new_wgts==0).sum().float()/new_wgts.numel() <= 0.6
assert dp_module.weight_hh_l0_raw.requires_grad
assert dp_module.weight_hh_l0_raw.grad is not None
assert ((dp_module.weight_hh_l0_raw.grad == 0.) & (new_wgts == 0.)).any()
#export
class EmbeddingDropout(Module):
"Apply dropout with probability `embed_p` to an embedding layer `emb`."
def __init__(self, emb, embed_p):
self.emb,self.embed_p = emb,embed_p
def forward(self, words, scale=None):
if self.training and self.embed_p != 0:
size = (self.emb.weight.size(0),1)
mask = dropout_mask(self.emb.weight.data, size, self.embed_p)
masked_embed = self.emb.weight * mask
else: masked_embed = self.emb.weight
if scale: masked_embed.mul_(scale)
return F.embedding(words, masked_embed, ifnone(self.emb.padding_idx, -1), self.emb.max_norm,
self.emb.norm_type, self.emb.scale_grad_by_freq, self.emb.sparse)
enc = nn.Embedding(10, 7, padding_idx=1)
enc_dp = EmbeddingDropout(enc, 0.5)
tst_inp = torch.randint(0,10,(8,))
tst_out = enc_dp(tst_inp)
for i in range(8):
assert (tst_out[i]==0).all() or torch.allclose(tst_out[i], 2*enc.weight[tst_inp[i]])
#export
class AWD_LSTM(Module):
"AWD-LSTM inspired by https://arxiv.org/abs/1708.02182"
initrange=0.1
def __init__(self, vocab_sz, emb_sz, n_hid, n_layers, pad_token=1, hidden_p=0.2, input_p=0.6, embed_p=0.1,
weight_p=0.5, bidir=False):
store_attr('emb_sz,n_hid,n_layers,pad_token')
self.bs = 1
self.n_dir = 2 if bidir else 1
self.encoder = nn.Embedding(vocab_sz, emb_sz, padding_idx=pad_token)
self.encoder_dp = EmbeddingDropout(self.encoder, embed_p)
self.rnns = nn.ModuleList([self._one_rnn(emb_sz if l == 0 else n_hid, (n_hid if l != n_layers - 1 else emb_sz)//self.n_dir,
bidir, weight_p, l) for l in range(n_layers)])
self.encoder.weight.data.uniform_(-self.initrange, self.initrange)
self.input_dp = RNNDropout(input_p)
self.hidden_dps = nn.ModuleList([RNNDropout(hidden_p) for l in range(n_layers)])
self.reset()
def forward(self, inp, from_embeds=False):
bs,sl = inp.shape[:2] if from_embeds else inp.shape
if bs!=self.bs: self._change_hidden(bs)
output = self.input_dp(inp if from_embeds else self.encoder_dp(inp))
new_hidden = []
for l, (rnn,hid_dp) in enumerate(zip(self.rnns, self.hidden_dps)):
output, new_h = rnn(output, self.hidden[l])
new_hidden.append(new_h)
if l != self.n_layers - 1: output = hid_dp(output)
self.hidden = to_detach(new_hidden, cpu=False, gather=False)
return output
def _change_hidden(self, bs):
self.hidden = [self._change_one_hidden(l, bs) for l in range(self.n_layers)]
self.bs = bs
def _one_rnn(self, n_in, n_out, bidir, weight_p, l):
"Return one of the inner rnn"
rnn = nn.LSTM(n_in, n_out, 1, batch_first=True, bidirectional=bidir)
return WeightDropout(rnn, weight_p)
def _one_hidden(self, l):
"Return one hidden state"
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return (one_param(self).new_zeros(self.n_dir, self.bs, nh), one_param(self).new_zeros(self.n_dir, self.bs, nh))
def _change_one_hidden(self, l, bs):
if self.bs < bs:
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return tuple(torch.cat([h, h.new_zeros(self.n_dir, bs-self.bs, nh)], dim=1) for h in self.hidden[l])
if self.bs > bs: return (self.hidden[l][0][:,:bs].contiguous(), self.hidden[l][1][:,:bs].contiguous())
return self.hidden[l]
def reset(self):
"Reset the hidden states"
[r.reset() for r in self.rnns if hasattr(r, 'reset')]
self.hidden = [self._one_hidden(l) for l in range(self.n_layers)]
```
This is the core of an AWD-LSTM model, with embeddings from `vocab_sz` and `emb_sz`, `n_layers` LSTMs potentially `bidir` stacked, the first one going from `emb_sz` to `n_hid`, the last one from `n_hid` to `emb_sz` and all the inner ones from `n_hid` to `n_hid`. `pad_token` is passed to the PyTorch embedding layer. The dropouts are applied as such:
- the embeddings are wrapped in `EmbeddingDropout` of probability `embed_p`;
- the result of this embedding layer goes through an `RNNDropout` of probability `input_p`;
- each LSTM has `WeightDropout` applied with probability `weight_p`;
- between two of the inner LSTM, an `RNNDropout` is applied with probability `hidden_p`.
The module returns the output of the last LSTM. The `hidden_p` dropout is applied between the inner LSTMs but not on this final output, so it can be fed directly to a decoder (in the case of a language model).
```
tst = AWD_LSTM(100, 20, 10, 2, hidden_p=0.2, embed_p=0.02, input_p=0.1, weight_p=0.2)
x = torch.randint(0, 100, (10,5))
r = tst(x)
test_eq(tst.bs, 10)
test_eq(len(tst.hidden), 2)
test_eq([h_.shape for h_ in tst.hidden[0]], [[1,10,10], [1,10,10]])
test_eq([h_.shape for h_ in tst.hidden[1]], [[1,10,20], [1,10,20]])
test_eq(r.shape, [10,5,20])
test_eq(r[:,-1], tst.hidden[-1][0][0]) #hidden state is the last timestep in raw outputs
tst.eval()
tst.reset()
tst(x);
tst(x);
#hide
#test bs change
x = torch.randint(0, 100, (6,5))
r = tst(x)
test_eq(tst.bs, 6)
# hide
# cuda
tst = AWD_LSTM(100, 20, 10, 2, bidir=True).to('cuda')
tst.reset()
x = torch.randint(0, 100, (10,5)).to('cuda')
r = tst(x)
x = torch.randint(0, 100, (6,5), device='cuda')
r = tst(x)
#export
def awd_lstm_lm_split(model):
"Split a RNN `model` in groups for differential learning rates."
groups = [nn.Sequential(rnn, dp) for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)]
groups = L(groups + [nn.Sequential(model[0].encoder, model[0].encoder_dp, model[1])])
return groups.map(params)
#export
awd_lstm_lm_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.1,
hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)
#export
def awd_lstm_clas_split(model):
"Split a RNN `model` in groups for differential learning rates."
groups = [nn.Sequential(model[0].module.encoder, model[0].module.encoder_dp)]
groups += [nn.Sequential(rnn, dp) for rnn, dp in zip(model[0].module.rnns, model[0].module.hidden_dps)]
groups = L(groups + [model[1]])
return groups.map(params)
#export
awd_lstm_clas_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.4,
hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)
```
## QRNN
```
#export
class AWD_QRNN(AWD_LSTM):
"Same as an AWD-LSTM, but using QRNNs instead of LSTMs"
def _one_rnn(self, n_in, n_out, bidir, weight_p, l):
from fastai.text.models.qrnn import QRNN
rnn = QRNN(n_in, n_out, 1, save_prev_x=(not bidir), zoneout=0, window=2 if l == 0 else 1, output_gate=True, bidirectional=bidir)
rnn.layers[0].linear = WeightDropout(rnn.layers[0].linear, weight_p, layer_names='weight')
return rnn
def _one_hidden(self, l):
"Return one hidden state"
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return one_param(self).new_zeros(self.n_dir, self.bs, nh)
def _change_one_hidden(self, l, bs):
if self.bs < bs:
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return torch.cat([self.hidden[l], self.hidden[l].new_zeros(self.n_dir, bs-self.bs, nh)], dim=1)
if self.bs > bs: return self.hidden[l][:, :bs]
return self.hidden[l]
# cuda
# cpp
model = AWD_QRNN(vocab_sz=10, emb_sz=20, n_hid=16, n_layers=2, bidir=False)
x = torch.randint(0, 10, (7,5))
y = model(x)
test_eq(y.shape, (7, 5, 20))
# hide
# cuda
# cpp
# test bidir=True
model = AWD_QRNN(vocab_sz=10, emb_sz=20, n_hid=16, n_layers=2, bidir=True)
x = torch.randint(0, 10, (7,5))
y = model(x)
test_eq(y.shape, (7, 5, 20))
#export
awd_qrnn_lm_config = dict(emb_sz=400, n_hid=1552, n_layers=4, pad_token=1, bidir=False, output_p=0.1,
hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)
#export
awd_qrnn_clas_config = dict(emb_sz=400, n_hid=1552, n_layers=4, pad_token=1, bidir=False, output_p=0.4,
hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)
```
## Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
<a href="https://colab.research.google.com/github/KwonDoRyoung/AdvancedBasicEducationProgram/blob/main/Challenge01.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from google.colab import drive
drive.mount('/content/drive')
import os
import csv
root_path = "/content/drive/MyDrive/aihub30754" # 데이터가 존재하는 최상위 폴더
metadata_train_path = os.path.join(root_path, "train/train.csv") # 메타데이터의 경로
print(metadata_train_path)
dataset = [] # meta 데이터에서 정보를 추출 후, 저장
category = set() # 집합: Category 정보를 담기 위함
# csv 파일을 읽고(reader) 각각 행에 있는 정보를 순차적으로 호출(for 부분)
with open(metadata_train_path, "r") as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
        if idx == 0:
            # the first row is the header describing the columns
            continue
        # row[0]: image file name
        # row[1]: image class
        dataset.append(row)
        category.add(row[-1])  # category = label = class
print(len(dataset), len(category))
print(category)
print(dataset)
import os
import csv
from PIL import Image
from torch.utils.data import Dataset
class SeaGarbage(Dataset):
def __init__(self, data_path, phase="train"):
self.phase = phase
self.data_path = data_path
self.dataset, self.category = self._read_metadata(data_path, phase)
self.classes = list(self.category.keys())
def __len__(self):
return len(self.dataset)
def _read_metadata(self, data_path, phase):
dataset = []
category = set()
metadata_path = os.path.join(data_path, f"{phase}.csv")
with open(metadata_path, "r") as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
                if idx == 0:
                    # the first row is the header describing the columns
                    continue
                # row[0]: image file name
                # row[1]: image class
                dataset.append(row)
                category.add(row[-1])  # category = label = class
category_dict = {}
for _c, _n in enumerate(list(category)):
category_dict[_n] = _c
return dataset, category_dict
def __getitem__(self, idx):
image_path = os.path.join(self.data_path, "images", self.dataset[idx][0])
image = Image.open(image_path)
label = self.category[self.dataset[idx][-1]]
return image, label
root_path = "/content/drive/MyDrive/aihub30754"
train_path = os.path.join(root_path, "train")
train_dataset1 = SeaGarbage(train_path, phase="train")
print(f"# of dataset: {len(train_dataset1)}")
print(f"class: {train_dataset1.classes}")
import matplotlib.pyplot as plt

img, label = train_dataset1[0]
plt.imshow(img)
plt.title(train_dataset1.classes[label])
# from genericpath import exists
# import os
# import csv
# import shutil
# root_path = "/content/drive/MyDrive/aihub30754" # top-level folder containing the data
# metadata_train_path = os.path.join(root_path, "train/train.csv") # path to the metadata file
# print(metadata_train_path)
# os.makedirs(os.path.join(root_path, "train/image_with_class"), exist_ok=True)
# # read the csv file (reader) and iterate over the rows one by one (the for loop)
# with open(metadata_train_path, "r") as f:
# reader = csv.reader(f)
# for idx, row in enumerate(reader):
# if idx == 0:
# # the first row is the header describing the columns
# continue
# if not os.path.exists(os.path.join(root_path, f"train/image_with_class/{row[-1]}")):
# os.makedirs(os.path.join(root_path, f"train/image_with_class/{row[-1]}"), exist_ok=True)
# shutil.copyfile(os.path.join(root_path, f"train/images/{row[0]}"), os.path.join(root_path, f"train/image_with_class/{row[-1]}/{row[0]}"))
import matplotlib.pyplot as plt
from torchvision.datasets import ImageFolder
root_path = "/content/drive/MyDrive/aihub30754"
train_path = os.path.join(root_path, "train/image_with_class")
train_dataset2 = ImageFolder(train_path)
print(f"# of dataset: {len(train_dataset2)}")
print(f"class: {train_dataset2.classes}")
img, label = train_dataset2[0]
plt.imshow(img)
plt.title(train_dataset2.classes[label])
```
```
import os
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['mathtext.fontset'] = 'stix'
```
Load 200ns Aib9 trajectory
```
infile = '../../DATA/Train/AIB9/sum_phi_200ns.npy'
input_x = np.load(infile)
bins=np.arange(-15., 17, 1)
num_bins=len(bins)
idx_200ns=np.digitize(input_x, bins)
di=1
N_mean=np.sum(np.abs(idx_200ns[:-di]-idx_200ns[di:])==1)
N_mean/=len(idx_200ns)
N0=len(np.where(idx_200ns<=15)[0])
N1=len(np.where(idx_200ns>=16)[0])
kappa_in = N0/N1
print('kappa:', kappa_in)
print('Nearest neighbor:', N_mean)
```
Check 100ns Aib9 trajectory
```
idx_100ns = idx_200ns[:2000000]
di=1
N_mean=np.sum(np.abs(idx_100ns[:-di]-idx_100ns[di:])==1)
N_mean/=len(idx_100ns)
N0=len(np.where(idx_100ns<=15)[0])
N1=len(np.where(idx_100ns>=16)[0])
kappa_in = N0/N1
print('kappa:', kappa_in)
print('Nearest neighbor:', N_mean)
```
# Calculate Nearest neighbor $\langle N\rangle$ sampled from the first training
In the first training, we let 800 independent LSTMs predict 800 trajectories of 100 ns. Since we are using the LSTM as a generative model, we could also train just one LSTM and use it to generate 800 predictions, starting from either the same initial condition or different initial conditions.
Data location: `./Output/`
```
N_mean_list=[]
output_dir='./Output'
for i in range(800):
pred_dir=os.path.join(output_dir, '{}/prediction.npy'.format(i))
prediction=np.load(pred_dir)
di=1
N_mean=np.sum(np.abs(prediction[:-di]-prediction[di:])==1)
N_mean/=len(prediction)
N_mean_list.append(N_mean)
N_mean_arr=np.array(N_mean_list)
```
Plot distribution
```
hist = np.histogram( N_mean_arr, bins=50 )
prob = hist[0].T
mids = 0.5*(hist[1][1:]+hist[1][:-1])
fig, ax = plt.subplots(figsize=(5,4))
ax.set_title('Distribution', size=20)
ax.plot(mids, prob)
ax.tick_params(axis='both', which='both', direction='in', labelsize=14)
ax.set_xlabel('$\langle N\\rangle$', size=16)
ax.set_ylabel('Counts', size=16)
plt.show()
```
# Determine $\Delta\lambda$
Following the reference, we want to solve the following equation for $\Delta\lambda$
\begin{align}
\bar{s}^{(j)}_2&=\sum_{\Gamma}P^{(2)}_{\Gamma}s^{(j)}_{\Gamma} \nonumber \\
&=\frac{\sum_{k\in\Omega} s^{(j)}_k e^{-\Delta\lambda_j s^{(j)}_k} }{\sum_{k\in\Omega} e^{-\Delta\lambda_j s^{(j)}_k}} \\
&=f(\Delta\lambda)
\label{eq:lambda_solver}
\end{align}
To determine the $\Delta\lambda$ value, we can calculate the above equation and plot it versus $\Delta\lambda$, and find $\Delta\lambda=\Delta\lambda_{\ast}$ which gives
\begin{align}
\bar{s}^{(j)}_2=f(\Delta\lambda_{\ast})=s^{\rm target}
\end{align}
We would also like to predict the variance of $\bar{s}^{(j)}_2$, which can be obtained through the derivative of $f$.
\begin{align}
f'(\lambda)=-\sigma^2_{\bar{s}^{(j)}_2}
\end{align}
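For completeness, carrying out the differentiation (dropping the superscript $(j)$ for brevity) gives
\begin{align}
f'(\lambda) = \left(\frac{\sum_{k} s_k e^{-\lambda s_k}}{\sum_{k} e^{-\lambda s_k}}\right)^2 - \frac{\sum_{k} s_k^2 e^{-\lambda s_k}}{\sum_{k} e^{-\lambda s_k}} = \langle s\rangle_{\lambda}^2 - \langle s^2\rangle_{\lambda},
\end{align}
i.e. minus the variance of $s$ under the reweighted distribution. This is exactly what the `df` function in the code below computes; dividing by the subset size then gives the expected variance of the subset mean.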
### $s = \langle N\rangle$
```
def f(lm):
return np.sum(N_mean_arr*np.exp(-lm*N_mean_arr))/np.sum(np.exp(-lm*N_mean_arr))
def df(lm):
return f(lm)**2-np.sum(N_mean_arr*N_mean_arr*np.exp(-lm*N_mean_arr))/np.sum(np.exp(-lm*N_mean_arr))
lm_arr = np.linspace(0,1000)
f_arr = [f(lm_i) for lm_i in lm_arr]
fig, ax=plt.subplots(figsize=(5,3))
ax.plot(lm_arr, f_arr, label='$f$')
ax.tick_params(axis='both', which='both', direction='in', labelsize=14)
ax.set_xlabel('$\lambda$', size=16)
ax.set_ylabel('$f(\lambda)$', size=16)
ax.legend(fontsize=16)
plt.show()
lm=62
print( 'f({:.1f}) = {:.6f}'.format(lm, f(lm)) )
print( 'Standard error stderr[f(0)]={:.6f}'.format(np.std(N_mean_arr)/np.sqrt(len(N_mean_arr))) )
print( 'df({:.1f}) = {:.6f}'.format(lm, df(lm)) )
print( 'Expected standard error for new N_mean = {:.6f}'.format( np.sqrt(-df(lm))/np.sqrt(10) ) )
```
Let's see if selecting 10 predictions to build the subset is enough.
```
lm_ast=62
p=np.exp(-lm_ast*(N_mean_arr))
p/=np.sum(p)
subset_mean_arr = []
subset_stdv_arr = []
for i in range(200):
idx = np.random.choice(len(N_mean_arr), 10, p=p)
selected = N_mean_arr[idx]
mean=np.mean(selected)
stdv=np.std(selected)/np.sqrt(len(selected))
subset_mean_arr.append(mean)
subset_stdv_arr.append(stdv)
fig, ax = plt.subplots(figsize=(12,5), nrows=1, ncols=2)
ax[0].plot(subset_mean_arr)
ax[0].plot(np.arange(len(subset_mean_arr)), [0.38]*len(subset_mean_arr))
ax[1].plot(subset_stdv_arr)
ax[1].plot(np.arange(len(subset_stdv_arr)), [0.004]*len(subset_stdv_arr))
ax[0].tick_params(axis='both', which='both', direction='in', labelsize=16)
ax[0].set_xlabel('indices', size=16)
ax[0].set_ylabel('$\langle N\\rangle$', size=16)
ax[0].set_ylim(0.3,0.5)
ax[1].tick_params(axis='both', which='both', direction='in', labelsize=16)
ax[1].set_xlabel('indices', size=16)
ax[1].set_ylabel('$\sigma_{N}$', size=16)
ax[1].set_ylim(0.0,0.01)
plt.show()
```
So we will constrain our $\langle N\rangle$ to 0.38 with a standard error of 0.0041. Although we have shown above that a subset size of 10 is sufficient, there can still be variance in the subset mean. Therefore, we also repeat the sampling until the subset reaches a reasonable value of $\langle N\rangle$.
```
lm_ast=62
p=np.exp(-lm_ast*(N_mean_arr))
p/=np.sum(p)
mean=np.inf
stdv=np.inf
while abs(mean-0.380)>0.001 or abs(stdv-0.0041)>0.0001:
idx = np.random.choice(len(N_mean_arr), 10, p=p)
selected = N_mean_arr[idx]
mean=np.mean(selected)
stdv=np.std(selected)/np.sqrt(len(selected))
print( 'mean of selected sample = {:.3f}'.format(np.mean(selected)) )
print( 'Standard error stderr[selected sample] = {:.3f}'.format(np.std(selected)/np.sqrt(len(selected))) )
for ni in N_mean_arr[idx]:
    print('{:.3f}'.format(ni))  # <N> of each selected prediction
```
# Concatenate subset as a new training set
Concatenate the subset to a single trajectory, this concatenated trajectory is then used later to re-train a new LSTM.
```
conc=[]
output_dir='./Output'
for i in idx:
pred_dir=os.path.join(output_dir, '{}/prediction.npy'.format(i))
prediction=np.load(pred_dir)
conc.extend(prediction)
conc = np.array(conc)
```
We can also check the $\langle N\rangle$ and $\kappa$ values of the concatenated trajectory.
```
N0=len(np.where(conc<=15)[0])
N1=len(np.where(conc>=16)[0])
kappa_conc = N0/N1
di=1
N_mean_conc=np.sum(np.abs(conc[:-di]-conc[di:])==1)
N_mean_conc/=len(conc)
print('kappa:', kappa_conc)
print('Nearest neighbor:', N_mean_conc)
```
# Workshop 4: cartopy and best practices
# Part II: Best Practices
Here I dump my entire accumulated wisdom upon you, not so much hoping that you know it all by the end, but that you know of the concepts and know what to search for. I realize that many lessons will be learned the hard way.
## 1. Technical tips
### jupyter lab tips
| shortcut | effect |
|--- |--- |
| `Option/Alt` + drag | multi-line selection |
| `Ctrl/Cmd` + `X`/`C`/`V` | cut/copy/paste lines|
| `Cmd` + `?`| comment out line |
### Linux and SSH
The science faculty has its own cluster: `gemini.science.uu.nl`. There you can perform heavier, longer computations.
You can tunnel into the cluster by typing
```
ssh (your_solis_id)@gemini.science.uu.nl
```
you are then prompted for your password.
Note that working over `ssh` is not a very stable connection: sooner or later you will run into a `broken pipe` error, and all active commands are interrupted if your connection is severed. One way around this is to use the `screen` functionality, as sketched below.
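A minimal `screen` workflow looks like this (the session name is just an example):
```
screen -S mysession      # start a new named session
# ... start your long-running command inside the session ...
# detach with: Ctrl + A, then D
screen -ls               # list running sessions
screen -r mysession      # re-attach later, even after a dropped connection
```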
Many advanced text editors let you work remotely. For example, Visual Studio Code has a `Remote-SSH` extension that lets you work on the remote machine as if it were local.
Some useful commands when navigating the terminal and submitting jobs on gemini.
| command | effect |
| --- | --- |
| `pwd` | print working directory, where are you in the file system|
| `cd` | change directory, without: to home directory; `..` for level up|
| `ls` | list content of folder |
| `cp`/`mv` | copy/move file |
| `rm` | remove file (CAUTION: this is permanent) |
| `grep (word) (file)` | search for `word` in `file` |
| `touch (file)` | create empty `file` |
| `top` | monitor processes and system resources |
| `Up(arrow)` | previous command |
| `Ctrl + C` | cancel |
| `Ctrl + R` | search command history |
| `qsub (your_job.sh)` | submits `your_job.sh` bash script to queue* |
| `qstat` | check on your jobs in the batch queue |
| `qdel (job-id)` | deletes job with id `job-id` in queue |
\* There are 48 job slots (12 nodes with 4 cores) with 4Gb of memory per core
### Project organization
Once your projects contain more than a few results (e.g. for SOAC and your thesis), it is worthwhile organizing. A common structure has proven useful for most cases:
```
| project_name
README (markdown or simple text file describing your project and its structure)
| data (when working with big data this may be external to the project folder)
| raw_data (never touch these files)
| processed (derived files)
| doc (includes `requirements.txt` with python environment description)
| src (all your [well-documented] code: .py, .ipynb, ... files)
| results (all figures, maybe in subfolders)
```
A well organized project helps your current and future self as well as anyone else looking at the results. It is thus an important step for reproducibility.
### colormaps
Be conscious about the colors and colormaps you use! Colors can hide or emphasize data, which can be used to improve your presentation. Read this [short blog post](https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/) and learn why the `jet`colormap is bad.
A main consideration for the accessibility of your results is color blindness, which affects quite a few people. See [ColorBrewer](https://colorbrewer2.org) for colors that work well together.
The `cmocean` package adds some [beautiful, well-designed colormaps for oceanography](https://matplotlib.org/cmocean/) to the standard matplotlib colormaps.
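As a small example, a perceptually uniform `cmocean` colormap can be used as a drop-in replacement for `jet` (assuming the package is installed):
```
import numpy as np
import matplotlib.pyplot as plt
import cmocean

data = np.random.rand(20, 20)
plt.pcolormesh(data, cmap=cmocean.cm.thermal)  # instead of cmap='jet'
plt.colorbar()
plt.show()
```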
## 2. Fundamental programming guidelines
### Understanding Python error messages
Understanding Python errors can be daunting at first, especially if the messages are very long. But don't despair: after some practice you become better at interpreting them and will find them helpful in pinning down the problem. The general idea is that Python shows you the steps from the line where you called the offending code all the way down to the line in the file that raised the error. Often, the most important part of the error message is located at the end.
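For example, the tiny snippet below produces a traceback: the lines at the top show the chain of calls that led to the problem, and the last line names the exception and the reason.
```
def add_one(x):
    return x + 1

add_one("3")   # raises TypeError: can only concatenate str (not "int") to str
```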
### DRY: Don't repeat yourself
It is almost always a sign of bad programming if you have to repeat a line several times. It clutters the code and makes the code harder to maintain.
### simplify code
Instead of writing one huge function, __break your functions down into logical component functions__. This will save you many headaches when hunting for bugs.
### coding style
Python is very forgiving towards your code writing style. Just because it runs without errors does not mean it is well written, though.
How to write good, readable python code is laid out in the __[PEP8 Style Guide for Python Code](https://pep8.org/)__. Read it and try to adhere to it.
### reuse code
Once you have iterated to stable code (and you want to share it across jupyter notebooks), you should put it in a separate `.py` file. You can __import functions from `.py` files__ simply as `from file import function`.
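For example, if you collect helper functions in a file `my_tools.py` (a made-up name) placed next to your notebook, you can reuse them directly:
```
from my_tools import my_function   # my_tools.py defines my_function

result = my_function(42)
```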
### Defensive programming
Defensive programming is a programming philosophy that tries to guard against errors and minimize the time spent on solving bugs. The fundamental idea is that of __unit testing__: you break the code into small steps (functions) and then test whether they give the expected (known) results for simple test cases. One writes a test that calls a function with known input and checks it against the known output. Unit testing can be automated, and this is known as "continuous integration" (CI), which is integrated in GitHub, for example.
This approach works well for traditional software development with fixed goals, but it is not always suited to scientific programming as the goals shift with new knowledge.
However, the principle of defensive programming is still very valuable. A __simple and easy-to-implement version of this defensive philosophy__ is to use the `assert` statement often (this is not exactly unit testing). It checks whether a statement is true and otherwise raises an error with a custom message.
```
import numpy as np
def calc_circumference(radius):
""" simple example function to calculate circle cirumference """
return 2*np.pi*radius
def calc_circumference2(radius):
""" simple example function to calculate circle cirumference """
assert type(radius) in [float, int], 'radius must be a number'
return 2*np.pi*radius
# this works as expected
calc_circumference(1)
# this does not work and Python tells us why with its own error message
calc_circumference('hello')
# this does not work and our message tells us why
calc_circumference2('hello')
```
### Back-up
__Always back up your code and data!__ There is nothing more frustrating than having to rewrite code after you dropped your laptop or something crashed. Cloud services like _[SURFdrive](https://surfdrive.surf.nl)_ or Dropbox/OneDrive make this very easy. The advantage here is that it is __automated__ and does not rely on you remembering that you need to back up.
### Version control
Do you know this?

There is a better way: version control.
Version control systems start with a base version of the document and then record changes you make each step of the way. You can think of it as a recording of your progress: you can rewind to start at the base document and play back each change you made, eventually arriving at your more recent version.

Once you think of changes as separate from the document itself, you can then think about “playing back” different sets of changes on the base document, ultimately resulting in different versions of that document. For example, two users can make independent sets of changes on the same document - these changes can be organized into separate “branches”, or groupings of work that can be shared.

Unless there are conflicts, you can even incorporate two sets of changes into the same base document; this is called a "merge".

__Key points:__
- Version control is like an unlimited ‘undo’.
- Version control also allows many people to work in parallel.
- version control works well for human-readable files (e.g. .py, .txt, .tex), but not binary files (e.g. .docx, .png, ...) because it does line-by-line comparison.
`git` is one implementation of a distributed version control system. `GitHub` is a company that lets you host repositories (version controlled folders) online. Everyone can create a free GitHub account, and as a student you can create a free Pro account.
The use of `git` and GitHub requires its own tutorial, as there is a small up-front learning cost before you benefit from it; a minimal taste is sketched below. Much of the information in this section was taken from the [Software Carpentry tutorial](https://osulp.github.io/git-beginner/), which I recommend.
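As that small taste (the repository URL and file name are placeholders), the everyday `git` cycle boils down to a handful of commands:
```
git clone https://github.com/username/project.git   # get a copy of an existing repository
git status                                          # see which files changed
git add analysis.py                                 # stage a changed file
git commit -m "describe what you changed"           # record the change
git push                                            # upload your commits to GitHub
git pull                                            # fetch and merge changes made by others
```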
## 3. Open science, open access, reproducibility
### Open Science
From the [Open Science Wikipedia article](https://en.wikipedia.org/wiki/Open_science):
> Open science is the movement to make scientific research (including publications, data, physical samples, and software) and its dissemination accessible to all levels of an inquiring society, amateur or professional.
The fundamental idea is that anyone should be able to see all the steps by which you arrived at certain conclusions. In our context this specifically refers to making the (documented!) code available.
At Utrecht University, there is a society dedicated to Open Science: [Open Science Community Utrecht](https://openscience-utrecht.com/).
### Open access
The traditional publishing business model has been to charge readers for access to articles. This is less than ideal, as the public pays for the research and the results are behind a paywall. This hinders knowledge transfer and there is a growing movement to open access (i.e. make it free) to scientific knowledge.
To publish open access usually costs money, as the publishers cannot earn money with selling the articles/journals. __The Dutch Universities have agreements with all major publishers to cover open access fees.__ Use this!
### Licenses
If you want to reuse any online content you must check for the license. If there is no license, you are legally not allowed to use it. This is why it's important that you include a license with your code if you want others to reuse it.
### reproducible code
When you publish your results (as a thesis or paper) you must __ensure that your code can reproduce all the results__. In jupyter notebooks you should check whether it runs completely from start to finish __without error__. The code must be documented. Ultimately, the clean version of __your code (and if possible raw data) should be uploaded to a permanent repository__ such as [UU's own Yoda system](https://www.uu.nl/en/research/yoda), [Zenodo](https://zenodo.org/), or [figshare](https://figshare.com/). It can then receive a __digital object identifier (DOI)__, should be cited in your paper and __can be cited by others__.
### virtual environments
Virtual environments are custom python environments, with specific packages installed. So far you have likely used the `root` environment of your conda installation. This is fine for your course work. With conda you can easily create new environments in the GUI or the command line as such:
```
conda create -n my_new_env python
```
where `my_new_env` is the name of the environment. In the command line you would activate this environment with `conda activate my_new_env`. You will then see that name in parentheses in front of your prompt.
In general, it is advised to create a new environment for every major project (like a thesis or a particular paper). This ensures that you know which packages + versions you used to do your calculations. You can then __export a list of all the packages used__ at the end of your project and save it with the rest of the code. Only this __ensures reproducibility__.
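Exporting the environment at the end of a project is a single command; the resulting file belongs with the rest of your project documentation (e.g. the `doc` folder mentioned above):
```
conda activate my_new_env
conda env export > environment.yml   # full, reproducible description of the environment
```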
You can view a list of your environments directly in the Anaconda GUI or by typing `conda info --envs`.
# Carvana Image Masking Challenge
https://www.kaggle.com/c/carvana-image-masking-challenge
```
IMG_ROWS = 480
IMG_COLS = 320
TEST_IMG_ROWS = 1918
TEST_IMG_COLS = 1280
```
## Loading the source images
```
import cv2
import numpy as np
from scipy import ndimage
from glob import glob
SAMPLE = 5000
train_img_paths = sorted(glob('./data/train/*.jpg'))[:SAMPLE]
train_mask_paths = sorted(glob('./data/train_masks/*.gif'))[:SAMPLE]
train_imgs = np.array([cv2.resize(ndimage.imread(path), (IMG_ROWS, IMG_COLS))
for path in train_img_paths])
train_masks = np.array([cv2.resize(ndimage.imread(path, mode = 'L'), (IMG_ROWS, IMG_COLS))
for path in train_mask_paths])
train_masks = train_masks.astype(np.float32)
train_masks[train_masks<=127] = 0.
train_masks[train_masks>127] = 1.
train_masks = np.reshape(train_masks, (*train_masks.shape, 1))
%matplotlib inline
from matplotlib import pyplot as plt
fig = plt.figure(0, figsize=(20, 20))
fig.add_subplot(1, 2, 1)
plt.imshow(train_imgs[0])
fig.add_subplot(1, 2, 2)
plt.imshow(np.squeeze(train_masks[0]), cmap='gray')
```
## Initializing the U-Net architecture
```
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Conv2DTranspose
from keras.layers import BatchNormalization
from keras.layers import concatenate
from keras.models import Model
inputs = Input((IMG_COLS, IMG_ROWS, 3))
bnorm1 = BatchNormalization()(inputs)
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(bnorm1)
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
model = Model(inputs=[inputs], outputs=[conv10])
model.summary()
```
## Defining the loss function
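The loss below combines binary cross-entropy with a smoothed Dice coefficient, which measures the overlap between the predicted mask $P$ and the ground-truth mask $G$ (the symbols are just notation for this note):

$$ \mathrm{Dice}(P, G) = \frac{2\,|P \cap G| + s}{|P| + |G| + s} $$

where $s$ is a small smoothing constant (`SMOOTH` in the code) that avoids division by zero. Minimizing $0.5\,\mathrm{BCE} - \mathrm{Dice}$ therefore pushes the predicted mask towards maximal overlap with the ground truth.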
```
from keras import backend as K
from keras.losses import binary_crossentropy
SMOOTH = 1.
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + SMOOTH) / (K.sum(y_true_f) + K.sum(y_pred_f) + SMOOTH)
def bce_dice_loss(y_true, y_pred):
return 0.5 * binary_crossentropy(y_true, y_pred) - dice_coef(y_true, y_pred)
```
## Running the training process
```
from keras.optimizers import Adam
model.compile(Adam(lr=1e-4),
bce_dice_loss,
metrics=[binary_crossentropy, dice_coef])
model.fit(train_imgs[50:], train_masks[50:],
batch_size=12, epochs=10,
validation_data=(train_imgs[:50], train_masks[:50]))
```
## Model prediction
```
test_paths = sorted(glob('./data/test/*.jpg'))
def test_img_generator(test_paths):
while True:
for path in test_paths:
yield np.array([cv2.resize(ndimage.imread(path), (IMG_ROWS, IMG_COLS))])
pred = model.predict_generator(test_img_generator(test_paths[:10]), len(test_paths[:10]))
```
## Visualizing the result
```
fig = plt.figure(0, figsize=(20, 10))
k = 5
fig.add_subplot(2, 2, 1)
plt.imshow(ndimage.imread(test_paths[k]))
fig.add_subplot(2, 2, 2)
plt.imshow(np.squeeze(cv2.resize(pred[k], (TEST_IMG_ROWS, TEST_IMG_COLS))), cmap='gray')
fig.add_subplot(2, 2, 3)
plt.imshow(ndimage.imread(test_paths[k+1]))
fig.add_subplot(2, 2, 4)
plt.imshow(np.squeeze(cv2.resize(pred[k+1], (TEST_IMG_ROWS, TEST_IMG_COLS))), cmap='gray')
```
## Preparing the data for submission
```
def rle_encode(mask):
pixels = mask.flatten()
pixels[0] = 0
pixels[-1] = 0
runs = np.where(pixels[1:] != pixels[:-1])[0] + 2
runs[1::2] = runs[1::2] - runs[:-1:2]
return runs
with open('submit.txt', 'w') as dst:
dst.write('img,rle_mask\n')
for path in test_paths:
img = np.array([cv2.resize(ndimage.imread(path), (IMG_ROWS, IMG_COLS))])
pred_mask = model.predict(img)[0]
bin_mask = 255. * cv2.resize(pred_mask, (TEST_IMG_ROWS, TEST_IMG_COLS))
bin_mask[bin_mask<=127] = 0
bin_mask[bin_mask>127] = 1
rle = rle_encode(bin_mask.astype(np.uint8))
rle = ' '.join(str(x) for x in rle)
dst.write('%s,%s\n' % (path.split('/')[-1], rle))
# 20 epochs
# loss: -0.9891 - binary_crossentropy: 0.0077 - dice_coef: 0.9930
# val_loss: -0.9889 - val_binary_crossentropy: 0.0085 - val_dice_coef: 0.9932
# kaggle: 0.9926
```
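The `rle_encode` helper above produces alternating (start, length) pairs with 1-based pixel positions. A quick sanity check on a toy mask (not part of the original notebook):
```
toy_mask = np.array([0, 0, 1, 1, 1, 0, 0, 1, 0, 0], dtype=np.uint8)
print(rle_encode(toy_mask))   # [3 3 8 1]: a run of 3 ones starting at pixel 3, a run of 1 at pixel 8
```
Note that the encoder forces the first and last pixel to background, so a run touching the image border gets clipped by one pixel.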
# Classification
```
import os
import numpy as np
from sklearn.datasets import make_moons, make_circles, make_classification
import itertools
import numpy as np
import matplotlib.pyplot as plt
# make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
def figsize(x, y):
# Get current size
fig_size = plt.rcParams["figure.figsize"]
# Prints: [8.0, 6.0]
fig_size[0] = x
fig_size[1] = y
plt.rcParams["figure.figsize"] = fig_size
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
(X, y)]
def plot_classification(name, clf, X, y, cmap):
score = clf.score(X, y)
h = 0.2
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=cmap, alpha=.8)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Greys)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title(name + " - Score %.2f" % score)
def plot_multi_class(name, clf, X, y, cmap=plt.cm.PRGn):
score = clf.score(X, y)
h = 0.2
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=cmap, alpha=.8)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Greys)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title(name + " - Score %.2f" % score)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
```
Consider the following randomly generated data:
```
figsize(14, 5)
for i, (X, y) in enumerate(datasets):
plt.subplot(1,3,i+1)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Greys)
```
We would like to create a **classifier** able to appropriately **separate** the two classes and correctly classify new inputs.
## Solution using Support Vector Machines (SVM)
```
from sklearn.svm import SVC
svc = SVC(kernel='linear')
X, y = datasets[0]
svc.fit(X, y)
figsize(8,8)
plot_classification('SVC linear', svc, X, y, plt.cm.PRGn)
figsize(15, 5)
for dataset_idx, (X, y) in enumerate(datasets):
plt.subplot(1, 3, dataset_idx+1)
svc.fit(X, y)
plot_classification('SVC linear', svc, X, y, plt.cm.PRGn)
svc = SVC(kernel='poly', degree=3)
for dataset_idx, (X, y) in enumerate(datasets):
plt.subplot(1, 3, dataset_idx+1)
svc.fit(X, y)
plot_classification('SVC Polynomial', svc, X, y, plt.cm.PRGn)
svc = SVC(kernel='rbf')
for dataset_idx, (X, y) in enumerate(datasets):
plt.subplot(1, 3, dataset_idx+1)
svc.fit(X, y)
plot_classification('SVC RBF', svc, X, y, plt.cm.PRGn)
```
# *Iris* exercise
- 50 samples from 3 different iris species (150 samples in total)
- Measurements: sepal length, sepal width, petal length, petal width

## Machine learning on the iris dataset
Framed as a **supervised learning** problem: predict the species of an iris from its measurements.
- Famous machine-learning dataset because the prediction is **easy**
- Learn more about the iris dataset: [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/datasets/Iris)
- Each row is an **observation** (also known as: example, sample)
- Each column is a **feature** (also known as: predictor, attribute, independent variable)
- Each value we are predicting is the response (also known as: target, outcome, label, dependent variable)
- Classification is supervised learning in which the response is categorical
- Regression is supervised learning in which the response is ordered and continuous
```
from IPython.display import IFrame
IFrame('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', width=300, height=200)
from sklearn.datasets import load_iris
iris = load_iris()
print(type(iris))
print(iris.DESCR)
print(iris.data)
print(type(iris.data))
print(type(iris.target))
print(iris.data.shape)
print(iris.target.shape)
X = iris.data # Features
y = iris.target # Labels
figsize(8,8)
plt.scatter(X[:,0], X[:,1], c=y)
```
## Exercise:
Create a classifier able to separate the 3 plant classes.
```
figsize(6,6)
plt.scatter(X[:,0], X[:,1], c=y)
figsize(6,6)
plt.scatter(X[:,2], X[:,3], c=y)
figsize(6,6)
plt.scatter(X[:,0], X[:,2], c=y)
np.random.seed(42)
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
train_features, test_features, train_labels, test_labels = train_test_split(X, y, test_size=0.2)
target_names = iris.target_names
print(iris.target_names)
print(test_features[:2])
print(train_features[:2])
print(test_labels[:10])
print(train_labels[:10])
classifier_svm = SVC()
classifier_svm.fit(train_features, train_labels)
svm_labels = classifier_svm.predict(test_features)
```
Report the validation metrics for this classifier: confusion matrix, precision/recall, F1, and ROC (a sketch for the ROC computation is added after the code below).
```
classifier_svm.score(test_features, test_labels)
confusion_mat = confusion_matrix(test_labels, svm_labels)
confusion_mat
figsize(4, 4)
plot_confusion_matrix(confusion_mat, target_names)
print(classification_report(test_labels, svm_labels, target_names=target_names))
```
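The exercise also asks for ROC. Since the default `SVC` does not expose class probabilities, one possible approach (a sketch, not part of the original solution; `classifier_proba` is a new name introduced here) is to refit with `probability=True` and compute a one-vs-rest ROC AUC:
```
from sklearn.metrics import roc_auc_score

# Refit the SVM with probability estimates enabled (uses internal cross-validation)
classifier_proba = SVC(probability=True)
classifier_proba.fit(train_features, train_labels)
probs = classifier_proba.predict_proba(test_features)

# Macro-averaged one-vs-rest ROC AUC over the three classes
print("ROC AUC (OvR, macro): %.3f" % roc_auc_score(test_labels, probs, multi_class='ovr'))
```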
# GLM: Robust Linear Regression
Author: [Thomas Wiecki](https://twitter.com/twiecki)
This tutorial first appeared as a post in a small series on Bayesian GLMs on my blog:
1. [The Inference Button: Bayesian GLMs made easy with PyMC3](http://twiecki.github.com/blog/2013/08/12/bayesian-glms-1/)
2. [This world is far from Normal(ly distributed): Robust Regression in PyMC3](http://twiecki.github.io/blog/2013/08/27/bayesian-glms-2/)
3. [The Best Of Both Worlds: Hierarchical Linear Regression in PyMC3](http://twiecki.github.io/blog/2014/03/17/bayesian-glms-3/)
In this blog post I will write about:
- How a few outliers can largely affect the fit of linear regression models.
- How replacing the normal likelihood with Student T distribution produces robust regression.
- How this can easily be done with `PyMC3` and its new `glm` module by passing a `family` object.
This is the second part of a series on Bayesian GLMs (click [here for part I about linear regression](http://twiecki.github.io/blog/2013/08/12/bayesian-glms-1/)). In this prior post I described how minimizing the squared distance of the regression line is the same as maximizing the likelihood of a Normal distribution with the mean coming from the regression line. This latter probabilistic expression allows us to easily formulate a Bayesian linear regression model.
This worked splendidly on simulated data. The problem with simulated data though is that it's, well, simulated. In the real world things tend to get more messy and assumptions like normality are easily violated by a few outliers.
Let's see what happens if we add some outliers to our simulated data from the last post.
Again, import our modules.
```
%matplotlib inline
import pymc3 as pm
import matplotlib.pyplot as plt
import numpy as np
import theano
```
Create some toy data but also add some outliers.
```
size = 100
true_intercept = 1
true_slope = 2
x = np.linspace(0, 1, size)
# y = a + b*x
true_regression_line = true_intercept + true_slope * x
# add noise
y = true_regression_line + np.random.normal(scale=.5, size=size)
# Add outliers
x_out = np.append(x, [.1, .15, .2])
y_out = np.append(y, [8, 6, 9])
data = dict(x=x_out, y=y_out)
```
Plot the data together with the true regression line (the three points in the upper left corner are the outliers we added).
```
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, xlabel='x', ylabel='y', title='Generated data and underlying model')
ax.plot(x_out, y_out, 'x', label='sampled data')
ax.plot(x, true_regression_line, label='true regression line', lw=2.)
plt.legend(loc=0);
```
## Robust Regression
Let's see what happens if we estimate our Bayesian linear regression model using the `glm()` function as before. This function takes a [`Patsy`](http://patsy.readthedocs.org/en/latest/quickstart.html) string to describe the linear model and adds a Normal likelihood by default.
```
with pm.Model() as model:
pm.glm.GLM.from_formula('y ~ x', data)
trace = pm.sample(2000, cores=2)
```
To evaluate the fit, I am plotting the posterior predictive regression lines by taking regression parameters from the posterior distribution and plotting a regression line for each (this is all done inside of `plot_posterior_predictive()`).
```
plt.figure(figsize=(7, 5))
plt.plot(x_out, y_out, 'x', label='data')
pm.plot_posterior_predictive_glm(trace, samples=100,
label='posterior predictive regression lines')
plt.plot(x, true_regression_line,
label='true regression line', lw=3., c='y')
plt.legend(loc=0);
```
As you can see, the fit is quite skewed and we have a fair amount of uncertainty in our estimate as indicated by the wide range of different posterior predictive regression lines. Why is this? The reason is that the normal distribution does not have a lot of mass in the tails and consequently, an outlier will affect the fit strongly.
A Frequentist would estimate a [Robust Regression](http://en.wikipedia.org/wiki/Robust_regression) and use a non-quadratic distance measure to evaluate the fit.
But what's a Bayesian to do? Since the problem is the light tails of the Normal distribution we can instead assume that our data is not normally distributed but instead distributed according to the [Student T distribution](http://en.wikipedia.org/wiki/Student%27s_t-distribution) which has heavier tails as shown next (I read about this trick in ["The Kruschke"](http://www.indiana.edu/~kruschke/DoingBayesianDataAnalysis/), aka the puppy-book; but I think [Gelman](http://www.stat.columbia.edu/~gelman/book/) was the first to formulate this).
Let's look at those two distributions to get a feel for them.
```
normal_dist = pm.Normal.dist(mu=0, sd=1)
t_dist = pm.StudentT.dist(mu=0, lam=1, nu=1)
x_eval = np.linspace(-8, 8, 300)
plt.plot(x_eval, theano.tensor.exp(normal_dist.logp(x_eval)).eval(), label='Normal', lw=2.)
plt.plot(x_eval, theano.tensor.exp(t_dist.logp(x_eval)).eval(), label='Student T', lw=2.)
plt.xlabel('x')
plt.ylabel('Probability density')
plt.legend();
```
As you can see, values far away from the mean (0 in this case) are much more likely under the `T` distribution than under the Normal distribution.
To define the usage of a T distribution in `PyMC3` we can pass a family object -- `T` -- that specifies that our data is Student T-distributed (see `glm.families` for more choices). Note that this is the same syntax as `R` and `statsmodels` use.
```
with pm.Model() as model_robust:
family = pm.glm.families.StudentT()
pm.glm.GLM.from_formula('y ~ x', data, family=family)
trace_robust = pm.sample(2000, cores=2)
plt.figure(figsize=(7, 5))
plt.plot(x_out, y_out, 'x')
pm.plot_posterior_predictive_glm(trace_robust,
label='posterior predictive regression lines')
plt.plot(x, true_regression_line,
label='true regression line', lw=3., c='y')
plt.legend();
```
There, much better! The outliers are barely influencing our estimation at all because our likelihood function assumes that outliers are much more probable than under the Normal distribution.
## Summary
- `PyMC3`'s `glm()` function allows you to pass in a `family` object that contains information about the likelihood.
- By changing the likelihood from a Normal distribution to a Student T distribution -- which has more mass in the tails -- we can perform *Robust Regression*.
The next post will be about logistic regression in PyMC3 and what the posterior and oatmeal have in common.
*Extensions*:
- The Student-T distribution has, besides the mean and variance, a third parameter called *degrees of freedom* that describes how much mass should be put into the tails. Here it is set to 1, which gives maximum mass to the tails (setting this to infinity results in a Normal distribution!). One could easily place a prior on this rather than fixing it, which I leave as an exercise for the reader ;) (a minimal sketch follows after this list).
- T distributions can be used as priors as well. I will show this in a future post on hierarchical GLMs.
- How do we test if our data is normal or violates that assumption in an important way? Check out this [great blog post](http://allendowney.blogspot.com/2013/08/are-my-data-normal.html) by Allen Downey.
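For the first extension above (placing a prior on the degrees of freedom), here is a minimal sketch of my own. It writes the robust model out explicitly rather than going through `glm`, and the particular priors are assumptions rather than anything from the original post:
```
with pm.Model() as model_robust_nu:
    # Sketch only: explicit robust regression with a prior on the degrees of freedom.
    intercept = pm.Normal('intercept', 0, sd=10)
    slope = pm.Normal('slope', 0, sd=10)
    sigma = pm.HalfNormal('sigma', sd=1)
    # A shifted Exponential keeps nu > 1; the scale (29) is an arbitrary, weakly informative choice.
    nu = pm.Exponential('nu_minus_one', 1 / 29.) + 1
    mu = intercept + slope * x_out
    pm.StudentT('y', mu=mu, sd=sigma, nu=nu, observed=y_out)
    trace_robust_nu = pm.sample(2000, cores=2)
```
Sampling `nu` lets the data decide how heavy the tails need to be instead of fixing them in advance.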
Convolutional Dictionary Learning
=================================
This example demonstrates the use of [cbpdndl.ConvBPDNDictLearn](http://sporco.rtfd.org/en/latest/modules/sporco.dictlrn.cbpdndl.html#sporco.dictlrn.cbpdndl.ConvBPDNDictLearn) for learning a convolutional dictionary from a set of colour training images [[51]](http://sporco.rtfd.org/en/latest/zreferences.html#id54), using PGM solvers for both sparse coding [[13]](http://sporco.rtfd.org/en/latest/zreferences.html#id13) [[53]](http://sporco.rtfd.org/en/latest/zreferences.html#id56) and dictionary update steps [[26]](http://sporco.rtfd.org/en/latest/zreferences.html#id25).
```
from __future__ import print_function
from builtins import input
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco.dictlrn import cbpdndl
from sporco import util
from sporco import signal
from sporco import plot
plot.config_notebook_plotting()
from sporco.pgm.backtrack import BacktrackStandard
```
Load training images.
```
exim = util.ExampleImages(scaled=True, zoom=0.5)
img1 = exim.image('barbara.png', idxexp=np.s_[10:522, 100:612])
img2 = exim.image('kodim23.png', idxexp=np.s_[:, 60:572])
img3 = exim.image('monarch.png', idxexp=np.s_[:, 160:672])
S = np.stack((img1, img2, img3), axis=3)
```
Highpass filter training images.
```
npd = 16
fltlmbd = 5
sl, sh = signal.tikhonov_filter(S, fltlmbd, npd)
```
Construct initial dictionary.
```
np.random.seed(12345)
D0 = np.random.randn(16, 16, 3, 96)
```
Set regularization parameter and options for dictionary learning solver. Note the multi-scale dictionary filter sizes. Also note the possibility of changing parameters in the backtracking algorithm.
```
lmbda = 0.2
L_sc = 36.0
L_du = 50.0
dsz = ((8, 8, 3, 32), (12, 12, 3, 32), (16, 16, 3, 32))
opt = cbpdndl.ConvBPDNDictLearn.Options({
'Verbose': True, 'MaxMainIter': 200, 'DictSize': dsz,
'CBPDN': {'Backtrack': BacktrackStandard(gamma_u=1.1), 'L': L_sc},
'CCMOD': {'Backtrack': BacktrackStandard(), 'L': L_du}},
xmethod='pgm', dmethod='pgm')
```
Create solver object and solve.
```
d = cbpdndl.ConvBPDNDictLearn(D0, sh, lmbda, opt, xmethod='pgm',
dmethod='pgm')
D1 = d.solve()
print("ConvBPDNDictLearn solve time: %.2fs" % d.timer.elapsed('solve'))
```
Display initial and final dictionaries.
```
D1 = D1.squeeze()
fig = plot.figure(figsize=(14, 7))
plot.subplot(1, 2, 1)
plot.imview(util.tiledict(D0), title='D0', fig=fig)
plot.subplot(1, 2, 2)
plot.imview(util.tiledict(D1, dsz), title='D1', fig=fig)
fig.show()
```
Get iterations statistics from solver object and plot functional value, residuals, and automatically adjusted gradient step parameters against the iteration number.
```
its = d.getitstat()
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(1, 3, 2)
plot.plot(np.vstack((its.X_Rsdl, its.D_Rsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['X', 'D'], fig=fig)
plot.subplot(1, 3, 3)
plot.plot(np.vstack((its.X_L, its.D_L)).T, xlbl='Iterations',
ylbl='Inverse of Gradient Step Parameter', ptyp='semilogy',
lgnd=['$L_X$', '$L_D$'], fig=fig)
fig.show()
```
# Notebook for testing performance of Visual Recognition Custom Classifiers
[Watson Developer Cloud](https://www.ibm.com/watsondevelopercloud) is a platform of cognitive services that leverage machine learning techniques to help partners and clients solve a variety of business problems. Furthermore, several of the WDC services fall under the **supervised learning** suite of machine learning algorithms, that is, algorithms that learn by example. This raises the questions: "How many examples should we provide?" and "When is my solution ready for prime time?"
It is critical to understand that training a machine learning solution is an iterative process where it is important to continually improve the solution by providing new examples and measuring the performance of the trained solution. In this notebook, we show how you can compute important Machine Learning metrics (accuracy, precision, recall, confusion_matrix) to judge the performance of your solution. For more details on these various metrics, please consult the **[Is Your Chatbot Ready for Prime-Time?](https://developer.ibm.com/dwblog/2016/chatbot-cognitive-performance-metrics-accuracy-precision-recall-confusion-matrix/)** blog.
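As a quick reference, the standard definitions of these metrics (added here, not part of the original notebook) are:

$$\text{accuracy} = \frac{TP + TN}{TP + TN + FP + FN}, \qquad \text{precision} = \frac{TP}{TP + FP}, \qquad \text{recall} = \frac{TP}{TP + FN}, \qquad F_1 = \frac{2 \cdot \text{precision} \cdot \text{recall}}{\text{precision} + \text{recall}}$$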
<br> The notebook assumes you have already created a Watson [Visual Recognition](https://www.ibm.com/watson/developercloud/visual-recognition.html) instance and trained [custom classifiers](https://www.ibm.com/watson/developercloud/doc/visual-recognition/tutorial-custom-classifier.html). </br>
<br> To leverage this notebook, you need to provide the following information</br>
* Credentials for your Visual Recognition instance (apikey)
* id for your trained classifier (this is returned when you train your Visual Recognition custom classifier)
* csv file with your test images (paths to images on your local disk) and corresponding class labels
* results csv file to write the results to (true vs. predicted class labels)
* csv file to write confusion matrix to
Note that the input test csv file should have a header with the fields **image** and **class**.
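For illustration only, such a test csv could be assembled as follows; the file name, image paths, and class labels are hypothetical placeholders:
```
# Hypothetical example: build a test csv with the required 'image' and 'class' header.
import csv

rows = [
    {'image': '/data/test/beach_001.jpg', 'class': 'beach'},        # example path/label
    {'image': '/data/test/mountain_042.jpg', 'class': 'mountain'},  # example path/label
]
with open('vr_test_set.csv', 'w') as f:
    writer = csv.DictWriter(f, fieldnames=['image', 'class'])
    writer.writeheader()
    writer.writerows(rows)
```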
```
# Only run this the first time if pandas_ml is not installed on your machine
!pip install pandas_ml
# latest version of watson_developer_cloud (1.0.0) as of November 20, 2017
!pip install -I watson_developer_cloud==1.0.0
# previous version of watson_developer_cloud
#Import utilities
import json
import csv
import sys
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import pandas_ml
from pandas_ml import ConfusionMatrix
from watson_developer_cloud import VisualRecognitionV3
```
Provide the path to the parms file, which includes the credentials to access your VR service as well as the input
test csv file and the output csv files to write the results to.
```
# Sample parms file data
#{
# "url": "https://gateway-a.watsonplatform.net/visual-recognition/api",
# "apikey":"YOUR_VISUAL_RECOGNITION_APIKEY",
# "vr_id":"YOUR_VISUAL_RECOGNITION_CUSTOM_CLASSIFIER_ID",
# "test_csv_file": "COMPLETE_PATH_TO_YOUR_TEST_CSV_FILE",
# "results_csv_file": "COMPLETE PATH TO RESULTS FILE (any file you can write to)",
# "confmatrix_csv_file": "COMPLETE PATH TO CONFUSION MATRIX FILE (any file you can write to)"
#}
# Provide complete path to the file which includes all required parms
# A sample parms file is included (example_VR_parms.json)
vrParmsFile = 'COMPLETE PATH TO YOUR PARMS FILE'
parms = ''
with open(vrParmsFile) as parmFile:
parms = json.load(parmFile)
url=parms['url']
apikey=parms['apikey']
vr_id=parms['vr_id']
test_csv_file=parms['test_csv_file']
results_csv_file=parms['results_csv_file']
confmatrix_csv_file=parms['confmatrix_csv_file']
json.dumps(parms)
# Create an object for your Visual Recognition instance
visual_recognition = VisualRecognitionV3('2016-05-20', api_key=apikey)
```
Define useful methods to classify using custom VR classifier.
```
# Given an image and a pointer to VR instance and classifierID, get back VR response
def getVRresponse(vr_instance,classifierID,image_path):
with open(image_path, 'rb') as image_file:
parameters = json.dumps({'threshold':0.01, 'classifier_ids': [classifierID]})
#parameters = json.dumps({'threshold':0.01, 'classifier_ids': ['travel_1977348895','travel_2076475268','default']})
image_results = vr_instance.classify(images_file=image_file,
parameters = parameters)
# For our purposes, we assume each call is to classify one image
# Although the Visual Recognition classify endpoint accepts as input
# a .zip file, we need each image to be labeled with the correct class
classList = []
for classifier in image_results['images'][0]['classifiers']:
if classifier['classifier_id'] == vr_id:
classList = classifier['classes']
break
# Sort the returned classes by score
#print("classList: ", classList)
sorted_classList = sorted(classList, key=lambda k: k.get('score', 0), reverse=True)
#print("sortedList: ", sorted_classList)
return sorted_classList
# Process multiple images (provided via csv file) in batch. Effectively, read the csv file and, for each image,
# get the VR response. Aggregate and return results.
def batchVR(vr_instance,classifierID,csvfile):
test_classes=[]
vr_predict_classes=[]
vr_predict_confidence=[]
images=[]
i=0
with open(csvfile, 'r') as csvfile:
csvReader=csv.DictReader(csvfile)
for row in csvReader:
test_classes.append(row['class'])
vr_response = getVRresponse(vr_instance,classifierID,row['image'])
vr_predict_classes.append(vr_response[0]['class'])
vr_predict_confidence.append(vr_response[0]['score'])
images.append(row['image'])
i = i+1
if(i%250 == 0):
print("")
print("Processed ", i, " records")
if(i%10 == 0):
sys.stdout.write('.')
print("")
print("Finished processing ", i, " records")
return test_classes, vr_predict_classes, vr_predict_confidence, images
# Plot confusion matrix as an image
def plot_conf_matrix(conf_matrix):
plt.figure()
plt.imshow(conf_matrix)
plt.show()
# Print confusion matrix to a csv file
def confmatrix2csv(conf_matrix,labels,csvfile):
with open(csvfile, 'w') as csvfile:
csvWriter = csv.writer(csvfile)
row=list(labels)
row.insert(0,"")
csvWriter.writerow(row)
for i in range(conf_matrix.shape[0]):
row=list(conf_matrix[i])
row.insert(0,labels[i])
csvWriter.writerow(row)
# List of all custom classifiers in your visual recognition service
#print(json.dumps(visual_recognition.list_classifiers(), indent=2))
# This is an optional step to quickly test response from Visual Recognition for a given image
##testImage='COMPLETE PATH TO YOUR TEST IMAGE'
##classifierList = "'" + vr_id + "'" + "," + "'" + "default" + "'"
##results = getVRresponse(visual_recognition,vr_id,testImage)
##print(json.dumps(results, indent=2))
```
Call Visual Recognition on the specified csv file and collect results.
```
test_classes,vr_predict_classes,vr_predict_conf,images=batchVR(visual_recognition,vr_id,test_csv_file)
# print results to csv file including the image path, the correct label,
# the predicted label and the confidence reported by Visual Recognition.
csvfileOut=results_csv_file
with open(csvfileOut, 'w') as csvOut:
outrow=['image','true class','VR Predicted class','Confidence']
csvWriter = csv.writer(csvOut,dialect='excel')
csvWriter.writerow(outrow)
for i in range(len(images)):
outrow=[images[i],test_classes[i],vr_predict_classes[i],str(vr_predict_conf[i])]
csvWriter.writerow(outrow)
# Compute confusion matrix
labels=list(set(test_classes))
vr_confusion_matrix = confusion_matrix(test_classes, vr_predict_classes, labels)
vrConfMatrix = ConfusionMatrix(test_classes, vr_predict_classes)
# Print out confusion matrix with labels to csv file
confmatrix2csv(vr_confusion_matrix,labels,confmatrix_csv_file)
%matplotlib inline
vrConfMatrix.plot()
# Compute accuracy of classification
acc=accuracy_score(test_classes, vr_predict_classes)
print('Classification Accuracy: ', acc)
# print precision, recall and f1-scores for the different classes
print(classification_report(test_classes, vr_predict_classes, labels=labels))
#Optional if you would like each of these metrics separately
#[precision,recall,fscore,support]=precision_recall_fscore_support(test_classes, vr_predict_classes, labels=labels)
#print("precision: ", precision)
#print("recall: ", recall)
#print("f1 score: ", fscore)
#print("support: ", support)
```
<a href="https://colab.research.google.com/github/kareem1925/Ismailia-school-of-AI/blob/master/quantum_mnist_classification/Classifying_mnist_data_using_quantum_features.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
We will first install the Qulacs plugin with GPU support for PennyLane and then restart the runtime.
```
import os
!pip install git+https://github.com/kareem1925/pennylane-qulacs@GPU_support
os.kill(os.getpid(), 9)
```
Run the following command to make sure everything is working perfectly
```
import qulacs
qulacs.QuantumStateGpu
from pennylane import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder,OneHotEncoder,normalize,LabelBinarizer
from sklearn.utils import compute_sample_weight
import pennylane as qml
from sklearn.datasets import load_digits
import warnings
from sklearn.metrics import balanced_accuracy_score as acc
from pennylane.optimize import AdamOptimizer,AdagradOptimizer
np.seterr(all="ignore")
warnings.filterwarnings('ignore')
```
**Defining the log loss function along with softmax and accuracy**
```
# this function is taken from scikit-learn's metrics code and it has been
# modified to work with autograd's numpy
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
# transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
# print(loss)
return loss
def accuracy_score(y_true, y_pred):
"""
    This function computes the weighted average accuracy
"""
weights = compute_sample_weight('balanced',y_true)
return acc(y_true,y_pred,sample_weight=weights)
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
```
**Data loading and splitting**
```
X,y = load_digits(n_class=3,return_X_y=True)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.1,random_state=5,stratify=y)
la = OneHotEncoder(sparse=False).fit(y.reshape(-1,1))
y_train = la.transform(y_train.reshape(-1,1))
y_test = la.transform(y_test.reshape(-1,1))
y_test[:2]
```
**Defining the quantum circuit**
```
# initialize the device
dev = qml.device("qulacs.simulator", wires=7,shots=1000,analytic = True)
@qml.qnode(dev)
def qclassifier(weights, X=None):
# pennylane normalizes the input for us by setting normalize to True so no need for any preprocessing
qml.templates.AmplitudeEmbedding(X,wires=list(range(7)),pad=0.0,normalize=True)
    ### the following commented lines mimic the template call below, except for the CRX gate whose weights you would define yourself,
    ### because the init template in pennylane doesn't do that; it only initializes the rotation parameters
# for i in range(weights.shape[0]):
# for j in range(weights.shape[1]):
# qml.Rot(*weights[i][j],wires=j)
# for x in range(cnots.shape[1]):
# qml.CRX(*cnots[i][x],wires=[x,(x+1)%6])
qml.templates.StronglyEntanglingLayers(weights,wires=list(range(7)))
return [qml.expval(qml.PauliZ(i)) for i in range(7)]
```
### **The Cost Function**
This function contains the main logic of the full network. It takes a batched input together with the weights and first passes the quantum weights into the quantum classifier.
Then it adds the bias to the output of the quantum circuit. After that, we apply the classical operations, ReLU and softmax, as shown in the for loop below.
```
def cost(params, x, y):
# Compute prediction for each input in data batch
loss = []
for i in range(len(x)):
out = qclassifier(params[0],X=x[i])+params[1] # quantum output
        out = np.maximum(0,np.dot(params[2],out)+params[3]) # relu on the first layer
loss.append(softmax(np.dot(params[4],out)+params[5])) # softmax on the second layer
loss = log_loss(y,np.array(loss),labels=y_train) # compute loss
weights = compute_sample_weight('balanced',y)
# weighted average to compensate for imbalanced batches
s = 0
for x, y in zip(loss, weights):
s += x * y
return s/sum(weights)
# a helper function to predict the label of the image
def predict(params,x,y):
prob = []
for i in range(len(x)):
out = qclassifier(params[0],X=x[i])+params[1]
out = np.maximum(0,np.dot(params[2],out)+params[3])
out = softmax(np.dot(params[4],out)+params[5])
prob.append(np.argmax(out))
return prob
```
### **Weights initialization**
```
np.random.seed(88)
# quantum parameters
n_qubits= 7
Q_n_layer = 8
Qweights = qml.init.strong_ent_layers_uniform(n_layers = Q_n_layer,n_wires = n_qubits,low=0,high=1,seed=0)
Qbias = np.random.uniform(low=-.1,high=.1,size=(n_qubits))*0
# first layer parameters
hidden_units = 12
linear2_layer = np.random.randn(hidden_units,n_qubits)*0.01
bias2_layer = np.random.randn(hidden_units)*0
classes = 3
# second layer parameters
linear3_layer = np.random.randn(classes,hidden_units)*0.01
bias3_layer = np.random.randn(classes)*0
params = [Qweights,Qbias,linear2_layer,bias2_layer,linear3_layer,bias3_layer]
params
```
**Load the saved weights**
You can download the weights from this [link](https://github.com/kareem1925/Ismailia-school-of-AI/raw/master/quantum_mnist_classification/final-grads.npy). Or, you can check the [repo](https://github.com/kareem1925/Ismailia-school-of-AI/tree/master/quantum_mnist_classification) itself.
```
final_weights = np.load("/content/final-grads.npy",allow_pickle=True)
```
Convert the one hot encoding back to its original labels
```
labels = la.inverse_transform(y_test)
predictions=predict(final_weights,X_test,y_test)
print(accuracy_score(labels,predictions))
from sklearn.metrics import classification_report
print(classification_report(labels,predictions))
```
### **Training procedure**
You can run this cell and have fun with the training.
```
from sklearn.utils import shuffle
learning_rate = 0.12
epochs = 1200
batch_size = 32
opt = AdamOptimizer(learning_rate) # classical adam optimizer
opt.reset()
loss = np.inf #random large number
grads = []
for it in range(epochs):
# data shuffling
X_train_1,y_train_1 = shuffle(X_train,y_train)
X_test_1,y_test_1 = shuffle(X_test,y_test)
# batching the data, i.e. every epoch processes the batch_size samples only
Xbatch = X_train_1[:batch_size]
ybatch = y_train_1[:batch_size]
params = opt.step(lambda v: cost(v, Xbatch, ybatch), params) # updating weights
grads.append(params)
if it % 1 == 0:
test_loss = cost(params, X_test_1[:50], y_test_1[:50])
if test_loss < loss:
loss = test_loss
print('heey new loss')
print("Iter: {:5d} | test loss: {:0.7f} ".format(it + 1, test_loss))
```
# Module Efficiency History and Projections
```
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
```
This journal covers the development of a historical baseline and baseline future projection of average module efficiency for each installation year.
```
cwd = os.getcwd() #grabs current working directory
skipcols = ['Source']
mod_eff_raw = pd.read_csv(cwd+"/../../../PV_ICE/baselines/SupportingMaterial/module_eff.csv",
index_col='Year', usecols=lambda x: x not in skipcols)
mod_eff_raw['mod_eff'] = pd.to_numeric(mod_eff_raw['mod_eff'])
print(mod_eff_raw['mod_eff'][2019])
plt.plot(mod_eff_raw, marker='o')
```
There appears to be an "outlier" in 2003. This is from a different source. It does, however, fit within the range of module efficiency specified by the prior data point (2001, avg = 13.6, min = 12, max = 16.1). For the purposes of interpolation, we will drop this single data point.
```
mod_eff_raw['mod_eff'][2003]=np.nan
plt.plot(mod_eff_raw, marker='o')
```
Now interpolate for the missing years. We break this into two parts: a linear interpolation for the historical portion, and a power-law projection out to 2050.
```
mod_eff_early = mod_eff_raw.loc[(mod_eff_raw.index<=2019)]
mod_eff_history = mod_eff_early.interpolate(method='linear',axis=0)
#print(mod_eff_history)
plt.plot(mod_eff_history)
# Import curve fitting package from scipy
from scipy.optimize import curve_fit
# Function to calculate the power-law with constants a and b
def power_law(x, a, b):
return a*np.power(x, b)
# generate a dataset for the projection years in between (2020-2050)
mod_eff_late = mod_eff_raw.loc[(mod_eff_raw.index>=2020)]
y_dummy = power_law(mod_eff_late.index-2019, mod_eff_late['mod_eff'][2020], 0.073)
#played around with the exponential until y_dummy[31] closely matched projected 25.06% value. CITE
print(y_dummy[30])
plt.plot(y_dummy)
#create a dataframe of the projection
mod_eff_late['mod_eff'] = y_dummy
#print(mod_eff_late)
plt.plot(mod_eff_late)
```
Now smash the two dataframes back together for our average module efficiency baseline.
```
mod_eff = pd.concat([mod_eff_history, mod_eff_late])
mod_eff.to_csv(cwd+'/../../../PV_ICE/baselines/SupportingMaterial/output_avg_module_eff_final.csv', index=True)
plt.plot(mod_eff)
plt.title('Average Module Efficiency (%)')
plt.ylabel('Efficiency (%)')
#graph for paper
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
plt.axvspan(2020, 2050.5, facecolor='gray', alpha=0.1)
plt.plot(mod_eff_raw, marker='o', label='Raw Data')
plt.plot(mod_eff, '--k', label='PV ICE Baseline')
plt.title('Average Module Efficiency [%]')
plt.ylabel('Efficiency [%]')
plt.legend()
plt.xlim([1974, 2050.5])
```
<img src="https://github.com/pmservice/ai-openscale-tutorials/raw/master/notebooks/images/banner.png" align="left" alt="banner">
# Working with Watson Machine Learning
The notebook will train, create and deploy a Credit Risk model. It will then configure OpenScale to monitor drift in data and accuracy by injecting sample payloads for viewing in the OpenScale Insights dashboard.
### Contents
- [1. Setup](#setup)
- [2. Model building and deployment](#model)
- [3. OpenScale configuration](#openscale)
- [4. Generate drift model](#driftmodel)
- [5. Submit payload](#payload)
- [6. Enable drift monitoring](#monitor)
- [7. Run drift monitor](#driftrun)
# 1.0 Setup <a name="setup"></a>
## 1.1 Package installation
```
import warnings
warnings.filterwarnings('ignore')
!rm -rf /home/spark/shared/user-libs/python3.6*
!pip install --upgrade opt-einsum==2.3.2 --no-cache | tail -n 1
!pip install --upgrade typing-extensions==3.6.2.1 --no-cache | tail -n 1
!pip install --upgrade jupyter==1 --no-cache | tail -n 1
!pip install --upgrade tensorboard==1.15.0 | tail -n 1
!pip install --upgrade ibm-ai-openscale==2.2.1 --no-cache | tail -n 1
!pip install --upgrade JPype1-py3 | tail -n 1
!pip install --upgrade watson-machine-learning-client-V4==1.0.93 | tail -n 1
!pip install --upgrade numpy==1.18.3 --no-cache | tail -n 1
!pip install --upgrade SciPy==1.4.1 --no-cache | tail -n 1
!pip install --upgrade pyspark==2.3 | tail -n 1
!pip install --upgrade scikit-learn==0.20.3 | tail -n 1
!pip install --upgrade pandas==0.24.2 | tail -n 1
!pip install --upgrade ibm-wos-utils>=1.2.1
```
### Action: restart the kernel!
## 1.2 Configure credentials
- WOS_CREDENTIALS (ICP)
- WML_CREDENTIALS (ICP)
- DATABASE_CREDENTIALS (DB2 on ICP)
- SCHEMA_NAME
The url for `WOS_CREDENTIALS` is the url of the CP4D cluster, i.e. `https://zen-cpd-zen.apps.com`.
```
WOS_CREDENTIALS = {
"url": "********",
"username": "********",
"password": "********"
}
WML_CREDENTIALS = WOS_CREDENTIALS.copy()
WML_CREDENTIALS['instance_id']='openshift'
WML_CREDENTIALS['version']='3.0.0'
```
Provide `DATABASE_CREDENTIALS`. Watson OpenScale uses a database to store payload logs and calculated metrics. If an OpenScale datamart exists in Db2, the existing datamart will be used and no data will be overwritten. Details in the cell below are removed because they contain a password.
```
DATABASE_CREDENTIALS = {
}
```
Provide `SCHEMA_NAME`. Details in the cell below are removed.
```
SCHEMA_NAME = ''
```
Provide a custom name to be concatenated to model name, deployment name and open scale monitor. Sample value for CUSTOM_NAME could be ```CUSTOM_NAME = 'SAMAYA_OPENSCALE_3.0'```
```
CUSTOM_NAME = 'SAMAYA-DRIFT'
```
# 2.0 Model building and deployment <a name="model"></a>
In this section you will learn how to train a Spark MLlib model and then deploy it as a web service using the Watson Machine Learning service.
## 2.1 Load the training data
```
import pandas as pd
!rm -rf german_credit_data_biased_training.csv
!wget https://raw.githubusercontent.com/IBM/cpd-intelligent-loan-agent-assets/master/data/german_credit_data_biased_training.csv -O german_credit_data_biased_training.csv
!ls -lh german_credit_data_biased_training.csv
data_df = pd.read_csv('german_credit_data_biased_training.csv', sep=",", header=0)
data_df.head()
from pyspark.sql import SparkSession
import json
spark = SparkSession.builder.getOrCreate()
df_data = spark.read.csv(path="german_credit_data_biased_training.csv", sep=",", header=True, inferSchema=True)
df_data.head()
```
## 2.2 Explore data
```
df_data.printSchema()
print("Number of records: " + str(df_data.count()))
```
## 2.3 Create a model
Choose a unique name (e.g. your name or initials) and a date or date-time for `MODEL_NAME` and `DEPLOYMENT_NAME`
```
MODEL_NAME = CUSTOM_NAME + "_MODEL"
DEPLOYMENT_NAME = CUSTOM_NAME + "_DEPLOYMENT"
spark_df = df_data
(train_data, test_data) = spark_df.randomSplit([0.8, 0.2], 24)
print("Number of records for training: " + str(train_data.count()))
print("Number of records for evaluation: " + str(test_data.count()))
spark_df.printSchema()
```
The code below creates a Random Forest Classifier with Spark, setting up string indexers for the categorical features and the label column. Finally, this notebook creates a pipeline including the indexers and the model, and does an initial Area Under ROC evaluation of the model.
```
from pyspark.ml.feature import OneHotEncoder, StringIndexer, IndexToString, VectorAssembler
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml import Pipeline, Model
si_CheckingStatus = StringIndexer(inputCol = 'CheckingStatus', outputCol = 'CheckingStatus_IX')
si_CreditHistory = StringIndexer(inputCol = 'CreditHistory', outputCol = 'CreditHistory_IX')
si_LoanPurpose = StringIndexer(inputCol = 'LoanPurpose', outputCol = 'LoanPurpose_IX')
si_ExistingSavings = StringIndexer(inputCol = 'ExistingSavings', outputCol = 'ExistingSavings_IX')
si_EmploymentDuration = StringIndexer(inputCol = 'EmploymentDuration', outputCol = 'EmploymentDuration_IX')
si_Sex = StringIndexer(inputCol = 'Sex', outputCol = 'Sex_IX')
si_OthersOnLoan = StringIndexer(inputCol = 'OthersOnLoan', outputCol = 'OthersOnLoan_IX')
si_OwnsProperty = StringIndexer(inputCol = 'OwnsProperty', outputCol = 'OwnsProperty_IX')
si_InstallmentPlans = StringIndexer(inputCol = 'InstallmentPlans', outputCol = 'InstallmentPlans_IX')
si_Housing = StringIndexer(inputCol = 'Housing', outputCol = 'Housing_IX')
si_Job = StringIndexer(inputCol = 'Job', outputCol = 'Job_IX')
si_Telephone = StringIndexer(inputCol = 'Telephone', outputCol = 'Telephone_IX')
si_ForeignWorker = StringIndexer(inputCol = 'ForeignWorker', outputCol = 'ForeignWorker_IX')
si_Label = StringIndexer(inputCol="Risk", outputCol="label").fit(spark_df)
label_converter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=si_Label.labels)
va_features = VectorAssembler(inputCols=["CheckingStatus_IX", "CreditHistory_IX", "LoanPurpose_IX", "ExistingSavings_IX", "EmploymentDuration_IX", "Sex_IX", \
"OthersOnLoan_IX", "OwnsProperty_IX", "InstallmentPlans_IX", "Housing_IX", "Job_IX", "Telephone_IX", "ForeignWorker_IX", \
"LoanDuration", "LoanAmount", "InstallmentPercent", "CurrentResidenceDuration", "LoanDuration", "Age", "ExistingCreditsCount", \
"Dependents"], outputCol="features")
from pyspark.ml.classification import RandomForestClassifier
classifier = RandomForestClassifier(featuresCol="features")
pipeline = Pipeline(stages=[si_CheckingStatus, si_CreditHistory, si_EmploymentDuration, si_ExistingSavings, si_ForeignWorker, si_Housing, si_InstallmentPlans, si_Job, si_LoanPurpose, si_OthersOnLoan,\
si_OwnsProperty, si_Sex, si_Telephone, si_Label, va_features, classifier, label_converter])
model = pipeline.fit(train_data)
predictions = model.transform(test_data)
evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction", metricName='areaUnderROC')
area_under_curve = evaluatorDT.evaluate(predictions)
evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction", metricName='areaUnderPR')
area_under_PR = evaluatorDT.evaluate(predictions)
#default evaluation is areaUnderROC
print("areaUnderROC = %g" % area_under_curve, "areaUnderPR = %g" % area_under_PR)
```
### 2.4 Evaluate more metrics by exporting them into pandas and numpy
```
from sklearn.metrics import classification_report
y_pred = predictions.toPandas()['prediction']
y_pred = ['Risk' if pred == 1.0 else 'No Risk' for pred in y_pred]
y_test = test_data.toPandas()['Risk']
print(classification_report(y_test, y_pred, target_names=['Risk', 'No Risk']))
```
## 2.5 Publish the model
In this section, the notebook uses Watson Machine Learning to save the model (including the pipeline) to the WML instance. Previous versions of the model are removed so that the notebook can be run again, resetting all data for another demo.
```
from watson_machine_learning_client import WatsonMachineLearningAPIClient
import json
wml_client = WatsonMachineLearningAPIClient(WML_CREDENTIALS)
```
### 2.5.1 Set default space
Deployment spaces are a new feature in CP4D: in order to deploy a model, you have to create a deployment space and deploy your models there. You can list all the spaces using the `.list()` function, or you can create new spaces from the CP4D menu (top left corner --> Analyze --> Analytics deployments --> New Deployment Space). Once you know which space you want to deploy in, simply pass the GUID of the space as the argument to the `.set.default_space()` function below.
```
wml_client.spaces.list()
```
We'll use the `GUID` for your Deployment space as listed for the `default_space` in the method below:
```
wml_client.set.default_space('346b75fd-018d-4465-8cb8-0985406cfdee')
```
Alternatively, set `space_name` below and use the following cell to create a space with that name
```
# space_name = "my_space_name"
# spaces = wml_client.spaces.get_details()['resources']
# space_id = None
# for space in spaces:
# if space['entity']['name'] == space_name:
# space_id = space["metadata"]["guid"]
# if space_id is None:
# space_id = wml_client.spaces.store(
# meta_props={wml_client.spaces.ConfigurationMetaNames.NAME: space_name})["metadata"]["guid"]
#wml_client.set.default_space(space_id)
```
### 2.5.2 Remove existing model and deployment
```
deployment_details = wml_client.deployments.get_details()
for deployment in deployment_details['resources']:
deployment_id = deployment['metadata']['guid']
model_id = deployment['entity']['asset']['href'].split('/')[3].split('?')[0]
if deployment['entity']['name'] == DEPLOYMENT_NAME:
print('Deleting deployment id', deployment_id)
wml_client.deployments.delete(deployment_id)
print('Deleting model id', model_id)
wml_client.repository.delete(model_id)
wml_client.repository.list_models()
```
### 2.5.3 Set `training_data_reference`
```
training_data_reference = {
"name": "Credit Risk feedback",
"connection": DATABASE_CREDENTIALS,
"source": {
"tablename": "CREDIT_RISK_TRAINING",
'schema_name': 'TRAININGDATA',
"type": "db2"
}
}
```
### 2.5.4 Store the model in Watson Machine Learning on CP4D
```
wml_models = wml_client.repository.get_model_details()
model_uid = None
for model_in in wml_models['resources']:
if MODEL_NAME == model_in['entity']['name']:
model_uid = model_in['metadata']['guid']
break
if model_uid is None:
print("Storing model ...")
metadata = {
wml_client.repository.ModelMetaNames.NAME: MODEL_NAME,
wml_client.repository.ModelMetaNames.TYPE: 'mllib_2.3',
wml_client.repository.ModelMetaNames.RUNTIME_UID: 'spark-mllib_2.3',
}
published_model_details = wml_client.repository.store_model(model, metadata, training_data=df_data, pipeline=pipeline)
model_uid = wml_client.repository.get_model_uid(published_model_details)
print("Done")
model_uid
```
## 2.6 Deploy the model
The next section of the notebook deploys the model as a RESTful web service in Watson Machine Learning. The deployed model will have a scoring URL you can use to send data to the model for predictions.
```
wml_deployments = wml_client.deployments.get_details()
deployment_uid = None
for deployment in wml_deployments['resources']:
if DEPLOYMENT_NAME == deployment['entity']['name']:
deployment_uid = deployment['metadata']['guid']
break
if deployment_uid is None:
print("Deploying model...")
meta_props = {
wml_client.deployments.ConfigurationMetaNames.NAME: DEPLOYMENT_NAME,
wml_client.deployments.ConfigurationMetaNames.ONLINE: {}
}
deployment = wml_client.deployments.create(artifact_uid=model_uid, meta_props=meta_props)
deployment_uid = wml_client.deployments.get_uid(deployment)
print("Model id: {}".format(model_uid))
print("Deployment id: {}".format(deployment_uid))
```
# 3.0 Configure OpenScale <a name="openscale"></a>
The notebook will now import the necessary libraries and set up a Python OpenScale client.
```
from ibm_ai_openscale import APIClient4ICP
from ibm_ai_openscale.engines import *
from ibm_ai_openscale.utils import *
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
from ibm_ai_openscale.supporting_classes.enums import *
ai_client = APIClient4ICP(WOS_CREDENTIALS)
ai_client.version
```
## 3.1 Create datamart
### 3.1.1 Set up datamart
Watson OpenScale uses a database to store payload logs and calculated metrics. If an OpenScale datamart exists in Db2, the existing datamart will be used and no data will be overwritten.
Prior instances of the Credit model will be removed from OpenScale monitoring.
```
try:
data_mart_details = ai_client.data_mart.get_details()
print('Using existing external datamart')
except:
print('Setting up external datamart')
ai_client.data_mart.setup(db_credentials=DATABASE_CREDENTIALS, schema=SCHEMA_NAME)
data_mart_details = ai_client.data_mart.get_details()
```
## 3.2 Bind machine learning engines
Watson OpenScale needs to be bound to the Watson Machine Learning instance to capture payload data into and out of the model. If this binding already exists, this code will output a warning message and use the existing binding.
```
binding_uid = ai_client.data_mart.bindings.add('WML instance', WatsonMachineLearningInstance4ICP(wml_credentials=WML_CREDENTIALS))
if binding_uid is None:
binding_uid = ai_client.data_mart.bindings.get_details()['service_bindings'][0]['metadata']['guid']
bindings_details = ai_client.data_mart.bindings.get_details()
binding_uid
ai_client.data_mart.bindings.list()
```
## 3.3 Subscriptions
```
ai_client.data_mart.bindings.list_assets()
ai_client.data_mart.bindings.get_details(binding_uid)
```
### 3.3.1 Remove existing credit risk subscriptions
This code removes previous subscriptions to the Credit model to refresh the monitors with the new model and new data.
```
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for subscription in subscriptions_uids:
sub_name = ai_client.data_mart.subscriptions.get_details(subscription)['entity']['asset']['name']
if sub_name == MODEL_NAME:
ai_client.data_mart.subscriptions.delete(subscription)
print('Deleted existing subscription for', MODEL_NAME)
```
This code creates the model subscription in OpenScale using the Python client API. Note that we need to provide the model unique identifier, and some information about the model itself.
```
subscription = ai_client.data_mart.subscriptions.add(WatsonMachineLearningAsset(
model_uid,
problem_type=ProblemType.BINARY_CLASSIFICATION,
input_data_type=InputDataType.STRUCTURED,
label_column='Risk',
prediction_column='predictedLabel',
probability_column='probability',
feature_columns = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"],
categorical_columns = ["CheckingStatus","CreditHistory","LoanPurpose","ExistingSavings","EmploymentDuration","Sex","OthersOnLoan","OwnsProperty","InstallmentPlans","Housing","Job","Telephone","ForeignWorker"]
))
if subscription is None:
print('Subscription already exists; get the existing one')
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for sub in subscriptions_uids:
if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == MODEL_NAME:
subscription = ai_client.data_mart.subscriptions.get(sub)
```
Get subscription list
```
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
ai_client.data_mart.subscriptions.list()
subscription_details = subscription.get_details()
```
# 4.0 Generate drift model <a name="driftmodel"></a>
Drift requires a trained drift detection model to be uploaded manually for WML. You can train, create and download a drift detection model using the code below. The entire code can be found [here](https://github.com/IBM-Watson/aios-data-distribution/blob/master/training_statistics_notebook.ipynb) (check the section on drift detection model generation).
```
training_data_info = {
"class_label":'Risk',
"feature_columns":["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"],
"categorical_columns":["CheckingStatus","CreditHistory","LoanPurpose","ExistingSavings","EmploymentDuration","Sex","OthersOnLoan","OwnsProperty","InstallmentPlans","Housing","Job","Telephone","ForeignWorker"]
}
#Set model_type. Acceptable values are:["binary","multiclass","regression"]
model_type = "binary"
#model_type = "multiclass"
#model_type = "regression"
def score(training_data_frame):
#To be filled by the user
WML_CREDENTAILS = WML_CREDENTIALS
#The data type of the label column and prediction column should be same .
#User needs to make sure that label column and prediction column array should have the same unique class labels
prediction_column_name = "predictedLabel"
probability_column_name = "probability"
feature_columns = list(training_data_frame.columns)
training_data_rows = training_data_frame[feature_columns].values.tolist()
#print(training_data_rows)
payload_scoring = {
wml_client.deployments.ScoringMetaNames.INPUT_DATA: [{
"fields": feature_columns,
"values": [x for x in training_data_rows]
}]
}
score = wml_client.deployments.score(deployment_uid, payload_scoring)
score_predictions = score.get('predictions')[0]
prob_col_index = list(score_predictions.get('fields')).index(probability_column_name)
predict_col_index = list(score_predictions.get('fields')).index(prediction_column_name)
if prob_col_index < 0 or predict_col_index < 0:
raise Exception("Missing prediction/probability column in the scoring response")
import numpy as np
probability_array = np.array([value[prob_col_index] for value in score_predictions.get('values')])
prediction_vector = np.array([value[predict_col_index] for value in score_predictions.get('values')])
return probability_array, prediction_vector
#Generate drift detection model
from ibm_wos_utils.drift.drift_trainer import DriftTrainer
drift_detection_input = {
"feature_columns":training_data_info.get('feature_columns'),
"categorical_columns":training_data_info.get('categorical_columns'),
"label_column": training_data_info.get('class_label'),
"problem_type": model_type
}
drift_trainer = DriftTrainer(data_df,drift_detection_input)
if model_type != "regression":
#Note: batch_size can be customized by user as per the training data size
drift_trainer.generate_drift_detection_model(score,batch_size=data_df.shape[0])
#Note: Two column constraints are not computed beyond two_column_learner_limit(default set to 200)
#User can adjust the value depending on the requirement
drift_trainer.learn_constraints(two_column_learner_limit=200)
drift_trainer.create_archive()
#Generate a download link for drift detection model
from IPython.display import HTML
import base64
import io
def create_download_link_for_ddm( title = "Download Drift detection model", filename = "drift_detection_model.tar.gz"):
#Retains stats information
with open(filename,'rb') as file:
ddm = file.read()
b64 = base64.b64encode(ddm)
payload = b64.decode()
html = '<a download="{filename}" href="data:text/json;base64,{payload}" target="_blank">{title}</a>'
html = html.format(payload=payload,title=title,filename=filename)
return HTML(html)
create_download_link_for_ddm()
#!rm -rf drift_detection_model.tar.gz
#!wget -O drift_detection_model.tar.gz https://github.com/IBM/cpd-intelligent-loan-agent-assets/blob/master/models/drift_detection_model.tar.gz?raw=true
```
# 5.0 Submit payload <a name="payload"></a>
### Score the model so we can configure monitors
Now that the WML service has been bound and the subscription has been created, we need to send a request to the model before we configure OpenScale. This allows OpenScale to create a payload log in the datamart with the correct schema, so it can capture data coming into and out of the model.
```
fields = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"]
values = [
["no_checking",13,"credits_paid_to_date","car_new",1343,"100_to_500","1_to_4",2,"female","none",3,"savings_insurance",46,"none","own",2,"skilled",1,"none","yes"],
["no_checking",24,"prior_payments_delayed","furniture",4567,"500_to_1000","1_to_4",4,"male","none",4,"savings_insurance",36,"none","free",2,"management_self-employed",1,"none","yes"],
["0_to_200",26,"all_credits_paid_back","car_new",863,"less_100","less_1",2,"female","co-applicant",2,"real_estate",38,"none","own",1,"skilled",1,"none","yes"],
["0_to_200",14,"no_credits","car_new",2368,"less_100","1_to_4",3,"female","none",3,"real_estate",29,"none","own",1,"skilled",1,"none","yes"],
["0_to_200",4,"no_credits","car_new",250,"less_100","unemployed",2,"female","none",3,"real_estate",23,"none","rent",1,"management_self-employed",1,"none","yes"],
["no_checking",17,"credits_paid_to_date","car_new",832,"100_to_500","1_to_4",2,"male","none",2,"real_estate",42,"none","own",1,"skilled",1,"none","yes"],
["no_checking",33,"outstanding_credit","appliances",5696,"unknown","greater_7",4,"male","co-applicant",4,"unknown",54,"none","free",2,"skilled",1,"yes","yes"],
["0_to_200",13,"prior_payments_delayed","retraining",1375,"100_to_500","4_to_7",3,"male","none",3,"real_estate",37,"none","own",2,"management_self-employed",1,"none","yes"]
]
payload_scoring = {"fields": fields,"values": values}
payload = {
wml_client.deployments.ScoringMetaNames.INPUT_DATA: [payload_scoring]
}
scoring_response = wml_client.deployments.score(deployment_uid, payload)
print('Single record scoring result:', '\n fields:', scoring_response['predictions'][0]['fields'], '\n values: ', scoring_response['predictions'][0]['values'][0])
```
# 6. Enable drift monitoring <a name="monitor"></a>
```
subscription.drift_monitoring.enable(threshold=0.05, min_records=10,model_path="drift_detection_model.tar.gz")
```
# 7. Run Drift monitor on demand <a name="driftrun"></a>
```
!rm german_credit_feed.json
!wget https://raw.githubusercontent.com/IBM/cpd-intelligent-loan-agent-assets/master/data/german_credit_feed.json
import random
with open('german_credit_feed.json', 'r') as scoring_file:
scoring_data = json.load(scoring_file)
fields = scoring_data['fields']
values = []
for _ in range(10):
current = random.choice(scoring_data['values'])
#set age of all rows to 100 to increase drift values on dashboard
current[12] = 100
values.append(current)
payload_scoring = {"fields": fields, "values": values}
payload = {
wml_client.deployments.ScoringMetaNames.INPUT_DATA: [payload_scoring]
}
scoring_response = wml_client.deployments.score(deployment_uid, payload)
drift_run_details = subscription.drift_monitoring.run(background_mode=False)
subscription.drift_monitoring.get_table_content()
```
## Congratulations!
You have finished running all the cells within the notebook for IBM Watson OpenScale. You can now view the OpenScale dashboard by going to the CP4D `Home` page, and clicking `Services`. Choose the `OpenScale` tile and click the menu to `Open`. Click on the tile for the model you've created to see fairness, accuracy, and performance monitors. Click on the timeseries graph to get detailed information on transactions during a specific time window.
OpenScale shows model performance over time. You have two options to keep data flowing to your OpenScale graphs:
* Download, configure and schedule the [model feed notebook](https://raw.githubusercontent.com/emartensibm/german-credit/master/german_credit_scoring_feed.ipynb). This notebook can be set up with your WML credentials, and scheduled to provide a consistent flow of scoring requests to your model, which will appear in your OpenScale monitors.
* Re-run this notebook. Running this notebook from the beginning will delete and re-create the model and deployment, and re-create the historical data. Please note that the payload and measurement logs for the previous deployment will continue to be stored in your datamart, and can be deleted if necessary.
<img src="Images/slide_1_clustering.png" width="700" height="700">
<img src="Images/slide_2_clustering.png" width="700" height="700">
## Text Vectorization
Question: What is text vectorization?
Answer: The process to transform text data to numerical vectors
## Options for Text Vectorization
- Count the number of unique words for each sentence (BOW)
- Assign weights to each word in the sentences
- Map each word to a number (a dictionary with words as keys and numbers as values) and represent each sentence as a sequence of numbers (see the sketch below)
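As an illustration of the third option (my own sketch, not sklearn code), a word-to-index dictionary can be built by hand and each sentence encoded as a sequence of integer ids:
```
# Sketch: map each word to an integer id and encode sentences as id sequences.
sentences = ['this is the first sentence', 'this one is the second sentence']

vocab = {}
for sent in sentences:
    for word in sent.split():
        vocab.setdefault(word, len(vocab))  # assign the next free id to unseen words

sequences = [[vocab[word] for word in sent.split()] for sent in sentences]
print(vocab)
print(sequences)
```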
## Bag-of-Word Matrix
- BoW is a matrix whose rows are the sentences and whose columns are the unique words of the whole document collection (corpus)
- We could write our own function to build the BoW matrix from scratch
- Below, we will see how we can build a BoW matrix by calling sklearn methods
```
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
documents = ['This is the first sentence.',
'This one is the second sentence.',
'And this is the third one.',
'Is this the first sentence?']
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(documents)
# X.toarray() is the BoW matrix
print(X.toarray())
```
## How to get unique words?
```
# Get the unique words
print(vectorizer.get_feature_names())
```
## Clustering
- Clustering is an unsupervised learning method
- This is very often used **because we usually don’t have labeled data**
- K-Means clustering is one of the most popular clustering algorithms
- The goal of any cluster algorithm is to find groups (clusters) in the given data
## Examples of Clustering
- Clustering a movie dataset -> We expect movies with similar genres to be clustered in the same group
- Clustering news articles -> We want news related to science to be in one group and news related to sport to be in another group
## Demo of K-means
```
from figures import plot_kmeans_interactive
plot_kmeans_interactive()
```
## K-means algorithm:
Assume the inputs are $s_1$, $s_2$, ..., $s_n$. Choose $K$ arbitrarily.
Step 1 - Pick $K$ random points as cluster centers (called centroids)
Step 2 - Assign each $s_i$ to nearest cluster by calculating its distance to each centroid
Step 3 - Find new cluster center by taking the average of the assigned points
Step 4 - Repeat Step 2 and 3 until none of the cluster assignments change
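The four steps can also be written down directly in NumPy. The sketch below is illustrative only (it assumes no cluster ever becomes empty) and is not the implementation used by sklearn's `KMeans`:
```
import numpy as np

def simple_kmeans(points, K, n_iter=100, seed=0):
    """Bare-bones K-means following the four steps above (no empty-cluster handling)."""
    rng = np.random.RandomState(seed)
    centroids = points[rng.choice(len(points), K, replace=False)]  # Step 1: random centroids
    for _ in range(n_iter):
        # Step 2: assign each point to its nearest centroid
        dists = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        # Step 3: recompute each centroid as the mean of its assigned points
        new_centroids = np.array([points[labels == k].mean(axis=0) for k in range(K)])
        # Step 4: stop when the centroids (and hence assignments) no longer change
        if np.allclose(new_centroids, centroids):
            break
        centroids = new_centroids
    return centroids, labels

# Usage on the blobs generated below (X from make_blobs):
# centroids, labels = simple_kmeans(X, K=4)
```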
## Let's generate a sample dataset
```
from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pyplot as plt
X, y = make_blobs(n_samples=300, centers=4,
random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1])
```
### How to choose the correct number of clusters (K)?
Choose an arbitrary K
1- Compute all of the distances of the red points to the red centroid
2- Do step (1) for the other colors (purple, blue, ...)
3- Add them up
```
import numpy as np
from scipy.spatial import distance
distortions = []
K = range(1, 10)
for k in K:
km = KMeans(n_clusters=k)
km.fit(X)
distortions.append(sum(np.min(distance.cdist(X, km.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])
# Plot the elbow
plt.plot(K, distortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.show()
```
## Another implementation for obtaining the appropriate number of clusters
```
sum_of_squared_distances = []
K = range(1,15)
for k in K:
km = KMeans(n_clusters=k)
km.fit(X)
sum_of_squared_distances.append(km.inertia_)
# Plot the elbow
plt.plot(K, sum_of_squared_distances, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.show()
```
## Combine Text Vectorization and Clustering the Texts
- Based on the given documents, we want to cluster the sentences
- To do this, we need two steps:
    - Vectorize the sentences (texts)
    - Apply K-means to cluster our vectorized sentences (texts)
```
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
documents = ["This little kitty came to play when I was eating at a restaurant.",
"Merley has the best squooshy kitten belly.",
"Google Translate app is incredible.",
"If you open 100 tab in google you get a smiley face.",
"Best cat photo I've ever taken.",
"Climbing ninja cat.",
"Impressed with google map feedback.",
"Key promoter extension for Google Chrome."]
vectorizer = TfidfVectorizer(stop_words='english')
X = vectorizer.fit_transform(documents)
print(vectorizer.get_feature_names())
print(X.shape)
true_k = 2
model = KMeans(n_clusters=true_k, init='k-means++')
model.fit(X)
# print('M:')
# print(model.cluster_centers_.argsort())
# print(model.cluster_centers_.argsort()[:, ::-1])
# print("Top terms per cluster:")
# order_centroids = model.cluster_centers_.argsort()[:, ::-1]
# terms = vectorizer.get_feature_names()
# for i in range(true_k):
# print("Cluster %d:" % i),
# for ind in order_centroids[i, :10]:
# print(' %s' % terms[ind]),
# print("\n")
# print("Prediction")
Y = vectorizer.transform(["chrome browser to open."])
print('Y:')
print(Y.toarray())
prediction = model.predict(Y)
print(prediction)
Y = vectorizer.transform(["My cat is hungry."])
prediction = model.predict(Y)
print(prediction)
```
## Other clustering methods and comparison:
http://scikit-learn.org/stable/modules/clustering.html
## Resources:
- https://www.youtube.com/watch?v=FrmrHyOSyhE
- https://jakevdp.github.io/PythonDataScienceHandbook/05.11-k-means.html
## Summary
- In order to work with text, we should transform it into vectors of numbers
- We learned three methods for text vectorization
- Clustering, as an unsupervised learning algorithm, finds groups based on the geometric proximity of the data points
# Chapter 1: Warm-up
## 00. Reversed string
***
Obtain the string in which the characters of the string "stressed" are arranged in reverse order (from the end to the beginning).
```
str = 'stressed'
ans = str[::-1]
print(ans)
```
## 01. "パタトクカシーー"
***
Obtain the string formed by extracting and concatenating the 1st, 3rd, 5th, and 7th characters of the string "パタトクカシーー".
```
str = 'パタトクカシーー'
ans = str[::2]
print(ans)
```
## 02. "パトカー" + "タクシー" = "パタトクカシーー"
***
Obtain the string "パタトクカシーー" by concatenating the characters of "パトカー" and "タクシー" alternately, starting from the beginning.
```
str1 = 'パトカー'
str2 = 'タクシー'
ans = ''.join([i + j for i, j in zip(str1, str2)])
print(ans)
```
## 03. Pi
***
Split the sentence "Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics." into words, and create a list of the number of (alphabetical) characters in each word, in order of appearance.
```
import re
str = 'Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics.'
str = re.sub('[,\.]', '', str) # remove commas and periods
splits = str.split() # split on whitespace into a list of words
ans = [len(i) for i in splits]
print(ans)
```
## 04. Element symbols
***
Split the sentence "Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can." into words. Take the first character of the 1st, 5th, 6th, 7th, 8th, 9th, 15th, 16th, and 19th words, and the first two characters of all other words, and create an associative array (dictionary or map) that maps each extracted string to the position of its word (counting from the beginning of the sentence).
```
str = 'Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.'
splits = str.split()
one_ch = [1, 5, 6, 7, 8, 9, 15, 16, 19] # positions of words from which to take only the first character
ans = {}
for i, word in enumerate(splits):
if i + 1 in one_ch:
        ans[word[:1]] = i + 1 # take one character if the position is in the list
else:
        ans[word[:2]] = i + 1 # otherwise take two characters
print(ans)
```
## 05. n-gram
***
Write a function that creates n-grams from a given sequence (a string, a list, etc.). Using this function, obtain the word bi-grams and the character bi-grams of the sentence "I am an NLPer".
```
def ngram(n, lst):
return list(zip(*[lst[i:] for i in range(n)]))
str = 'I am an NLPer'
words_bi_gram = ngram(2, str.split())
chars_bi_gram = ngram(2, str)
print('word bi-grams:', words_bi_gram)
print('character bi-grams:', chars_bi_gram)
# the building block of ngram(): shifted copies of the sequence that zip() combines
str = 'I am an NLPer'
[str[i:] for i in range(2)]
```
## 06. Sets
***
Obtain the sets of character bi-grams contained in "paraparaparadise" and "paragraph" as X and Y respectively, then compute the union, intersection, and difference of X and Y. Also check whether the bi-gram 'se' is contained in X and in Y.
```
str1 = 'paraparaparadise'
str2 = 'paragraph'
X = set(ngram(2, str1))
Y = set(ngram(2, str2))
union = X | Y
intersection = X & Y
difference = X - Y
print('X:', X)
print('Y:', Y)
print('union:', union)
print('intersection:', intersection)
print('difference:', difference)
print("'se' in X:", {('s', 'e')} <= X)
print("'se' in Y:", {('s', 'e')} <= Y)
```
## 07. Sentence generation from a template
***
Implement a function that takes arguments x, y, z and returns the string "x時のyはz" (i.e., "the y at x o'clock is z"). Check the result for x=12, y="気温" (temperature), z=22.4.
```
def generate_sentence(x, y, z):
    return f'{x}時の{y}は{z}'
print(generate_sentence(12, '気温', 22.4))
```
## 08. Cipher text
***
Implement a function cipher that converts each character of a given string according to the following specification:
If the character is a lowercase English letter, replace it with the character whose code is (219 - character code)
Output all other characters unchanged
Use this function to encrypt and then decrypt an English message.
```
def cipher(str):
rep = [chr(219 - ord(x)) if x.islower() else x for x in str]
return ''.join(rep)
message = 'the quick brown fox jumps over the lazy dog'
message = cipher(message)
print('encrypted:', message)
message = cipher(message)
print('decrypted:', message)
```
## 09. Typoglycemia
***
Write a program that, for a sequence of words separated by spaces, keeps the first and last characters of each word and randomly shuffles the characters in between. Words of length 4 or less are left unchanged. Check the result on a suitable English sentence (for example, "I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .").
```
import random
def shuffle(words):
result = []
for word in words.split():
        if len(word) > 4: # shuffle only if the word is longer than 4 characters
word = word[:1] + ''.join(random.sample(word[1:-1], len(word) - 2)) + word[-1:]
result.append(word)
return ' '.join(result)
words = "I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind ."
ans = shuffle(words)
print(ans)
```
## Facial Filters
Using your trained facial keypoint detector, you can now do things like add filters to a person's face, automatically. In this optional notebook, you can play around with adding sunglasses to detected faces in an image by using the keypoints detected around a person's eyes. Check out the `images/` directory to see what other .png's have been provided for you to try, too!
<img src="images/face_filter_ex.png" width=60% height=60%/>
Let's start this process by looking at a sunglasses .png that we'll be working with!
```
# import necessary resources
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import cv2
# load in sunglasses image with cv2 and IMREAD_UNCHANGED
sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)
# plot our image
plt.imshow(sunglasses)
# print out its dimensions
print('Image shape: ', sunglasses.shape)
```
## The 4th dimension
You'll note that this image actually has *4 color channels*, not just 3 as your average RGB image does. This is due to the flag we set, `cv2.IMREAD_UNCHANGED`, which tells OpenCV to read in the extra (alpha) color channel as well.
#### Alpha channel
It has the usual red, blue, and green channels any color image has, and the 4th channel represents the **transparency level of each pixel** in the image; this is often called the **alpha** channel. Here's how the transparency channel works: the lower the value, the more transparent, or see-through, the pixel will become. The lower bound (completely transparent) is zero here, so any pixels set to 0 will not be seen; these look like white background pixels in the image above, but they are actually totally transparent.
This transparency channel allows us to place this rectangular image of sunglasses on an image of a face and still see the face area that is technically covered by the transparent background of the sunglasses image!
Let's check out the alpha channel of our sunglasses image in the next Python cell. Because many of the pixels in the background of the image have an alpha value of 0, we'll need to explicitly print out non-zero values if we want to see them.
```
# print out the sunglasses transparency (alpha) channel
alpha_channel = sunglasses[:,:,3]
print ('The alpha channel looks like this (black pixels = transparent): ')
plt.imshow(alpha_channel, cmap='gray')
# just to double check that there are indeed non-zero values
# let's find and print out every value greater than zero
values = np.where(alpha_channel != 0)
print ('The non-zero values of the alpha channel are: ')
print (values)
```
#### Overlaying images
This means that when we place this sunglasses image on top of another image, we can use the transparency channel as a filter:
* If the pixels are non-transparent (alpha_channel > 0), overlay them on the new image
#### Keypoint locations
In doing this, it's helpful to understand which keypoint belongs to the eyes, mouth, etc., so in the image below we also print the index of each facial keypoint directly on the image so you can tell which keypoints are for the eyes, eyebrows, and so on.
<img src="images/landmarks_numbered.jpg" width=50% height=50%/>
It may be useful to use keypoints that correspond to the edges of the face to define the width of the sunglasses, and the locations of the eyes to define the placement.
Next, we'll load in an example image. Below, you've been given an image and set of keypoints from the provided training set of data, but you can use your own CNN model to generate keypoints for *any* image of a face (as in Notebook 3) and go through the same overlay process!
```
# load in training data
key_pts_frame = pd.read_csv('data/training_frames_keypoints.csv')
# print out some stats about the data
print('Number of images: ', key_pts_frame.shape[0])
# helper function to display keypoints
def show_keypoints(image, key_pts):
"""Show image with keypoints"""
plt.imshow(image)
plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')
# a selected image
n = 120
image_name = key_pts_frame.iloc[n, 0]
image = mpimg.imread(os.path.join('data/training/', image_name))
key_pts = key_pts_frame.iloc[n, 1:].values  # .as_matrix() was removed in newer pandas; .values is equivalent
key_pts = key_pts.astype('float').reshape(-1, 2)
print('Image name: ', image_name)
plt.figure(figsize=(5, 5))
show_keypoints(image, key_pts)
plt.show()
```
Next, you'll see an example of placing sunglasses on the person in the loaded image.
Note that the keypoints are numbered off-by-one in the numbered image above, and so `key_pts[0,:]` corresponds to the first point (1) in the labelled image.
```
# Display sunglasses on top of the image in the appropriate place
# copy of the face image for overlay
image_copy = np.copy(image)
# top-left location for sunglasses to go
# 17 = edge of left eyebrow
x = int(key_pts[17, 0])
y = int(key_pts[17, 1])
# height and width of sunglasses
# h = length of nose
h = int(abs(key_pts[27,1] - key_pts[34,1]))
# w = left to right eyebrow edges
w = int(abs(key_pts[17,0] - key_pts[26,0]))
# read in sunglasses
sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)
# resize sunglasses
new_sunglasses = cv2.resize(sunglasses, (w, h), interpolation = cv2.INTER_CUBIC)
# get region of interest on the face to change
roi_color = image_copy[y:y+h,x:x+w]
# find all non-transparent pts
ind = np.argwhere(new_sunglasses[:,:,3] > 0)
# for each non-transparent point, replace the original image pixel with that of the new_sunglasses
for i in range(3):
roi_color[ind[:,0],ind[:,1],i] = new_sunglasses[ind[:,0],ind[:,1],i]
# set the area of the image to the changed region with sunglasses
image_copy[y:y+h,x:x+w] = roi_color
# display the result!
plt.imshow(image_copy)
```
#### Further steps
Look in the `images/` directory to see other available .png's for overlay! Also, you may notice that the overlay of the sunglasses is not entirely perfect; you're encouraged to play around with the scale of the width and height of the glasses and investigate how to perform [image rotation](https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html) in OpenCV so as to match an overlay with any facial pose.
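For the rotation part, here is a hedged starting-point sketch; the choice of eye-corner keypoints (36 and 45 in the 68-point numbering) and the use of `cv2.getRotationMatrix2D`/`cv2.warpAffine` are assumptions rather than part of the original notebook, and the snippet assumes `key_pts` and `new_sunglasses` from the cells above:
```
# hypothetical sketch: rotate the resized sunglasses to follow the eye line
import numpy as np
import cv2

# angle of the line joining the two outer eye corners (assumed keypoints 36 and 45)
dx = key_pts[45, 0] - key_pts[36, 0]
dy = key_pts[45, 1] - key_pts[36, 1]
angle = np.degrees(np.arctan2(dy, dx))

# rotate around the centre of the sunglasses image; the alpha channel rotates with it
# (the sign of the angle may need flipping depending on the image coordinate convention)
h_s, w_s = new_sunglasses.shape[:2]
M = cv2.getRotationMatrix2D((w_s / 2, h_s / 2), -angle, 1.0)
rotated_sunglasses = cv2.warpAffine(new_sunglasses, M, (w_s, h_s))
```
The rotated image can then be overlaid exactly as before, using its alpha channel as the mask.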
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
%matplotlib inline
torch.manual_seed(777) # reproducibility
# Hyper parameters
num_epochs = 30
num_classes = 10
batch_size = 100
learning_rate = 0.001
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# transform images to tensors of normalized range [-1, 1]
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=True, num_workers=2)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
shuffle=False, num_workers=2)
# Write the code to define the convolutional neural network for CIFAR-10
class Net(nn.Module):
def __init__(self, num_classes):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, num_classes)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
model = Net(num_classes).to(device)
print(model)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
running_loss = 0.0
for i, (images, labels) in enumerate(train_loader):
# Move tensors to the configured device
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
# zero the parameter gradients
optimizer.zero_grad()
# backward + optimize
loss.backward()
optimizer.step()
running_loss += loss.item()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, running_loss / 100))
running_loss = 0.0
# Test the model
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))
```
```
import numpy as np
import matplotlib.pyplot as plt
%run plot.py
```
### Function for the random step
$DX$ is the standard deviation, $bias$ is the constant average of the step
```
# random seed for reproducibility
np.random.seed(12345)
# function for the random step, using lambda construction
# int() for cleaner look and for mimicing a detector with finite resolution
jump = lambda drift, stdev: int(np.random.normal(drift,stdev))
for i in range(10):
print(jump(5,50))
```
### Function for the added pattern
to add to part of a time series, over $z$ bins, with amplitude $a$
```
def pattern(i,z,a):
return int(a*np.sin((np.pi*i)/z))
# random seed for reproducibility
np.random.seed(12345)
# pattern parameters: Z=nr of steps, A=amplitude
Z=12
A=500
# number of data samples
N=10000
# size of each sample of the timeseries
L=60
# step parameters: introduce small positive bias
DX = 50
bias = 5
y = [0] * N
x = [[0] * L for i in range(N)]
for i in range(N):
    # each sample continues the random walk from where the previous one ended
    if i>0:
        x[i][0] = x[i-1][-1] + jump(bias,DX)
    for j in range(1,L):
        x[i][j] = x[i][j-1] + jump(bias,DX)
    # class label cycles through 0, 1, 2
    y[i] = i%3
    ##y[i] = random.randint(0,2)
    if y[i]>0:
        # classes 1 and 2 get the pattern added at a random position,
        # with opposite signs (y=1 -> +pattern, y=2 -> -pattern)
        j0 = np.random.randint(0,L-1-Z)
        ###print(i,j0,j1)
        sign = 3-2*y[i]
        for j in range(Z):
            x[i][j0+j] += sign*pattern(j,Z,A)
for i in range(min(3,N)):
print(x[i],y[i])
Show_data(x,L,"original data")
```
### Save data on file
```
# shell command (Linux/macOS); -p avoids an error if the directory already exists
!mkdir -p DATA
str0 = f'ts_L{L}_Z{Z}_A{A}_DX{DX}_bias{bias}_N{N}.dat'
print(str0)
fname='DATA/x_'+str0
np.savetxt(fname,x,fmt="%d")
fname='DATA/y_'+str0
np.savetxt(fname,y,fmt="%d")
```
# Recurrent Neural Networks with ``gluon``
With gluon, we can now train recurrent neural networks (RNNs), such as the long short-term memory (LSTM) and the gated recurrent unit (GRU), much more neatly. To demonstrate the end-to-end RNN training and prediction pipeline, we take a classic problem in language modeling as a case study: predicting the distribution of the next word given a sequence of previous words.
## Import packages
To begin with, we need to make the following necessary imports.
```
import math
import os
import time
import numpy as np
import mxnet as mx
from mxnet import gluon, autograd
from mxnet.gluon import nn, rnn
```
## Define classes for indexing words of the input document
In a language modeling problem, we define the following classes to facilitate the routine procedures for loading document data. In the following, the ``Dictionary`` class is for word indexing: words in the documents can be converted from the string format to the integer format.
In this example, we use consecutive integers to index words of the input document.
```
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
```
The ``Dictionary`` class is used by the ``Corpus`` class to index the words of the input document.
```
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(path + 'train.txt')
self.valid = self.tokenize(path + 'valid.txt')
self.test = self.tokenize(path + 'test.txt')
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = np.zeros((tokens,), dtype='int32')
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return mx.nd.array(ids, dtype='int32')
```
## Provide an exposition of different RNN models with ``gluon``
Based on the ``gluon.Block`` class, we can make different RNN models available with the following single ``RNNModel`` class.
Users can select their preferred RNN model or compare different RNN models by configuring the argument of the constructor of ``RNNModel``. We will show an example following the definition of the ``RNNModel`` class.
```
class RNNModel(gluon.Block):
"""A model with an encoder, recurrent layer, and a decoder."""
def __init__(self, mode, vocab_size, num_embed, num_hidden,
num_layers, dropout=0.5, tie_weights=False, **kwargs):
super(RNNModel, self).__init__(**kwargs)
with self.name_scope():
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(vocab_size, num_embed,
weight_initializer = mx.init.Uniform(0.1))
if mode == 'rnn_relu':
self.rnn = rnn.RNN(num_hidden, num_layers, activation='relu', dropout=dropout,
input_size=num_embed)
elif mode == 'rnn_tanh':
self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
input_size=num_embed)
elif mode == 'lstm':
self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
input_size=num_embed)
elif mode == 'gru':
self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
input_size=num_embed)
else:
raise ValueError("Invalid mode %s. Options are rnn_relu, "
"rnn_tanh, lstm, and gru"%mode)
if tie_weights:
self.decoder = nn.Dense(vocab_size, in_units = num_hidden,
params = self.encoder.params)
else:
self.decoder = nn.Dense(vocab_size, in_units = num_hidden)
self.num_hidden = num_hidden
def forward(self, inputs, hidden):
emb = self.drop(self.encoder(inputs))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.reshape((-1, self.num_hidden)))
return decoded, hidden
def begin_state(self, *args, **kwargs):
return self.rnn.begin_state(*args, **kwargs)
```
## Select an RNN model and configure parameters
For demonstration purposes, we provide an arbitrary selection of the parameter values. In practice, some parameters should be fine-tuned based on the validation data set.
For instance, to obtain a better performance, as reflected in a lower loss or perplexity, one can set ``args_epochs`` to a larger value.
In this demonstration, the plain RNN with ReLU activation is the chosen type of RNN (``args_model = 'rnn_relu'`` below). For other RNN options, one can replace this string with ``'lstm'``, ``'rnn_tanh'``, or ``'gru'``, as supported by the ``RNNModel`` class defined above.
```
args_data = '../data/nlp/ptb.'
args_model = 'rnn_relu'
args_emsize = 100
args_nhid = 100
args_nlayers = 2
args_lr = 1.0
args_clip = 0.2
args_epochs = 1
args_batch_size = 32
args_bptt = 5
args_dropout = 0.2
args_tied = True
args_cuda = 'store_true'
args_log_interval = 500
args_save = 'model.param'
```
## Load data as batches
We load the document data by leveraging the aforementioned ``Corpus`` class.
To speed up the subsequent data flow in the RNN model, we pre-process the loaded data as batches. This procedure is defined in the following ``batchify`` function.
```
context = mx.gpu() # this notebook takes too long on cpu
corpus = Corpus(args_data)
def batchify(data, batch_size):
"""Reshape data into (num_example, batch_size)"""
nbatch = data.shape[0] // batch_size
data = data[:nbatch * batch_size]
data = data.reshape((batch_size, nbatch)).T
return data
train_data = batchify(corpus.train, args_batch_size).as_in_context(context)
val_data = batchify(corpus.valid, args_batch_size).as_in_context(context)
test_data = batchify(corpus.test, args_batch_size).as_in_context(context)
```
## Build the model
We go on to build the model, initialize model parameters, and configure the optimization algorithms for training the RNN model.
```
ntokens = len(corpus.dictionary)
model = RNNModel(args_model, ntokens, args_emsize, args_nhid,
args_nlayers, args_dropout, args_tied)
model.collect_params().initialize(mx.init.Xavier(), ctx=context)
trainer = gluon.Trainer(model.collect_params(), 'sgd',
{'learning_rate': args_lr, 'momentum': 0, 'wd': 0})
loss = gluon.loss.SoftmaxCrossEntropyLoss()
```
## Train the model and evaluate on validation and testing data sets
Now we can define functions for training and evaluating the model. The following are two helper functions that will be used during model training and evaluation.
```
def get_batch(source, i):
seq_len = min(args_bptt, source.shape[0] - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len]
return data, target.reshape((-1,))
def detach(hidden):
if isinstance(hidden, (tuple, list)):
hidden = [i.detach() for i in hidden]
else:
hidden = hidden.detach()
return hidden
```
The following is the function for model evaluation. It returns the loss of the model prediction. We will discuss the details of the loss measure shortly.
```
def eval(data_source):
total_L = 0.0
ntotal = 0
hidden = model.begin_state(func = mx.nd.zeros, batch_size = args_batch_size, ctx=context)
for i in range(0, data_source.shape[0] - 1, args_bptt):
data, target = get_batch(data_source, i)
output, hidden = model(data, hidden)
L = loss(output, target)
total_L += mx.nd.sum(L).asscalar()
ntotal += L.size
return total_L / ntotal
```
Now we are ready to define the function for training the model. We can monitor the model performance on the training, validation, and testing data sets over iterations.
```
def train():
    global args_lr  # needed because the learning-rate decay below reassigns this module-level variable
    best_val = float("Inf")
for epoch in range(args_epochs):
total_L = 0.0
start_time = time.time()
hidden = model.begin_state(func = mx.nd.zeros, batch_size = args_batch_size, ctx = context)
for ibatch, i in enumerate(range(0, train_data.shape[0] - 1, args_bptt)):
data, target = get_batch(train_data, i)
hidden = detach(hidden)
with autograd.record():
output, hidden = model(data, hidden)
L = loss(output, target)
L.backward()
grads = [i.grad(context) for i in model.collect_params().values()]
# Here gradient is for the whole batch.
# So we multiply max_norm by batch_size and bptt size to balance it.
gluon.utils.clip_global_norm(grads, args_clip * args_bptt * args_batch_size)
trainer.step(args_batch_size)
total_L += mx.nd.sum(L).asscalar()
if ibatch % args_log_interval == 0 and ibatch > 0:
cur_L = total_L / args_bptt / args_batch_size / args_log_interval
print('[Epoch %d Batch %d] loss %.2f, perplexity %.2f' % (
epoch + 1, ibatch, cur_L, math.exp(cur_L)))
total_L = 0.0
val_L = eval(val_data)
print('[Epoch %d] time cost %.2fs, validation loss %.2f, validation perplexity %.2f' % (
epoch + 1, time.time() - start_time, val_L, math.exp(val_L)))
if val_L < best_val:
best_val = val_L
test_L = eval(test_data)
model.save_parameters(args_save)
print('test loss %.2f, test perplexity %.2f' % (test_L, math.exp(test_L)))
else:
args_lr = args_lr * 0.25
trainer._init_optimizer('sgd',
{'learning_rate': args_lr,
'momentum': 0,
'wd': 0})
model.load_parameters(args_save, context)
```
Recall that the RNN model training is based on maximizing the likelihood of the observations. For evaluation purposes, we have used the following two measures:
* Loss: the loss function is defined as the average negative log likelihood of the target words (ground truth) under prediction: $$\text{loss} = -\frac{1}{N} \sum_{i = 1}^N \text{log} \ p_{\text{target}_i}, $$ where $N$ is the number of predictions and $p_{\text{target}_i}$ the predicted likelihood of the $i$-th target word.
* Perplexity: the average per-word perplexity is $\text{exp}(\text{loss})$.
To orient the reader using concrete examples, let us illustrate the idea of the perplexity measure as follows.
* Consider the perfect scenario where the model always predicts the likelihood of the target word as 1. In this case, for every $i$ we have $p_{\text{target}_i} = 1$. As a result, the perplexity of the perfect model is 1.
* Consider a baseline scenario where the model always predicts the likelihood of the target word randomly at uniform among the given word set $W$. In this case, for every $i$ we have $p_{\text{target}_i} = 1 / |W|$. As a result, the perplexity of a uniformly random prediction model is always $|W|$.
* Consider the worst-case scenario where the model always predicts the likelihood of the target word as 0. In this case, for every $i$ we have $p_{\text{target}_i} = 0$. As a result, the perplexity of the worst model is positive infinity.
Therefore, a model with a lower perplexity that is closer to 1 is generally more effective. Any effective model has to achieve a perplexity lower than the cardinality of the target set.
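As a small numeric illustration (the probabilities below are made up rather than actual model output), the loss and perplexity can be computed directly from the definitions:
```
import math
p_target = [0.5, 0.1, 0.25, 0.05]   # hypothetical predicted likelihoods of 4 target words
loss = -sum(math.log(p) for p in p_target) / len(p_target)
perplexity = math.exp(loss)
print(loss, perplexity)
```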
Now we are ready to train the model and evaluate the model performance on validation and testing data sets.
```
train()
model.load_parameters(args_save, context)
test_L = eval(test_data)
print('Best test loss %.2f, test perplexity %.2f'%(test_L, math.exp(test_L)))
```
## Next
[Introduction to optimization](../chapter06_optimization/optimization-intro.ipynb)
For whinges or inquiries, [open an issue on GitHub.](https://github.com/zackchase/mxnet-the-straight-dope)
# Analysing data (GRIB)
In this notebook we will demonstrate how to:
* find locations of extreme values from GRIB data
* compute and plot a time series extracted from a point
* mask values that are not of interest
* compute wind speed
* compute and plot a vertical cross section
* compute and plot a vertical profile
We will use **Metview** to do all of this.
The data we will work with in this notebook is related to storm Joachim from December 2011. The 10 metre wind gust forecast was retrieved from MARS for a few steps on a low resolution grid to provide input for the exercises. This is stored in the file "joachim_wind_gust.grib". Further data, stored in "joachim_uv.grib", gives wind components on various pressure levels.
## Exploring the surface wind gusts
### Loading the data and finding the extreme values
```
import metview as mv
```
Read the data from GRIB file into a Metview [Fieldset](../data_types/fieldset.rst):
```
filename = "joachim_wind_gust.grib"
if mv.exist(filename):
wg = mv.read(filename)
else:
wg = mv.gallery.load_dataset(filename)
print(wg)
print(len(wg))
```
The data is a [Fieldset](../data_types/fieldset.rst) with 5 fields. Let's inspect the contents with a few GRIB keys:
```
mv.grib_get(wg, ['shortName', 'dataDate', 'dataTime',
'stepRange', 'validityDate', 'validityTime'])
```
First let's check the minimum and maximum values over all the fields:
```
print(mv.minvalue(wg), mv.maxvalue(wg))
```
Now, the maximum values for each field - iterate over the fieldset:
```
all_maxes = [mv.maxvalue(f) for f in wg]
all_maxes
```
So we can see immediately that the largest value occurs in the first field. Let's restrict our operations to this first field:
```
wg0 = wg[0]
max0 = all_maxes[0]
```
Find the locations where the value equals the maximum:
```
max_location = mv.find(wg0, max0)
max_location
```
### Extracting a time series
Obtain a time series of values at this location (one value from each field):
```
vals_for_point = mv.nearest_gridpoint(wg, max_location[0])
times = mv.valid_date(wg)
for tv in zip(times, vals_for_point):
print(tv)
```
Let's make a simple plot from the data - we could use matplotlib, but Metview can also give us a nice time series curve:
```
haxis = mv.maxis(
axis_type = "date",
axis_date_type = "hours",
axis_hours_label = "on",
axis_hours_label_height = 0.4,
axis_years_label_height = 0.4,
axis_months_label_height = 0.4,
axis_days_label_height = 0.4
)
ts_view = mv.cartesianview(
x_automatic = "on",
x_axis_type = "date",
y_automatic = "on",
horizontal_axis = haxis
)
curve_wg = mv.input_visualiser(
input_x_type = "date",
input_date_x_values = times,
input_y_values = vals_for_point)
visdef = mv.mgraph(graph_line_thickness=3)
```
Finally we set the plotting target to the **Jupyter notebook** (we only have to do it once in a notebook) and generate the plot:
```
mv.setoutput('jupyter')
```
The order of parameters to the [plot()](../api/functions/plot.rst) command matters: view, data, visual definitions. There can be multiple sets of data and visual definitions in the same [plot()](../api/functions/plot.rst) command.
```
mv.plot(ts_view, curve_wg, visdef)
```
### Finding a range of extreme values
Find the locations where the value is within 95% of the maximum by supplying a range of values:
```
mv.find(wg0, [max0*0.95, max0])
```
If we want to work with these points in Metview, the easiest way is to use the gfind() function to return a [Geopoints](../data_types/geopoints.rst) variable:
```
max_points = mv.gfind(wg0, max0, max0*0.05)
print(len(max_points), 'points')
print('first point:')
max_points[0]
```
Compute a simple bounding box for these points:
```
north = mv.latitudes(max_points).max() + 2
south = mv.latitudes(max_points).min() - 2
east = mv.longitudes(max_points).max() + 2
west = mv.longitudes(max_points).min() - 2
[north, south, east, west]
```
Plot the points on a map using this bounding box:
```
view = mv.geoview(
map_area_definition = "corners",
area = [south, west, north, east]
)
coloured_markers = mv.msymb(
legend = "on",
symbol_type = "marker",
symbol_table_mode = "advanced",
symbol_advanced_table_max_level_colour = "red",
symbol_advanced_table_min_level_colour = "RGB(1, 0.8, 0.8)",
symbol_advanced_table_height_list = 0.8
)
mv.plot(view, max_points, coloured_markers)
```
### Using Fieldset operations to preserve only the extreme values
Alternative way to obtain the largest values - use [Fieldset](../data_types/fieldset.rst) operations:
```
# compute field of 1s and 0s according to test
largest_mask = wg0 > (max0*0.85)
# convert 0s into missing values
largest_mask = mv.bitmap(largest_mask, 0)
# copy the pattern of missing values to wg0
masked_wg0 = mv.bitmap(wg0, largest_mask)
```
The result has **missing values** where the original values were below our threshold. In terms of actual data storage, if we were to write this into a GRIB file, it would be much smaller than the original GRIB file because GRIB is very efficient at (not) storing missing values. Let's plot the result with grid point markers:
```
gridvals_1x1 = mv.mcont(
contour = "off",
contour_grid_value_plot = "on",
contour_grid_value_plot_type = "both",
contour_grid_value_format = "(F4.2)",
contour_grid_value_height = 0.45,
grib_scaling_of_retrieved_fields = "off"
)
mv.plot(view, masked_wg0, gridvals_1x1)
```
## Exploring the atmosphere
### Retrieve U/V wind component data on multiple pressure levels
To explore further into the atmosphere, we will need appropriate data. We can either retrieve from MARS or read from the supplied GRIB file, which was originally taken from MARS and then subsampled and downgraded a little in order to make the file smaller:
```
use_mars = False
if use_mars:
uv = mv.retrieve(
type = "fc",
levelist = [1000,925,850,700,500,400,300,250,200,150,100],
param = ["u","v"],
date = 20111215,
step = 12,
area = [25,-60,75,60],
grid = [0.25,0.25]
)
else:
uv = mv.read("joachim_uv.grib")
mv.grib_get(uv, ['shortName','level'])
```
### Compute wind speed
Extract the U and V components into different Fieldsets, each will have 11 fields:
```
u = mv.read(data = uv, param = "u")
v = mv.read(data = uv, param = "v")
```
Compute the **wind speed** directly on the Fieldsets, giving us a single Fieldset containing 11 fields of wind speed:
```
spd = mv.sqrt(u*u + v*v)
```
Change the paramId and extract the 500 hPa level for plotting (not strictly necessary, but it lets us use the default ecCharts style, which requires a correct paramId):
```
spd = mv.grib_set_long(spd, ['paramId', 10])
spd500 = mv.read(data = spd, levelist = 500)
```
Plot the field into a view that covers the data area:
```
view = mv.geoview(
map_area_definition = "corners",
area = [25,-60,75,60]
)
mv.plot(view, spd500, mv.mcont(contour_automatic_setting='ecmwf', legend='on'))
```
### Compute and plot a vertical cross section
Define a line along an area of interest and plot it onto the map:
```
line = [43.3,-36.0,54.4,13.1] # S, W, N, E
line_graph = mv.mgraph(
graph_type = "curve",
graph_line_colour = "pink",
graph_line_thickness = 7
)
mv.plot(
view,
spd500,
mv.mcont(contour_automatic_setting='ecmwf', legend='on'),
mv.mvl_geoline(*line,1),line_graph
)
```
Use this line to define a cross section view ([mxsectview()](../gen_files/icon_functions/mxsectview.rst)):
```
xs_view = mv.mxsectview(
bottom_level = 1000.0,
top_level = 100,
line = line
)
```
Create a colour scale using [mcont()](../gen_files/icon_functions/mcont.rst) to plot the data with:
```
xs_shade = mv.mcont(
legend = "on",
contour_line_style = "dash",
contour_line_colour = "charcoal",
contour_highlight = "off",
contour_level_count = 20,
contour_label = "off",
contour_shade = "on",
contour_shade_method = "area_fill",
contour_shade_max_level_colour = "red",
contour_shade_min_level_colour = "blue",
contour_shade_colour_direction = "clockwise"
)
```
Into the view, plot the data using the given Contouring definition (note the order):
```
mv.plot(xs_view, spd, xs_shade)
```
We can also obtain the computed cross section data as a [NetCDF](../data_types/netcdf.rst), which we can write to disk as "mv.write('my_xs.nc', xs_data)" :
```
xs_data = mv.mcross_sect(
data = spd,
line = line
)
print(xs_data)
```
### Compute and plot a vertical profile
Define a **vertical profile view** and plotting attributes:
```
vp_view = mv.mvertprofview(
point = [47.0,-3.5],
bottom_level = 1000,
top_level = 100
)
graph_plotting = mv.mgraph(
graph_line_colour = "red",
graph_line_thickness = 3
)
mv.plot(vp_view, spd, graph_plotting)
```
```
import tensorflow as tf
from tensorflow import data
import shutil
import math
from datetime import datetime
from tensorflow.python.feature_column import feature_column
from tensorflow.contrib.learn import learn_runner
from tensorflow.contrib.learn import make_export_strategy
print(tf.__version__)
```
## Steps to use the TF Experiment APIs
1. Define dataset **metadata**
2. Define **data input function** to read the data from csv files + **feature processing**
3. Create TF **feature columns** based on metadata + **extended feature columns**
4. Define an **estimator** (DNNRegressor) creation function with the required **feature columns & parameters**
5. Define a **serving function** to export the model
6. Run an **Experiment** with **learn_runner** to train, evaluate, and export the model
7. **Evaluate** the model using test data
8. Perform **predictions**
```
MODEL_NAME = 'reg-model-03'
TRAIN_DATA_FILES_PATTERN = 'data/train-*.csv'
VALID_DATA_FILES_PATTERN = 'data/valid-*.csv'
TEST_DATA_FILES_PATTERN = 'data/test-*.csv'
RESUME_TRAINING = False
PROCESS_FEATURES = True
EXTEND_FEATURE_COLUMNS = True
MULTI_THREADING = True
```
## 1. Define Dataset Metadata
* CSV file header and defaults
* Numeric and categorical feature names
* Target feature name
* Unused columns
```
HEADER = ['key','x','y','alpha','beta','target']
HEADER_DEFAULTS = [[0], [0.0], [0.0], ['NA'], ['NA'], [0.0]]
NUMERIC_FEATURE_NAMES = ['x', 'y']
CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {'alpha':['ax01', 'ax02'], 'beta':['bx01', 'bx02']}
CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.keys())
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES
TARGET_NAME = 'target'
UNUSED_FEATURE_NAMES = list(set(HEADER) - set(FEATURE_NAMES) - {TARGET_NAME})
print("Header: {}".format(HEADER))
print("Numeric Features: {}".format(NUMERIC_FEATURE_NAMES))
print("Categorical Features: {}".format(CATEGORICAL_FEATURE_NAMES))
print("Target: {}".format(TARGET_NAME))
print("Unused Features: {}".format(UNUSED_FEATURE_NAMES))
```
## 2. Define Data Input Function
* Input csv files name pattern
* Use TF Dataset APIs to read and process the data
* Parse CSV lines to feature tensors
* Apply feature processing
* Return (features, target) tensors
### a. parsing and preprocessing logic
```
def parse_csv_row(csv_row):
columns = tf.decode_csv(csv_row, record_defaults=HEADER_DEFAULTS)
features = dict(zip(HEADER, columns))
for column in UNUSED_FEATURE_NAMES:
features.pop(column)
target = features.pop(TARGET_NAME)
return features, target
def process_features(features):
features["x_2"] = tf.square(features['x'])
features["y_2"] = tf.square(features['y'])
features["xy"] = tf.multiply(features['x'], features['y']) # features['x'] * features['y']
features['dist_xy'] = tf.sqrt(tf.squared_difference(features['x'],features['y']))
return features
```
### b. data pipeline input function
```
def csv_input_fn(files_name_pattern, mode=tf.estimator.ModeKeys.EVAL,
skip_header_lines=0,
num_epochs=None,
batch_size=200):
shuffle = True if mode == tf.estimator.ModeKeys.TRAIN else False
print("")
print("* data input_fn:")
print("================")
print("Input file(s): {}".format(files_name_pattern))
print("Batch size: {}".format(batch_size))
print("Epoch Count: {}".format(num_epochs))
print("Mode: {}".format(mode))
print("Shuffle: {}".format(shuffle))
print("================")
print("")
file_names = tf.matching_files(files_name_pattern)
dataset = data.TextLineDataset(filenames=file_names)
dataset = dataset.skip(skip_header_lines)
if shuffle:
dataset = dataset.shuffle(buffer_size=2 * batch_size + 1)
#useful for distributed training when training on 1 data file, so it can be sharded
#dataset = dataset.shard(num_workers, worker_index)
dataset = dataset.batch(batch_size)
dataset = dataset.map(lambda csv_row: parse_csv_row(csv_row))
if PROCESS_FEATURES:
dataset = dataset.map(lambda features, target: (process_features(features), target))
#dataset = dataset.batch(batch_size) #??? very long time
dataset = dataset.repeat(num_epochs)
iterator = dataset.make_one_shot_iterator()
features, target = iterator.get_next()
return features, target
features, target = csv_input_fn(files_name_pattern="")
print("Feature read from CSV: {}".format(list(features.keys())))
print("Target read from CSV: {}".format(target))
```
## 3. Define Feature Columns
The input numeric columns are assumed to be normalized (or to have the same scale). Otherwise, a normalizer_fn, along with the normalization parameters (mean, stdev), should be passed to the tf.feature_column.numeric_column() constructor.
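For example, a normalized numeric column could be declared as follows; the statistics here are placeholders you would compute from the training data, and this snippet is not part of the model defined below:
```
# hypothetical normalization of the 'x' feature
x_mean, x_stdv = 0.0, 1.0  # placeholder statistics computed from the training data
x_column = tf.feature_column.numeric_column(
    'x', normalizer_fn=lambda t: (t - x_mean) / x_stdv)
```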
```
def extend_feature_columns(feature_columns):
# crossing, bucketizing, and embedding can be applied here
feature_columns['alpha_X_beta'] = tf.feature_column.crossed_column(
[feature_columns['alpha'], feature_columns['beta']], 4)
return feature_columns
def get_feature_columns():
CONSTRUCTED_NUMERIC_FEATURES_NAMES = ['x_2', 'y_2', 'xy', 'dist_xy']
all_numeric_feature_names = NUMERIC_FEATURE_NAMES.copy()
if PROCESS_FEATURES:
all_numeric_feature_names += CONSTRUCTED_NUMERIC_FEATURES_NAMES
numeric_columns = {feature_name: tf.feature_column.numeric_column(feature_name)
for feature_name in all_numeric_feature_names}
categorical_column_with_vocabulary = \
{item[0]: tf.feature_column.categorical_column_with_vocabulary_list(item[0], item[1])
for item in CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.items()}
feature_columns = {}
if numeric_columns is not None:
feature_columns.update(numeric_columns)
if categorical_column_with_vocabulary is not None:
feature_columns.update(categorical_column_with_vocabulary)
if EXTEND_FEATURE_COLUMNS:
feature_columns = extend_feature_columns(feature_columns)
return feature_columns
feature_columns = get_feature_columns()
print("Feature Columns: {}".format(feature_columns))
```
## 4. Define an Estimator Creation Function
* Get dense (numeric) columns from the feature columns
* Convert categorical columns to indicator columns
* Instantiate a DNNRegressor estimator given the **dense + indicator** feature columns + params
```
def create_estimator(run_config, hparams):
feature_columns = list(get_feature_columns().values())
dense_columns = list(
filter(lambda column: isinstance(column, feature_column._NumericColumn),
feature_columns
)
)
categorical_columns = list(
filter(lambda column: isinstance(column, feature_column._VocabularyListCategoricalColumn) |
isinstance(column, feature_column._BucketizedColumn),
feature_columns)
)
indicator_columns = list(
map(lambda column: tf.feature_column.indicator_column(column),
categorical_columns)
)
estimator = tf.estimator.DNNRegressor(
feature_columns= dense_columns + indicator_columns ,
hidden_units= hparams.hidden_units,
optimizer= tf.train.AdamOptimizer(),
activation_fn= tf.nn.elu,
dropout= hparams.dropout_prob,
config= run_config
)
print("")
print("Estimator Type: {}".format(type(estimator)))
print("")
return estimator
```
## 5. Define Serving Function
```
def csv_serving_input_fn():
SERVING_HEADER = ['x','y','alpha','beta']
SERVING_HEADER_DEFAULTS = [[0.0], [0.0], ['NA'], ['NA']]
rows_string_tensor = tf.placeholder(dtype=tf.string,
shape=[None],
name='csv_rows')
receiver_tensor = {'csv_rows': rows_string_tensor}
row_columns = tf.expand_dims(rows_string_tensor, -1)
columns = tf.decode_csv(row_columns, record_defaults=SERVING_HEADER_DEFAULTS)
features = dict(zip(SERVING_HEADER, columns))
return tf.estimator.export.ServingInputReceiver(
process_features(features), receiver_tensor)
```
## 6. Run Experiment
### a. Define Experiment Function
```
def generate_experiment_fn(**experiment_args):
def _experiment_fn(run_config, hparams):
train_input_fn = lambda: csv_input_fn(
files_name_pattern=TRAIN_DATA_FILES_PATTERN,
mode = tf.contrib.learn.ModeKeys.TRAIN,
num_epochs=hparams.num_epochs,
batch_size=hparams.batch_size
)
eval_input_fn = lambda: csv_input_fn(
files_name_pattern=VALID_DATA_FILES_PATTERN,
mode=tf.contrib.learn.ModeKeys.EVAL,
num_epochs=1,
batch_size=hparams.batch_size
)
estimator = create_estimator(run_config, hparams)
return tf.contrib.learn.Experiment(
estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
eval_steps=None,
**experiment_args
)
return _experiment_fn
```
### b. Set HParam and RunConfig
```
TRAIN_SIZE = 12000
NUM_EPOCHS = 1000
BATCH_SIZE = 500
NUM_EVAL = 10
CHECKPOINT_STEPS = int((TRAIN_SIZE/BATCH_SIZE) * (NUM_EPOCHS/NUM_EVAL))
hparams = tf.contrib.training.HParams(
num_epochs = NUM_EPOCHS,
batch_size = BATCH_SIZE,
hidden_units=[8, 4],
dropout_prob = 0.0)
model_dir = 'trained_models/{}'.format(MODEL_NAME)
run_config = tf.contrib.learn.RunConfig(
save_checkpoints_steps=CHECKPOINT_STEPS,
tf_random_seed=19830610,
model_dir=model_dir
)
print(hparams)
print("Model Directory:", run_config.model_dir)
print("")
print("Dataset Size:", TRAIN_SIZE)
print("Batch Size:", BATCH_SIZE)
print("Steps per Epoch:",TRAIN_SIZE/BATCH_SIZE)
print("Total Steps:", (TRAIN_SIZE/BATCH_SIZE)*NUM_EPOCHS)
print("Required Evaluation Steps:", NUM_EVAL)
print("That is 1 evaluation step after each",NUM_EPOCHS/NUM_EVAL," epochs")
print("Save Checkpoint After",CHECKPOINT_STEPS,"steps")
```
### c. Run Experiment via learn_runner
```
if not RESUME_TRAINING:
print("Removing previous artifacts...")
shutil.rmtree(model_dir, ignore_errors=True)
else:
print("Resuming training...")
tf.logging.set_verbosity(tf.logging.INFO)
time_start = datetime.utcnow()
print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
print(".......................................")
learn_runner.run(
experiment_fn=generate_experiment_fn(
export_strategies=[make_export_strategy(
csv_serving_input_fn,
exports_to_keep=1
)]
),
run_config=run_config,
schedule="train_and_evaluate",
hparams=hparams
)
time_end = datetime.utcnow()
print(".......................................")
print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
print("")
time_elapsed = time_end - time_start
print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))
```
## 7. Evaluate the Model
```
TRAIN_SIZE = 12000
VALID_SIZE = 3000
TEST_SIZE = 5000
train_input_fn = lambda: csv_input_fn(files_name_pattern= TRAIN_DATA_FILES_PATTERN,
mode= tf.estimator.ModeKeys.EVAL,
batch_size= TRAIN_SIZE)
valid_input_fn = lambda: csv_input_fn(files_name_pattern= VALID_DATA_FILES_PATTERN,
mode= tf.estimator.ModeKeys.EVAL,
batch_size= VALID_SIZE)
test_input_fn = lambda: csv_input_fn(files_name_pattern= TEST_DATA_FILES_PATTERN,
mode= tf.estimator.ModeKeys.EVAL,
batch_size= TEST_SIZE)
estimator = create_estimator(run_config, hparams)
train_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
train_rmse = round(math.sqrt(train_results["average_loss"]),5)
print()
print("############################################################################################")
print("# Train RMSE: {} - {}".format(train_rmse, train_results))
print("############################################################################################")
valid_results = estimator.evaluate(input_fn=valid_input_fn, steps=1)
valid_rmse = round(math.sqrt(valid_results["average_loss"]),5)
print()
print("############################################################################################")
print("# Valid RMSE: {} - {}".format(valid_rmse,valid_results))
print("############################################################################################")
test_results = estimator.evaluate(input_fn=test_input_fn, steps=1)
test_rmse = round(math.sqrt(test_results["average_loss"]),5)
print()
print("############################################################################################")
print("# Test RMSE: {} - {}".format(test_rmse, test_results))
print("############################################################################################")
```
## 8. Prediction
```
import itertools
predict_input_fn = lambda: csv_input_fn(files_name_pattern=TEST_DATA_FILES_PATTERN,
mode= tf.estimator.ModeKeys.PREDICT,
batch_size= 5)
predictions = estimator.predict(input_fn=predict_input_fn)
values = list(map(lambda item: item["predictions"][0],list(itertools.islice(predictions, 5))))
print()
print("Predicted Values: {}".format(values))
```
## What can we improve?
* **Use .tfrecords files instead of CSV** - TFRecord files are optimized for TensorFlow; a minimal writer sketch is shown after this list.
* **Build a Custom Estimator** - Custom Estimator APIs give you the flexibility to build custom models in a simple and standard way
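A minimal sketch of converting one row into a `tf.train.Example` and writing it to a TFRecord file; the output file name and the exact feature layout are assumptions made for illustration:
```
# hypothetical converter: one row -> tf.train.Example -> TFRecord file
def make_example(x, y, alpha, beta, target):
    return tf.train.Example(features=tf.train.Features(feature={
        'x': tf.train.Feature(float_list=tf.train.FloatList(value=[x])),
        'y': tf.train.Feature(float_list=tf.train.FloatList(value=[y])),
        'alpha': tf.train.Feature(bytes_list=tf.train.BytesList(value=[alpha.encode()])),
        'beta': tf.train.Feature(bytes_list=tf.train.BytesList(value=[beta.encode()])),
        'target': tf.train.Feature(float_list=tf.train.FloatList(value=[target])),
    }))

with tf.python_io.TFRecordWriter('data/train.tfrecords') as writer:
    writer.write(make_example(0.5, -1.2, 'ax01', 'bx02', 3.4).SerializeToString())
```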
# Time Series Forecast with Basic RNN
* Dataset is downloaded from https://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data
```
import pandas as pd
import numpy as np
import datetime
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
df = pd.read_csv('data/pm25.csv')
print(df.shape)
df.head()
df.isnull().sum()*100/df.shape[0]
df.dropna(subset=['pm2.5'], axis=0, inplace=True)
df.reset_index(drop=True, inplace=True)
df['datetime'] = df[['year', 'month', 'day', 'hour']].apply(
lambda row: datetime.datetime(year=row['year'],
month=row['month'], day=row['day'],hour=row['hour']), axis=1)
df.sort_values('datetime', ascending=True, inplace=True)
df.head()
df['year'].value_counts()
plt.figure(figsize=(5.5, 5.5))
g = sns.lineplot(data=df['pm2.5'], color='g')
g.set_title('pm2.5 between 2010 and 2014')
g.set_xlabel('Index')
g.set_ylabel('pm2.5 readings')
```
### Note
* Scaling the variables makes the optimization work better, so here we scale the variable into the [0, 1] range
```
scaler = MinMaxScaler(feature_range=(0, 1))
df['scaled_pm2.5'] = scaler.fit_transform(np.array(df['pm2.5']).reshape(-1, 1))
df.head()
plt.figure(figsize=(5.5, 5.5))
g = sns.lineplot(data=df['scaled_pm2.5'], color='purple')
g.set_title('Scaled pm2.5 between 2010 and 2014')
g.set_xlabel('Index')
g.set_ylabel('scaled_pm2.5 readings')
# 2014 data as validation data, before 2014 as training data
split_date = datetime.datetime(year=2014, month=1, day=1, hour=0)
df_train = df.loc[df['datetime']<split_date]
df_val = df.loc[df['datetime']>=split_date]
print('Shape of train:', df_train.shape)
print('Shape of test:', df_val.shape)
df_val.reset_index(drop=True, inplace=True)
df_val.head()
# The way this works is to use the previous nb_timesteps observations as X and the next observation as the target,
## collecting the data with a stride-1 rolling window.
def makeXy(ts, nb_timesteps):
"""
Input:
ts: original time series
nb_timesteps: number of time steps in the regressors
Output:
X: 2-D array of regressors
y: 1-D array of target
"""
X = []
y = []
for i in range(nb_timesteps, ts.shape[0]):
X.append(list(ts.loc[i-nb_timesteps:i-1]))
y.append(ts.loc[i])
X, y = np.array(X), np.array(y)
return X, y
X_train, y_train = makeXy(df_train['scaled_pm2.5'], 7)
print('Shape of train arrays:', X_train.shape, y_train.shape)
print(X_train[0], y_train[0])
print(X_train[1], y_train[1])
X_val, y_val = makeXy(df_val['scaled_pm2.5'], 7)
print('Shape of validation arrays:', X_val.shape, y_val.shape)
print(X_val[0], y_val[0])
print(X_val[1], y_val[1])
```
### Note
* In 2D array above for X_train, X_val, it means (number of samples, number of time steps)
* However RNN input has to be 3D array, (number of samples, number of time steps, number of features per timestep)
* Only 1 feature which is scaled_pm2.5
* So, the code below converts 2D array to 3D array
```
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
X_val = X_val.reshape((X_val.shape[0], X_val.shape[1], 1))
print('Shape of arrays after reshaping:', X_train.shape, X_val.shape)
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.metrics import mean_absolute_error
tf.random.set_seed(10)
model = Sequential()
model.add(SimpleRNN(32, input_shape=(X_train.shape[1:])))
model.add(Dropout(0.2))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='rmsprop', loss='mean_absolute_error', metrics=['mae'])
model.summary()
save_weights_at = 'basic_rnn_model'
save_best = ModelCheckpoint(save_weights_at, monitor='val_loss', verbose=0,
save_best_only=True, save_weights_only=False, mode='min',
save_freq='epoch')
history = model.fit(x=X_train, y=y_train, batch_size=16, epochs=20,
verbose=1, callbacks=[save_best], validation_data=(X_val, y_val),
shuffle=True)
# load the best model
best_model = load_model('basic_rnn_model')
# Compare the prediction with y_true
preds = best_model.predict(X_val)
pred_pm25 = scaler.inverse_transform(preds)
pred_pm25 = np.squeeze(pred_pm25)
# Measure MAE of y_pred and y_true
mae = mean_absolute_error(df_val['pm2.5'].loc[7:], pred_pm25)
print('MAE for the validation set:', round(mae, 4))
mae = mean_absolute_error(df_val['scaled_pm2.5'].loc[7:], preds)
print('MAE for the scaled validation set:', round(mae, 4))
# Check the metrics and loss of each apoch
mae = history.history['mae']
val_mae = history.history['val_mae']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(mae))
plt.plot(epochs, mae, 'bo', label='Training MAE')
plt.plot(epochs, val_mae, 'b', label='Validation MAE')
plt.title('Training and Validation MAE')
plt.legend()
plt.figure()
# Here I was using MAE as the loss too, that's why the two plots looked almost the same...
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()
plt.show()
```
### Note
* The best model saved by `ModelCheckpoint` is the 12th-epoch result, which had a val_loss of 0.12
* From the history plot of training vs validation loss, the 12th epoch (i=11) has the lowest validation loss. This aligns with the result from `ModelCheckpoint`
* Setting a different TensorFlow seed will give different results!
# Title
_Brief abstract/introduction/motivation. State what the chapter is about in 1-2 paragraphs._
_Then, have an introduction video:_
```
from bookutils import YouTubeVideo
YouTubeVideo("w4u5gCgPlmg")
```
**Prerequisites**
* _Refer to earlier chapters as notebooks here, as here:_ [Earlier Chapter](Fuzzer.ipynb).
```
import bookutils
```
## Synopsis
<!-- Automatically generated. Do not edit. -->
To [use the code provided in this chapter](Importing.ipynb), write
```python
>>> from fuzzingbook.Template import <identifier>
```
and then make use of the following features.
_For those only interested in using the code in this chapter (without wanting to know how it works), give an example. This will be copied to the beginning of the chapter (before the first section) as text with rendered input and output._
You can use `int_fuzzer()` as:
```python
>>> print(int_fuzzer())
76.5
```
## _Section 1_
\todo{Add}
## _Section 2_
\todo{Add}
### Excursion: All the Details
This text will only show up on demand (HTML) or not at all (PDF). This is useful for longer implementations, or repetitive, or specialized parts.
### End of Excursion
## _Section 3_
\todo{Add}
_If you want to introduce code, it is helpful to state the most important functions, as in:_
* `random.randrange(start, end)` - return a random integer in the range [`start`, `end`) (the end value is excluded)
* `range(start, end)` - create a list with integers from `start` to `end`. Typically used in iterations.
* `for elem in list: body` executes `body` in a loop with `elem` taking each value from `list`.
* `for i in range(start, end): body` executes `body` in a loop with `i` from `start` to `end` - 1.
* `chr(n)` - return a character with ASCII code `n`
```
import random
def int_fuzzer():
"""A simple function that returns a random integer"""
return random.randrange(1, 100) + 0.5
# More code
pass
```
## _Section 4_
\todo{Add}
## Synopsis
_For those only interested in using the code in this chapter (without wanting to know how it works), give an example. This will be copied to the beginning of the chapter (before the first section) as text with rendered input and output._
You can use `int_fuzzer()` as:
```
print(int_fuzzer())
```
## Lessons Learned
* _Lesson one_
* _Lesson two_
* _Lesson three_
## Next Steps
_Link to subsequent chapters (notebooks) here, as in:_
* [use _mutations_ on existing inputs to get more valid inputs](MutationFuzzer.ipynb)
* [use _grammars_ (i.e., a specification of the input format) to get even more valid inputs](Grammars.ipynb)
* [reduce _failing inputs_ for efficient debugging](Reducer.ipynb)
## Background
_Cite relevant works in the literature and put them into context, as in:_
The idea of ensuring that each expansion in the grammar is used at least once goes back to Burkhardt \cite{Burkhardt1967}, to be later rediscovered by Paul Purdom \cite{Purdom1972}.
## Exercises
_Close the chapter with a few exercises such that people have things to do. To make the solutions hidden (to be revealed by the user), have them start with_
```
**Solution.**
```
_Your solution can then extend up to the next title (i.e., any markdown cell starting with `#`)._
_Running `make metadata` will automatically add metadata to the cells such that the cells will be hidden by default, and can be uncovered by the user. The button will be introduced above the solution._
### Exercise 1: _Title_
_Text of the exercise_
```
# Some code that is part of the exercise
pass
```
_Some more text for the exercise_
**Solution.** _Some text for the solution_
```
# Some code for the solution
2 + 2
```
_Some more text for the solution_
### Exercise 2: _Title_
_Text of the exercise_
**Solution.** _Solution for the exercise_
# Databases <span class="tocSkip"></span>
Introduction
------------
Many of you will deal with complex data — and often, lots of it. Ecological and Evolutionary data are particularly complex because they contain large numbers of attributes, often measured in very different scales and units for individual taxa, populations, etc. In this scenario, storing the data in a database makes a lot of sense! You can
easily include the database in your analysis workflow — indeed, that's why people use databases. And you can use python (and R) to build, manipulate and use your database.
### Relational databases
A *relational* database is a collection of interlinked (*related*) tables that altogether store a complex dataset
in a logical, computer-readable format. Dividing a dataset into multiple tables minimizes redundancy. For example, if your data were sampled from three sites, then rather than repeating the site name and description in every row of a text file, you could just specify a numerical "key" that points to another table containing the sampling site name and description.
Finally, if you have many rows in your data file, the type of sequential access we have been using in our `python` and `R` scripts is inefficient — you should be able to access any row instantly, regardless of its position.
Data columns in a database are usually called *fields*, while the rows are the *records*. Here are a few things to
keep in mind about databases:
* Each field typically contains only one data type (e.g., integers, floats, strings)
* Each record is a "data point", composed of different values, one for each field — somewhat like a python tuple
* Some fields are special, and are called *keys*:
* The *primary key* uniquely defines a record in a table (e.g., each row is identified by a unique number)
* To allow fast retrieval, some fields (and typically all the keys) are indexed — a copy of certain columns that can be searched very efficiently.
* *Foreign keys* are keys in a table that are primary keys in another table and define relationships between the tables
The key to designing a database is to minimize redundancy and dependency without losing the logical consistency of tables — this is called *normalization* (arguably more of an art than a science!)
Let's look at a simple example.
Imagine you recorded body sizes of species from different field sites in a single text file (e.g., a `.csv` file) with the following fields:
|Field|Definition|
|:-|:-|
|`ID` | Unique ID for the record|
|`SiteName` | Name of the site|
|`SiteLong` | Longitude of the site|
|`SiteLat` | Latitude of the site|
|`SamplingDate` | Date of the sample|
|`SamplingHour` | Hour of the sampling|
|`SamplingAvgTemp` | Average air temperature on the sampling day|
|`SamplingWaterTemp` | Temperature of the water|
|`SamplingPH` | pH of the water|
|`SpeciesCommonName`| Species of the sampled individual|
|`SpeciesLatinBinom`| Latin binomial of the species|
|`BodySize` | Width of the individual|
|`BodyWeight` | Weight of the individual|
It would be logical to divide the data into four tables:
*Site table*:
|Field|Definition|
|:-|:-|
|`SiteID` |ID for the site|
|`SiteName`| Name of the site|
|`SiteLong` | Longitude of the site|
|`SiteLat` | Latitude of the site|
*Sample table*:
|Field|Definition|
|:-|:-|
|`SamplingID` | ID for the sampling date|
|`SamplingDate` | Date of the sample|
|`SamplingHour` | Hour of the sample|
|`SamplingAvgTemp` |Average air temperature|
|`SamplingWaterTemp`| Temperature of the water|
|`SamplingPH` | pH of the water|
*Species table*:
|Field|Definition|
|:-|:-|
|`SpeciesID` | ID for the species|
|`SpeciesCommonName`| Species name|
|`SpeciesLatinBinom` | Latin binomial of the species|
*Individual table*:
|Field|Definition|
|:-|:-|
|`IndividualID`| ID for the individual sampled|
|`SpeciesID` | ID for the species|
|`SamplingID` |ID for the sampling day|
|`SiteID` | ID for the site|
|`BodySize` | Width of the individual|
|`BodyWeight` | Weight of the individual|
In each table, the first ID field is the primary key. The last table contains three foreign keys because each individual is associated with one species, one sampling day and one sampling site.
These structural features of a database are called its *schema*.
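To make this concrete, here is a minimal sketch (in SQLite syntax, which we introduce below) of how two of these tables could be declared — the column types are illustrative, not prescriptive:
```sql
CREATE TABLE Site (
    SiteID   INTEGER PRIMARY KEY,
    SiteName TEXT,
    SiteLong REAL,
    SiteLat  REAL
);

CREATE TABLE Individual (
    IndividualID INTEGER PRIMARY KEY,
    SpeciesID    INTEGER,
    SamplingID   INTEGER,
    SiteID       INTEGER,
    BodySize     REAL,
    BodyWeight   REAL,
    FOREIGN KEY (SiteID) REFERENCES Site(SiteID)
);

-- Indexing a foreign key speeds up joins and lookups
CREATE INDEX idx_individual_site ON Individual(SiteID);
```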
## SQLite
`SQLite` is a simple (and very popular) SQL (Structured Query Language)-based solution for managing localized, personal databases. I can safely bet that most, if not all of you unknowingly (or knowingly!) use `SQLite` — it is used by Mac OS X, Firefox, Acrobat Reader, iTunes, Skype, the iPhone, etc. SQLite is also the database "engine" underlying your [Silwood Masters Web App](http://silwoodmasters.co.uk).
We can easily use SQLite through Python scripts. First, install SQLite by typing in the Ubuntu terminal:
```bash
sudo apt install sqlite3 libsqlite3-dev
```
Also, make sure that you have the necessary package for Python by typing `import sqlite3` in the python or ipython shell. Finally, you may install a GUI for SQLite3:
`sudo apt install sqliteman`
Now type `sqlite3` in the Ubuntu terminal to check if SQLite successfully launches.
SQLite has very few data types (and lacks a boolean and a date type):
|Field Data Type| Definition|
|:-|:-|
|`NULL` | The value is a NULL value |
|`INTEGER` | The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on its magnitude |
| `REAL` | The value is a floating point value, stored as an 8-byte IEEE floating point number |
| `TEXT` | The value is a text string |
| `BLOB` | The value is a blob of data, stored exactly as it was input (useful for binary types, such as bitmap images or pdfs) |
Typically, you will build a database by importing csv data — be aware that:
* Headers: the csv should have no headers
* Separators: if the comma is the separator, each record should not contain any other commas
* Quotes: there should be no quotes in the data
* Newlines: there should be no newline characters within a record
Now build your first database in SQLite! As an example, we will use a global dataset on metabolic traits called *Biotraits* that we are currently developing in our lab (it should be in your `Data` directory). This dataset contains 164 columns (fields). Thermal response curves for different traits and species are stored in rows. This means
that the site description or taxonomy is repeated as many times as temperatures were measured for the curve. You can imagine how much redundancy there can be!
For this reason, it is easier to migrate the dataset to SQL and split it into several tables:
* *TCP*: Includes the thermal curve performance for each species and trait (as many rows per trait and species as temperatures have been measured within the TCP)
* *TraitInfo*: Contains the site description and the conditions under which the traits were measured (one row per thermal curve)
* *Consumer*: Consumer description including taxonomy (one row per thermal curve).
* *Resource*: Resource description including taxonomy (one row per thermal curve).
* *Size*: Size data for each species (one row per thermal curve)
* *DataSource*: Contains information about the data source (citation, contributors) (one row per thermal curve).
So all these tables compose the *Biotraits* `schema`.
In a Linux/Unix terminal, navigate to your `data` directory.
Now, launch a new database using sqlite:
```bash
sqlite3 Biotraits.db
```
This should return something like:
```sql
SQLite version 3.11.0 2016-02-15 17:29:24
Enter ".help" for usage hints.
```
This creates an empty database in your `data` directory.
You should now see the sqlite cursor (`sqlite>`), and will be entering your commands there.
Now we need to create a table with some fields. Let's start with the *TraitInfo* table (enter these one line at a time, without the `...>`):
```bash
sqlite> CREATE TABLE TraitInfo (Numbers integer primary key,
...> OriginalID text,
...> FinalID text,
...> OriginalTraitName text,
...> OriginalTraitDef text,
...> Replicates integer,
...> Habitat integer,
...> Climate text,
...> Location text,
...> LocationType text,
...> LocationDate text,
...> CoordinateType text,
...> Latitude integer,
...> Longitude integer);
```
Note that I am writing all SQL commands in upper case, but it is not necessary. I am using upper case here because SQL syntax is long and clunky, and it quickly becomes hard to spot (and edit) commands in long strings of complex queries.
Now let's import the dataset:
`sqlite> .mode csv`
`sqlite> .import TraitInfo.csv TraitInfo`
So we built a table and imported a csv file into it. Now we can ask
SQLite to show all the tables we currently have:
`sqlite> .tables`
Let's run our first *Query* (note that you need a semicolon
to end a command):
`sqlite> SELECT * FROM TraitInfo LIMIT 5;`
Let's turn on some nicer formatting:
`sqlite> .mode column`
`sqlite> .header ON`
`sqlite> SELECT * FROM TraitInfo LIMIT 5;`
You should see something like:
```bash
Numbers OriginalID FinalID OriginalTraitName ...
------- ---------- ---------- ------------------------- ...
1 1 MTD1 Resource Consumption Rate ...
4 2 MTD2 Resource Consumption Rate ...
6 3 MTD3 Resource Consumption Rate ...
9 4 MTD4 Resource Mass Consumption ...
12 5 MTD5 Resource Mass Consumption ...
```
The main statement to select records from a table is
`SELECT`:
`sqlite> .width 40 ## NOTE: Control the width`
`sqlite> SELECT DISTINCT OriginalTraitName FROM TraitInfo; # Returns unique values`
Which gives:
```bash
OriginalTraitName
----------------------------------------
Resource Consumption Rate
Resource Mass Consumption Rate
Mass-Specific Mass Consumption Rate
Voluntary Body Velocity
Forward Attack Distance
Foraging Velocity
Resource Reaction Distance
....
```
Now try these:
```bash
sqlite> SELECT DISTINCT Habitat FROM TraitInfo
...> WHERE OriginalTraitName = "Resource Consumption Rate"; # Sets a condition
Habitat
----------------------------------------
freshwater
marine
terrestrial
sqlite> SELECT COUNT (*) FROM TraitInfo; # Returns number of rows
Count (*)
--------------------
2336
sqlite> SELECT Habitat, COUNT(OriginalTraitName) # Returns number of rows for each group
...> FROM TraitInfo GROUP BY Habitat;
Habitat COUNT(OriginalTraitName)
---------- ------------------------
NA 16
freshwater 609
marine 909
terrestria 802
sqlite> SELECT COUNT(DISTINCT OriginalTraitName) # Returns number of unique values
...> FROM TraitInfo;
COUNT(DISTINCT OriginalTraitName)
---------------------------------
220
sqlite> SELECT COUNT(DISTINCT OriginalTraitName) TraitCount # Assigns alias to the variable
...> FROM TraitInfo;
TraitCount
----------
sqlite> SELECT Habitat,
...> COUNT(DISTINCT OriginalTraitName) AS TN
...> FROM TraitInfo GROUP BY Habitat;
Habitat TN
---------- ----------
NA 7
freshwater 82
marine 95
terrestria 96
sqlite> SELECT * # WHAT TO SELECT
...> FROM TraitInfo # FROM WHERE
...> WHERE Habitat = "marine" # CONDITIONS
...> AND OriginalTraitName = "Resource Consumption Rate";
Numbers OriginalID FinalID OriginalTraitName ...
---------- ---------- ---------- ------------------------- ...
778 308 MTD99 Resource Consumption Rate ...
798 310 MTD101 Resource Consumption Rate ...
806 311 MTD102 Resource Consumption Rate ...
993 351 MTD113 Resource Consumption Rate ...
```
The structure of the `SELECT` command is as follows (*note: SQL keywords are case-**in**sensitive*):
```bash
SELECT [DISTINCT] field
FROM table
WHERE predicate
GROUP BY field
HAVING predicate
ORDER BY field
LIMIT number
;
```
Let's try some more elaborate queries:
```bash
sqlite> SELECT Numbers FROM TraitInfo LIMIT 5;
Numbers
----------
1
4
6
9
12
sqlite> SELECT Numbers
...> FROM TraitInfo
...> WHERE Numbers > 100
...> AND Numbers < 200;
Numbers
----------
107
110
112
115
sqlite> SELECT Numbers
...> FROM TraitInfo
...> WHERE Habitat = "freshwater"
...> AND Numbers > 700
...> AND Numbers < 800;
Numbers
----------
704
708
712
716
720
725
730
735
740
744
748
```
You can also match records using something like regular expressions. In SQL, when we use the `LIKE` operator, the percent symbol (`%`) matches any sequence of zero or more characters and the underscore (`_`) matches any single character. Similarly, `GLOB` uses the asterisk (`*`) and the question mark (`?`).
```bash
sqlite> SELECT DISTINCT OriginalTraitName
...> FROM TraitInfo
...> WHERE OriginalTraitName LIKE "_esource Consumption Rate";
OriginalTraitName
-------------------------
Resource Consumption Rate
sqlite> SELECT DISTINCT OriginalTraitName
...> FROM TraitInfo
...> WHERE OriginalTraitName LIKE "Resource%";
OriginalTraitName
----------------------------------------
Resource Consumption Rate
Resource Mass Consumption Rate
Resource Reaction Distance
Resource Habitat Encounter Rate
Resource Consumption Probability
Resource Mobility Selection
Resource Size Selection
Resource Size Capture Intent Acceptance
Resource Encounter Rate
Resource Escape Response Probability
sqlite> SELECT DISTINCT OriginalTraitName
...> FROM TraitInfo
...> WHERE OriginalTraitName GLOB "Resource*";
OriginalTraitName
----------------------------------------
Resource Consumption Rate
Resource Mass Consumption Rate
Resource Reaction Distance
Resource Habitat Encounter Rate
Resource Consumption Probability
Resource Mobility Selection
Resource Size Selection
Resource Size Capture Intent Acceptance
Resource Encounter Rate
Resource Escape Response Probability
# NOTE THAT GLOB IS CASE SENSITIVE, WHILE LIKE IS NOT
sqlite> SELECT DISTINCT OriginalTraitName
...> FROM TraitInfo
...> WHERE OriginalTraitName LIKE "resource%";
OriginalTraitName
----------------------------------------
Resource Consumption Rate
Resource Mass Consumption Rate
Resource Reaction Distance
Resource Habitat Encounter Rate
Resource Consumption Probability
Resource Mobility Selection
Resource Size Selection
Resource Size Capture Intent Acceptance
Resource Encounter Rate
Resource Escape Response Probability
```
We can also order by any column:
```bash
sqlite> SELECT OriginalTraitName, Habitat FROM
...> TraitInfo LIMIT 5;
OriginalTraitName Habitat
------------------------- ----------
Resource Consumption Rate freshwater
Resource Consumption Rate freshwater
Resource Consumption Rate freshwater
Resource Mass Consumption freshwater
Resource Mass Consumption freshwater
sqlite> SELECT OriginalTraitName, Habitat FROM
...> TraitInfo ORDER BY OriginalTraitName LIMIT 5;
OriginalTraitName Habitat
-------------------------- ----------
48-hr Hatching Probability marine
Asexual Reproduction Rate marine
Attack Body Acceleration marine
Attack Body Velocity marine
Attack Body Velocity marine
```
So far we have only queried data from a single table but, as we have seen, the point of storing a database in SQL is that we can use multiple tables to minimize redundancy within them. And of course, querying data from those different tables at the same time will be necessary at some point.
Let's then import one more table into our database:
```bash
sqlite> CREATE TABLE Consumer (Numbers integer primary key,
...> OriginalID text,
...> FinalID text,
...> Consumer text,
...> ConCommon text,
...> ConKingdom text,
...> ConPhylum text,
...> ConClass text,
...> ConOrder text,
...> ConFamily text,
...> ConGenus text,
...> ConSpecies text);
```
```bash
sqlite> .import Consumer.csv Consumer
```
Now we have two tables in our database:
```bash
sqlite> .tables
Consumer TraitInfo
```
These tables are connected by two different keys: `OriginalID` and `FinalID`. These are unique IDs for each thermal curve. For each `FinalID` we can get the trait name (`OriginalTraitName`) from the `TraitInfo` table and the corresponding species name (`ConSpecies`) from the `Consumer` table.
```bash
sqlite> SELECT A1.FinalID, A1.Consumer, A2.FinalID, A2.OriginalTraitName
...> FROM Consumer A1, TraitInfo A2
...> WHERE A1.FinalID=A2.FinalID LIMIT 8;
FinalID Consumer FinalID OriginalTraitName
---------- --------------------- ---------- -------------------------
MTD1 Chaoborus trivittatus MTD1 Resource Consumption Rate
MTD2 Chaoborus trivittatus MTD2 Resource Consumption Rate
MTD3 Chaoborus americanus MTD3 Resource Consumption Rate
MTD4 Stizostedion vitreum MTD4 Resource Mass Consumption
MTD5 Macrobrachium rosenbe MTD5 Resource Mass Consumption
MTD6 Ranatra dispar MTD6 Resource Consumption Rate
MTD7 Ceriodaphnia reticula MTD7 Mass-Specific Mass Consum
MTD8 Polyphemus pediculus MTD8 Voluntary Body Velocity
# In the same way we assign aliases to variables, we can use them for tables.
```
This example seems easy because both tables have the same number of rows. But the query remains just as simple when the tables have different numbers of rows.
Let's import the TCP table:
```bash
sqlite> CREATE TABLE TCP (Numbers integer primary key,
...> OriginalID text,
...> FinalID text,
...> OriginalTraitValue integer,
...> OriginalTraitUnit text,
...> LabGrowthTemp integer,
...> LabGrowthTempUnit text,
...> ConTemp integer,
...> ConTempUnit text,
...> ConTempMethod text,
...> ConAcc text,
...> ConAccTemp integer);
sqlite> .import TCP.csv TCP
sqlite> .tables
Consumer TCP TraitInfo
```
Now imagine we want to query the thermal performance curves that we have stored for the species *Mytilus edulis*. Using `FinalID` to match the tables, the query can be as simple as:
```bash
sqlite> SELECT A1.ConTemp, A1.OriginalTraitValue, A2.OriginalTraitName, A3.Consumer
...> FROM TCP A1, TraitInfo A2, Consumer A3
...> WHERE A1.FinalID=A2.FinalID AND A3.ConSpecies="Mytilus edulis" AND A3.FinalID=A2.FinalID LIMIT 8;
ConTemp OriginalTraitValue OriginalTraitName Consumer
---------- -------------------- ------------------------------ --------------------
25 2.707075 Filtration Rate Mytilus edulis
20 3.40721 Filtration Rate Mytilus edulis
5 3.419455 Filtration Rate Mytilus edulis
15 3.711165 Filtration Rate Mytilus edulis
10 3.875465 Filtration Rate Mytilus edulis
5 0.34 In Vitro Gill Particle Transpo Mytilus edulis
10 0.46 In Vitro Gill Particle Transpo Mytilus edulis
15 0.595 In Vitro Gill Particle Transpo Mytilus edulis
```
So on and so forth (joining tables etc. would come next...). But if you want to keep practicing and learn more about sqlite commands, this is a very useful site: <http://www.sqlite.org/sessions/sqlite.html>. You can store your queries and database management commands in a `.sql` file (`geany` will take care of syntax highlighting etc.).
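For example, if your queries are saved in a (hypothetical) file called `queries.sql`, you can run them non-interactively from the shell, or read them in from the sqlite3 prompt:
```bash
sqlite3 Biotraits.db < queries.sql
# or, inside the sqlite3 shell:
# sqlite> .read queries.sql
```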
## SQLite with Python
It is easy to access, update and manage SQLite databases with Python (you will find this script file in the `code` directory):
```python
import sqlite3

# Create a database in memory (use a file name instead to persist it on disk)
conn = sqlite3.connect(":memory:")
c = conn.cursor()

# Create a table with a single text field
c.execute("CREATE TABLE tt (Val TEXT)")
conn.commit()

# Insert several records at once, using parameter substitution
z = [('a',), ('ab',), ('abc',), ('b',), ('c',)]
c.executemany("INSERT INTO tt VALUES (?)", z)
conn.commit()

# Query the table: all values starting with 'a'
print(c.execute("SELECT * FROM tt WHERE Val LIKE 'a%'").fetchall())

conn.close()
```
You can create a database in memory, without using the disk — thus you can create and discard an SQLite database within your workflow!
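To plug query results straight into an analysis, you can also read them into a pandas DataFrame — a minimal sketch, assuming `pandas` is installed and `Biotraits.db` is in the working directory:
```python
import sqlite3
import pandas as pd

conn = sqlite3.connect("Biotraits.db")
df = pd.read_sql_query("SELECT FinalID, OriginalTraitName FROM TraitInfo LIMIT 5;", conn)
print(df)
conn.close()
```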
Readings and Resources
----------------------
* "The Definitive Guide to SQLite" is a pretty complete guide to SQLite and freely available from [here](
http://sd.blackball.lv/library/The_Definitive_Guide_to_SQLite_2nd_edition.pdf)
* For databases in general, try the [Stanford Introduction to Databases course](https://www.coursera.org/course/db)
* A set of sqlite tutorials in Jupyter: https://github.com/royalosyin/Practice-SQL-with-SQLite-and-Jupyter-Notebook
# Name
Data processing by creating a cluster in Cloud Dataproc
# Label
Cloud Dataproc, cluster, GCP, Cloud Storage, KubeFlow, Pipeline
# Summary
A Kubeflow Pipeline component to create a cluster in Cloud Dataproc.
# Details
## Intended use
Use this component at the start of a Kubeflow Pipeline to create a temporary Cloud Dataproc cluster to run Cloud Dataproc jobs as steps in the pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|----------|-------------|----------|-----------|-----------------|---------|
| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | | |
| region | The Cloud Dataproc region to create the cluster in. | No | GCPRegion | | |
| name | The name of the cluster. Cluster names within a project must be unique. You can reuse the names of deleted clusters. | Yes | String | | None |
| name_prefix | The prefix of the cluster name. | Yes | String | | None |
| initialization_actions | A list of Cloud Storage URIs identifying executables to execute on each node after the configuration is completed. By default, executables are run on the master and all the worker nodes. | Yes | List | | None |
| config_bucket | The Cloud Storage bucket to use to stage the job dependencies, the configuration files, and the job driver console’s output. | Yes | GCSPath | | None |
| image_version | The version of the software inside the cluster. | Yes | String | | None |
| cluster | The full [cluster configuration](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster) (see the example below the table). | Yes | Dict | | None |
| wait_interval | The number of seconds to pause before polling the operation. | Yes | Integer | | 30 |
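For the `cluster` argument, the dictionary follows the Cluster REST resource linked above. The snippet below is an illustrative sketch only — the field names and values are examples, so check the linked reference for the full schema and required fields:
```
import json

cluster_config = {
    "config": {
        "masterConfig": {"numInstances": 1, "machineTypeUri": "n1-standard-4"},
        "workerConfig": {"numInstances": 2, "machineTypeUri": "n1-standard-4"},
        "softwareConfig": {"imageVersion": "1.5"},
    }
}

# e.g. pass it to the component as cluster=json.dumps(cluster_config)
```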
## Output
Name | Description | Type
:--- | :---------- | :---
cluster_name | The name of the cluster. | String
Note: You can recycle the cluster by using the [Dataproc delete cluster component](https://github.com/kubeflow/pipelines/tree/master/components/gcp/dataproc/delete_cluster).
## Cautions & requirements
To use the component, you must:
* Set up the GCP project by following these [steps](https://cloud.google.com/dataproc/docs/guides/setup-project).
* Ensure the component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the following types of access to the Kubeflow user service account:
* Read access to the Cloud Storage buckets which contain the initialization action files.
* The role, `roles/dataproc.editor` on the project.
## Detailed description
This component creates a new Dataproc cluster by using the [Dataproc create cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/create).
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipeline SDK:
```
%%capture --no-stderr
!pip3 install kfp --upgrade
```
2. Load the component using KFP SDK
```
import kfp.components as comp
dataproc_create_cluster_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.5.0/components/gcp/dataproc/create_cluster/component.yaml')
help(dataproc_create_cluster_op)
```
### Sample
Note: The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
#### Set sample parameters
```
# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
# Optional Parameters
EXPERIMENT_NAME = 'Dataproc - Create Cluster'
```
#### Example pipeline that uses the component
```
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc create cluster pipeline',
description='Dataproc create cluster pipeline'
)
def dataproc_create_cluster_pipeline(
project_id = PROJECT_ID,
region = 'us-central1',
name='',
name_prefix='',
initialization_actions='',
config_bucket='',
image_version='',
cluster='',
wait_interval='30'
):
dataproc_create_cluster_op(
project_id=project_id,
region=region,
name=name,
name_prefix=name_prefix,
initialization_actions=initialization_actions,
config_bucket=config_bucket,
image_version=image_version,
cluster=cluster,
wait_interval=wait_interval)
```
#### Compile the pipeline
```
pipeline_func = dataproc_create_cluster_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Kubernetes Engine for Kubeflow](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts)
* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py)
* [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/create_cluster/sample.ipynb)
* [Dataproc create cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/create)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Double-check-we-are-using-Python-3.8" data-toc-modified-id="Double-check-we-are-using-Python-3.8-1">Double check we are using Python 3.8</a></span></li><li><span><a href="#What-is-Naive-Bayes?" data-toc-modified-id="What-is-Naive-Bayes?-2">What is Naive Bayes?</a></span></li><li><span><a href="#What-is-Guassian-Naive-Bayes" data-toc-modified-id="What-is-Guassian-Naive-Bayes-3">What is Guassian Naive Bayes</a></span></li><li><span><a href="#Machine-learning-models-needs-data" data-toc-modified-id="Machine-learning-models-needs-data-4">Machine learning models needs data</a></span></li><li><span><a href="#Build-Naive-Bayes-Classifier" data-toc-modified-id="Build-Naive-Bayes-Classifier-5">Build Naive Bayes Classifier</a></span><ul class="toc-item"><li><span><a href="#What-about-P(data)?" data-toc-modified-id="What-about-P(data)?-5.1">What about <code>P(data)</code>?</a></span></li></ul></li><li><span><a href="#Prediction" data-toc-modified-id="Prediction-6">Prediction</a></span></li><li><span><a href="#Sources" data-toc-modified-id="Sources-7">Sources</a></span></li></ul></div>
# Double check we are using Python 3.8
```
from platform import python_version
assert python_version().startswith('3.8')
```
# What is Naive Bayes?
One of the simplest machine learning models
<center><img src="https://chrisalbon.com/images/machine_learning_flashcards/Gaussian_Naive_Bayes_Classifier_print.png" width="75%"/></center>
# What is Gaussian Naive Bayes?
Continuous features are assumed to follow a Gaussian (i.e., normal) distribution.
Each feature has a mean and variance calculated from the data, segmented by category (i.e., class).
# Machine learning models need data
https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Sex_classification
# Build Naive Bayes Classifier
```
from statistics import NormalDist
# Historical data
height_male = NormalDist.from_samples([6, 5.92, 5.58, 5.92])
height_female = NormalDist.from_samples([5, 5.5, 5.42, 5.75])
weight_male = NormalDist.from_samples([180, 190, 170, 165])
weight_female = NormalDist.from_samples([100, 150, 130, 150])
foot_size_male = NormalDist.from_samples([12, 11, 12, 10])
foot_size_female = NormalDist.from_samples([6, 8, 7, 9])
# Starting with a 50% prior probability of being male or female,
prior_male, prior_female = 0.5, 0.5
# New person
ht = 6.0 # height
wt = 130 # weight
fs = 8 # foot size
# We compute the posterior as the prior times the product of likelihoods for the feature measurements given the gender:
posterior_male = (prior_male *
height_male.pdf(ht) *
weight_male.pdf(wt) *
foot_size_male.pdf(fs))
posterior_female = (prior_female *
height_female.pdf(ht) *
weight_female.pdf(wt) *
foot_size_female.pdf(fs))
```
What about `P(data)`?
-------
The denominator is P(data), which is the prior probability of the data/features occurring. How likely is it to see a human (regardless of category) with this weight?
The denominators are the same for all class labels so we can cancel/drop them to simplify our computation. Ignoring the denominator is standard for Naive Bayes classifier.
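If we do want proper probabilities, we can simply normalize the two unnormalized posteriors so that they sum to 1 — a small sketch using the variables computed above:
```
# Normalize so the two posteriors sum to 1; this recovers P(male | data) and P(female | data)
total = posterior_male + posterior_female
p_male = posterior_male / total
p_female = posterior_female / total
print(f"P(male | data) = {p_male:.4f}, P(female | data) = {p_female:.4f}")
```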
# Prediction
```
# The final prediction goes to the largest posterior.
# This is known as the maximum a posteriori or MAP:
prediction = 'male' if posterior_male > posterior_female else 'female'
print(f"Given the data, the new person is predicted to be: {prediction}")
```
# Sources
- https://docs.python.org/3/library/statistics.html#normaldist-examples-and-recipes
- https://chrisalbon.com/images/machine_learning_flashcards/Gaussian_Naive_Bayes_Classifier_print.png
<a href="https://colab.research.google.com/github/dimi-fn/Various-Data-Science-Scripts/blob/main/Algorithms.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Algorithm performance can be measured based on **runtime** (Big-O time complexity) and **space** complexity (Memory Footprint of the algorithm).
# Algorithm Performance Analysis: Runtime Measurement with Big-O Notation
* Execution time is not considered a good metric of algorithmic complexity (different hardware requires different execution times); therefore, the `Big-O notation` is used as a standard metric instead.
* Algorithm speed is not measured in seconds but in terms of growth
* The Big-O Notation indicates the grade of how an algorithm **scales** with regard to changes in the input dataset size
> **Big-O Notation**: A mathematical measure used to describe the **complexity** of the algorithm.
>> **n**: the relationship between the input and the steps taken by the algorithm. In the examples below, `n` will be the number of items in the input array (i.e. n == input size)
>> **c**: positive constant
|Time - Algorithm | Big-O Notation - Complexity | Algorithm Type| Runtime Analysis Example: let n=10 | n = 20
| --- | --- | -------|---------------------------------------| ----|
| Constant | O(1) | | 1| 1|
| Logarithmic | O(log(n)) | Binary Search| log(10) = 1 |log(20) = 1.301 |
| Polylogarithmic | O((log(n))<sup>c</sup>) | | (log(10))<sup>2</sup> = 1 |(log(20))<sup>2</sup> = 1.693 |
| Linear | O(n) | Linear Search| 10 = 10| 20 = 20 |
| Log Linear (Superlinear) | O(nlog(n)) | Heap Sort, Merge Sort|10 * log(10) = 10 | 20 * log(20) = 26.02 |
| Quadratic | O(n<sup>2</sup>) | | | |
| Qubic | O(n<sup>3</sup>) | | | |
| Polynomial (Algebraic) |O(n<sup>c</sup>) | Strassen’s Matrix Multiplication, Bubble Sort, Selection Sort, Insertion Sort, Bucket Sort| 10<sup>2</sup> = 100| 20<sup>2</sup> = 400|
| Exponential | O(c<sup>n</sup>) | Tower of Hanoi| 2<sup>10</sup> = 1,024| 2<sup>20</sup> = 1,048,576 |
| Factorial | O(n!) | Determinant Expansion by Minors, Brute force Search algorithm for Traveling Salesman Problem | 10! = 3,628,800 |20! ≈ 2.43 × 10<sup>18</sup>|
## Constant complexity - O(1)
* complexity remains constant
* the steps required to complete the execution of an algorithm remain constant, irrespective of the number of inputs, i.e., the algorithm needs the same amount of time to execute independently of the input size
> e.g. below the algorithm 1) finds the square of the first element of the list, and 2) it prints it out. It does that every time regardless of the number of items in the list -> complexity == constant
```
def constant_complexity(items):
result = items[0] * items[0]
print (result)
constant_complexity([10, 15, 20, 25, 30])
import matplotlib.pyplot as plt
import numpy as np
x = [10, 15, 20, 25, 30]
y = [2, 2, 2, 2, 2]
plt.figure(figsize=(14,7))
plt.plot(x, y, 'b')
plt.xlabel('Inputs')
plt.ylabel('Steps')
plt.title('Constant Complexity - O(c)')
plt.show()
```
## Logarithmic - O(log(n))
* the number of operations increases by one each time the data is doubled
* binary search -> logarithmic time
* simply put: the algorithm compares the target value to the middle element of a **sorted** array, then continues in the upper or lower half depending on how the target compares, halving the search space at each step (see the sketch below)
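A minimal sketch of binary search, assuming the input list is already sorted:
```
def binary_search(items, target):
    """Return the index of target in the sorted list items, or -1 if absent."""
    low, high = 0, len(items) - 1
    while low <= high:
        mid = (low + high) // 2
        if items[mid] == target:
            return mid
        elif items[mid] < target:
            low = mid + 1    # target can only be in the upper half
        else:
            high = mid - 1   # target can only be in the lower half
    return -1

print(binary_search([10, 15, 20, 25, 30], 25))  # prints 3
```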
## Linear Complexity - O(n)
* the steps required to complete the execution of an algorithm increase or decrease linearly with the number of inputs.
* the number of the iterations that the algorithm will proceed to will be equal to the size of the input items array. Example:
```
def linear_complexity(items):
for item in items:
print(item)
linear_complexity([10, 15, 20, 25, 30])
import matplotlib.pyplot as plt
import numpy as np
x = [10, 15, 20, 25, 30]
y = [10, 15, 20, 25, 30]
plt.figure(figsize=(14,7))
plt.plot(x, y, 'b')
plt.xlabel('Inputs')
plt.ylabel('Steps')
plt.title('Linear Complexity - O(n)')
plt.show()
```
## Quadratic Complexity O(n<sup>2</sup>)
* the steps required to execute an algorithm are a quadratic function of the number of items in the input
* total number of steps: n * n, e.g. if the input has 10 elements then the algorithm will do 100 operations
> Below: a total of 25 operations since the number of items in list is 5
```
def quadratic_complexity(items):
    count_operations = 0
    for item in items:
        for item2 in items:
            print(item, '---', item2)
            count_operations = count_operations + 1
    print("Number of operations was: {}".format(count_operations))

quadratic_complexity([10, 15, 20, 25, 30])
```
# Algorithm Performance Analysis: Space Complexity (Memory Footprint)
Time complexity is not enough to judge an algorithm's performance. We also need to take into consideration the memory allocation and usage during the algorithm's execution.
There is often a trade-off between achieving the best runtime performance and the most frugal memory allocation, much like the trade-off between precision and recall in classification metrics. In other words, one algorithm might be quite fast, but it might also occupy a lot of memory.
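As an illustrative sketch, both functions below compute the same sum of squares, but the first builds an intermediate list (O(n) extra space) while the second accumulates the result with a generator (O(1) extra space):
```
def sum_of_squares_list(n):
    squares = [i * i for i in range(n)]   # allocates a list of n items -> O(n) extra space
    return sum(squares)

def sum_of_squares_gen(n):
    return sum(i * i for i in range(n))   # produces one value at a time -> O(1) extra space

print(sum_of_squares_list(10))  # 285
print(sum_of_squares_gen(10))   # 285
```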
## Perform Analysis on Athletes
This file reads the detailed athlete information and performs Linear Regression analysis on this data.
The following areas are examined in this code
* <a href=#Visualize>Visualize Data</a>
* <a href=#LinearRegression>Linear Regression</a>
* <a href=#LASSO>LASSO</a>
* <a href=#MixedEffect>Mixed Effect</a>
* <a href=#Algebraic>Algebraic Model</a>
```
# Necessary imports
import pandas as pd
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import patsy
from math import sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import kurtosis, skew
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import RidgeCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Lasso
from sklearn import linear_model
%matplotlib inline
```
## Read data
```
boy1600 = pd.read_csv("../1allDistrict_boy1600.csv")
girl1600 = pd.read_csv("../1allDistrict_girl1600.csv")
girl400 = pd.read_csv("../1allDistrict_girl400.csv")
boy400 = pd.read_csv("../1allDistrict_boy400.csv")
boy1600['sex'] = 'boy'
girl1600['sex'] = 'girl'
boy400['sex'] = 'boy'
girl400['sex'] = 'girl'
print(f"Girl 1600: {girl1600.shape}")
print(f"Boy 1600: {boy1600.shape}")
print(f"Girl 400: {girl400.shape}")
print(f"Boy 400: {boy400.shape}")
# put the boy and girl data into one file
athlete_data = pd.concat([boy1600,girl1600])
#athlete_data = pd.concat([boy400,girl400])
print(athlete_data.shape)
print(athlete_data.columns)
# rename columns because statsmodels doesn't like the 12_PR format
# add a numerical column for sex of the athlete
athlete_data['PR12'] = athlete_data['12_PR']
athlete_data['PR11'] = athlete_data['11_PR']
athlete_data['PR10'] = athlete_data['10_PR']
athlete_data['PR9'] = athlete_data['9_PR']
athlete_data['Nsex'] = [1 if s == 'boy' else 0 for s in athlete_data['sex']]
print('number of unique schools: ',len(athlete_data['School'].unique()))
```
## Set up X and y
```
# How many unique athletes in each district
athlete_data.District.value_counts()
print(athlete_data.District[athlete_data.District == 'District 7'])
print(athlete_data.District[athlete_data.District == 'District 8'])
# for 1600 data
# drop the 3 athletes from District 7 and 8
athlete_data.drop(index=104,inplace=True)
athlete_data.drop(index=201,inplace=True)
athlete_data.drop(index=252,inplace=True)
print(athlete_data.District[athlete_data.District == 'District 7'])
print(athlete_data.District[athlete_data.District == 'District 8'])
# for 400 data
# drop the athlete from District 8
athlete_data.drop(index=132,inplace=True)
athlete_data.head()
```
Variable |Description |Value
----------|------------------------------:|:----
District 1|Athlete school in this district| 0 or 1
District 2|Athlete school in this district| 0 or 1
District 3|Athlete school in this district| 0 or 1
District 4|Athlete school in this district| 0 or 1
District 5|Athlete school in this district| 0 or 1
District 6|Athlete school in this district| 0 or 1
Sex |Athlete girl or boy | 1=boy, 0=girl
Grad Year |Graduation Year | int
9th Grade PR|Best time in 9th Grade | float
10th Grade PR|Best time in 10th Grade | float
11th Grade PR|Best time in 11th Grade | float|
```
#given the athlete_data read from files, generate the X & y dataframes
def get_Xy(athlete_data,Dist=100):
X = pd.DataFrame()
if Dist == 100:
# create one-hot columns for District
X = pd.get_dummies(athlete_data[['District']])
X = pd.concat([X, athlete_data[['PR9','PR10','PR11','Nsex','Grad_Yr']]], axis=1, sort=False)
y = athlete_data['PR12']
else:
filtered_data = athlete_data[athlete_data['District'] == 'District '+str(Dist)]
X = filtered_data[['PR9','PR10','PR11','Nsex','Grad_Yr']]
y = filtered_data['PR12']
#y = pd.DataFrame(y.values.reshape((len(y),1)))
return(X,y)
X,y = get_Xy(athlete_data,100)
X.shape
y.shape
type(y)
```
## Visualize Data <a name='Visualize' />
```
X.corr()
X.info()
sns.distplot(athlete_data['PR12'])
plt.show()
sns.distplot(athlete_data['PR12'],label = '12th Grade',norm_hist=False)
sns.distplot(athlete_data['PR11'],label = '11th Grade',norm_hist=False)
sns.distplot(athlete_data['PR10'],label = '10th Grade',norm_hist=False)
sns.distplot(athlete_data['PR9'],label = '9th Grade',norm_hist=False)
plt.legend()
plt.show();
# plot 9th grade PR vs 12th grade PR for boys by district
grid=sns.lmplot(x = "PR9",y = "PR12",col="District",
col_wrap=3,
data=athlete_data[athlete_data['Nsex'] == 1])
plt.ylim(top=450) # adjust the top leaving bottom unchanged
plt.ylim(bottom=240) # adjust the bottom leaving top unchanged
sns.catplot(x="District",y="PR12",
data=athlete_data[(athlete_data['Nsex'] == 1)]);
#plt.figure(figsize=(10,2))
plt.ylabel('12th grade PR (Seconds)')
plt.xlabel('District')
plt.xticks(range(0,6),('1','2','3','4','5','6'));
plt.title('Variation in 12th grade times by district');
#plt.figure(figsize=(6,3))
#plt.savefig('12_PR_by_District.png')
#boxplot = athlete_data.boxplot(column=[athlete_data[athlete_data[District == 'District 1'['PR12'],
# athlete_data[athlete_data[District == 'District 2'['PR12']])
data = ([athlete_data[athlete_data.District == 'District 1']['PR12'],
athlete_data[athlete_data.District == 'District 2']['PR12'],
athlete_data[athlete_data.District == 'District 3']['PR12'],
athlete_data[athlete_data.District == 'District 4']['PR12'],
athlete_data[athlete_data.District == 'District 5']['PR12'],
athlete_data[athlete_data.District == 'District 6']['PR12']])
fig_box, fig = plt.subplots()
fig.set_title('12th grade PR for each district')
fig.boxplot(data)
plt.xlabel('District')
plt.ylabel('time (seconds)')
plt.show()
# How many athletes from each school
athlete_data.School.value_counts()
```
## Linear Regression Model <a name='LinearRegression' />
```
#divide in to train and test sets
X_train,X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,random_state=42,stratify=X['Nsex'])
X_train.shape
X_test.shape
# Create an empty model
lr = LinearRegression()
# Fit the model to the full dataset
lr.fit(X_train, y_train)
# Print out the R^2 for the model against the full dataset
lr.score(X_train,y_train)
y_pred = lr.predict(X_test)
X.columns
RMSE = sqrt(((y_test-y_pred)**2).values.mean())
print(RMSE)
plt.scatter(y_pred,y_test,alpha=0.5);
plt.ylabel('y_test (seconds)');
plt.xlabel('y_predicted (seconds)');
plt.plot([max(y_pred),min(y_pred)],[max(y_pred),min(y_pred)],color='r')
#plt.plot([240,470],[240,470],color='r')
#plt.savefig('test_vs_pred.png');
print('Using all data (9th, 10th & 11th grades) to predict 12th grade PR')
print('Train R^2: ',lr.score(X_train, y_train))
print('Train RMSE:',
sqrt(mean_squared_error(y_train, lr.predict(X_train))))
print('Test R^2: ', lr.score(X_test, y_test))
print('Test RMSE:',
sqrt(mean_squared_error(y_test, lr.predict(X_test))))
data = y_test-lr.predict(X_test)
print('Skew:',skew(data))
print("mean : ", np.mean(data))
print("var : ", np.var(data))
print("skew : ",skew(data))
print("kurt : ",kurtosis(data))
data = y_test-lr.predict(X_test)
plt.hist(data,40)
plt.plot([0,0],[0,150],color='r')
plt.title('histogram of residuals')
plt.xlabel('y_predicted - y')
#remove 9th grade PR data - how good does it do now
X1_train = X_train.drop(['PR9'],axis=1)
X1_test = X_test.drop(['PR9'],axis=1)
lr.fit(X1_train,y_train)
print('Using only 10th & 11th to predict 12th grade PR')
print('Train R^2: ',lr.score(X1_train, y_train))
print('Train RMSE:',
sqrt(mean_squared_error(y_train, lr.predict(X1_train))))
print('Test R^2: ', lr.score(X1_test, y_test))
print('Test RMSE:',
sqrt(mean_squared_error(y_test, lr.predict(X1_test))))
#remove 9th grade PR data - how good does it do now
# only select boys
athlete_data_boy = athlete_data[athlete_data.sex == 'boy'].copy()
X1,y1 = get_Xy(athlete_data_boy,100)
X1_train, X1_test, y1_train, y1_test = train_test_split(X1, y1, test_size=0.4,random_state=42)
X1_train = X1_train.drop(['PR9'], axis=1)
X1_test = X1_test.drop(['PR9'], axis=1)
lr = LinearRegression()
lr.fit(X1_train,y1_train)
print('Using only 10th & 11th to predict 12th grade PR for boys')
print('Train R^2: ',lr.score(X1_train, y1_train))
print('Train RMSE:',
sqrt(mean_squared_error(y1_train, lr.predict(X1_train))))
print('Test R^2: ', lr.score(X1_test, y1_test))
print('Test RMSE:',
sqrt(mean_squared_error(y1_test, lr.predict(X1_test))))
#remove 10th and 11th grade PR data - how good does it do now
X2_train = X_train.drop(['PR10','PR11'],axis=1)
X2_test = X_test.drop(['PR10','PR11'],axis=1)
lr.fit(X2_train,y_train)
print('Using only 9th grade to predict 12th grade PR')
print('Train R^2: ',lr.score(X2_train, y_train))
print('Train MSE:',
mean_squared_error(y_train, lr.predict(X2_train)))
print('Test R^2: ', lr.score(X2_test, y_test))
print('Test MSE:',
mean_squared_error(y_test, lr.predict(X2_test)))
# add a PR11**2 and PR10**2 term to linear regression
X3_train = X_train.copy()
X3_train['PR11squared'] = X_train['PR11']**2
X3_train['PR10squared'] = X_train['PR10']**2
X3_test = X_test.copy()
X3_test['PR11squared'] = X_test['PR11']**2
X3_test['PR10squared'] = X_test['PR10']**2
# Create an empty model
lr = LinearRegression()
lr.fit(X3_train,y_train)
print('Using squared terms as well to predict 12th grade PR')
print('Train R^2: ',lr.score(X3_train, y_train))
print('Train RMSE:',
sqrt(mean_squared_error(y_train, lr.predict(X3_train))))
print('Test R^2: ', lr.score(X3_test, y_test))
print('Test RMSE:',
sqrt(mean_squared_error(y_test, lr.predict(X3_test))))
# add a PR11**2 and PR10**2 term to linear regression
X4_train = X_train.copy()
X4_train['PR11squared'] = X_train['PR11']**2
X4_train['PR10squared'] = X_train['PR10']**2
#X4_train['PR11_o_PR10'] = X_train['PR11']/X_train['PR10']
#X4_train['PR10_o_PR9'] = X_train['PR10']/X_train['PR9']
X4_test = X_test.copy()
X4_test['PR11squared'] = X_test['PR11']**2
X4_test['PR10squared'] = X_test['PR10']**2
#X4_test['PR11_o_PR10'] = X_test['PR11']/X_test['PR10']
#X4_test['PR10_o_PR9'] = X_test['PR11']/X_test['PR9']
# Create an empty model
lr = LinearRegression()
lr.fit(X4_train,y_train)
print('Using squared terms as well to predict 12th grade PR')
print('Train R^2: ',lr.score(X4_train, y_train))
print('Train RMSE:',
sqrt(mean_squared_error(y_train, lr.predict(X4_train))))
print('Test R^2: ', lr.score(X4_test, y_test))
print('Test RMSE:',
sqrt(mean_squared_error(y_test, lr.predict(X4_test))))
data = y_test-lr.predict(X4_test)
print('Skew:',skew(data))
print("mean : ", np.mean(data))
print("var : ", np.var(data))
print("skew : ",skew(data))
print("kurt : ",kurtosis(data))
import yellowbrick
from sklearn.linear_model import Ridge
from yellowbrick.regressor import ResidualsPlot
# Instantiate the linear model and visualizer
visualizer = ResidualsPlot(model = lr)
visualizer.fit(X3_train, y_train) # Fit the training data to the model
visualizer.poof()
```
Now do it with statsmodels
```
X = pd.DataFrame()
# create one-hot columns for District
X = pd.get_dummies(athlete_data[['District']])
X = pd.concat([X, athlete_data[['PR9','PR10','PR11','Nsex','Grad_Yr']]], axis=1, sort=False)
y = athlete_data['PR12']
#y = pd.DataFrame(y.values.reshape((len(y),1)))
X.shape,y.shape
sm_data = pd.DataFrame()
# create one-hot columns for District
sm_data = pd.get_dummies(athlete_data[['District']])
sm_data = pd.concat([X, athlete_data[['PR9','PR10','PR11','PR12','Nsex','Grad_Yr']]], axis=1, sort=False)
y_train_sm, X_train_sm = patsy.dmatrices('PR12 ~ PR9 + PR10 + PR11 + Nsex + Grad_Yr',data = sm_data, return_type='dataframe')
model = sm.OLS(y_train_sm,X_train_sm)
fit = model.fit()
print(fit.summary())
```
Explore the effect of sample size on the results.
```
# Set District to filter for only one district, Dist=100 is all districts
Dist = 100
filtered_X, filtered_y = get_Xy(athlete_data,Dist)
#divide into train and test sets
X_train, X_test, y_train, y_test = train_test_split(filtered_X, filtered_y, test_size=0.4,
random_state=42,stratify=filtered_X['Nsex'])
# Create an empty model
output_data = pd.DataFrame()
max_sample_size = min(401,len(X_train))
for sample_size in range(10,max_sample_size,1):
X2_train = X_train.sample(n=sample_size,random_state=1)
y2_train = y_train.sample(n=sample_size,random_state=1)
#X2_test = X_test.sample(n=sample_size,random_state=1)
#y2_test = y_test.sample(n=sample_size,random_state=1)
lr = LinearRegression()
lr.fit(X2_train, y2_train)
y2_predict = lr.predict(X_test)
test_score = lr.score(X_test,y_test)
train_score = lr.score(X2_train,y2_train)
train_error = mean_squared_error(y2_train, lr.predict(X2_train))
test_error = mean_squared_error(y_test, lr.predict(X_test))
#test_error = mean_squared_error(y2_test, lr.predict(X2_test))
#print(sample_size,train_error,test_error)
output_data = output_data.append([[sample_size,test_score,train_score,train_error,test_error]])
#print('Train R^2: ', train_score)
#print('Train SSE:', train_error)
#print('Test R^2: ', test_score)
#print('Test SSE:', test_error)
plt.plot(output_data[0],output_data[3],label='Train Error')
plt.plot(output_data[0],output_data[4],label='Test Error')
plt.legend()
plt.title('Model error vs. number of data points');
plt.xlabel('Number of data points');
plt.ylabel('RMS Error');
print('boys in train set: ',X_train[X_train.Nsex == 1]['Nsex'].count())
print('girls in train set:',X_train[X_train.Nsex == 0]['Nsex'].count())
print('boys in test set: ',X_test[X_test.Nsex == 1]['Nsex'].count())
print('girls in test set: ',X_test[X_test.Nsex == 0]['Nsex'].count())
```
## LASSO shows feature importance <a name='LASSO' />
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,
random_state=42,stratify=X['Nsex'])
lr_lasso = linear_model.Lasso(alpha=0.1)
lr_fit = lr_lasso.fit(X_train, y_train)
# Print out the R^2 for the model against the full dataset
lr_lasso.score(X_train,y_train)
#lr_lasso.get_params()['lassocv'].alpha_
lr_lasso.get_params()
X_train.columns
print(X_train.shape)
print(y_train.shape,lr_lasso.predict(X_train).shape)
X_train.head()
print('Train R^2: ',lr_lasso.score(X_train, y_train))
print('Train RMSE:', sqrt(mean_squared_error(y_train,lr_lasso.predict(X_train))))
print('Test R^2: ', lr_lasso.score(X_test, y_test))
print('Test RMSE:', sqrt(mean_squared_error(y_test, lr_lasso.predict(X_test))))
alpha_list = [1e-4, 1e-3, 1e-2, .05, 1e-1,.3,.5,.7]
lasso_results = []
for alpha in alpha_list:
lr_lasso = linear_model.Lasso(alpha=alpha)
lr_lasso_fit = lr_lasso.fit(X_train, y_train)
score = lr_lasso.score(X_train,y_train)
RMSE = sqrt(mean_squared_error(y_test, lr_lasso.predict(X_test)))
coef = lr_lasso_fit.coef_.tolist()
#print(coef)
lasso_results.append([alpha,score,coef,RMSE])
num_features = X.shape[1]
for alpha,score,coef,RMSE in lasso_results:
#print(alpha,score,coef)
test = (alpha == 0.7)
test = True
if test:
plt.plot(range(1,num_features+1),coef,label=f"alpha = {alpha}")
plt.legend()
plt.xticks(np.linspace(0,num_features+1, num=num_features+2));
plt.xlabel('Feature')
plt.ylabel('Lasso coefficient');
num_features = X.shape[1]
for alpha,score,coef,RMSE in lasso_results:
#print(alpha,score,coef)
#test = (alpha == 0.7)
test = (alpha >= 0.001) and (alpha <= .3)
if test:
plt.plot(range(1,num_features+1),coef,label=f"alpha = {alpha}")
plt.legend()
plt.xticks(np.linspace(0,num_features+1, num=num_features+2));
plt.xlabel('Feature')
plt.ylabel('Lasso coefficient');
X_train.columns
pd.DataFrame(lasso_results)
lasso_results[5][2]
xx = [row[0] for row in lasso_results]
yy = [row[3] for row in lasso_results]
plt.semilogx(xx,yy);
plt.xlabel('alpha')
plt.ylabel('RMSE');
```
## Modeling District as a mixed effect <a name='MixedEffect' />
Random effect - District
Fixed effect - PRs from each year, grad year
We expect to see some clustering due to the random effect variable.
```
sm_data = athlete_data[['District','PR9','PR10','PR11','PR12','Nsex','Grad_Yr']]
y_train_sm, X_train_sm = patsy.dmatrices('PR12 ~ PR9 + PR10 + PR11 + Nsex + Grad_Yr',
data = sm_data, return_type='dataframe')
print(sm_data.shape)
sm_data.head()
print(y_train_sm.shape,X_train_sm.shape)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,random_state=42)
#data_train = pd.concat([y_train,X_train],axis=1,sort=False)
#data_test = pd.concat([y_test,X_test],axis=1,sort=False)
#md = smf.mixedlm("12_PR ~ 9_PR + 10_PR + 11_PR + sex + Grad_Yr",
# data = athlete_data,
# groups = athlete_data["District"])
md = smf.mixedlm('PR12 ~ PR9 + PR10 + PR11 + Nsex + Grad_Yr',
data = sm_data,
groups = sm_data['District'])
mdf = md.fit()
print(mdf.summary())
y_sm = sm_data['PR12']
#X_sm = sm_data = athlete_data[['District','PR9','PR10','PR11','Nsex','Grad_Yr']]
#y_sm_predict = mdf.predict(X_sm)
y_sm_predict = mdf.fittedvalues
RMSE = sqrt(((y_sm-y_sm_predict)**2).values.mean())
print(RMSE)
# and let's plot the predictions
performance = pd.DataFrame()
performance["predicted"] = mdf.fittedvalues
performance["residuals"] = mdf.resid.values
#performance["PR12"] = data.age_scaled
sns.lmplot(x = "predicted", y = "residuals", data = performance)
```
## Algebraic Model <a name='Algebraic' />
How well can you predict 12th grade times using a brute-force method? Assume the ratio of the decrease in times from 10th to 11th grade is the same as from 11th to 12th grade. In this way, with the competition times from 10th and 11th grade, you can predict the time for 12th grade.
```
athlete_data.head()
print(y_test.iloc[831])
print(y_test)
RMSE = 0
average = 0
total = 0
growth = []
growth1 = []
residual = []
max_val = []
df = athlete_data.sample(n=len(y_test))
for index,athlete in df.iterrows():
g12 = athlete['PR12']
g11 = athlete['PR11']
g10 = athlete['PR10']
g9 = athlete['PR9']
g12_predict = g11 + (g11/g10)*(g11-g10)
#g12_predict = g11**2/g10
RMSE += (g12_predict - g12)**2
average += g12
total += 1
growth.append((g12/g11)/(g11/g10))
residual.append(g12_predict - g12)
if (g11-g10) != 0:
g = (g12-g11)/(g11-g10)
if g < 5: growth1.append(g)
max_val.append(g12)
RMSE = sqrt(RMSE/total)
average = average/total
print('RMSE:',RMSE)
print('12th grade average time:',average)
#plt.scatter(max,growth)
#plt.hist(growth1,1000);
plt.hist(growth,40);
plt.title('Histogram of ratio of 12/11 grade times to 11/10 grade times');
#plt.xlim(-10,10)
plt.plot([1,1],[0,105],color='r')
#plt.plot([0,0],[0,130],color='y')
plt.hist(residual,40)
plt.plot([0,0],[0,150],color='r')
plt.title('histogram of residuals')
plt.xlabel('y_predicted - y')
```
## Feature Scaling
We discussed previously that the scale of the features is an important consideration when building machine learning models. Briefly:
### Feature magnitude matters because:
- The regression coefficients of linear models are directly influenced by the scale of the variable.
- Variables with bigger magnitude / larger value range dominate over those with smaller magnitude / value range
- Gradient descent converges faster when features are on similar scales
- Feature scaling helps decrease the time to find support vectors for SVMs
- Euclidean distances are sensitive to feature magnitude.
- Some algorithms, like PCA require the features to be centered at 0.
### The machine learning models affected by the feature scale are:
- Linear and Logistic Regression
- Neural Networks
- Support Vector Machines
- KNN
- K-means clustering
- Linear Discriminant Analysis (LDA)
- Principal Component Analysis (PCA)
### Feature Scaling
**Feature scaling** refers to the methods or techniques used to normalize the range of independent variables in our data, or in other words, the methods to set the feature value range within a similar scale. Feature scaling is generally the last step in the data preprocessing pipeline, performed **just before training the machine learning algorithms**.
There are several Feature Scaling techniques, which we will discuss throughout this section:
- Standardisation
- Mean normalisation
- Scaling to minimum and maximum values - MinMaxScaling
- Scaling to maximum value - MaxAbsScaling
- Scaling to quantiles and median - RobustScaling
- Normalization to vector unit length
In this notebook, we will discuss **Standardisation**.
=================================================================
## Standardisation
Standardisation involves centering the variable at zero, and standardising the variance to 1. The procedure involves subtracting the mean of each observation and then dividing by the standard deviation:
**z = (x - x_mean) / std**
The result of the above transformation is **z**, which is called the z-score, and represents how many standard deviations a given observation deviates from the mean. A z-score specifies the location of the observation within a distribution (in numbers of standard deviations respect to the mean of the distribution). The sign of the z-score (+ or - ) indicates whether the observation is above (+) or below ( - ) the mean.
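As a quick sketch of this formula with a small made-up array (not the data used in the demo below):
```
import numpy as np

x = np.array([50., 60., 65., 80., 95.])  # hypothetical observations
z = (x - x.mean()) / x.std()             # z = (x - x_mean) / std
print(z)
print(z.mean().round(10), z.std())       # mean ~0 and std 1 after standardisation
```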
The shape of a standardised (or z-scored normalised) distribution will be identical to the original distribution of the variable. If the original distribution is normal, then the standardised distribution will be normal. But, if the original distribution is skewed, then the standardised distribution of the variable will also be skewed. In other words, **standardising a variable does not normalize the distribution of the data** and if this is the desired outcome, we should implement any of the techniques discussed in section 7 of the course.
In a nutshell, standardisation:
- centers the mean at 0
- scales the variance at 1
- preserves the shape of the original distribution
- the minimum and maximum values of the different variables may vary
- preserves outliers
Good for algorithms that require features centered at zero.
## In this demo
We will perform standardisation using the Boston House Prices data set that comes with Scikit-learn
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# dataset for the demo
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
# the scaler - for standardisation
from sklearn.preprocessing import StandardScaler
# load the Boston House price data
# this is how we load the boston dataset from sklearn
boston_dataset = load_boston()
# create a dataframe with the independent variables
data = pd.DataFrame(boston_dataset.data,
columns=boston_dataset.feature_names)
# add target
data['MEDV'] = boston_dataset.target
data.head()
# Information about the Boston house price dataset
# you will find details about the different variables
# the aim is to predict the "Median value of the houses"
# MEDV column in this dataset
# and there are variables with characteristics about
# the homes and the neighborhoods
# print the dataset description
print(boston_dataset.DESCR)
# let's have a look at the main statistical parameters of the variables
# to get an idea of the feature magnitudes
data.describe()
```
The different variables present different value ranges, means, maxima, minima, standard deviations, etc. In other words, they show different magnitudes or scales. Note, for this demo, that **the mean values are not centered at zero and the standard deviations are not scaled to 1**.
When standardising the data set, we need to first identify the mean and standard deviation of the variables. These parameters need to be learned from the train set, stored, and then used to scale test and future data. Thus, we will first divide the data set into train and test, as we have done throughout the course.
```
# let's separate the data into training and testing set
X_train, X_test, y_train, y_test = train_test_split(data.drop('MEDV', axis=1),
data['MEDV'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
```
### Standardisation
The StandardScaler from scikit-learn removes the mean and scales the data to unit variance. In addition, it learns and stores the parameters needed for scaling, which makes it the natural choice for this feature scaling technique.
On the downside, you can't select which variables to scale: it scales the entire data set, and it returns a NumPy array without the variable names.
```
# standardisation: with the StandardScaler from sklearn
# set up the scaler
scaler = StandardScaler()
# fit the scaler to the train set, it will learn the parameters
scaler.fit(X_train)
# transform train and test sets
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# the scaler stores the mean of the features, learned from train set
scaler.mean_
# the scaler stores the standard deviation deviation of the features,
# learned from train set
scaler.scale_
# let's transform the returned NumPy arrays to dataframes for the rest of
# the demo
X_train_scaled = pd.DataFrame(X_train_scaled, columns=X_train.columns)
X_test_scaled = pd.DataFrame(X_test_scaled, columns=X_test.columns)
# let's have a look at the original training dataset: mean and standard deviation
# I use np.round to reduce the number of decimals to 1.
np.round(X_train.describe(), 1)
# let's have a look at the scaled training dataset: mean and standard deviation
# I use np.round to reduce the number of decimals to 1.
np.round(X_train_scaled.describe(), 1)
```
As expected, the means of the variables, which were not centered at zero, are now around zero, and the standard deviations are set to 1. Note, however, that the minimum and maximum values vary according to how spread out each variable was to begin with, and they are highly influenced by the presence of outliers.
```
# let's compare the variable distributions before and after scaling
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
# before scaling
ax1.set_title('Before Scaling')
sns.kdeplot(X_train['RM'], ax=ax1)
sns.kdeplot(X_train['LSTAT'], ax=ax1)
sns.kdeplot(X_train['CRIM'], ax=ax1)
# after scaling
ax2.set_title('After Standard Scaling')
sns.kdeplot(X_train_scaled['RM'], ax=ax2)
sns.kdeplot(X_train_scaled['LSTAT'], ax=ax2)
sns.kdeplot(X_train_scaled['CRIM'], ax=ax2)
plt.show()
```
Note from the above plots how standardisation centered all the distributions at zero, but it preserved their original distribution. The value range is not identical, but it looks more homogeneous across the variables.
Note something interesting in the following plot:
```
# let's compare the variable distributions before and after scaling
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
# before scaling
ax1.set_title('Before Scaling')
sns.kdeplot(X_train['AGE'], ax=ax1)
sns.kdeplot(X_train['DIS'], ax=ax1)
sns.kdeplot(X_train['NOX'], ax=ax1)
# after scaling
ax2.set_title('After Standard Scaling')
sns.kdeplot(X_train_scaled['AGE'], ax=ax2)
sns.kdeplot(X_train_scaled['DIS'], ax=ax2)
sns.kdeplot(X_train_scaled['NOX'], ax=ax2)
plt.show()
X_train['AGE'].min(), X_train['AGE'].max(),
```
In the above plot we can see how, after scaling, the variable NOX, which varied over a very narrow range of values [0-1], and AGE, which varied over [0-100], now spread over a more homogeneous range of values, so we can compare them directly in one plot, whereas before it was difficult. In a linear model fit on the raw data, AGE would dominate the output, but after standardisation both variables can contribute to the prediction (assuming they are both predictive).
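To make the point about dominance concrete, one rough check (a sketch, not part of the original demo) is to compare the coefficients of a linear model fit on the unscaled versus the scaled training data:
```
from sklearn.linear_model import LinearRegression

# fit on two variables with very different magnitudes, before and after scaling
raw = LinearRegression().fit(X_train[['AGE', 'NOX']], y_train)
std = LinearRegression().fit(X_train_scaled[['AGE', 'NOX']], y_train)

# on the raw data the coefficients live on very different scales;
# after standardisation they can be compared directly
print('raw coefficients:   ', raw.coef_)
print('scaled coefficients:', std.coef_)
```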
```
plt.scatter(X_train['AGE'], X_train['NOX'])
plt.scatter(X_train_scaled['AGE'], X_train_scaled['NOX'])
```
<h2>SMS Spam Detection</h2>
See <a href="https://www.kaggle.com/uciml/sms-spam-collection-dataset">Kaggle.com</a>
```
import numpy as np
import numpy.core.defchararray as npf
from sklearn.preprocessing import LabelEncoder
# load data
import pandas as pd
#sms = pd.read_csv("../data/spam.csv", encoding="latin-1")
sms = pd.read_csv("../data/spam_utf8.csv")
sms = sms.drop(['Unnamed: 2','Unnamed: 3','Unnamed: 4'],axis=1)
sms = sms.rename(columns = {'v1':'label','v2':'message'})
X_raw = sms["message"]
y_raw = sms["label"]
print(X_raw[5])
print(y_raw[5])
for i in range(0,X_raw.shape[0]):
X_raw[i] = str(npf.replace(X_raw[i], "'", ""))
X_raw[i] = str(npf.replace(X_raw[i], "å", ""))
X_raw[i] = str(npf.replace(X_raw[i], "!", ""))
X_raw[i] = str(npf.replace(X_raw[i], "?", ""))
X_raw[i] = str(npf.replace(X_raw[i], ".", " "))
X_raw[i] = str(npf.replace(X_raw[i], "\"", ""))
print(X_raw[5])
# Convert class label strings to integers
encoder = LabelEncoder()
encoder.fit(y_raw)
y = encoder.transform(y_raw)
#np_data = df.values
# split data into X and y
#X_raw = np_data[:,0:-1]
#y_raw = pd.factorize(np_data[:,-1])[0]
#print(X_raw[4])
#print(y_raw[2])
# set seed to randomizer
#seed = 7
# flatten input matrix to vector
#X_raw = X_raw.ravel()
print("Instances: {}".format(X_raw.shape[0]))
```
<h2>Convert to bag of words</h2>
```
from sklearn.feature_extraction.text import CountVectorizer
def gen_bagofwords(X_raw, ng_min=1, ng_max=1, df_min=1):
count_vect = CountVectorizer(ngram_range=(ng_min,ng_max), min_df=df_min)
X = count_vect.fit_transform(X_raw)
print("Bag-of-words size: {}".format(X.shape[1]))
return X
```
<h2>Convert from occurrences to frequencies</h2>
```
from sklearn.feature_extraction.text import TfidfTransformer
def conv_frequencies(X):
tf_transformer = TfidfTransformer(sublinear_tf=True).fit(X)
X = tf_transformer.transform(X)
return X
```
<h2>Function for evaluating model accuracy</h2>
```
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
def evaluate_test(model):
print("\n-- Test set --")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=111, stratify=y)
# train model on training dataset
model.fit(X_train, y_train)
# evaluate dataset
y_pred = model.predict(X_test)
# calculate accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
# confusion matrix
print("Confusion Matrix:")
conf_mx = confusion_matrix(y_test, y_pred)
print(conf_mx)
def evaluate_cv(model):
print("\n-- 5-fold CV --")
# 5-fold CV
y_pred = cross_val_predict(model, X, y, cv=5)
# calculate accuracy
accuracy = accuracy_score(y, y_pred)
print("Average accuracy: %.2f%%" % (accuracy * 100.0))
# confusion matrix
print("Confusion Matrix:")
conf_mx = confusion_matrix(y, y_pred)
print(conf_mx)
```
<h2>Naive Bayes</h2>
```
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
#X = gen_bagofwords(X_raw, df_min=0.001)
X = gen_bagofwords(X_raw)
X = conv_frequencies(X)
model = MultinomialNB(alpha=0.01)
evaluate_test(model)
evaluate_cv(model)
X = gen_bagofwords(X_raw, 1, 2, df_min=0.001)
#X = gen_bagofwords(X_raw)
X = conv_frequencies(X)
model = MultinomialNB(alpha=0.01)
evaluate_test(model)
evaluate_cv(model)
```
<h2>SVM</h2>
```
from sklearn import svm
X = gen_bagofwords(X_raw, df_min=0.001)
#X = gen_bagofwords(X_raw)
X = conv_frequencies(X)
model = svm.LinearSVC(random_state=42)
evaluate_test(model)
evaluate_cv(model)
X = gen_bagofwords(X_raw, 1, 2, df_min=0.001)
#X = gen_bagofwords(X_raw)
X = conv_frequencies(X)
model = svm.LinearSVC(random_state=42)
evaluate_test(model)
evaluate_cv(model)
```
<h2>Pipeline example</h2>
```
from sklearn.pipeline import Pipeline
X = X_raw.ravel()
model = Pipeline([('vect', CountVectorizer(stop_words='english')),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB(alpha=.01)),])
evaluate_test(model)
evaluate_cv(model)
```
# The Basics: Training Your First Model
Let's implement a Celsius-to-Fahrenheit converter using TensorFlow.
The formula for converting Celsius to Fahrenheit is:
$$ f = c \times 1.8 + 32 $$
We will train a neural network in TensorFlow that takes the Celsius values (0, 8, 15, 22, 38) as input and produces the corresponding Fahrenheit values (32, 46, 59, 72, 100) as output.
The result is a trained model that converts Celsius to Fahrenheit.
## Import dependencies
```
import tensorflow as tf
import numpy as np
```
## Set up training data
Generate the training data used to train the neural network model.
```
celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)
for i,c in enumerate(celsius_q):
print("{} degrees Celsius = {} degrees Fahrenheit".format(c, fahrenheit_a[i]))
```
### Some Machine Learning terminology
- **Feature** — the input to the model. Here it is a single value: the Celsius temperature.
- **Labels** — the values the model is trying to predict, i.e. the prediction targets. Here it is a single value: the Fahrenheit temperature.
- **Example** — a pair of inputs/outputs used during training. Here it is a pair from `celsius_q` and `fahrenheit_a`, for example `(22, 72)`.
## Create the model
Create a simple model using TensorFlow's Dense layer.
### Build a layer
The configuration of `tf.keras.layers.Dense` is as follows:
* `input_shape=[1]` — the input is a single value, supplied as a one-dimensional array of length 1. The input here is the Celsius temperature.
* `units=1` — the number of neurons in the layer. Since this example uses a single layer that directly outputs the Fahrenheit value, it is set to 1; the final output of the network is the Fahrenheit temperature.
```
l0 = tf.keras.layers.Dense(units=1, input_shape=[1])
```
### Assemble layers into the model
Build the model from the layer defined above.
```
model = tf.keras.Sequential([l0])
```
**Note**
The setup above can be written more concisely as follows.
```python
model = tf.keras.Sequential([
tf.keras.layers.Dense(units=1, input_shape=[1])
])
```
## Compile the model, with loss and optimizer functions
Before training, the model must be compiled. At this step we specify:
- **Loss function** — measures the difference between the model's predictions and the actual values. Training aims to minimize this loss.
- **Optimizer function** — the algorithm used to minimize the loss.
```
model.compile(loss='mean_squared_error',
optimizer=tf.keras.optimizers.Adam(0.1))
```
The loss function used here, [mean squared error](https://en.wikipedia.org/wiki/Mean_squared_error) (`mean_squared_error`), is the usual choice for regression problems, and [Adam](https://machinelearningmastery.com/adam-optimization-algorithm-for-deep-learning/) is one of the most commonly used optimizers; both are covered in more detail later.
The most important optimizer setting is the learning rate (`0.1` in the code above). Values between 0.001 and 0.1 are typical starting points.
## Train the model
Train the model with the `fit` method.
During training, the model's "weights" are optimized by the optimizer.
The `fit` method sets up the basic elements of training: the first argument is the inputs, the second is the outputs, `epochs` specifies how many training passes to run, and `verbose` controls how much progress output is displayed.
```
history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=0)
print("Finished training the model")
```
## Display training statistics
The `fit` method returns a history object, which can be plotted to inspect the training process. As the training epochs progress, you can monitor the loss decreasing.
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.xlabel('Epoch Number')
plt.ylabel("Loss Magnitude")
plt.plot(history.history['loss'])
```
## Use the model to predict values
Let's use the trained model to make a prediction.
```
print(model.predict([100.0]))
```
## Looking at the layer weights
Finally, let's look at the weights of the trained Dense layer.
```
print("These are the layer variables: {}".format(l0.get_weights()))
```
You can see that the learned values closely match the conversion formula.
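As a quick sanity check (a sketch; the exact numbers differ from run to run), the single learned weight and bias can be compared against the 1.8 and 32 in the conversion formula:
```
weights, bias = l0.get_weights()
print("learned weight (close to 1.8):", weights[0][0])
print("learned bias (close to 32):", bias[0])
```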
## Further experiments
It is also possible to build the model with more Dense layers.
```
l0 = tf.keras.layers.Dense(units=4, input_shape=[1])
l1 = tf.keras.layers.Dense(units=4)
l2 = tf.keras.layers.Dense(units=1)
model = tf.keras.Sequential([l0, l1, l2])
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.1))
model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("Finished training the model")
# Predict with the trained model.
print("Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit".format(model.predict([100.0])))
# Inspect the weights of the trained model.
print("These are the l0 variables: {}".format(l0.get_weights()))
print("These are the l1 variables: {}".format(l1.get_weights()))
print("These are the l2 variables: {}".format(l2.get_weights()))
```
## Exercise
Adapt the example above to build your own deep learning model that converts Celsius to Fahrenheit.
* Choose an appropriate number of hidden layers and hidden units.
* Choose a suitable optimizer.
* Choose an appropriate number of training epochs.
# Multiple Kernel Learning
#### By Saurabh Mahindre - <a href="https://github.com/Saurabh7">github.com/Saurabh7</a>
This notebook is about multiple kernel learning in shogun. We will see how to construct a combined kernel, determine optimal kernel weights using MKL and use it for different types of [classification](http://en.wikipedia.org/wiki/Statistical_classification) and [novelty detection](http://en.wikipedia.org/wiki/Novelty_detection).
1. [Introduction](#Introduction)
2. [Mathematical formulation](#Mathematical-formulation-(skip-if-you-just-want-code-examples))
3. [Using a Combined kernel](#Using-a-Combined-kernel)
4. [Example: Toy Data](#Prediction-on-toy-data)
1. [Generating Kernel weights](#Generating-Kernel-weights)
5. [Binary classification using MKL](#Binary-classification-using-MKL)
6. [MKL for knowledge discovery](#MKL-for-knowledge-discovery)
7. [Multiclass classification using MKL](#Multiclass-classification-using-MKL)
8. [One-class classification using MKL](#One-class-classification-using-MKL)
```
%matplotlib inline
import os
import numpy as np
import matplotlib.pyplot as plt
import shogun as sg
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
```
### Introduction
<em>Multiple kernel learning</em> (MKL) is about using a combined kernel i.e. a kernel consisting of a linear combination of arbitrary kernels over different domains. The coefficients or weights of the linear combination can be learned as well.
[Kernel based methods](http://en.wikipedia.org/wiki/Kernel_methods) such as support vector machines (SVMs) employ a so-called kernel function $k(x_{i},x_{j})$ which intuitively computes the similarity between two examples $x_{i}$ and $x_{j}$. </br>
Selecting the kernel function $k()$ and its parameters is an important issue in training. Kernels designed by humans usually capture one aspect of the data, so choosing a single kernel means selecting exactly one such aspect. Combining several aspects is therefore often better than selecting only one.
In shogun, [MKL](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1MKL.html) is the base class for multiple kernel learning. It supports [binary](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1MKLClassification.html), [one-class](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1MKLOneClass.html) and [multiclass](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1MKLMulticlass.html) classification, as well as [regression](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1MKLRegression.html).
### Mathematical formulation (skip if you just want code examples)
</br>In a SVM, defined as:
$$f({\bf x})=\text{sign} \left(\sum_{i=0}^{N-1} \alpha_i k({\bf x}, {\bf x_i})+b\right)$$</br>
where ${\bf x}_i$, $i = 1,\ldots,N$, are labeled training examples with $y_i \in \{\pm 1\}$.
One could make a combination of kernels like:
$${\bf k}(x_i,x_j)=\sum_{k=0}^{K} \beta_k {\bf k_k}(x_i, x_j)$$
where $\beta_k > 0$ and $\sum_{k=0}^{K} \beta_k = 1$
In the multiple kernel learning problem for binary classification one is given $N$ data points $(x_i, y_i)$, $y_i \in \{\pm 1\}$, where $x_i$ is translated via $K$ mappings $\phi_k(x) \rightarrow R^{D_k}$, $k=1,\ldots,K$, from the input into $K$ feature spaces $(\phi_1(x_i),\ldots,\phi_K(x_i))$, where $D_k$ denotes the dimensionality of the $k$-th feature space.
In MKL $\alpha_i$,$\beta$ and bias are determined by solving the following optimization program. For details see [1].
$$\mbox{min} \hspace{4mm} \gamma-\sum_{i=1}^N\alpha_i$$
$$ \mbox{w.r.t.} \hspace{4mm} \gamma\in R, \alpha\in R^N \nonumber$$
$$\mbox {s.t.} \hspace{4mm} {\bf 0}\leq\alpha\leq{\bf 1}C,\;\;\sum_{i=1}^N \alpha_i y_i=0 \nonumber$$
$$\frac{1}{2}\sum_{i,j=1}^N \alpha_i \alpha_j y_i y_j \, k_k({\bf x}_i,{\bf x}_j) \leq \gamma, \quad \forall k=1,\ldots,K$$
Here C is a pre-specified regularization parameter.
Within shogun this optimization problem is solved using [semi-infinite programming](http://en.wikipedia.org/wiki/Semi-infinite_programming). For 1-norm MKL one of the two approaches described in [1] is used.
The first approach (also called the wrapper algorithm) wraps around a single kernel SVMs, alternatingly solving for $\alpha$ and $\beta$. It is using a traditional SVM to generate new violated constraints and thus requires a single kernel SVM and any of the SVMs contained in shogun can be used. In the MKL step either a linear program is solved via [glpk](http://en.wikipedia.org/wiki/GNU_Linear_Programming_Kit) or cplex or analytically or a newton (for norms>1) step is performed.
The second much faster but also more memory demanding approach performing interleaved optimization, is integrated into the chunking-based [SVMlight](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1SVMLight.html).
### Using a Combined kernel
Shogun provides an easy way to build a combination of kernels using the [CombinedKernel](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CombinedKernel.html) class, to which we can append any [kernel](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1Kernel.html) from the many options shogun provides. It is especially useful for combining kernels working on different domains and kernels looking at independent features, and it requires [CombinedFeatures](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CombinedFeatures.html) to be used. Similarly, CombinedFeatures combines a number of feature objects into a single CombinedFeatures object.
```
kernel = sg.CombinedKernel()
```
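As a rough sketch of the pattern (the random feature matrices and the two Gaussian widths are placeholders, not data used later in this notebook), kernels and their corresponding feature objects are appended in the same order and then initialized together:
```
# two hypothetical feature sets describing the same 20 examples
feats_a = sg.features(np.random.randn(2, 20))
feats_b = sg.features(np.random.randn(3, 20))

combined_feats = sg.CombinedFeatures()
combined_feats.append_feature_obj(feats_a)
combined_feats.append_feature_obj(feats_b)

combined_kernel = sg.CombinedKernel()
combined_kernel.append_kernel(sg.kernel("GaussianKernel", log_width=np.log(1)))
combined_kernel.append_kernel(sg.kernel("GaussianKernel", log_width=np.log(10)))

# each subkernel is initialized on its corresponding feature object
combined_kernel.init(combined_feats, combined_feats)
```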
### Prediction on toy data
In order to see the prediction capabilities, let us generate some data using the [GMM](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CGMM.html) class. The data is sampled by setting means ([GMM notebook](http://www.shogun-toolbox.org/static/notebook/current/GMM.html)) such that it sufficiently covers the X-Y grid and is not too easy to classify.
```
num=30;
num_components=4
means=np.zeros((num_components, 2))
means[0]=[-1,1]
means[1]=[2,-1.5]
means[2]=[-1,-3]
means[3]=[2,1]
covs=np.array([[1.0,0.0],[0.0,1.0]])
# gmm=sg.distribution("GMM")
# gmm.set_pseudo_count(num_components)
gmm=sg.GMM(num_components)
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
[gmm.set_nth_cov(covs,i) for i in range(num_components)]
gmm.set_coef(np.array([1.0,0.0,0.0,0.0]))
xntr=np.array([gmm.sample() for i in range(num)]).T
xnte=np.array([gmm.sample() for i in range(5000)]).T
gmm.set_coef(np.array([0.0,1.0,0.0,0.0]))
xntr1=np.array([gmm.sample() for i in range(num)]).T
xnte1=np.array([gmm.sample() for i in range(5000)]).T
gmm.set_coef(np.array([0.0,0.0,1.0,0.0]))
xptr=np.array([gmm.sample() for i in range(num)]).T
xpte=np.array([gmm.sample() for i in range(5000)]).T
gmm.set_coef(np.array([0.0,0.0,0.0,1.0]))
xptr1=np.array([gmm.sample() for i in range(num)]).T
xpte1=np.array([gmm.sample() for i in range(5000)]).T
traindata=np.concatenate((xntr,xntr1,xptr,xptr1), axis=1)
trainlab=np.concatenate((-np.ones(2*num), np.ones(2*num)))
testdata=np.concatenate((xnte,xnte1,xpte,xpte1), axis=1)
testlab=np.concatenate((-np.ones(10000), np.ones(10000)))
#convert to shogun features and generate labels for data
feats_train=sg.features(traindata)
labels=sg.BinaryLabels(trainlab)
_=plt.jet()
plt.figure(figsize=(18,5))
plt.subplot(121)
# plot train data
_=plt.scatter(traindata[0,:], traindata[1,:], c=trainlab, s=100)
plt.title('Toy data for classification')
plt.axis('equal')
colors=["blue","blue","red","red"]
# a tool for visualisation
from matplotlib.patches import Ellipse
def get_gaussian_ellipse_artist(mean, cov, nstd=1.96, color="red", linewidth=3):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:, order]
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
width, height = 2 * nstd * np.sqrt(vals)
e = Ellipse(xy=mean, width=width, height=height, angle=theta, \
edgecolor=color, fill=False, linewidth=linewidth)
return e
for i in range(num_components):
plt.gca().add_artist(get_gaussian_ellipse_artist(means[i], covs, color=colors[i]))
```
### Generating Kernel weights
Just to help us visualize let's use two gaussian kernels ([GaussianKernel](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1GaussianKernel.html)) with considerably different widths. As required in MKL, we need to append them to the Combined kernel. To generate the optimal weights (i.e $\beta$s in the above equation), training of [MKL](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1MKLClassification.html) is required. This generates the weights as seen in this example.
```
width0=0.5
kernel0=sg.kernel("GaussianKernel", log_width=np.log(width0))
width1=25
kernel1=sg.kernel("GaussianKernel", log_width=np.log(width1))
#combine kernels
kernel.append_kernel(kernel0)
kernel.append_kernel(kernel1)
kernel.init(feats_train, feats_train)
mkl = sg.MKLClassification()
#set the norm, weights sum to 1.
mkl.set_mkl_norm(1)
mkl.set_C(1, 1)
mkl.set_kernel(kernel)
mkl.set_labels(labels)
#train to get weights
mkl.train()
w=kernel.get_subkernel_weights()
print(w)
```
### Binary classification using MKL
Now with the data ready and training done, we can do the binary classification. The weights generated can be intuitively understood. We will see that on plotting individual subkernels outputs and outputs of the MKL classification. To apply on test features, we need to reinitialize the kernel with `kernel.init` and pass the test features. After that it's just a matter of doing `mkl.apply` to generate outputs.
```
size=100
x1=np.linspace(-5, 5, size)
x2=np.linspace(-5, 5, size)
x, y=np.meshgrid(x1, x2)
#Generate X-Y grid test data
grid=sg.features(np.array((np.ravel(x), np.ravel(y))))
kernel0t=sg.kernel("GaussianKernel", log_width=np.log(width0))
kernel1t=sg.kernel("GaussianKernel", log_width=np.log(width1))
kernelt=sg.CombinedKernel()
kernelt.append_kernel(kernel0t)
kernelt.append_kernel(kernel1t)
#initialize with test grid
kernelt.init(feats_train, grid)
mkl.set_kernel(kernelt)
#prediction
grid_out=mkl.apply()
z=grid_out.get_values().reshape((size, size))
plt.figure(figsize=(10,5))
plt.title("Classification using MKL")
c=plt.pcolor(x, y, z)
_=plt.contour(x, y, z, linewidths=1, colors='black')
_=plt.colorbar(c)
```
To justify the weights, let's train and compare two subkernels with the MKL classification output. Training MKL classifier with a single kernel appended to a combined kernel makes no sense and is just like normal single kernel based classification, but let's do it for comparison.
```
z=grid_out.get_labels().reshape((size, size))
# MKL
plt.figure(figsize=(20,5))
plt.subplot(131, title="Multiple Kernels combined")
c=plt.pcolor(x, y, z)
_=plt.contour(x, y, z, linewidths=1, colors='black')
_=plt.colorbar(c)
comb_ker0=sg.CombinedKernel()
comb_ker0.append_kernel(kernel0)
comb_ker0.init(feats_train, feats_train)
mkl.set_kernel(comb_ker0)
mkl.train()
comb_ker0t=sg.CombinedKernel()
comb_ker0t.append_kernel(kernel0)
comb_ker0t.init(feats_train, grid)
mkl.set_kernel(comb_ker0t)
out0=mkl.apply()
# subkernel 1
z=out0.get_labels().reshape((size, size))
plt.subplot(132, title="Kernel 1")
c=plt.pcolor(x, y, z)
_=plt.contour(x, y, z, linewidths=1, colors='black')
_=plt.colorbar(c)
comb_ker1=sg.CombinedKernel()
comb_ker1.append_kernel(kernel1)
comb_ker1.init(feats_train, feats_train)
mkl.set_kernel(comb_ker1)
mkl.train()
comb_ker1t=sg.CombinedKernel()
comb_ker1t.append_kernel(kernel1)
comb_ker1t.init(feats_train, grid)
mkl.set_kernel(comb_ker1t)
out1=mkl.apply()
# subkernel 2
z=out1.get_labels().reshape((size, size))
plt.subplot(133, title="kernel 2")
c=plt.pcolor(x, y, z)
_=plt.contour(x, y, z, linewidths=1, colors='black')
_=plt.colorbar(c)
```
As we can see, the multiple kernel output looks just about right. Kernel 1 gives a somewhat overfitted output while kernel 2 seems less accurate, and the kernel weights are adjusted to produce a refined output in between. We can also look at the errors made by these subkernels for more food for thought. Most of the time the MKL error is lower, since it incorporates aspects of both kernels: one of them is strict while the other is lenient, and MKL finds a balance between the two.
```
kernelt.init(feats_train, sg.features(testdata))
mkl.set_kernel(kernelt)
out = mkl.apply()
evaluator = sg.evaluation("ErrorRateMeasure")
print("Test error is %2.2f%% :MKL" % (100*evaluator.evaluate(out,sg.BinaryLabels(testlab))))
comb_ker0t.init(feats_train, sg.features(testdata))
mkl.set_kernel(comb_ker0t)
out = mkl.apply()
evaluator = sg.evaluation("ErrorRateMeasure")
print("Test error is %2.2f%% :Subkernel1"% (100*evaluator.evaluate(out,sg.BinaryLabels(testlab))))
comb_ker1t.init(feats_train, sg.features(testdata))
mkl.set_kernel(comb_ker1t)
out = mkl.apply()
evaluator = sg.evaluation("ErrorRateMeasure")
print("Test error is %2.2f%% :subkernel2" % (100*evaluator.evaluate(out,sg.BinaryLabels(testlab))))
```
### MKL for knowledge discovery
MKL can recover information about the problem at hand. Let us see this with a binary classification problem. The task is to separate two concentric classes shaped like circles. By varying the distance between the boundary of the circles we can control the separability of the problem. Starting with an almost non-separable scenario, the data quickly becomes separable as the distance between the circles increases.
```
def circle(x, radius, neg):
y=np.sqrt(np.square(radius)-np.square(x))
if neg:
return[x, -y]
else:
return [x,y]
def get_circle(radius):
neg=False
range0=np.linspace(-radius,radius,100)
pos_a=np.array([circle(i, radius, neg) for i in range0]).T
neg=True
neg_a=np.array([circle(i, radius, neg) for i in range0]).T
c=np.concatenate((neg_a,pos_a), axis=1)
return c
def get_data(r1, r2):
c1=get_circle(r1)
c2=get_circle(r2)
c=np.concatenate((c1, c2), axis=1)
feats_tr=sg.features(c)
return c, feats_tr
l=np.concatenate((-np.ones(200),np.ones(200)))
lab=sg.BinaryLabels(l)
#get two circles with radius 2 and 4
c, feats_tr=get_data(2,4)
c1, feats_tr1=get_data(2,3)
_=plt.gray()
plt.figure(figsize=(10,5))
plt.subplot(121)
plt.title("Circles with different separation")
p=plt.scatter(c[0,:], c[1,:], c=lab.get_labels())
plt.subplot(122)
q=plt.scatter(c1[0,:], c1[1,:], c=lab.get_labels())
```
These are the type of circles we want to distinguish between. We can try classification with a constant separation between the circles first.
```
def train_mkl(circles, feats_tr):
#Four kernels with different widths
kernel0=sg.kernel("GaussianKernel", log_width=np.log(1))
kernel1=sg.kernel("GaussianKernel", log_width=np.log(5))
kernel2=sg.kernel("GaussianKernel", log_width=np.log(7))
kernel3=sg.kernel("GaussianKernel", log_width=np.log(10))
kernel = sg.CombinedKernel()
kernel.append_kernel(kernel0)
kernel.append_kernel(kernel1)
kernel.append_kernel(kernel2)
kernel.append_kernel(kernel3)
kernel.init(feats_tr, feats_tr)
mkl = sg.MKLClassification()
mkl.set_mkl_norm(1)
mkl.set_C(1, 1)
mkl.set_kernel(kernel)
mkl.set_labels(lab)
mkl.train()
w=kernel.get_subkernel_weights()
return w, mkl
def test_mkl(mkl, grid):
kernel0t=sg.kernel("GaussianKernel", log_width=np.log(1))
kernel1t=sg.kernel("GaussianKernel", log_width=np.log(5))
kernel2t=sg.kernel("GaussianKernel", log_width=np.log(7))
kernel3t=sg.kernel("GaussianKernel", log_width=np.log(10))
kernelt = sg.CombinedKernel()
kernelt.append_kernel(kernel0t)
kernelt.append_kernel(kernel1t)
kernelt.append_kernel(kernel2t)
kernelt.append_kernel(kernel3t)
kernelt.init(feats_tr, grid)
mkl.set_kernel(kernelt)
out=mkl.apply()
return out
size=50
x1=np.linspace(-10, 10, size)
x2=np.linspace(-10, 10, size)
x, y=np.meshgrid(x1, x2)
grid=sg.features(np.array((np.ravel(x), np.ravel(y))))
w, mkl=train_mkl(c, feats_tr)
print(w)
out=test_mkl(mkl,grid)
z=out.get_values().reshape((size, size))
plt.figure(figsize=(5,5))
c=plt.pcolor(x, y, z)
_=plt.contour(x, y, z, linewidths=1, colors='black')
plt.title('classification with constant separation')
_=plt.colorbar(c)
```
As we can see, the MKL classifier classifies them as expected. Now let's vary the separation and see how it affects the weights. The choice of the kernel width of the Gaussian kernel used for classification is expected to depend on the separation distance of the learning problem: an increased distance between the circles will correspond to a larger optimal kernel width. This effect should be visible in the results of the MKL, where we use MKL-SVMs with four kernels of different widths (1, 5, 7, 10).
```
range1=np.linspace(5.5,7.5,50)
x=np.linspace(1.5,3.5,50)
temp=[]
for i in range1:
#vary separation between circles
c, feats=get_data(4,i)
w, mkl=train_mkl(c, feats)
temp.append(w)
y=np.array([temp[i] for i in range(0,50)]).T
plt.figure(figsize=(20,5))
_=plt.plot(x, y[0,:], color='k', linewidth=2)
_=plt.plot(x, y[1,:], color='r', linewidth=2)
_=plt.plot(x, y[2,:], color='g', linewidth=2)
_=plt.plot(x, y[3,:], color='y', linewidth=2)
plt.title("Comparison between kernel widths and weights")
plt.ylabel("Weight")
plt.xlabel("Distance between circles")
_=plt.legend(["1","5","7","10"])
```
In the above plot we see the kernel weightings obtained for the four kernels; every line shows one weighting. The evolution of the kernel weightings reflects the development of the learning problem: as long as the problem is difficult, the best separation is obtained with the kernel of smallest width. The low-width kernel loses importance as the distance between the circles increases, and kernels with larger widths obtain larger weights in the MKL solution.
### Multiclass classification using MKL
MKL can be used for multiclass classification using the [MKLMulticlass](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1MKLMulticlass.html) class. It is based on the GMNPSVM Multiclass SVM. Its termination criterion is set by `set_mkl_epsilon(float64_t eps)` and the maximal number of MKL iterations is set by `set_max_num_mkliters(int32_t maxnum)`. The epsilon termination criterion is the L2 norm of the difference between the current MKL weights and those from the previous iteration. We set it to 0.001, as we want fairly accurate weights.
To see this in action let us compare it to the normal [GMNPSVM](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CGMNPSVM.html) example as in the [KNN notebook](http://www.shogun-toolbox.org/static/notebook/current/KNN.html#Comparison-to-Multiclass-Support-Vector-Machines), just to see how MKL fares in object recognition. We use the [USPS digit recognition dataset](http://www.gaussianprocess.org/gpml/data/).
```
from scipy.io import loadmat, savemat
from os import path, sep
mat = loadmat(sep.join(['..','..','..','data','multiclass', 'usps.mat']))
Xall = mat['data']
Yall = np.array(mat['label'].squeeze(), dtype=np.double)
# map from 1..10 to 0..9, since shogun
# requires multiclass labels to be
# 0, 1, ..., K-1
Yall = Yall - 1
np.random.seed(0)
subset = np.random.permutation(len(Yall))
#get first 1000 examples
Xtrain = Xall[:, subset[:1000]]
Ytrain = Yall[subset[:1000]]
Nsplit = 2
all_ks = range(1, 21)
print(Xall.shape)
print(Xtrain.shape)
```
Let's plot five of the examples to get a feel of the dataset.
```
def plot_example(dat, lab):
for i in range(5):
ax=plt.subplot(1,5,i+1)
plt.title(int(lab[i]))
ax.imshow(dat[:,i].reshape((16,16)), interpolation='nearest')
ax.set_xticks([])
ax.set_yticks([])
_=plt.figure(figsize=(17,6))
plt.gray()
plot_example(Xtrain, Ytrain)
```
We combine a [Gaussian kernel](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1GaussianKernel.html) and a [PolyKernel](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CPolyKernel.html). To test, examples not included in training data are used.
This is just a demonstration, but we can see here how MKL works behind the scenes. What we have are two kernels with significantly different properties: the gaussian kernel defines a function space that is much larger than that of the linear or polynomial kernel. The gaussian kernel used here has a low width, so it can represent more and more complex relationships between the training data, but it requires enough data to train on. The number of training examples here is 1000, which seems a bit low given that there are 10000 examples in total. We hope the polynomial kernel can counter this problem, since it can fit a polynomial using a lot less data than the squared exponential. The kernel weights are printed below to add some insight.
```
# MKL training and output
labels = sg.MulticlassLabels(Ytrain)
feats = sg.features(Xtrain)
#get test data from 5500 onwards
Xrem=Xall[:,subset[5500:]]
Yrem=Yall[subset[5500:]]
#test features not used in training
feats_rem = sg.features(Xrem)
labels_rem = sg.MulticlassLabels(Yrem)
kernel = sg.CombinedKernel()
feats_train = sg.CombinedFeatures()
feats_test = sg.CombinedFeatures()
#append gaussian kernel
subkernel = sg.kernel("GaussianKernel", log_width=np.log(15))
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_rem)
kernel.append_kernel(subkernel)
#append PolyKernel
feats = sg.features(Xtrain)
subkernel = sg.kernel('PolyKernel', degree=10, c=2)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_rem)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
mkl = sg.MKLMulticlass(1.2, kernel, labels)
mkl.set_epsilon(1e-2)
mkl.set_mkl_epsilon(0.001)
mkl.set_mkl_norm(1)
mkl.train()
#initialize with test features
kernel.init(feats_train, feats_test)
out = mkl.apply()
evaluator = sg.evaluation("MulticlassAccuracy")
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=plt.figure(figsize=(17,6))
plt.gray()
plot_example(Xbad, Ybad)
w=kernel.get_subkernel_weights()
print(w)
# Single kernel:PolyKernel
C=1
pk = sg.kernel('PolyKernel', degree=10, c=2)
svm = sg.GMNPSVM(C, pk, labels)
_=svm.train(feats)
out=svm.apply(feats_rem)
evaluator = sg.evaluation("MulticlassAccuracy")
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=plt.figure(figsize=(17,6))
plt.gray()
plot_example(Xbad, Ybad)
#Single Kernel:Gaussian kernel
width=15
C=1
gk=sg.kernel("GaussianKernel", log_width=np.log(width))
svm=sg.GMNPSVM(C, gk, labels)
_=svm.train(feats)
out=svm.apply(feats_rem)
evaluator = sg.evaluation("MulticlassAccuracy")
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=plt.figure(figsize=(17,6))
plt.gray()
plot_example(Xbad, Ybad)
```
The misclassified examples are certainly tough to predict. As seen from the accuracy, MKL works a shade better in this case. One could try this out with more, and different types of, kernels too.
### One-class classification using MKL
[One-class classification](http://en.wikipedia.org/wiki/One-class_classification) can be done using MKL in shogun. This is demonstrated in the following simple example using [MKLOneClass](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1MKLOneClass.html). We will see how abnormal data is detected. This is also known as novelty detection. Below we generate some toy data and initialize combined kernels and features.
```
X = -0.3 * np.random.randn(100,2)
traindata = np.r_[X + 2, X - 2].T
X = -0.3 * np.random.randn(20, 2)
testdata = np.r_[X + 2, X - 2].T
trainlab=np.concatenate((np.ones(99),-np.ones(1)))
#convert to shogun features and generate labels for data
feats=sg.features(traindata)
labels=sg.BinaryLabels(trainlab)
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
grid=sg.features(np.array((np.ravel(xx), np.ravel(yy))))
#test features
feats_t=sg.features(testdata)
x_out=(np.random.uniform(low=-4, high=4, size=(20, 2))).T
feats_out=sg.features(x_out)
kernel=sg.CombinedKernel()
feats_train=sg.CombinedFeatures()
feats_test=sg.CombinedFeatures()
feats_test_out=sg.CombinedFeatures()
feats_grid=sg.CombinedFeatures()
#append gaussian kernel
subkernel=sg.kernel("GaussianKernel", log_width=np.log(8))
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_t)
feats_test_out.append_feature_obj(feats_out)
feats_grid.append_feature_obj(grid)
kernel.append_kernel(subkernel)
#append PolyKernel
feats = sg.features(traindata)
subkernel = sg.kernel('PolyKernel', degree=10, c=3)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_t)
feats_test_out.append_feature_obj(feats_out)
feats_grid.append_feature_obj(grid)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
mkl = sg.MKLOneClass()
mkl.set_kernel(kernel)
mkl.set_labels(labels)
mkl.set_interleaved_optimization_enabled(False)
mkl.set_epsilon(1e-2)
mkl.put('mkl_epsilon', 0.1)
mkl.set_mkl_norm(1)
```
Now that everything is initialized, let's see MKLOneclass in action by applying it on the test data and on the X-Y grid.
```
mkl.train()
print("Weights:")
w=kernel.get_subkernel_weights()
print(w)
#initialize with test features
kernel.init(feats_train, feats_test)
normal_out = mkl.apply()
#test on abnormally generated data
kernel.init(feats_train, feats_test_out)
abnormal_out = mkl.apply()
#test on X-Y grid
kernel.init(feats_train, feats_grid)
grid_out=mkl.apply()
z=grid_out.get_values().reshape((500,500))
z_lab=grid_out.get_labels().reshape((500,500))
a=abnormal_out.get_labels()
n=normal_out.get_labels()
#check for normal and abnormal classified data
idx=np.where(normal_out.get_labels() != 1)[0]
abnormal=testdata[:,idx]
idx=np.where(normal_out.get_labels() == 1)[0]
normal=testdata[:,idx]
plt.figure(figsize=(15,6))
pl =plt.subplot(121)
plt.title("One-class classification using MKL")
_=plt.pink()
c=plt.pcolor(xx, yy, z)
_=plt.contour(xx, yy, z_lab, linewidths=1, colors='black')
_=plt.colorbar(c)
p1=pl.scatter(traindata[0, :], traindata[1,:], cmap=plt.gray(), s=100)
p2=pl.scatter(normal[0,:], normal[1,:], c="red", s=100)
p3=pl.scatter(abnormal[0,:], abnormal[1,:], c="blue", s=100)
p4=pl.scatter(x_out[0,:], x_out[1,:], c=a, cmap=plt.jet(), s=100)
_=pl.legend((p1, p2, p3), ["Training samples", "normal samples", "abnormal samples"], loc=2)
plt.subplot(122)
c=plt.pcolor(xx, yy, z)
plt.title("One-class classification output")
_=plt.gray()
_=plt.contour(xx, yy, z, linewidths=1, colors='black')
_=plt.colorbar(c)
```
MKL one-class classification gives you a bit more flexibility than a single-kernel classifier. The kernel weights are expected to be more or less similar here, since the training data is neither overly complicated nor too easy, which means both the gaussian and the polynomial kernel are involved. If you don't know the nature of the training data and a lot of features are involved, you can easily use kernels with very different properties and benefit from their combination.
### References:
[1] Soeren Sonnenburg, Gunnar Raetsch, Christin Schaefer, and Bernhard Schoelkopf. Large Scale Multiple Kernel Learning. Journal of Machine Learning Research, 7:1531-1565, July 2006.
[2]F. R. Bach, G. R. G. Lanckriet, and M. I. Jordan. Multiple kernel learning, conic duality, and
the SMO algorithm. In C. E. Brodley, editor, Twenty-first international conference on Machine
learning. ACM, 2004
[3] Kernel Methods for Object Recognition , Christoph H. Lampert
# ADM Quantities in terms of BSSN Quantities
## Author: Zach Etienne
### Formatting improvements courtesy Brandon Clark
[comment]: <> (Abstract: TODO)
**Notebook Status:** <font color='orange'><b> Self-Validated </b></font>
**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)**
### NRPy+ Source Code for this module: [ADM_in_terms_of_BSSN.py](../edit/BSSN/ADM_in_terms_of_BSSN.py)
## Introduction:
This tutorial notebook constructs all quantities in the [ADM formalism](https://en.wikipedia.org/wiki/ADM_formalism) (see also Chapter 2 in Baumgarte & Shapiro's book *Numerical Relativity*) in terms of quantities in our adopted (covariant, tensor-rescaled) BSSN formalism. That is to say, we will write the ADM quantities $\left\{\gamma_{ij},K_{ij},\alpha,\beta^i\right\}$ and their derivatives in terms of the BSSN quantities $\left\{\bar{\gamma}_{ij},\text{cf},\bar{A}_{ij},\text{tr}K,\alpha,\beta^i\right\}$ and their derivatives.
### A Note on Notation:
As is standard in NRPy+,
* Greek indices refer to four-dimensional quantities where the zeroth component indicates temporal (time) component.
* Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction.
As a corollary, any expressions in NRPy+ involving mixed Greek and Latin indices will need to offset one set of indices by one; a Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook).
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules
1. [Step 2](#threemetric): The ADM three-metric $\gamma_{ij}$ and its derivatives in terms of rescaled BSSN quantities
1. [Step 2.a](#derivatives_e4phi): Derivatives of $e^{4\phi}$
1. [Step 2.b](#derivatives_adm_3metric): Derivatives of the ADM three-metric: $\gamma_{ij,k}$ and $\gamma_{ij,kl}$
1. [Step 2.c](#christoffel): Christoffel symbols $\Gamma^i_{jk}$ associated with the ADM 3-metric $\gamma_{ij}$
1. [Step 3](#extrinsiccurvature): The ADM extrinsic curvature $K_{ij}$ and its derivatives in terms of rescaled BSSN quantities
1. [Step 4](#code_validation): Code Validation against `BSSN.ADM_in_terms_of_BSSN` NRPy+ module
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Initialize core Python/NRPy+ modules \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
Let's start by importing all the needed modules from Python/NRPy+:
```
# Step 1.a: Import all needed modules from NRPy+
from outputC import *            # NRPy+: Core C code output module
import NRPy_param_funcs as par   # NRPy+: Parameter interface (used below via par.*)
import sympy as sp               # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexp as ixp         # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm   # NRPy+: Reference metric support
import sys                       # Standard Python module for multiplatform OS-level functions
# Step 1.b: Set the coordinate system for the numerical grid
par.set_parval_from_str("reference_metric::CoordSystem","Spherical")
# Step 1.c: Given the chosen coordinate system, set up
# corresponding reference metric and needed
# reference metric quantities
# The following function call sets up the reference metric
# and related quantities, including rescaling matrices ReDD,
# ReU, and hatted quantities.
rfm.reference_metric()
# Step 1.d: Set spatial dimension (must be 3 for BSSN, as BSSN is
# a 3+1-dimensional decomposition of the general
# relativistic field equations)
DIM = 3
# Step 1.e: Import all basic (unrescaled) BSSN scalars & tensors
import BSSN.BSSN_quantities as Bq
Bq.BSSN_basic_tensors()
gammabarDD = Bq.gammabarDD
cf = Bq.cf
AbarDD = Bq.AbarDD
trK = Bq.trK
Bq.gammabar__inverse_and_derivs()
gammabarDD_dD = Bq.gammabarDD_dD
gammabarDD_dDD = Bq.gammabarDD_dDD
Bq.AbarUU_AbarUD_trAbar_AbarDD_dD()
AbarDD_dD = Bq.AbarDD_dD
```
<a id='threemetric'></a>
# Step 2: The ADM three-metric $\gamma_{ij}$ and its derivatives in terms of rescaled BSSN quantities. \[Back to [top](#toc)\]
$$\label{threemetric}$$
The ADM three-metric is written in terms of the covariant BSSN three-metric tensor as (Eqs. 2 and 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
$$
\gamma_{ij} = \left(\frac{\gamma}{\bar{\gamma}}\right)^{1/3} \bar{\gamma}_{i j},
$$
where $\gamma=\det{\gamma_{ij}}$ and $\bar{\gamma}=\det{\bar{\gamma}_{ij}}$.
The "standard" BSSN conformal factor $\phi$ is given by (Eq. 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):
\begin{align}
\phi &= \frac{1}{12} \log\left(\frac{\gamma}{\bar{\gamma}}\right) \\
\implies e^{\phi} &= \left(\frac{\gamma}{\bar{\gamma}}\right)^{1/12} \\
\implies e^{4 \phi} &= \left(\frac{\gamma}{\bar{\gamma}}\right)^{1/3}
\end{align}
Thus the ADM three-metric may be written in terms of the BSSN three-metric and conformal factor $\phi$ as
$$
\gamma_{ij} = e^{4 \phi} \bar{\gamma}_{i j}.
$$
NRPy+'s implementation of BSSN allows for $\phi$ and two other alternative conformal factors to be defined:
\begin{align}
\chi &= e^{-4\phi} \\
W &= e^{-2\phi},
\end{align}
Thus if `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"chi"`, then
\begin{align}
\gamma_{ij} &= \frac{1}{\chi} \bar{\gamma}_{i j} \\
&= \frac{1}{\text{cf}} \bar{\gamma}_{i j},
\end{align}
and if `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"W"`, then
\begin{align}
\gamma_{ij} &= \frac{1}{W^2} \bar{\gamma}_{i j} \\
&= \frac{1}{\text{cf}^2} \bar{\gamma}_{i j}.
\end{align}
```
# Step 2: The ADM three-metric gammaDD and its
# derivatives in terms of BSSN quantities.
gammaDD = ixp.zerorank2()
exp4phi = sp.sympify(0)
if par.parval_from_str("EvolvedConformalFactor_cf") == "phi":
exp4phi = sp.exp(4*cf)
elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi":
exp4phi = (1 / cf)
elif par.parval_from_str("EvolvedConformalFactor_cf") == "W":
exp4phi = (1 / cf**2)
else:
print("Error EvolvedConformalFactor_cf type = \""+par.parval_from_str("EvolvedConformalFactor_cf")+"\" unknown.")
sys.exit(1)
for i in range(DIM):
for j in range(DIM):
gammaDD[i][j] = exp4phi*gammabarDD[i][j]
```
<a id='derivatives_e4phi'></a>
## Step 2.a: Derivatives of $e^{4\phi}$ \[Back to [top](#toc)\]
$$\label{derivatives_e4phi}$$
To compute derivatives of $\gamma_{ij}$ in terms of BSSN variables and their derivatives, we will first need derivatives of $e^{4\phi}$ in terms of the conformal BSSN variable `cf`.
\begin{align}
\frac{\partial}{\partial x^i} e^{4\phi} &= 4 e^{4\phi} \phi_{,i} \\
\implies \frac{\partial}{\partial x^j} \frac{\partial}{\partial x^i} e^{4\phi} &= \frac{\partial}{\partial x^j} \left(4 e^{4\phi} \phi_{,i}\right) \\
&= 16 e^{4\phi} \phi_{,i} \phi_{,j} + 4 e^{4\phi} \phi_{,ij}
\end{align}
Thus computing first and second derivatives of $e^{4\phi}$ in terms of the BSSN quantity `cf` requires only that we evaluate $\phi_{,i}$ and $\phi_{,ij}$ in terms of $e^{4\phi}$ (computed above in terms of `cf`) and derivatives of `cf`:
If `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"phi"`, then
\begin{align}
\phi_{,i} &= \text{cf}_{,i} \\
\phi_{,ij} &= \text{cf}_{,ij}
\end{align}
If `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"chi"`, then
\begin{align}
\text{cf} = e^{-4\phi} \implies \text{cf}_{,i} &= -4 e^{-4\phi} \phi_{,i} \\
\implies \phi_{,i} &= -\frac{e^{4\phi}}{4} \text{cf}_{,i} \\
\implies \phi_{,ij} &= -e^{4\phi} \phi_{,j} \text{cf}_{,i} -\frac{e^{4\phi}}{4} \text{cf}_{,ij}\\
&= -e^{4\phi} \left(-\frac{e^{4\phi}}{4} \text{cf}_{,j}\right) \text{cf}_{,i} -\frac{e^{4\phi}}{4} \text{cf}_{,ij} \\
&= \frac{1}{4} \left[\left(e^{4\phi}\right)^2 \text{cf}_{,i} \text{cf}_{,j} -e^{4\phi} \text{cf}_{,ij}\right] \\
\end{align}
If `"BSSN_quantities::EvolvedConformalFactor_cf"` is set to `"W"`, then
\begin{align}
\text{cf} = e^{-2\phi} \implies \text{cf}_{,i} &= -2 e^{-2\phi} \phi_{,i} \\
\implies \phi_{,i} &= -\frac{e^{2\phi}}{2} \text{cf}_{,i} \\
\implies \phi_{,ij} &= -e^{2\phi} \phi_{,j} \text{cf}_{,i} -\frac{e^{2\phi}}{2} \text{cf}_{,ij}\\
&= -e^{2\phi} \left(-\frac{e^{2\phi}}{2} \text{cf}_{,j}\right) \text{cf}_{,i} -\frac{e^{2\phi}}{2} \text{cf}_{,ij} \\
&= \frac{1}{2} \left[e^{4\phi} \text{cf}_{,i} \text{cf}_{,j} -e^{2\phi} \text{cf}_{,ij}\right] \\
\end{align}
```
# Step 2.a: Derivatives of $e^{4\phi}$
phidD = ixp.zerorank1()
phidDD = ixp.zerorank2()
cf_dD = ixp.declarerank1("cf_dD")
cf_dDD = ixp.declarerank2("cf_dDD","sym01")
if par.parval_from_str("EvolvedConformalFactor_cf") == "phi":
for i in range(DIM):
phidD[i] = cf_dD[i]
for j in range(DIM):
phidDD[i][j] = cf_dDD[i][j]
elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi":
for i in range(DIM):
phidD[i] = -sp.Rational(1,4)*exp4phi*cf_dD[i]
for j in range(DIM):
phidDD[i][j] = sp.Rational(1,4)*( exp4phi**2*cf_dD[i]*cf_dD[j] - exp4phi*cf_dDD[i][j] )
elif par.parval_from_str("EvolvedConformalFactor_cf") == "W":
exp2phi = (1 / cf)
for i in range(DIM):
phidD[i] = -sp.Rational(1,2)*exp2phi*cf_dD[i]
for j in range(DIM):
phidDD[i][j] = sp.Rational(1,2)*( exp4phi*cf_dD[i]*cf_dD[j] - exp2phi*cf_dDD[i][j] )
else:
print("Error EvolvedConformalFactor_cf type = \""+par.parval_from_str("EvolvedConformalFactor_cf")+"\" unknown.")
sys.exit(1)
exp4phidD = ixp.zerorank1()
exp4phidDD = ixp.zerorank2()
for i in range(DIM):
exp4phidD[i] = 4*exp4phi*phidD[i]
for j in range(DIM):
exp4phidDD[i][j] = 16*exp4phi*phidD[i]*phidD[j] + 4*exp4phi*phidDD[i][j]
```
<a id='derivatives_adm_3metric'></a>
## Step 2.b: Derivatives of the ADM three-metric: $\gamma_{ij,k}$ and $\gamma_{ij,kl}$ \[Back to [top](#toc)\]
$$\label{derivatives_adm_3metric}$$
Recall the relation between the ADM three-metric $\gamma_{ij}$, the BSSN conformal three-metric $\bar{\gamma}_{i j}$, and the BSSN conformal factor $\phi$:
$$
\gamma_{ij} = e^{4 \phi} \bar{\gamma}_{i j}.
$$
Now that we have constructed derivatives of $e^{4 \phi}$ in terms of the chosen BSSN conformal factor `cf`, and the [BSSN.BSSN_quantities module](../edit/BSSN/BSSN_quantities.py) ([**tutorial**](Tutorial-BSSN_quantities.ipynb)) defines derivatives of $\bar{\gamma}_{ij}$ in terms of rescaled BSSN variables, derivatives of $\gamma_{ij}$ can be immediately constructed using the product rule:
\begin{align}
\gamma_{ij,k} &= \left(e^{4 \phi}\right)_{,k} \bar{\gamma}_{i j} + e^{4 \phi} \bar{\gamma}_{ij,k} \\
\gamma_{ij,kl} &= \left(e^{4 \phi}\right)_{,kl} \bar{\gamma}_{i j} + \left(e^{4 \phi}\right)_{,k} \bar{\gamma}_{i j,l} + \left(e^{4 \phi}\right)_{,l} \bar{\gamma}_{ij,k} + e^{4 \phi} \bar{\gamma}_{ij,kl}
\end{align}
```
# Step 2.b: Derivatives of gammaDD, the ADM three-metric
gammaDDdD = ixp.zerorank3()
gammaDDdDD = ixp.zerorank4()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
gammaDDdD[i][j][k] = exp4phidD[k]*gammabarDD[i][j] + exp4phi*gammabarDD_dD[i][j][k]
for l in range(DIM):
gammaDDdDD[i][j][k][l] = exp4phidDD[k][l]*gammabarDD[i][j] + \
exp4phidD[k]*gammabarDD_dD[i][j][l] + \
exp4phidD[l]*gammabarDD_dD[i][j][k] + \
exp4phi*gammabarDD_dDD[i][j][k][l]
```
<a id='christoffel'></a>
## Step 2.c: Christoffel symbols $\Gamma^i_{jk}$ associated with the ADM 3-metric $\gamma_{ij}$ \[Back to [top](#toc)\]
$$\label{christoffel}$$
The 3-metric analog to the definition of Christoffel symbol (Eq. 1.18) in Baumgarte & Shapiro's *Numerical Relativity* is given by
$$
\Gamma^i_{jk} = \frac{1}{2} \gamma^{il} \left(\gamma_{lj,k} + \gamma_{lk,j} - \gamma_{jk,l} \right),
$$
which we implement here:
```
# Step 2.c: 3-Christoffel symbols associated with ADM 3-metric gammaDD
# Step 2.c.i: First compute the inverse 3-metric gammaUU:
gammaUU, detgamma = ixp.symm_matrix_inverter3x3(gammaDD)
GammaUDD = ixp.zerorank3()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
for l in range(DIM):
GammaUDD[i][j][k] += sp.Rational(1,2)*gammaUU[i][l]* \
(gammaDDdD[l][j][k] + gammaDDdD[l][k][j] - gammaDDdD[j][k][l])
```
<a id='extrinsiccurvature'></a>
# Step 3: The ADM extrinsic curvature $K_{ij}$ and its derivatives in terms of rescaled BSSN quantities. \[Back to [top](#toc)\]
$$\label{extrinsiccurvature}$$
The ADM extrinsic curvature may be written in terms of the BSSN trace-free extrinsic curvature tensor $\bar{A}_{ij}$ and the trace of the ADM extrinsic curvature $K$:
\begin{align}
K_{ij} &= \left(\frac{\gamma}{\bar{\gamma}}\right)^{1/3} \bar{A}_{ij} + \frac{1}{3} \gamma_{ij} K \\
&= e^{4\phi} \bar{A}_{ij} + \frac{1}{3} \gamma_{ij} K \\
\end{align}
We only compute first spatial derivatives of $K_{ij}$, as higher-derivatives are generally not needed:
$$
K_{ij,k} = \left(e^{4\phi}\right)_{,k} \bar{A}_{ij} + e^{4\phi} \bar{A}_{ij,k} + \frac{1}{3} \left(\gamma_{ij,k} K + \gamma_{ij} K_{,k}\right)
$$
which is expressed in terms of quantities already defined.
```
# Step 3: Define ADM extrinsic curvature KDD and
# its first spatial derivatives KDDdD
# in terms of BSSN quantities
KDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
KDD[i][j] = exp4phi*AbarDD[i][j] + sp.Rational(1,3)*gammaDD[i][j]*trK
KDDdD = ixp.zerorank3()
trK_dD = ixp.declarerank1("trK_dD")
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
KDDdD[i][j][k] = exp4phidD[k]*AbarDD[i][j] + exp4phi*AbarDD_dD[i][j][k] + \
sp.Rational(1,3)*(gammaDDdD[i][j][k]*trK + gammaDD[i][j]*trK_dD[k])
```
<a id='code_validation'></a>
# Step 4: Code Validation against `BSSN.ADM_in_terms_of_BSSN` NRPy+ module \[Back to [top](#toc)\]
$$\label{code_validation}$$
Here, as a code validation check, we verify agreement in the SymPy expressions between
1. this tutorial and
2. the NRPy+ [BSSN.ADM_in_terms_of_BSSN](../edit/BSSN/ADM_in_terms_of_BSSN.py) module.
```
all_passed=True
def comp_func(expr1,expr2,basename,prefixname2="Bq."):
    # 'global' is needed so that a failed comparison flips the module-level flag
    global all_passed
    if str(expr1-expr2)!="0":
        print(basename+" - "+prefixname2+basename+" = "+ str(expr1-expr2))
        all_passed=False
def gfnm(basename,idx1,idx2=None,idx3=None,idx4=None):
if idx2==None:
return basename+"["+str(idx1)+"]"
if idx3==None:
return basename+"["+str(idx1)+"]["+str(idx2)+"]"
if idx4==None:
return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]"
return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]["+str(idx4)+"]"
expr_list = []
exprcheck_list = []
namecheck_list = []
import BSSN.ADM_in_terms_of_BSSN as AB
AB.ADM_in_terms_of_BSSN()
namecheck_list.extend(["detgamma"])
exprcheck_list.extend([AB.detgamma])
expr_list.extend([detgamma])
for i in range(DIM):
for j in range(DIM):
namecheck_list.extend([gfnm("gammaDD",i,j),gfnm("gammaUU",i,j),gfnm("KDD",i,j)])
exprcheck_list.extend([AB.gammaDD[i][j],AB.gammaUU[i][j],AB.KDD[i][j]])
expr_list.extend([gammaDD[i][j],gammaUU[i][j],KDD[i][j]])
for k in range(DIM):
namecheck_list.extend([gfnm("gammaDDdD",i,j,k),gfnm("GammaUDD",i,j,k),gfnm("KDDdD",i,j,k)])
exprcheck_list.extend([AB.gammaDDdD[i][j][k],AB.GammaUDD[i][j][k],AB.KDDdD[i][j][k]])
expr_list.extend([gammaDDdD[i][j][k],GammaUDD[i][j][k],KDDdD[i][j][k]])
for l in range(DIM):
namecheck_list.extend([gfnm("gammaDDdDD",i,j,k,l)])
exprcheck_list.extend([AB.gammaDDdDD[i][j][k][l]])
expr_list.extend([gammaDDdDD[i][j][k][l]])
for i in range(len(expr_list)):
comp_func(expr_list[i],exprcheck_list[i],namecheck_list[i])
if all_passed:
print("ALL TESTS PASSED!")
else:
print("ERROR. ONE OR MORE TESTS FAILED")
sys.exit(1)
```
<a id='latex_pdf_output'></a>
# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-ADM_in_terms_of_BSSN.pdf](Tutorial-ADM_in_terms_of_BSSN.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-ADM_in_terms_of_BSSN.ipynb
!pdflatex -interaction=batchmode Tutorial-ADM_in_terms_of_BSSN.tex
!pdflatex -interaction=batchmode Tutorial-ADM_in_terms_of_BSSN.tex
!pdflatex -interaction=batchmode Tutorial-ADM_in_terms_of_BSSN.tex
!rm -f Tut*.out Tut*.aux Tut*.log
```
```
%%html
<link href="https://fonts.googleapis.com/css?family=Open+Sans" rel="stylesheet">
<style>#notebook-container{font-size: 13pt;font-family:'Open Sans', sans-serif;} div.text_cell{max-width: 104ex;}</style>
%pylab inline
import matplotlib.patches as patches
```
# Left and right-hand sums
We want to find the approximate area under the graph of $f(x)=x^4$ for the interval $0 \leq x \leq 1$ by taking the right-hand sum, using $n=25$.
$$\Delta x=\dfrac{b-a}{n} \implies \dfrac{1-0}{25} = \dfrac{1}{25}$$
We are going to calculate the area of 25 rectangles and take their sum. This can be expressed as:
$$R_{25} = \sum\limits_{i=1}^n f(x_i)\cdot\Delta x \iff \sum\limits_{i=1}^n f\left(a+i \cdot \Delta x \right)\cdot \Delta x \implies \sum\limits_{i=1}^{25} \left(i\cdot \dfrac{1}{25}\right)^4\cdot \dfrac{1}{25}=\dfrac{1}{25}\sum\limits_{i=1}^{25}\dfrac{i^4}{25^4}=0.22$$
```
def right_hand_sum(a, b, n, f):
# Approximate the area under the graph with a right-hand sum.
delta_x = (b-a)/n
    # Right-hand sum: n rectangles evaluated at the right endpoints a + i*delta_x, i = 1..n
    result = sum([f(a + i*delta_x)*delta_x for i in range(1, n + 1)])
# Line for plotting f(x)
X = np.linspace(a, b, n)
y = [f(x) for x in X]
# Plot
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111, aspect='equal')
plot(X, y, c='b')
ylabel('f(x)')
xlabel('x')
title('Right-hand sum estimated: {:.3f}'.format(result))
# Drawing rectangles (right-hand)
for x in range(0, n):
ax.add_patch(patches.Rectangle(
(a+x*delta_x, 0),
delta_x,
f(a+(x+1)*delta_x),
fill=True,
color='b',
alpha=0.2
))
return result
def left_hand_sum(a, b, n, f):
# Approximate the area under the graph with a left-hand sum.
delta_x = (b-a)/n
    # Left-hand sum: n rectangles evaluated at the left endpoints a + i*delta_x, i = 0..n-1
    result = sum([f(a + i*delta_x)*delta_x for i in range(0, n)])
# Line for plotting f(x)
X = np.linspace(a, b, n)
y = [f(x) for x in X]
# Plot
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111, aspect='equal')
plot(X, y, c='b')
ylabel('f(x)')
xlabel('x')
title('Left-hand sum estimated: {:.3f}'.format(result))
# Drawing rectangles (left-hand)
for x in range(0, n):
ax.add_patch(patches.Rectangle(
(a+x*delta_x, 0),
delta_x,
f(a+x*delta_x),
fill=True,
color='b',
alpha=0.2
))
return result
```
Taking the right-hand sum for $f(x)=x^4$ over the interval $0\leq x\leq 1$ with $n=25$:
```
right_hand_sum(0, 1, 25, lambda x: x**4)
```
Considering that $\int^{2\pi}_0 \sin(x) \ dx=0$, we take the right-hand sum for $f(x) = \sin x$ over the interval $0 \leq x \leq 2\pi$ with $n=50$:
```
res = right_hand_sum(0, 2*math.pi, 50, lambda x: math.sin(x))
```
Left hand sum for $f(x)=\cos x$ over the interval $0 \leq x \leq \pi$ with $n=50$:
```
left_hand_sum(0, math.pi, 50, lambda x: math.cos(x))
```
Taking the right-hand sum for $f(x)=-x^2+10$ over the interval $-2\leq x\leq 2$ with $n=25$:
```
right_hand_sum(-2, 2, 25, lambda x: -x**2+10)
right_hand_sum(-2, 2, 50, lambda x: -x**2+10)
right_hand_sum(-2, 2, 15, lambda x: -x**2+10)
right_hand_sum(0, 3, 8, lambda x: -0.6380*x**2+3.97142*x+0.01666)
left_hand_sum(0, 3, 8, lambda x: -0.6380*x**2+3.97142*x+0.01666)
X = np.array([0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
y = np.array([0, 1.9, 3.3, 4.5, 5.5, 5.9, 6.2])
a = min(X)
b = max(X)
n = len(X)
degree = 2
fit = np.polyfit(X, y, degree)
p = np.poly1d(fit)
linspace = np.linspace(a, b, n)
fx = p(linspace)
plot(X, y, c='lightgray', linestyle='--', lw=2.0)
plot(linspace, fx, c='b')
legend(['Data', 'Regression ({}-degree)'.format(degree)])
delta_x = (b-a)/(n-1)
L = [fit[0]*x**2 + fit[1]*x + fit[2] for x in linspace[:-1]]
R = [fit[0]*x**2 + fit[1]*x + fit[2] for x in linspace[1:]]  # right endpoints only, so R_6 uses 6 rectangles
delta_x
L
sum(L)*delta_x
R
sum(R)*delta_x
plt.figure(figsize=(10,4))
plot(linspace, fx, lw=5, c='lightgray')
plot(linspace[1:], L, c='b', ls='dotted')
plot(linspace[1:], R, c='b', ls='--')
grid(linestyle='--')
legend(['f(x)', '$L_6$', '$R_6$'], fontsize='x-large')
title('Upper and lower sums');
```
## The Definite Integral
$$ \tag{1}\lim_{n\rightarrow\infty} \sum\limits_{i=1}^n f(x_i^*)\cdot\Delta x = \lim_{n\rightarrow\infty}\Bigl[\ f(x_1^*)\cdot\Delta x+f(x_2^*)\cdot\Delta x+\ldots+f(x_n^*)\cdot\Delta x\ \Bigr]$$
This type of limit gets a special name and notation.
**Definition of a Definite Integral**
If $f$ is a function defined for $a\leq x\leq b$, we divide the interval $[a,b]$ into $n$ subintervals of equal width $\Delta x=(b-a)/n$.
We let $x_0 (=a), x_1, x_2, \ldots, x_n(=b)$ be the endpoints of these subintervals and we let $x_1^*, x_2^*, \ldots, x_n^*$ be any sample points in these subintervals, so $x_i^*$ lies in the $i$-th subinterval $[x_{i-1},x_i]$. Then the definite integral of $f$ from $a$ to $b$ is:
$$\tag{2}\int_a^bf(x) \ dx = \underbrace{\lim_{n\rightarrow\infty}\sum\limits_{i=1}^nf(x_i^*)\cdot\Delta x}_{\text{Riemann sum}}$$
The _integrand_ is $f$ and the _limits of integration_ are $a$ and $b$. The _lower limit_ is $a$, the _upper limit_ is $b$, and the procedure for calculating the integral is called _integration_.
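As a quick numerical illustration of definition (2) (added here for concreteness), the right-hand Riemann sums of $f(x)=x^4$ on $[0,1]$ approach the exact value $1/5$ as $n$ grows:
```
# Right-hand Riemann sums of x^4 on [0, 1] converge to the exact integral 1/5
f = lambda x: x**4
for n in [10, 100, 1000, 10000]:
    delta_x = 1/n
    approx = sum(f(i*delta_x)*delta_x for i in range(1, n + 1))
    print(n, approx)
```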
```
x = np.linspace(0, 2*math.pi, 100)
y = np.array([math.sin(x) for x in x])
plt.figure(figsize=(10,4))
plt.plot(x, y, c='b')
plt.fill_between(x,y, facecolor='b', alpha=0.2)
plt.xlabel('x')
plt.ylabel('sin x');
```
**Theorem (3)**
If $f$ is continuous on $[a,b]$, or if $f$ has only a finite number of jump discontinuities, then $f$ is integrable on $[a,b]$; that is, the definite integral $\int_a^bf(x) \ dx$ exists.
**Theorem (4)**
If $f$ is integrable on $[a,b]$ then
$$\tag{4}\int_a^b f(x) \ dx = \lim_{n\rightarrow\infty}\sum\limits_{i=1}^n f(x_i)\cdot\Delta x$$
**Rules for sums**
$\tag{5}\sum\limits_{i=1}^n i = \dfrac{n(n+1)}{2}$
$\tag{6}\sum\limits_{i=1}^n i^2 = \dfrac{n(n+1)(2n+1)}{6}$
$\tag{7}\sum\limits_{i=1}^n i^3 = \left[\dfrac{n(n+1)}{2}\right]^2$
$\tag{8}\sum\limits_{i=1}^n c = nc$
$\tag{9}\sum\limits_{i=1}^n c\cdot a_i = c \sum\limits_{i=1}^n a_i$
$\tag{10}\sum\limits_{i=1}^n (a_i + b_i) = \sum\limits_{i=1}^n a_i + \sum\limits_{i=1}^n b_i$
$\tag{11}\sum\limits_{i=1}^n (a_i - b_i) = \sum\limits_{i=1}^n a_i - \sum\limits_{i=1}^n b_i$
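As a sanity check (not part of the original list), the closed forms (5)-(7) can be verified numerically for any particular $n$:
```
# Numerical check of the summation rules (5)-(7) for n = 10
n = 10
i = np.arange(1, n + 1)
print(i.sum(), n*(n + 1)/2)                 # rule (5)
print((i**2).sum(), n*(n + 1)*(2*n + 1)/6)  # rule (6)
print((i**3).sum(), (n*(n + 1)/2)**2)       # rule (7)
```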
**Midpoint rule**
$$\int_a^b f(x) \ dx \approx \sum\limits_{i=1}^n f(\bar{x}_i)\cdot\Delta x = \Delta x \left[ f(\bar{x}_1) + f(\bar{x}_2) + \ldots + f(\bar{x}_n) \right]$$
where $\Delta x = \dfrac{b-a}{n}$ and $\bar{x}_i=\dfrac{1}{2}(x_{i-1}+x_i)$ is the midpoint of $[x_{i-1}, x_i]$.
**Properties of the Definite Integral**
$$\int_a^b f(x) \ dx = -\int_b^a f(x) \ dx$$
$$\int_a^a f(x) \ dx = 0$$
$$\tag{1}\int_a^b c\ dx = c(b-a) \qquad ,\text{where $c$ is any constant}$$
$$\tag{2}\int_a^b\left[f(x)+g(x)\right]\ dx = \int_a^b f(x)\ dx + \int_a^b g(x)\ dx$$
$$\tag{3}\int_a^b c\cdot f(x) \ dx = c \int_a^b f(x)\ dx \qquad ,\text{where $c$ is any constant}$$
$$\tag{4}\int_a^b\left[f(x)-g(x)\right]\ dx = \int_a^b f(x)\ dx - \int_a^b g(x)\ dx$$
$$\tag{5} \int_a^c f(x)\ dx + \int_c^b f(x)\ dx = \int_a^b f(x)\ dx$$
**Comparison Properties of the Integral**
$$\tag{6}\text{If $f(x) \geq 0$ for $a\leq x\leq b$, then} \int_a^b f(x)\ dx \geq 0.$$
$$\tag{7}\text{If $f(x) \geq g(x)$ for $a\leq x\leq b$, then} \int_a^b f(x)\ dx \geq \int_a^b g(x)\ dx.$$
$$\tag{8}\text{If $m \leq f(x) \leq M$ for $a\leq x \leq b$, then } m(b-a) \leq \int_a^b f(x)\ dx \leq M(b-a).$$
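As a small illustration of property (8) (added for clarity): for $f(x)=x^4$ on $[0,1]$ we have $0 \leq f(x) \leq 1$, so the integral must lie between $0\cdot(1-0)$ and $1\cdot(1-0)$:
```
# Property (8): m(b-a) <= integral <= M(b-a) with m = 0, M = 1 for x^4 on [0, 1]
n_check = 1000
dx = 1.0/n_check
approx = sum((i*dx)**4 * dx for i in range(1, n_check + 1))
print(approx, 0*(1 - 0) <= approx <= 1*(1 - 0))
```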
```
def riemann_sum(a, b, n, f):
# Calculating the Riemann sum using midpoints
delta_x = (b - a) / n
x = np.linspace(a, b - delta_x, n) + delta_x / 2
y = np.array([f(x) for x in x])
print('Riemann sum for {} from {} to {} with n={}:'.format('f', a, b, n))
print('delta_x: {}'.format(delta_x))
print('x: {}'.format(x))
print('y: {}'.format(y))
result = sum(y) * delta_x
# Plotting
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
ax.axvline(x=a, c='lightgray', ls='--')
ax.axvline(x=b, c='lightgray', ls='--')
rng = b - a
px = np.linspace(a - rng * 0.25, b + rng * 0.25, n+100)
py = np.array([f(x) for x in px])
plot(px, py, c='b', lw=2)
xlim(a - rng * 0.1, b + rng * 0.1)
rngy = np.max(y) - np.min(y)
spacing = rngy * 0.10
ylim(np.min(y)-spacing, np.max(y)+spacing)
for x, y in zip(x, y):
ax.add_patch(patches.Rectangle(
(x - delta_x / 2, 0),
delta_x,
y,
fill=True,
color='b' if y >= 0 else 'r',
alpha=0.2
))
title('Riemann sum for f, ${:.2f}\leq x\leq{:.2f}$, n=${}$'.format(a,b,n))
ylabel('f(x)')
xlabel('x')
if abs(round(result, 6)) == 0: return 0
return result
riemann_sum(0, math.pi/2, 4, lambda x: math.cos(x)**4)
```
Calculating the Riemann Sum for an area under the probability density function:
$$f(x\ |\ \mu, \sigma^2) = \dfrac{1}{\sqrt{2\pi\sigma^2}}e^{-\dfrac{(x-\mu)^2}{2\sigma^2}}$$
```
riemann_sum(-10, 10, 40, lambda x: 1/math.sqrt(2*math.pi)*math.e**(-(x)**2/25))
riemann_sum(0, 1, 16, lambda x: x**3-x**2+x)
riemann_sum(0, math.pi, 16, lambda x: math.cos(x))
riemann_sum(0, 2, 50, lambda x: x/(x+1))
riemann_sum(1, 5, 4, lambda x: x**2*math.e**(-x))
riemann_sum(1, 5, 10, lambda x: x**2*math.e**(-x))
riemann_sum(1, 5, 20, lambda x: x**2*math.e**(-x))
riemann_sum(0, 1.5, 28, lambda x: sin(math.pi*x**2))
```
Evaluating the right hand Riemann sums $R_n$ for the integral $\int^\pi_0 \sin x \ \mathrm{d}x$ with $n=5,10,50,100$.
```
for n in [5, 10, 50, 100]:
delta_x = math.pi / n
    x = np.linspace(0, math.pi - delta_x, n) + delta_x  # right endpoints delta_x, 2*delta_x, ..., pi
y = np.array([math.sin(x) for x in x])
rs = np.sum(y)*delta_x
print('Right hand sum for n={} yields {:.6f}.'.format(n, rs))
```
**Right-hand sum**
```
import math
import numpy as np
def rhs(a, b, n, f):
delta_x = (b - a) / n
X = np.linspace(a, b - delta_x, n) + delta_x
y = np.array([f(x) for x in X])
return np.sum(y) * delta_x
rhs(0, math.pi, 10, lambda x: math.sin(x))
```
**Left-hand sum**
```
def lhs(a, b, n, f):
delta_x = (b - a) / n
X = np.linspace(a, b - delta_x, n)
y = np.array([f(x) for x in X])
return np.sum(y) * delta_x
lhs(0, math.pi, 10, lambda x: math.sin(x))
```
**Midpoint sum**
```
def mps(a, b, n, f):
delta_x = (b - a) / n
x = np.linspace(a, b - delta_x, n) + delta_x / 2
y = np.array([f(x) for x in x])
return np.sum(y) * delta_x
mps(0, math.pi, 10, lambda x: math.sin(x))
```
Evaluating the left and right Riemann sums $L_n$ and $R_n$ for the integral $\int_{-1}^{2} e^{-x^2} \ \mathrm{d}x$ for an increasing range of $n$ values.
```
f = lambda x: math.e**(-x**2)
print('Mn: {}\n'.format(riemann_sum(-1, 2, 10, f)))
x = np.arange(5, 100, 5)
y1 = []
y2 = []
for n in x:
fx = lhs(-1, 2, n, f)
y1.append(fx)
print('Left-hand sum with n={} yields {}.'.format(n, fx))
fx = rhs(-1, 2, n, f)
y2.append(fx)
print('Right-hand sum with n={} yields {}.\n'.format(n, fx))
```
**Limit approximation**
```
import numpy.polynomial.polynomial as poly
plt.figure(figsize=(10,6))
scatter(x, y1)
scatter(x, y2)
reg1 = reg2 = np.linspace(0, x[-1]*1.5)
pf1 = poly.polyfit(x, y1, 2)
p1 = poly.Polynomial(pf1)
r1 = p1(reg1)
plot(reg1, r1)
pf2 = poly.polyfit(x, y2, 2)
p2 = poly.Polynomial(pf2)
r2 = p2(reg2)
plot(reg2, r2)
axhline(y=1.62891, c='lightgray', ls='--')
xlabel('$n$ subintervals')
ylabel('approximation of the sum')
legend(['QuadReg LHS', 'QuadReg RHS', '$\int_{-1}^2 e^{-x^2}$', 'Left-hand sum', 'Right-hand sum']);
```
The idea was to approximate the limit by finding the intersection of the two quadratic regressors. As the plot shows, that isn't so easy.
The limit appears to lie between the left- and right-hand sums, so adding half of their difference to the lower sum should give a good approximation of the limit.
$$x = L_n + \dfrac{R_n-L_n}{2}\iff x=\dfrac{R_n+L_n}{2}$$
```
x = (y2[-1] + y1[-1]) / 2
x
```
Considering that $\int_{-1}^2 e^{-x^2} \approx 1.62891$, we are indeed quite close. Let's see what $M_n$ yields:
```
mn = mps(-1, 2, 100, f)
mn
```
This result is even better than our previous approximation.
```
bar(['$M_n$', '$\int_{-1}^2e^{-x^2}$', '$L_n + (R_n-L_n)/2$'], [mn, 1.62891, x], facecolor='b', alpha=0.5)
ylim((1.6288, 1.62898))
f = lambda x: math.sin(x)
x = np.linspace(0, math.pi*2, 100)
y = [f(x) for x in x]
plt.figure(figsize=(10,6))
plot(x,y)
x = np.linspace(0, math.pi, 100)
y = [f(x) for x in x]
plt.figure(figsize=(10,6))
plot(x,y)
riemann_sum(0, math.pi, 10, lambda x: math.sin(x))
```
Copyright 2019 The Google Research Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# Domain Adaptation using DVRL
* Jinsung Yoon, Sercan O Arik, Tomas Pfister, "Data Valuation using Reinforcement Learning", arXiv preprint arXiv:1909.11671 (2019) - https://arxiv.org/abs/1909.11671
This notebook is a user guide for a domain adaptation application of "Data Valuation using Reinforcement Learning (DVRL)".
We consider the scenario where the training dataset comes from a substantially different distribution from the validation and testing sets. Data valuation is expected to be beneficial for this task by selecting the samples from the training dataset that best match the distribution of the validation dataset.
You need:
**Source / Target / Validation Datasets**
* If there is no explicit validation set, users can utilize a small portion of the target set as the validation set and use the remainder as the target set.
* If users come with their own source / target / validation datasets, the users should save those files as 'source.csv', 'target.csv', 'valid.csv' in './data_files/' directory.
* We use the Rossmann store sales dataset (https://www.kaggle.com/c/rossmann-store-sales) as an example in this notebook. Please download the dataset (rossmann-store-sales.zip) from the following link (https://www.kaggle.com/c/rossmann-store-sales/data) and save it to the './data_files/' directory.
## Requirements
Clone https://github.com/google-research/google-research/tree/master/dvrl to the current directory
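One possible way to do this from within a notebook (a sketch; it assumes `git` is available and simply copies the `dvrl` subdirectory, plus creates the working directories used below):
```
# Sketch: clone the google-research repo and copy the dvrl module files here
!git clone --depth 1 https://github.com/google-research/google-research.git
!cp -r google-research/dvrl/* .
!mkdir -p data_files tmp
```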
```
%load_ext autoreload
%autoreload 2
```
## Necessary packages and functions call
* load_rossmann_data: data loader for the Rossmann dataset
* preprocess_data: data extraction and normalization
* dvrl: data valuation module used for the regression problem
* dvrl_metrics: evaluates the quality of data valuation in the domain adaptation setting
```
import numpy as np
import tensorflow as tf
import lightgbm
from data_loading import load_rossmann_data, preprocess_data
import dvrl
from dvrl_metrics import learn_with_dvrl, learn_with_baseline
```
## Data loading & Select source, target, validation datasets
* Load source, target, validation dataset and save those datasets as source.csv, target.csv, valid.csv in './data_files/' directory
* If users have their own source.csv, target.csv, valid.csv, the users can skip this cell and just save those files to './data_files/' directory
**Input**:
* dict_no: the number of source / valid / target samples. We use 79% / 1% / 20% as the ratio of each dataset
* settings: 'train-on-all', 'train-on-rest', 'train-on-specific'
* target_store_type: target store types ('A','B','C','D').
For instance, to evaluate the performance of store type 'A', (1) 'train-on-all' setting uses the entire source dataset, (2) 'train-on-rest' setting uses the source samples with store type 'B', 'C', and 'D', (3) 'train-on-specific' setting uses the source samples with store type 'A'. Therefore, 'train-on-rest' has the maximum distribution differences between source and target datasets.
```
# The number of source / validation / target samples (79%/1%/20%)
dict_no = dict()
dict_no['source'] = 667027 # 79% of data
dict_no['valid'] = 8443 # 1% of data
# Selects a setting and target store type
setting = 'train-on-rest'
target_store_type = 'B'
# Loads data and selects source, target, validation datasets
load_rossmann_data(dict_no, setting, target_store_type)
print('Finished data loading.')
```
## Data preprocessing
* Extract features and labels from source.csv, valid.csv, target.csv in './data_files/' directory
* Normalize the features of source, validation, and target sets
```
# Normalization methods: either 'minmax' or 'standard'
normalization = 'minmax'
# Extracts features and labels. Then, normalizes features.
x_source, y_source, x_valid, y_valid, x_target, y_target, _ = \
preprocess_data(normalization, 'source.csv', 'valid.csv', 'target.csv')
print('Finished data preprocess.')
```
## Run DVRL
1. **Input**:
* data valuator network parameters - set network parameters of data valuator.
* pred_model: the predictor model that maps inputs to outputs. Any machine learning model (e.g. a neural network or an ensemble of decision trees) can be used as the predictor model, as long as it has fit and predict (for regression) / predict_proba (for classification) as its subfunctions. Fit can be implemented using multiple backpropagation iterations.
2. **Output**:
* data_valuator: function that uses training set as inputs to estimate data values
* dvrl_predictor: function that predicts labels of the testing samples
* dve_out: estimated data values of the entire training samples
```
# Resets the graph
tf.reset_default_graph()
# Defines the problem
problem = 'regression'
# Network parameters
parameters = dict()
parameters['hidden_dim'] = 100
parameters['comb_dim'] = 10
parameters['iterations'] = 1000
parameters['activation'] = tf.nn.tanh
parameters['layer_number'] = 5
parameters['batch_size'] = 50000
parameters['learning_rate'] = 0.001
# Defines predictive model
pred_model = lightgbm.LGBMRegressor()
# Sets checkpoint file name
checkpoint_file_name = './tmp/model.ckpt'
# Defines flag for using stochastic gradient descent / pre-trained model
flags = {'sgd': False, 'pretrain': False}
# Initializes DVRL
dvrl_class = dvrl.Dvrl(x_source, y_source, x_valid, y_valid, problem, pred_model, parameters, checkpoint_file_name, flags)
# Trains DVRL
dvrl_class.train_dvrl('rmspe')
# Estimates data values
dve_out = dvrl_class.data_valuator(x_source, y_source)
# Predicts with DVRL
y_target_hat = dvrl_class.dvrl_predictor(x_target)
print('Finished data valuation.')
```
## Evaluations
* In this notebook, we use LightGBM as the predictor model for evaluation purposes (but you can also replace it with another model).
* Here, we use Root Mean Squared Percentage Error (RMSPE) as the performance metric.
### DVRL Performance
DVRL learns robustly even though the training data comes from a different distribution than the target data, using guidance from the small validation set (which comes from the target distribution) via reinforcement learning.
* Train predictive model with weighted optimization using estimated data values by DVRL as the weights
```
# Defines evaluation model
eval_model = lightgbm.LGBMRegressor()
# DVRL-weighted learning
dvrl_perf = learn_with_dvrl(dve_out, eval_model,
x_source, y_source, x_valid, y_valid, x_target, y_target, 'rmspe')
# Baseline prediction performance (treat all training samples equally)
base_perf = learn_with_baseline(eval_model, x_source, y_source, x_target, y_target, 'rmspe')
print('Finished evaluation.')
print('DVRL learning performance: ' + str(np.round(dvrl_perf, 4)))
print('Baseline performance: ' + str(np.round(base_perf, 4)))
```
```
#all_slow
#export
from fastai.basics import *
from fastai.vision.all import *
#default_exp vision.gan
#default_cls_lvl 3
#hide
from nbdev.showdoc import *
```
# GAN
> Basic support for [Generative Adversarial Networks](https://arxiv.org/abs/1406.2661)
GAN stands for [Generative Adversarial Nets](https://arxiv.org/pdf/1406.2661.pdf); they were invented by Ian Goodfellow. The concept is that we train two models at the same time: a generator and a critic. The generator will try to make new images similar to the ones in a dataset, and the critic will try to distinguish real images from the ones the generator produces. The generator returns images, the critic a single number (usually a probability, 0. for fake images and 1. for real ones).
We train them against each other in the sense that at each step (more or less), we:
1. Freeze the generator and train the critic for one step by:
- getting one batch of true images (let's call that `real`)
- generating one batch of fake images (let's call that `fake`)
- have the critic evaluate each batch and compute a loss function from that; the important part is that it rewards positively the detection of real images and penalizes the fake ones
- update the weights of the critic with the gradients of this loss
2. Freeze the critic and train the generator for one step by:
- generating one batch of fake images
- evaluate the critic on it
- return a loss that rewards positively the critic thinking those are real images
- update the weights of the generator with the gradients of this loss
> Note: The fastai library provides support for training GANs through the GANTrainer, but doesn't include more than basic models.
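A minimal sketch of this alternating scheme in plain PyTorch (illustrative only; it is not the fastai `GANTrainer`, sign conventions differ between GAN variants, and `generator`, `critic` and the two optimizers are assumed to exist):
```
import torch

def gan_step(generator, critic, real, opt_g, opt_c, noise_sz=100):
    bs = real.shape[0]
    # 1) critic step: the generator is frozen (no gradient flows through it)
    with torch.no_grad():
        fake = generator(torch.randn(bs, noise_sz))
    crit_loss = critic(fake).mean() - critic(real).mean()  # reward real, penalize fake
    opt_c.zero_grad(); crit_loss.backward(); opt_c.step()
    # 2) generator step: only the generator's optimizer is stepped,
    #    so the critic's weights stay untouched
    fake = generator(torch.randn(bs, noise_sz))
    gen_loss = -critic(fake).mean()                         # reward fooling the critic
    opt_g.zero_grad(); gen_loss.backward(); opt_g.step()
    return crit_loss.item(), gen_loss.item()
```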
## Wrapping the modules
```
#export
class GANModule(Module):
"Wrapper around a `generator` and a `critic` to create a GAN."
def __init__(self, generator=None, critic=None, gen_mode=False):
if generator is not None: self.generator=generator
if critic is not None: self.critic =critic
store_attr('gen_mode')
def forward(self, *args):
return self.generator(*args) if self.gen_mode else self.critic(*args)
def switch(self, gen_mode=None):
"Put the module in generator mode if `gen_mode`, in critic mode otherwise."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
```
This is just a shell to contain the two models. When called, it will delegate the input to either the `generator` or the `critic`, depending on the value of `gen_mode`.
```
show_doc(GANModule.switch)
```
By default (leaving `gen_mode` to `None`), this will put the module in the other mode (critic mode if it was in generator mode and vice versa).
```
#export
@delegates(ConvLayer.__init__)
def basic_critic(in_size, n_channels, n_features=64, n_extra_layers=0, norm_type=NormType.Batch, **kwargs):
"A basic critic for images `n_channels` x `in_size` x `in_size`."
layers = [ConvLayer(n_channels, n_features, 4, 2, 1, norm_type=None, **kwargs)]
cur_size, cur_ftrs = in_size//2, n_features
layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, norm_type=norm_type, **kwargs) for _ in range(n_extra_layers)]
while cur_size > 4:
layers.append(ConvLayer(cur_ftrs, cur_ftrs*2, 4, 2, 1, norm_type=norm_type, **kwargs))
cur_ftrs *= 2 ; cur_size //= 2
init = kwargs.get('init', nn.init.kaiming_normal_)
layers += [init_default(nn.Conv2d(cur_ftrs, 1, 4, padding=0), init), Flatten()]
return nn.Sequential(*layers)
#export
class AddChannels(Module):
"Add `n_dim` channels at the end of the input."
def __init__(self, n_dim): self.n_dim=n_dim
def forward(self, x): return x.view(*(list(x.shape)+[1]*self.n_dim))
#export
@delegates(ConvLayer.__init__)
def basic_generator(out_size, n_channels, in_sz=100, n_features=64, n_extra_layers=0, **kwargs):
"A basic generator from `in_sz` to images `n_channels` x `out_size` x `out_size`."
cur_size, cur_ftrs = 4, n_features//2
while cur_size < out_size: cur_size *= 2; cur_ftrs *= 2
layers = [AddChannels(2), ConvLayer(in_sz, cur_ftrs, 4, 1, transpose=True, **kwargs)]
cur_size = 4
while cur_size < out_size // 2:
layers.append(ConvLayer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **kwargs))
cur_ftrs //= 2; cur_size *= 2
layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **kwargs) for _ in range(n_extra_layers)]
layers += [nn.ConvTranspose2d(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()]
return nn.Sequential(*layers)
critic = basic_critic(64, 3)
generator = basic_generator(64, 3)
tst = GANModule(critic=critic, generator=generator)
real = torch.randn(2, 3, 64, 64)
real_p = tst(real)
test_eq(real_p.shape, [2,1])
tst.switch() #tst is now in generator mode
noise = torch.randn(2, 100)
fake = tst(noise)
test_eq(fake.shape, real.shape)
tst.switch() #tst is back in critic mode
fake_p = tst(fake)
test_eq(fake_p.shape, [2,1])
#export
_conv_args = dict(act_cls = partial(nn.LeakyReLU, negative_slope=0.2), norm_type=NormType.Spectral)
def _conv(ni, nf, ks=3, stride=1, self_attention=False, **kwargs):
if self_attention: kwargs['xtra'] = SelfAttention(nf)
return ConvLayer(ni, nf, ks=ks, stride=stride, **_conv_args, **kwargs)
#export
@delegates(ConvLayer)
def DenseResBlock(nf, norm_type=NormType.Batch, **kwargs):
"Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`."
return SequentialEx(ConvLayer(nf, nf, norm_type=norm_type, **kwargs),
ConvLayer(nf, nf, norm_type=norm_type, **kwargs),
MergeLayer(dense=True))
#export
def gan_critic(n_channels=3, nf=128, n_blocks=3, p=0.15):
"Critic to train a `GAN`."
layers = [
_conv(n_channels, nf, ks=4, stride=2),
nn.Dropout2d(p/2),
DenseResBlock(nf, **_conv_args)]
nf *= 2 # after dense block
for i in range(n_blocks):
layers += [
nn.Dropout2d(p),
_conv(nf, nf*2, ks=4, stride=2, self_attention=(i==0))]
nf *= 2
layers += [
ConvLayer(nf, 1, ks=4, bias=False, padding=0, norm_type=NormType.Spectral, act_cls=None),
Flatten()]
return nn.Sequential(*layers)
#export
class GANLoss(GANModule):
"Wrapper around `crit_loss_func` and `gen_loss_func`"
def __init__(self, gen_loss_func, crit_loss_func, gan_model):
super().__init__()
store_attr('gen_loss_func,crit_loss_func,gan_model')
def generator(self, output, target):
"Evaluate the `output` with the critic then uses `self.gen_loss_func`"
fake_pred = self.gan_model.critic(output)
self.gen_loss = self.gen_loss_func(fake_pred, output, target)
return self.gen_loss
def critic(self, real_pred, input):
"Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.crit_loss_func`."
fake = self.gan_model.generator(input).requires_grad_(False)
fake_pred = self.gan_model.critic(fake)
self.crit_loss = self.crit_loss_func(real_pred, fake_pred)
return self.crit_loss
```
In generator mode, this loss function expects the `output` of the generator and some `target` (a batch of real images). It will evaluate if the generator successfully fooled the critic using `gen_loss_func`. This loss function has the following signature
```
def gen_loss_func(fake_pred, output, target):
```
to be able to combine the output of the critic on `output` (which is the first argument, `fake_pred`) with `output` and `target` (if you want to mix the GAN loss with other losses, for instance).
In critic mode, this loss function expects the `real_pred` given by the critic and some `input` (the noise fed to the generator). It will evaluate the critic using `crit_loss_func`. This loss function has the following signature
```
def crit_loss_func(real_pred, fake_pred):
```
where `real_pred` is the output of the critic on a batch of real images and `fake_pred` is generated from the noise using the generator.
```
#export
class AdaptiveLoss(Module):
"Expand the `target` to match the `output` size before applying `crit`."
def __init__(self, crit): self.crit = crit
def forward(self, output, target):
return self.crit(output, target[:,None].expand_as(output).float())
#export
def accuracy_thresh_expand(y_pred, y_true, thresh=0.5, sigmoid=True):
"Compute accuracy after expanding `y_true` to the size of `y_pred`."
if sigmoid: y_pred = y_pred.sigmoid()
return ((y_pred>thresh).byte()==y_true[:,None].expand_as(y_pred).byte()).float().mean()
```
## Callbacks for GAN training
```
#export
def set_freeze_model(m, rg):
for p in m.parameters(): p.requires_grad_(rg)
#export
class GANTrainer(Callback):
"Handles GAN Training."
run_after = TrainEvalCallback
def __init__(self, switch_eval=False, clip=None, beta=0.98, gen_first=False, show_img=True):
store_attr('switch_eval,clip,gen_first,show_img')
self.gen_loss,self.crit_loss = AvgSmoothLoss(beta=beta),AvgSmoothLoss(beta=beta)
def _set_trainable(self):
train_model = self.generator if self.gen_mode else self.critic
loss_model = self.generator if not self.gen_mode else self.critic
set_freeze_model(train_model, True)
set_freeze_model(loss_model, False)
if self.switch_eval:
train_model.train()
loss_model.eval()
def before_fit(self):
"Initialize smootheners."
self.generator,self.critic = self.model.generator,self.model.critic
self.gen_mode = self.gen_first
self.switch(self.gen_mode)
self.crit_losses,self.gen_losses = [],[]
self.gen_loss.reset() ; self.crit_loss.reset()
#self.recorder.no_val=True
#self.recorder.add_metric_names(['gen_loss', 'disc_loss'])
#self.imgs,self.titles = [],[]
def before_validate(self):
"Switch in generator mode for showing results."
self.switch(gen_mode=True)
def before_batch(self):
"Clamp the weights with `self.clip` if it's not None, set the correct input/target."
if self.training and self.clip is not None:
for p in self.critic.parameters(): p.data.clamp_(-self.clip, self.clip)
if not self.gen_mode:
(self.learn.xb,self.learn.yb) = (self.yb,self.xb)
def after_batch(self):
"Record `last_loss` in the proper list."
if not self.training: return
if self.gen_mode:
self.gen_loss.accumulate(self.learn)
self.gen_losses.append(self.gen_loss.value)
self.last_gen = to_detach(self.pred)
else:
self.crit_loss.accumulate(self.learn)
self.crit_losses.append(self.crit_loss.value)
def before_epoch(self):
"Put the critic or the generator back to eval if necessary."
self.switch(self.gen_mode)
#def after_epoch(self):
# "Show a sample image."
# if not hasattr(self, 'last_gen') or not self.show_img: return
# data = self.learn.data
# img = self.last_gen[0]
# norm = getattr(data,'norm',False)
# if norm and norm.keywords.get('do_y',False): img = data.denorm(img)
# img = data.train_ds.y.reconstruct(img)
# self.imgs.append(img)
# self.titles.append(f'Epoch {epoch}')
# pbar.show_imgs(self.imgs, self.titles)
# return add_metrics(last_metrics, [getattr(self.smoothenerG,'smooth',None),getattr(self.smoothenerC,'smooth',None)])
def switch(self, gen_mode=None):
"Switch the model and loss function, if `gen_mode` is provided, in the desired mode."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
self._set_trainable()
self.model.switch(gen_mode)
self.loss_func.switch(gen_mode)
```
> Warning: The GANTrainer is useless on its own; you need to complete it with one of the following switchers.
```
#export
class FixedGANSwitcher(Callback):
"Switcher to do `n_crit` iterations of the critic then `n_gen` iterations of the generator."
run_after = GANTrainer
def __init__(self, n_crit=1, n_gen=1): store_attr('n_crit,n_gen')
def before_train(self): self.n_c,self.n_g = 0,0
def after_batch(self):
"Switch the model if necessary."
if not self.training: return
if self.learn.gan_trainer.gen_mode:
self.n_g += 1
n_iter,n_in,n_out = self.n_gen,self.n_c,self.n_g
else:
self.n_c += 1
n_iter,n_in,n_out = self.n_crit,self.n_g,self.n_c
target = n_iter if isinstance(n_iter, int) else n_iter(n_in)
if target == n_out:
self.learn.gan_trainer.switch()
self.n_c,self.n_g = 0,0
#export
class AdaptiveGANSwitcher(Callback):
"Switcher that goes back to generator/critic when the loss goes below `gen_thresh`/`crit_thresh`."
run_after = GANTrainer
def __init__(self, gen_thresh=None, critic_thresh=None):
store_attr('gen_thresh,critic_thresh')
def after_batch(self):
"Switch the model if necessary."
if not self.training: return
if self.gan_trainer.gen_mode:
if self.gen_thresh is None or self.loss < self.gen_thresh: self.gan_trainer.switch()
else:
if self.critic_thresh is None or self.loss < self.critic_thresh: self.gan_trainer.switch()
#export
class GANDiscriminativeLR(Callback):
"`Callback` that handles multiplying the learning rate by `mult_lr` for the critic."
run_after = GANTrainer
def __init__(self, mult_lr=5.): self.mult_lr = mult_lr
def before_batch(self):
"Multiply the current lr if necessary."
if not self.learn.gan_trainer.gen_mode and self.training:
self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']*self.mult_lr)
def after_batch(self):
"Put the LR back to its value if necessary."
if not self.learn.gan_trainer.gen_mode: self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']/self.mult_lr)
```
## GAN data
```
#export
class InvisibleTensor(TensorBase):
def show(self, ctx=None, **kwargs): return ctx
#export
def generate_noise(fn, size=100): return cast(torch.randn(size), InvisibleTensor)
#export
@typedispatch
def show_batch(x:InvisibleTensor, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x:InvisibleTensor, y:TensorImage, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))]
return ctxs
bs = 128
size = 64
dblock = DataBlock(blocks = (TransformBlock, ImageBlock),
get_x = generate_noise,
get_items = get_image_files,
splitter = IndexSplitter([]),
item_tfms=Resize(size, method=ResizeMethod.Crop),
batch_tfms = Normalize.from_stats(torch.tensor([0.5,0.5,0.5]), torch.tensor([0.5,0.5,0.5])))
path = untar_data(URLs.LSUN_BEDROOMS)
dls = dblock.dataloaders(path, path=path, bs=bs)
dls.show_batch(max_n=16)
```
## GAN Learner
```
#export
def gan_loss_from_func(loss_gen, loss_crit, weights_gen=None):
"Define loss functions for a GAN from `loss_gen` and `loss_crit`."
def _loss_G(fake_pred, output, target, weights_gen=weights_gen):
ones = fake_pred.new_ones(fake_pred.shape[0])
weights_gen = ifnone(weights_gen, (1.,1.))
return weights_gen[0] * loss_crit(fake_pred, ones) + weights_gen[1] * loss_gen(output, target)
def _loss_C(real_pred, fake_pred):
ones = real_pred.new_ones (real_pred.shape[0])
zeros = fake_pred.new_zeros(fake_pred.shape[0])
return (loss_crit(real_pred, ones) + loss_crit(fake_pred, zeros)) / 2
return _loss_G, _loss_C
#export
def _tk_mean(fake_pred, output, target): return fake_pred.mean()
def _tk_diff(real_pred, fake_pred): return real_pred.mean() - fake_pred.mean()
#export
@delegates()
class GANLearner(Learner):
"A `Learner` suitable for GANs."
def __init__(self, dls, generator, critic, gen_loss_func, crit_loss_func, switcher=None, gen_first=False,
switch_eval=True, show_img=True, clip=None, cbs=None, metrics=None, **kwargs):
gan = GANModule(generator, critic)
loss_func = GANLoss(gen_loss_func, crit_loss_func, gan)
if switcher is None: switcher = FixedGANSwitcher(n_crit=5, n_gen=1)
trainer = GANTrainer(clip=clip, switch_eval=switch_eval, gen_first=gen_first, show_img=show_img)
cbs = L(cbs) + L(trainer, switcher)
metrics = L(metrics) + L(*LossMetrics('gen_loss,crit_loss'))
super().__init__(dls, gan, loss_func=loss_func, cbs=cbs, metrics=metrics, **kwargs)
@classmethod
def from_learners(cls, gen_learn, crit_learn, switcher=None, weights_gen=None, **kwargs):
"Create a GAN from `learn_gen` and `learn_crit`."
losses = gan_loss_from_func(gen_learn.loss_func, crit_learn.loss_func, weights_gen=weights_gen)
return cls(gen_learn.dls, gen_learn.model, crit_learn.model, *losses, switcher=switcher, **kwargs)
@classmethod
def wgan(cls, dls, generator, critic, switcher=None, clip=0.01, switch_eval=False, **kwargs):
"Create a WGAN from `data`, `generator` and `critic`."
return cls(dls, generator, critic, _tk_mean, _tk_diff, switcher=switcher, clip=clip, switch_eval=switch_eval, **kwargs)
GANLearner.from_learners = delegates(to=GANLearner.__init__)(GANLearner.from_learners)
GANLearner.wgan = delegates(to=GANLearner.__init__)(GANLearner.wgan)
from fastai.callback.all import *
generator = basic_generator(64, n_channels=3, n_extra_layers=1)
critic = basic_critic (64, n_channels=3, n_extra_layers=1, act_cls=partial(nn.LeakyReLU, negative_slope=0.2))
learn = GANLearner.wgan(dls, generator, critic, opt_func = RMSProp)
learn.recorder.train_metrics=True
learn.recorder.valid_metrics=False
learn.fit(1, 2e-4, wd=0.)
learn.show_results(max_n=9, ds_idx=0)
```
## Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
# Modeling and Simulation in Python
Chapter 13
Copyright 2017 Allen Downey
License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
```
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
```
### Code from previous chapters
`make_system`, `plot_results`, and `calc_total_infected` are unchanged.
```
def make_system(beta, gamma):
"""Make a system object for the SIR model.
beta: contact rate in days
gamma: recovery rate in days
returns: System object
"""
init = State(S=89, I=1, R=0)
init /= np.sum(init)
t0 = 0
t_end = 7 * 14
return System(init=init, t0=t0, t_end=t_end,
beta=beta, gamma=gamma)
def plot_results(S, I, R):
"""Plot the results of a SIR model.
S: TimeSeries
I: TimeSeries
R: TimeSeries
"""
plot(S, '--', label='Susceptible')
plot(I, '-', label='Infected')
plot(R, ':', label='Recovered')
decorate(xlabel='Time (days)',
ylabel='Fraction of population')
def calc_total_infected(results):
"""Fraction of population infected during the simulation.
results: DataFrame with columns S, I, R
returns: fraction of population
"""
return get_first_value(results.S) - get_last_value(results.S)
def run_simulation(system, update_func):
"""Runs a simulation of the system.
system: System object
update_func: function that updates state
returns: TimeFrame
"""
init, t0, t_end = system.init, system.t0, system.t_end
frame = TimeFrame(columns=init.index)
frame.row[t0] = init
for t in linrange(t0, t_end):
frame.row[t+1] = update_func(frame.row[t], t, system)
return frame
def update_func(state, t, system):
"""Update the SIR model.
state: State (s, i, r)
t: time
system: System object
returns: State (sir)
"""
beta, gamma = system.beta, system.gamma
s, i, r = state
infected = beta * i * s
recovered = gamma * i
s -= infected
i += infected - recovered
r += recovered
return State(S=s, I=i, R=r)
```
### Sweeping beta
Make a range of values for `beta`, with constant `gamma`.
```
beta_array = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0 , 1.1]
gamma = 0.2
```
Run the simulation once for each value of `beta` and print total infections.
```
for beta in beta_array:
system = make_system(beta, gamma)
results = run_simulation(system, update_func)
print(system.beta, calc_total_infected(results))
```
Wrap that loop in a function and return a `SweepSeries` object.
```
def sweep_beta(beta_array, gamma):
"""Sweep a range of values for beta.
beta_array: array of beta values
gamma: recovery rate
returns: SweepSeries that maps from beta to total infected
"""
sweep = SweepSeries()
for beta in beta_array:
system = make_system(beta, gamma)
results = run_simulation(system, update_func)
sweep[system.beta] = calc_total_infected(results)
return sweep
```
Sweep `beta` and plot the results.
```
infected_sweep = sweep_beta(beta_array, gamma)
label = 'gamma = ' + str(gamma)
plot(infected_sweep, label=label)
decorate(xlabel='Contact rate (beta)',
ylabel='Fraction infected')
savefig('figs/chap13-fig01.pdf')
```
### Sweeping gamma
Using the same array of values for `beta`
```
beta_array
```
And now an array of values for `gamma`
```
gamma_array = [0.2, 0.4, 0.6, 0.8]
```
For each value of `gamma`, sweep `beta` and plot the results.
```
plt.figure(figsize=(7, 4))
for gamma in gamma_array:
infected_sweep = sweep_beta(beta_array, gamma)
label = 'gamma = ' + str(gamma)
plot(infected_sweep, label=label)
decorate(xlabel='Contact rate (beta)',
ylabel='Fraction infected',
loc='upper left')
plt.legend(bbox_to_anchor=(1.02, 1.02))
plt.tight_layout()
savefig('figs/chap13-fig02.pdf')
```
**Exercise:** Suppose the infectious period for the Freshman Plague is known to be 2 days on average, and suppose during one particularly bad year, 40% of the class is infected at some point. Estimate the time between contacts.
```
# Solution
# Sweep beta with fixed gamma
gamma = 1/2
infected_sweep = sweep_beta(beta_array, gamma)
# Solution
# Interpolating by eye, we can see that the infection rate passes through 0.4
# when beta is between 0.6 and 0.7
# We can use the `crossings` function to interpolate more precisely
# (although we don't know about it yet :)
beta_estimate = crossings(infected_sweep, 0.4)
# Solution
# Time between contacts is 1/beta
time_between_contacts = 1/beta_estimate
```
## SweepFrame
The following sweeps two parameters and stores the results in a `SweepFrame`
```
def sweep_parameters(beta_array, gamma_array):
"""Sweep a range of values for beta and gamma.
beta_array: array of infection rates
gamma_array: array of recovery rates
returns: SweepFrame with one row for each beta
and one column for each gamma
"""
frame = SweepFrame(columns=gamma_array)
for gamma in gamma_array:
frame[gamma] = sweep_beta(beta_array, gamma)
return frame
```
Here's what the `SweepFrame` look like.
```
frame = sweep_parameters(beta_array, gamma_array)
frame.head()
```
And here's how we can plot the results.
```
for gamma in gamma_array:
label = 'gamma = ' + str(gamma)
plot(frame[gamma], label=label)
decorate(xlabel='Contact rate (beta)',
ylabel='Fraction infected',
title='',
loc='upper left')
```
We can also plot one line for each value of `beta`, although there are a lot of them.
```
plt.figure(figsize=(7, 4))
for beta in [1.1, 0.9, 0.7, 0.5, 0.3]:
label = 'beta = ' + str(beta)
plot(frame.row[beta], label=label)
decorate(xlabel='Recovery rate (gamma)',
ylabel='Fraction infected')
plt.legend(bbox_to_anchor=(1.02, 1.02))
plt.tight_layout()
savefig('figs/chap13-fig03.pdf')
```
It's often useful to separate the code that generates results from the code that plots the results, so we can run the simulations once, save the results, and then use them for different analysis, visualization, etc.
After running `sweep_parameters`, we have a `SweepFrame` with one row for each value of `beta` and one column for each value of `gamma`.
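For instance (a sketch; it assumes `SweepFrame` behaves like the pandas `DataFrame` it is built on), the sweep results could be written to disk once and reloaded later:
```
# Save the sweep results once...
frame.to_csv('sweep_results.csv')
# ...and reload them later for plotting without re-running the simulations
import pandas as pd
frame_loaded = pd.read_csv('sweep_results.csv', index_col=0)
frame_loaded.head()
```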
```
contour(frame)
decorate(xlabel='Recovery rate (gamma)',
ylabel='Contact rate (beta)',
title='Fraction infected, contour plot')
savefig('figs/chap13-fig04.pdf')
```
# Human numbers
```
from fastai2.text.all import *
bs=64
```
## Data
```
path = untar_data(URLs.HUMAN_NUMBERS)
path.ls()
def readnums(d): return ', '.join(o.strip() for o in open(path/d).readlines())
train_txt = readnums('train.txt'); train_txt[:80]
valid_txt = readnums('valid.txt'); valid_txt[-80:]
train_tok = tokenize1(train_txt)
valid_tok = tokenize1(valid_txt)
dsets = Datasets([train_tok, valid_tok], tfms=Numericalize, dl_type=LMDataLoader, splits=[[0], [1]])
dls = dsets.dataloaders(bs=bs, val_bs=bs)
dsets.show((dsets.train[0][0][:80],))
len(dsets.valid[0][0])
len(dls.valid)
dls.seq_len, len(dls.valid)
13017/72/bs
it = iter(dls.valid)
x1,y1 = next(it)
x2,y2 = next(it)
x3,y3 = next(it)
it.close()
x1.numel()+x2.numel()+x3.numel()
```
This is the closest multiple of 64 below 13017.
```
x1.shape,y1.shape
x2.shape,y2.shape
x1[0]
y1[0]
v = dls.vocab
' '.join([v[x] for x in x1[0]])
' '.join([v[x] for x in y1[0]])
' '.join([v[x] for x in x2[0]])
' '.join([v[x] for x in x3[0]])
' '.join([v[x] for x in x1[1]])
' '.join([v[x] for x in x2[1]])
' '.join([v[x] for x in x3[1]])
' '.join([v[x] for x in x3[-1]])
dls.valid.show_batch()
```
## Single fully connected model
```
dls = dsets.dataloaders(bs=bs, seq_len=3)
x,y = dls.one_batch()
x.shape,y.shape
nv = len(v); nv
nh=64
def loss4(input,target): return F.cross_entropy(input, target[:,-1])
def acc4 (input,target): return accuracy(input, target[:,-1])
class Model0(Module):
def __init__(self):
self.i_h = nn.Embedding(nv,nh) # green arrow
self.h_h = nn.Linear(nh,nh) # brown arrow
self.h_o = nn.Linear(nh,nv) # blue arrow
self.bn = nn.BatchNorm1d(nh)
def forward(self, x):
h = self.bn(F.relu(self.h_h(self.i_h(x[:,0]))))
if x.shape[1]>1:
h = h + self.i_h(x[:,1])
h = self.bn(F.relu(self.h_h(h)))
if x.shape[1]>2:
h = h + self.i_h(x[:,2])
h = self.bn(F.relu(self.h_h(h)))
return self.h_o(h)
learn = Learner(dls, Model0(), loss_func=loss4, metrics=acc4)
learn.fit_one_cycle(6, 1e-4)
```
## Same thing with a loop
```
class Model1(Module):
def __init__(self):
self.i_h = nn.Embedding(nv,nh) # green arrow
self.h_h = nn.Linear(nh,nh) # brown arrow
self.h_o = nn.Linear(nh,nv) # blue arrow
self.bn = nn.BatchNorm1d(nh)
def forward(self, x):
h = torch.zeros(x.shape[0], nh).to(device=x.device)
for i in range(x.shape[1]):
h = h + self.i_h(x[:,i])
h = self.bn(F.relu(self.h_h(h)))
return self.h_o(h)
learn = Learner(dls, Model1(), loss_func=loss4, metrics=acc4)
learn.fit_one_cycle(6, 1e-4)
```
## Multi fully connected model
```
dls = dsets.dataloaders(bs=bs, seq_len=20)
x,y = dls.one_batch()
x.shape,y.shape
class Model2(Module):
def __init__(self):
self.i_h = nn.Embedding(nv,nh)
self.h_h = nn.Linear(nh,nh)
self.h_o = nn.Linear(nh,nv)
self.bn = nn.BatchNorm1d(nh)
def forward(self, x):
h = torch.zeros(x.shape[0], nh).to(device=x.device)
res = []
for i in range(x.shape[1]):
h = h + self.i_h(x[:,i])
h = F.relu(self.h_h(h))
res.append(self.h_o(self.bn(h)))
return torch.stack(res, dim=1)
learn = Learner(dls, Model2(), loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.fit_one_cycle(10, 1e-4, pct_start=0.1)
```
## Maintain state
```
class Model3(Module):
def __init__(self):
self.i_h = nn.Embedding(nv,nh)
self.h_h = nn.Linear(nh,nh)
self.h_o = nn.Linear(nh,nv)
self.bn = nn.BatchNorm1d(nh)
self.h = torch.zeros(bs, nh).cuda()
def forward(self, x):
res = []
if x.shape[0]!=self.h.shape[0]: self.h = torch.zeros(x.shape[0], nh).cuda()
h = self.h
for i in range(x.shape[1]):
h = h + self.i_h(x[:,i])
h = F.relu(self.h_h(h))
res.append(self.bn(h))
self.h = h.detach()
res = torch.stack(res, dim=1)
res = self.h_o(res)
return res
def reset(self): self.f.h = torch.zeros(bs, nh).cuda()
learn = Learner(dls, Model3(), metrics=accuracy, loss_func=CrossEntropyLossFlat())
learn.fit_one_cycle(20, 3e-3)
```
## nn.RNN
```
class Model4(Module):
def __init__(self):
self.i_h = nn.Embedding(nv,nh)
self.rnn = nn.RNN(nh,nh, batch_first=True)
self.h_o = nn.Linear(nh,nv)
self.bn = BatchNorm1dFlat(nh)
self.h = torch.zeros(1, bs, nh).cuda()
def forward(self, x):
if x.shape[0]!=self.h.shape[1]: self.h = torch.zeros(1, x.shape[0], nh).cuda()
res,h = self.rnn(self.i_h(x), self.h)
self.h = h.detach()
return self.h_o(self.bn(res))
learn = Learner(dls, Model4(), loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.fit_one_cycle(20, 3e-3)
```
## 2-layer GRU
```
class Model5(Module):
def __init__(self):
self.i_h = nn.Embedding(nv,nh)
self.rnn = nn.GRU(nh, nh, 2, batch_first=True)
self.h_o = nn.Linear(nh,nv)
self.bn = BatchNorm1dFlat(nh)
self.h = torch.zeros(2, bs, nh).cuda()
def forward(self, x):
if x.shape[0]!=self.h.shape[1]: self.h = torch.zeros(2, x.shape[0], nh).cuda()
res,h = self.rnn(self.i_h(x), self.h)
self.h = h.detach()
return self.h_o(self.bn(res))
learn = Learner(dls, Model5(), loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.fit_one_cycle(10, 1e-2)
```
## fin
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import sys
from pathlib import Path
sys.path.append(str(Path().cwd().parent))
import pandas as pd
from load_dataset import Dataset
from model import TimeSeriesPredictor, TimeSeriesDetector
from sklearn.linear_model import Ridge
import plotting
from typing import Tuple
```
## Walkthrough (no need to run this part)
#### Take an hourly time series
```
dataset = Dataset('../data/dataset')
ts = dataset['hour_2263.csv']
ts.plot(figsize=(10, 8))
```
#### Split into train and test
```
ts_train, ts_test = ts[:-100], ts[-100:]
```
#### Create a detector instance with Ridge as the base model
```
detector = TimeSeriesDetector(
granularity='PT1H',
num_lags=24,
model=Ridge,
alpha=7,
sigma=2.3
)
```
#### Fit the model on the training set
```
detector.fit(ts_train)
```
#### Using the fitted model, collect statistics from the training set
```
detector.fit_statistics(ts_train)
detector.std
```
#### Make an in-sample forecast on ts_test
```
preds = detector.predict_batch(ts_train, ts_test)
```
#### Using the collected statistic (the standard deviation of the residuals), obtain the confidence intervals
```
lower, upper = detector.get_prediction_intervals(preds)
lower
upper
```
#### Get the anomalies
```
anoms = detector.detect(ts_test, preds)
anoms
plotting.plot_detection(ts_test, upper, lower, preds)
```
## Practice
```
class TimeSeriesDetector(TimeSeriesPredictor):
def __init__(self, sigma=2.7, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sigma = sigma
def fit_statistics(self, ts: pd.Series):
"""
        Using the predict_batch method, obtains in-sample predictions on the series ts.
        Then computes the residuals by subtracting this forecast from the actual values,
        computes the standard deviation of the residuals and stores it in the std attribute.
"""
self.std = std
def get_prediction_intervals(self, y_pred: pd.Series, season=False) -> Tuple[pd.Series]:
"""
        Using the residual std found above, the given self.sigma and the predicted values y_pred,
        returns the confidence intervals for those predictions.
"""
return lower, upper
def detect(self, ts_true, ts_pred, season=False) -> pd.Series:
"""
        Using the get_prediction_intervals method, returns the values of the series ts_true
        that fall outside the confidence interval bounds.
"""
return
def fit_seasonal_statistics(self, ts_train, n_splits=3, period=24):
pass
```
## Practice. Part 1. Basic.
### Task 1. The fit_statistics method.
Add a `fit_statistics` method to the `TimeSeriesPredictor` class that, using the fitted model, obtains the residuals
of the predictions on the training set and returns the standard deviation of those residuals.
* takes the series ts as input
* using the `self.predict_batch` method, obtains an in-sample forecast for ts
* computes the residuals by subtracting the forecast from the actual values (don't forget abs())
* computes the standard deviation of the residuals and stores it in the `self.std` attribute
### Task 2. The get_prediction_intervals method.
Add a get_prediction_intervals method to the TimeSeriesPredictor class that takes the predicted batch ts_pred and, using the std computed on the training set, returns the lower and upper confidence intervals for each point.
* takes the predicted batch y_pred as input
* returns the upper and lower intervals for it via the formula `upper, lower = y_pred +/- sigma * std`
### Task 3. The detect method.
Add a detect method to the TimeSeriesPredictor class that takes ts_true and ts_pred as input and returns the values of the series that fall outside the confidence interval bounds.
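A possible reference sketch for Part 1 (illustrative only; the exact `predict_batch` call signature is an assumption based on the walkthrough above):
```
class TimeSeriesDetectorSketch(TimeSeriesPredictor):
    def __init__(self, sigma=2.7, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.sigma = sigma

    def fit_statistics(self, ts: pd.Series):
        # in-sample forecast on the training series; std of the absolute residuals
        preds = self.predict_batch(ts, ts)  # assumed signature: (history, batch)
        self.std = (ts - preds).abs().std()

    def get_prediction_intervals(self, y_pred: pd.Series):
        lower = y_pred - self.sigma * self.std
        upper = y_pred + self.sigma * self.std
        return lower, upper

    def detect(self, ts_true: pd.Series, ts_pred: pd.Series) -> pd.Series:
        lower, upper = self.get_prediction_intervals(ts_pred)
        return ts_true[(ts_true < lower) | (ts_true > upper)]
```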
## Practice. Part 2. Advanced.
* The most important assumption of this method is that the residuals are at least roughly normal.
* One obvious violation of this "normality" of the residuals shows up in series where the noise component has a different variance depending on where it falls within the seasonal period. For example, in our series the noise during the day, at the "peaks" of the series, is clearly higher than early in the morning or at night, which is explained by natural differences in the business rules.
* One way to deal with the problem above is to compute seasonal intervals: instead of computing the standard deviation of the residuals over the whole history of the series, we compute the standard deviation corresponding to a specific segment of the seasonal period.
### Task 1. The fit_seasonal_statistics method.
Add a `fit_seasonal_statistics` method that returns the standard deviation of the prediction residuals for `n_splits` equal segments within the seasonal period. For example, for hourly series and `n_splits=3` it would return sigmas for three segments: from 0 to 8 a.m., from 8 a.m. to 4 p.m., and from 4 p.m. to midnight.
* `def fit_seasonal_statistics(self, ts_train, n_splits=3, period=24):`
    # get in-sample predictions for ts_train
    # split the residuals into datetime intervals of length period/n_splits
    # compute the standard deviation for each segment
    # store the standard deviations in the attribute self.season_std = {
datetime_range_1: std_1,
datetime_range_2: std_2,
datetime_range_3: std_3,
}
### Task 2. Modify get_prediction_intervals.
Add a `season=True/False` parameter to the `get_prediction_intervals` method which, when enabled, determines which
of the intervals in `self.season_std` each point of `ts_pred` belongs to and, using the corresponding `std`,
returns the confidence intervals for that point.
```
predictor.fit(ts_train)
predictor.fit_seasonal_statistics(ts_train)
predictions = predictor.predict_batch(ts_train, ts_test)
lower, upper = predictor.get_prediction_intervals(predictions, season=True)
predictor.detect(ts_test, predictions, season=True)
```
```
import pandas as pd
import numpy as np
import scipy
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import recall_score, confusion_matrix
from sklearn.metrics import roc_auc_score
```
Credit card dataset obtained from: https://www.kaggle.com/mlg-ulb/creditcardfraud
```
creditcard = pd.read_csv(r'/Users/admin/Documents/Supervised_learning/Supervised_learning/creditcard.csv')
creditcard.head()
creditcard.shape
#The authors have warned us that the dataset is unbalanced, there are very few fraudulent cases
np.unique(creditcard.Class, return_counts = True)
```
# Vanilla Logistic Regression
```
creditcard.columns
Y = creditcard['Class']
X =creditcard.loc[:, ~creditcard.columns.isin(['Class'])]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=108)
logreg = LogisticRegression(C=1e6)
print(np.mean(cross_val_score(logreg, X_train, Y_train, scoring = 'roc_auc')))
```
# Ridge Regression
```
roc_scores = []
Cs = []
for value in [1e-10,1e-3, 1, 5, 20]:
ridge = LogisticRegression(C=value, penalty= 'l2')
roc = np.mean(cross_val_score(ridge, X_train, Y_train, scoring = 'roc_auc'))
roc_scores.append(roc)
Cs.append(value)
df = pd.DataFrame(roc_scores)
df['params'] = Cs
df.columns=['roc_auc_scores', 'params']
df.sort_values(by = 'roc_auc_scores', ascending=False).reset_index(drop=True)
```
# Test Set Validation
```
ridge = LogisticRegression(C=2.000000e+01, penalty= 'l2')
ridge.fit(X_train, Y_train)
roc_auc_score(Y_test, ridge.predict_proba(X_test)[:, 1])
```
# Lasso
```
roc_auc_scores = []
Cs = []
for value in [1e-15, 1e-3, 10]:
    lasso = LogisticRegression(C=value, penalty= 'l1')
    roc = np.mean(cross_val_score(lasso, X_train, Y_train, scoring = 'roc_auc'))
    roc_auc_scores.append(roc)
    Cs.append(value)
df = pd.DataFrame(roc_auc_scores)
df['params'] = Cs
df.columns=['roc_auc_scores', 'params']
df.sort_values(by = 'roc_auc_scores', ascending=False).reset_index(drop=True)
```
# Test Set Validation
```
lasso = LogisticRegression(C=1.000000e+01, penalty= 'l1')
lasso.fit(X_train, Y_train)
roc_auc_score(Y_test, lasso.predict_proba(X_test)[:, 1])
```
# Random Forest
```
roc_auc_scores = []
parameters = []
est_number = [100, 500,700]
for value in est_number:
rfc = RandomForestClassifier(n_jobs = -1, n_estimators = value, class_weight = 'balanced')
roc_auc = np.mean(cross_val_score(rfc, X_train, Y_train, scoring = 'roc_auc', n_jobs=-1))
roc_auc_scores.append(roc_auc)
parameters.append(value)
df = pd.DataFrame(roc_auc_scores)
df['params'] = parameters
df.columns=['roc_auc_scores', 'params']
df.sort_values(by = 'roc_auc_scores', ascending=False).reset_index(drop=True)
roc_auc_scores = []
parameters = []
depth = [8, 20, 50]
for value in depth:
rfc = RandomForestClassifier(
n_jobs = -1,
class_weight = 'balanced',
n_estimators = 700,
max_depth = value)
roc_auc = np.mean(cross_val_score(
rfc,
X_train,
Y_train,
scoring = 'roc_auc',
n_jobs=-1))
roc_auc_scores.append(roc_auc)
parameters.append(value)
df = pd.DataFrame(roc_auc_scores)
df['params'] = parameters
df.columns=['roc_auc_scores', 'params']
df.sort_values(by = 'roc_auc_scores', ascending=False).reset_index(drop=True)
rfc = RandomForestClassifier(
n_jobs = -1,
class_weight = 'balanced',
n_estimators = 1000,
max_depth = 10)
roc_auc = np.mean(cross_val_score(
rfc,
X_train,
Y_train,
scoring = 'roc_auc',
n_jobs=-1))
print(roc_auc)
```
# Test Set Validation
```
rfc= RandomForestClassifier(n_estimators = 1000, max_depth = 10, n_jobs=-1, class_weight='balanced')
rfc.fit(X_train, Y_train)
roc_auc_score(Y_test, rfc.predict_proba(X_test)[:, 1])
```
# We can manually set a threshold that reflects our business objectives
```
def prediction(classifier, feature_set, prob):
y_predicted = []
for i in classifier.predict_proba(feature_set)[:, 1]:
if i > prob:
y_predicted.append(1)
else:
y_predicted.append(0)
return y_predicted
y_predicted = prediction(rfc, X_test, 0.05)
confusion_matrix(Y_test, y_predicted)
y_predicted = prediction(rfc, X_test, 0.3)
confusion_matrix(Y_test, y_predicted)
```
## Introduction: How can you use leaf indexes from a tree ensemble?
[](https://colab.research.google.com/github/catboost/tutorials/blob/master/leaf_indexes_calculation/leaf_indexes_calculation_tutorial.ipynb)
Suppose we have a fitted tree ensemble of size $T$. Then computing a prediction for a new object can be viewed as follows. First, the original features of the sample are transformed into a sequence of $T$ categorical features indicating which leaf of each tree the object falls into. Then that sequence of categorical features is one-hot encoded. Finally, the prediction is calculated as a scalar product of the one-hot encoding and the vector of all leaf values of the ensemble.
So a tree ensemble can be viewed as a linear model over transformed features. Ultimately, one can say that boosting on trees is a linear model with a generator of tree-transformed features: in the process of training it generates new features and fits coefficients for them in a greedy way.
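To make this decomposition concrete, here is a tiny self-contained NumPy sketch (the numbers are made up for illustration and it is not tied to the CatBoost API):
```
import numpy as np

T, leaves_per_tree = 3, 4                          # 3 trees of depth 2 (4 leaves each)
leaf_values = np.random.randn(T, leaves_per_tree)  # all leaf values of the ensemble
leaf_indexes = np.array([2, 0, 3])                 # leaf the object falls into, per tree

# One-hot encode the T categorical "leaf index" features
one_hot = np.zeros((T, leaves_per_tree))
one_hot[np.arange(T), leaf_indexes] = 1.0

# Prediction = scalar product of the one-hot encoding with the leaf values
prediction = float(np.sum(one_hot * leaf_values))
assert np.isclose(prediction, leaf_values[np.arange(T), leaf_indexes].sum())
```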
This decomposition of a tree ensemble on a feature transformation and a linear model suggests several tricks:
1. We can tune the leaf values all together (not greedily) with the help of all the techniques available for linear models.
2. Transfer learning: we can take the feature transformation from one model and apply it to another dataset with the same features (e.g. to predict a different target or to fit a new model on fresh data).
3. Online learning: we can keep the feature transformation (i.e. the tree structures) constant and perform online updates on the leaf values (viewed as the coefficients of the linear model). See a real-world example in the paper [Practical Lessons from Predicting Clicks on Ads at Facebook](https://research.fb.com/wp-content/uploads/2016/11/practical-lessons-from-predicting-clicks-on-ads-at-facebook.pdf).
## In this tutorial we will:
1. See how to get the feature transformation from a CatBoost model (i.e. calculate which leaves of the model's trees objects fall into).
2. Perform a sanity check for the first use case of leaf index calculation mentioned above on the California housing dataset.
```
from __future__ import print_function
import numpy as np
from scipy.stats import ttest_rel
from sklearn.datasets import fetch_california_housing
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error
from catboost import CatBoostRegressor
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
seed = 42
```
### Download and split data
Since it's a demo, let's leave the major part of the data for testing.
```
data = fetch_california_housing(return_X_y=True)
splitted_data = train_test_split(*data, test_size = 0.9, random_state=seed)
X_train, X_test, y_train, y_test = splitted_data
X_train, X_validate, y_train, y_validate = train_test_split(X_train, y_train, test_size=0.2, random_state=seed)
print("{:<20} {}".format("train size:", X_train.shape[0]))
print("{:<20} {}".format("validation size:", X_validate.shape[0]))
print("{:<20} {}".format("test size:", X_test.shape[0]))
```
### Fit catboost
I've used a very large learning rate in order to get a small model (and a fast tutorial).
Decreasing the learning rate yields a better but larger ensemble. The effect of leaf value tuning deteriorates in that case but remains statistically significant. Interestingly, the trick still works for an ensemble of size $\approx 500$ (learning rate 0.1-0.2), when the number of features in the linear model exceeds the number of training objects five-fold.
```
catboost_params = {
"iterations": 500,
"learning_rate": 0.6,
"depth": 4,
"loss_function": "RMSE",
"verbose": False,
"random_seed": seed
}
cb_regressor = CatBoostRegressor(**catboost_params)
cb_regressor.fit(X_train, y_train, eval_set=(X_validate, y_validate), plot=True)
print("tree count: {}".format(cb_regressor.tree_count_))
print("best rmse: {:.5}".format(cb_regressor.best_score_['validation']["RMSE"]))
```
### Transform train data
```
class LeafIndexTransformer(object):
def __init__(self, model):
self.model = model
self.transformer = OneHotEncoder(handle_unknown="ignore")
def fit(self, X):
leaf_indexes = self.model.calc_leaf_indexes(X)
self.transformer.fit(leaf_indexes)
def transform(self, X):
leaf_indexes = self.model.calc_leaf_indexes(X)
return self.transformer.transform(leaf_indexes)
transformer = LeafIndexTransformer(cb_regressor)
transformer.fit(X_train)
train_embedding = transformer.transform(X_train)
validate_embedding = transformer.transform(X_validate)
```
### Fit linear model
```
lin_reg = ElasticNet(warm_start=True)
alpha_range = np.round(np.exp(np.linspace(np.log(0.001), np.log(0.01), 5)), decimals=5)
best_alpha = None
best_loss = None
for curr_alpha in alpha_range:
lin_reg.set_params(alpha=curr_alpha)
lin_reg.fit(train_embedding, y_train)
validate_predict = lin_reg.predict(validate_embedding)
validate_loss = mean_squared_error(y_validate, validate_predict)
if best_alpha is None or best_loss > validate_loss:
best_alpha = curr_alpha
best_loss = validate_loss
print("best alpha: {}".format(best_alpha))
print("best rmse: {}".format(np.sqrt(best_loss)))
lin_reg.set_params(alpha=best_alpha)
lin_reg.fit(train_embedding, y_train)
```
### Evaluate on test data
```
test_embedding = transformer.transform(X_test)
tuned_predict = lin_reg.predict(test_embedding)
untuned_predict = cb_regressor.predict(X_test)
tuned_rmse = np.sqrt(np.mean((tuned_predict - y_test)**2))
untuned_rmse = np.sqrt(np.mean((untuned_predict - y_test)**2))
percent_delta = 100. * (untuned_rmse / tuned_rmse - 1)
print("Tuned model test rmse: {:.5}".format(tuned_rmse))
print("Untuned model test rmse: {:.5} (+{:.2}%)".format(untuned_rmse, percent_delta))
pvalue = ttest_rel((tuned_predict - y_test)**2, (untuned_predict - y_test)**2).pvalue
print("pvalue: {:.5}".format(pvalue))
```
# Unsupervised methods
In this lesson, we'll cover unsupervised computational text analysis approaches. The central methods covered are TF-IDF and topic modeling. Both of these are common approaches in the social sciences and humanities.
[DTM/TF-IDF](#dtm)<br>
[Topic modeling](#topics)<br>
### Today you will
* Understand the DTM and why it's important to text analysis
* Learn how to create a DTM in Python
* Learn basic functionality of Python's package scikit-learn
* Understand tf-idf scores
* Learn a simple way to identify distinctive words
* Implement a basic topic modeling algorithm and learn how to tweak it
* In the process, gain more familiarity and comfort with the Pandas package and manipulating data
### Key Jargon
* *Document Term Matrix*:
* a matrix that describes the frequency of terms that occur in a collection of documents. In a document-term matrix, rows correspond to documents in the collection and columns correspond to terms.
* *TF-IDF Scores*:
* short for term frequency–inverse document frequency, is a numerical statistic that is intended to reflect how important a word is to a document in a collection or corpus.
* *Topic Modeling*:
* A general class of statistical models that uncover abstract topics within a text. It uses the co-occurrence of words within documents, compared to their distribution across documents, to uncover these abstract themes. The output is a list of weighted words, which indicate the subject of each topic, and a weight distribution across topics for each document.
* *LDA*:
* Latent Dirichlet Allocation. A particular model for topic modeling. It does not take document order into account, unlike other topic modeling algorithms.
## DTM/TF-IDF <a id='dtm'></a>
In this lesson we will use Python's scikit-learn package learn to make a document term matrix from a .csv Music Reviews dataset (collected from MetaCritic.com). We will then use the DTM and a word weighting technique called tf-idf (term frequency inverse document frequency) to identify important and discriminating words within this dataset (utilizing the Pandas package). The illustrating question: **what words distinguish reviews of Rap albums, Indie Rock albums, and Jazz albums?**
```
import os
import numpy as np
import pandas as pd
DATA_DIR = 'data'
music_fname = 'music_reviews.csv'
music_fname = os.path.join(DATA_DIR, music_fname)
```
### First attempt at reading in file
```
reviews = pd.read_csv(music_fname, sep='\t')
reviews.head()
```
Print the text of the first review.
```
print(reviews['body'][0])
```
### Explore the Data using Pandas
Let's first look at some descriptive statistics about this dataset, to get a feel for what's in it. We'll do this using the Pandas package.
Note: this is always good practice. It serves two purposes. It checks to make sure your data is correct and that there are no major errors. It also keeps you in touch with your data, which will help with interpretation. <3 your data!
First, what genres are in this dataset, and how many reviews in each genre?
```
#We can count this using the value_counts() function
reviews['genre'].value_counts()
```
The first thing most people do is to `describe` their data. (This is the `summary` command in R, or the `sum` command in Stata).
```
#There's only one numeric column in our data so we only get one column for output.
reviews.describe()
```
This only gets us numerical summaries. To get summaries of some of the other columns, we can explicitly ask for it.
```
reviews.describe(include=['O'])
```
Who were the reviewers?
```
reviews['critic'].value_counts().head(10)
```
And the artists?
```
reviews['artist'].value_counts().head(10)
```
We can get the average score as follows:
```
reviews['score'].mean()
```
Now, what is the average score for each genre? To answer this, we use the Pandas `groupby` function. You'll want to get very familiar with the `groupby` function. It's quite powerful. (Similar to `collapse` in Stata)
```
reviews_grouped_by_genre = reviews.groupby("genre")
reviews_grouped_by_genre['score'].mean().sort_values(ascending=False)
```
### Creating the DTM using scikit-learn
Ok, that's the summary of the metadata. Next, we turn to analyzing the text of the reviews. Remember, the text is stored in the 'body' column. First, a preprocessing step to remove numbers.
```
def remove_digits(comment):
return ''.join([ch for ch in comment if not ch.isdigit()])
reviews['body_without_digits'] = reviews['body'].apply(remove_digits)
reviews
reviews['body_without_digits'].head()
```
### CountVectorizer Function
Our next step is to turn the text into a document term matrix using the scikit-learn function called `CountVectorizer`.
```
from sklearn.feature_extraction.text import CountVectorizer
countvec = CountVectorizer()
sparse_dtm = countvec.fit_transform(reviews['body_without_digits'])
```
Great! We made a DTM! Let's look at it.
```
sparse_dtm
```
This format is called Compressed Sparse Format. It saves a lot of memory to store the DTM in this format, but it is difficult for a human to read. To illustrate the techniques in this lesson we will first convert this matrix back to a Pandas DataFrame, a format we're more familiar with. For larger datasets, you will have to use the Compressed Sparse Format. Putting it into a DataFrame, however, will enable us to get more comfortable with Pandas!
```
dtm = pd.DataFrame(sparse_dtm.toarray(), columns=countvec.get_feature_names(), index=reviews.index)
dtm.head()
```
### What can we do with a DTM?
We can quickly identify the most frequent words
```
dtm.sum().sort_values(ascending=False).head(10)
```
### Challenge
* Print out the most infrequent words rather than the most frequent words. You can look at the [Pandas documentation](http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-stats) for more information.
* Print the average number of times each word is used in a review.
* Print this out sorted from highest to lowest.
### TF-IDF scores
How to find distinctive words in a corpus is a long-standing question in text analysis. Today, we'll learn one simple approach to this: TF-IDF. The idea behind these word scores is to weight words not just by their frequency, but by their frequency in one document compared to their distribution across all documents. Words that are frequent, but are also used in every single document, will not be distinguishing. We want to identify words that are unevenly distributed across the corpus.
One of the most popular ways to weight words (beyond frequency counts) is the `tf-idf score`. Offsetting the frequency of a word by its document frequency (the number of documents in which it appears) will, in theory, filter out common terms such as 'the', 'of', and 'and'.
Traditionally, the *inverse document frequency* of word $j$ is calculated as:
$idf_{j} = log\left(\frac{\#docs}{\#docs\,with\,j}\right)$
and the *term freqency - inverse document frequency* is
$tfidf_{ij} = f_{ij}\times{idf_j}$ where $f_{ij}$ is the number of occurences of word $j$ in document $i$.
You can, and often should, normalize the word frequency:
$tfidf_{ij} = \frac{f_{ij}}{\#words\,in\,doc\,i}\times{idf_{j}}$
We can calculate this manually, but scikit-learn has a built-in function to do so. This function also uses log frequencies, so the numbers will not correspond exactly to the calculations above. We'll use the [scikit-learn calculation](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html), but a challenge for you: use Pandas to calculate this manually.
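If you want to check your manual version, here is one possible sketch using the `dtm` DataFrame built above (it follows the textbook formulas, so the numbers will differ slightly from scikit-learn's smoothed variant):
```
import numpy as np

# document frequency: number of reviews each word appears in
doc_freq = (dtm > 0).sum(axis=0)
idf = np.log(len(dtm) / doc_freq)

# normalized term frequency: counts divided by the number of words in each review
tf = dtm.div(dtm.sum(axis=1), axis=0)

# tf-idf = tf * idf (idf is broadcast across the columns)
manual_tfidf = tf.mul(idf, axis=1)
manual_tfidf.max().sort_values(ascending=False).head(10)
```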
### TF-IDFVectorizer Function
To do so, we simply do the same thing we did above with CountVectorizer, but instead we use the function TfidfVectorizer.
```
from sklearn.feature_extraction.text import TfidfVectorizer
tfidfvec = TfidfVectorizer()
sparse_tfidf = tfidfvec.fit_transform(reviews['body_without_digits'])
sparse_tfidf
tfidf = pd.DataFrame(sparse_tfidf.toarray(), columns=tfidfvec.get_feature_names(), index=reviews.index)
tfidf.head()
```
Let's look at the 20 words with highest tf-idf weights.
```
tfidf.max().sort_values(ascending=False).head(20)
```
Ok! We have successfully identified content words, without removing stop words.
### Identifying Distinctive Words
What can we do with this? These scores are best used when you want to identify distinctive words for individual documents, or groups of documents, compared to other groups or the corpus as a whole. To illustrate this, let's compare three genres and identify the most distinctive words by genre.
First we add in a column of genre.
```
tfidf['genre_'] = reviews['genre']
tfidf.head()
```
Now let's compare the words with the highest tf-idf weight for each genre.
```
rap = tfidf[tfidf['genre_']=='Rap']
indie = tfidf[tfidf['genre_']=='Indie']
jazz = tfidf[tfidf['genre_']=='Jazz']
rap.max(numeric_only=True).sort_values(ascending=False).head()
indie.max(numeric_only=True).sort_values(ascending=False).head()
jazz.max(numeric_only=True).sort_values(ascending=False).head()
```
There we go! A method of identifying distinctive words.
### Challenge
Instead of outputting the highest weighted words, output the lowest weighted words. How should we interpret these words?
# Topic modeling <a id='topics'></a>
The goal of topic models can be twofold: (1) learning something about the topics themselves, i.e., what the text is about; (2) reducing the dimensionality of text by representing a document as a weighted average of K topics instead of a vector of token counts over the whole vocabulary. In the latter case, topic modeling is a way to treat text like any other data, in a form that is more tractable for subsequent statistical analysis (linear/logistic regression, etc).
There are many topic modeling algorithms, but we'll use LDA. This is a standard model to use. Again, the goal is not to learn everything you need to know about topic modeling. Instead, this will provide you some starter code to run a simple model, with the idea that you can use this base of knowledge to explore this further.
We will run Latent Dirichlet Allocation, the most basic and the oldest version of topic modeling$^1$. We will run this in one big chunk of code. Our challenge: use our knowledge of scikit-learn that we gained above to walk through the code to understand what it is doing. Your challenge: figure out how to modify this code to work on your own data, and/or tweak the parameters to get better output.
First, a bit of theory. LDA is a generative model - a model over the entire data generating process - in which a document is a mixture of topics and topics are probability distributions over tokens in the vocabulary. The (normalized) frequency of word $j$ in document $i$ can be written as:
$q_{ij} = v_{i1}*\theta_{1j} + v_{i2}*\theta_{2j} + ... + v_{iK}*\theta_{Kj}$
where K is the total number of topics, $\theta_{kj}$ is the probability that word $j$ shows up in topic $k$ and $v_{ik}$ is the weight assigned to topic $k$ in document $i$. The model treats $v$ and $\theta$ as generated from Dirichlet-distributed priors and can be estimated through Maximum Likelihood or Bayesian methods.
Note: we will be using a different dataset for this technique. The music reviews in the above dataset are often short, one word or one sentence reviews. Topic modeling is not really appropriate for texts that are this short. Instead, we want texts that are longer and are composed of multiple topics each. For this exercise we will use a database of children's literature from the 19th century.
The data were compiled by students in this course: http://english197s2015.pbworks.com/w/page/93127947/FrontPage
Found here: http://dhresourcesforprojectbuilding.pbworks.com/w/page/69244469/Data%20Collections%20and%20Datasets#demo-corpora
That page has additional corpora, for those interested in exploring text analysis further.
$^1$ Reference: Blei, D. M., A. Y. Ng, and M. I. Jordan (2003). Latent Dirichlet allocation. Journal of Machine
Learning Research 3, 993–1022.
```
literature_fname = os.path.join(DATA_DIR, 'childrens_lit.csv.bz2')
df_lit = pd.read_csv(literature_fname, sep='\t', encoding = 'utf-8', compression = 'bz2', index_col=0)
#drop rows where the text is missing
df_lit = df_lit.dropna(subset=['text'])
df_lit.head()
```
Now we're ready to fit the model. This requires the use of CountVectorizer, which we've already used, and the scikit-learn function LatentDirichletAllocation.
See [here](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.LatentDirichletAllocation.html) for more information about this function.
First, we have to import it from sklearn.
```
from sklearn.decomposition import LatentDirichletAllocation
```
In sklearn, the input to LDA is a DTM (with either counts or TF-IDF scores).
```
tfidf_vectorizer = TfidfVectorizer(max_df=0.80, min_df=50,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(df_lit['text'])
tf_vectorizer = CountVectorizer(max_df=0.80, min_df=50,
stop_words='english'
)
tf = tf_vectorizer.fit_transform(df_lit['text'])
```
This is where we fit the model.
```
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
lda = LatentDirichletAllocation(n_topics=10, max_iter=20, random_state=0)
lda = lda.fit(tf)
```
This is a function to print out the top words for each topic in a pretty way. Don't worry too much about understanding every line of this code.
```
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("\nTopic #{}:".format(topic_idx))
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, 20)
```
### Challenge
Modify the script above to:
* increase the number of topics
* increase the number of printed top words per topic
* fit the model to the tf-idf matrix instead of the tf one
## Topic weights
One thing we may want to do with the output is compare the prevalence of each topic across documents. A simple way to do this (but not memory efficient), is to merge the topic distribution back into the Pandas dataframe.
First get the topic distribution array.
```
topic_dist = lda.transform(tf)
topic_dist
```
Merge back with original dataframe
```
topic_dist_df = pd.DataFrame(topic_dist)
df_w_topics = topic_dist_df.join(df_lit)
df_w_topics
```
Now we can check the average weight of each topic across gender using `groupby`.
```
grouped = df_w_topics.groupby('author gender')
grouped[0].mean().sort_values(ascending=False)
```
## LDA as dimensionality reduction
Now that we have obtained a distribution of topic weights for each document, we can represent our corpus with a dense document-weight matrix as opposed to our initial sparse DTM. The weights can then replace tokens as features for any subsequent task (classification, prediction, etc). A simple example may consist in measuring cosine similarity between documents. For instance, which book is closest to the first book in our corpus? Let's use pairwise cosine similarity to find out.
NB: cosine similarity measures the angle between two vectors, which provides a measure of distance robust to vectors of different lengths (total number of tokens).
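Concretely, for two vectors $x$ and $y$ the measure is $\cos(x, y) = \frac{x \cdot y}{\|x\|\,\|y\|} = \frac{\sum_j x_j y_j}{\sqrt{\sum_j x_j^2}\,\sqrt{\sum_j y_j^2}}$, so only the direction of the vectors matters, not their overall magnitude (i.e., document length).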
First, let's turn the DTM into a readable dataframe.
```
dtm = pd.DataFrame(tf_vectorizer.fit_transform(df_lit['text']).toarray(), columns=tf_vectorizer.get_feature_names(), index = df_lit.index)
```
Next let's import the cosine_similarity function from sklearn and print the cosine similarity between the first and second book or the first and third book.
```
from sklearn.metrics.pairwise import cosine_similarity
print("Cosine similarity between first and second book: " + str(cosine_similarity(dtm.iloc[0,:], dtm.iloc[1,:])))
print("Cosine similarity between first and third book: " + str(cosine_similarity(dtm.iloc[0,:], dtm.iloc[2,:])))
```
What if we use the topic weights instead of word frequencies?
```
dwm = df_w_topics.iloc[:,:10]
print("Cosine similarity between first and second book: " + str(cosine_similarity(dwm.iloc[0,:], dwm.iloc[1,:])))
print("Cosine similarity between first and third book: " + str(cosine_similarity(dwm.iloc[0,:], dwm.iloc[2,:])))
```
### Challenge
Calculate the cosine similarity between the first book and all other books to identify the most similar one.
### Further resources
[This blog post](https://de.dariah.eu/tatom/feature_selection.html) goes through finding distinctive words using Python in more detail
Paper: [Fightin’ Words: Lexical Feature Selection and Evaluation for Identifying the Content of Political Conflict](http://languagelog.ldc.upenn.edu/myl/Monroe.pdf), Burt Monroe, Michael Colaresi, Kevin Quinn
[Topic modeling with Textacy](https://github.com/repmax/topic-model/blob/master/topic-modelling.ipynb)
# Air Quality Predictions with Amazon SageMaker and Amazon EMR
This notebook demonstrates the ability to use Apache Spark on Amazon EMR to do data prep with two different datasets in order to build an urban air quality predictor with Amazon SageMaker.
To create the environment, use the `us-east-1` CloudFormation template from the [Create and Managed Amazon EMR Clusters from SageMaker Studio](https://aws.amazon.com/blogs/machine-learning/part-1-create-and-manage-amazon-emr-clusters-from-sagemaker-studio-to-run-interactive-spark-and-ml-workloads/) blog post. This notebook makes use of the approach demonstrated in the blog post about how to [Build a model to predict the impact of weather on urban air quality using Amazon SageMaker](https://aws.amazon.com/blogs/machine-learning/build-a-model-to-predict-the-impact-of-weather-on-urban-air-quality-using-amazon-sagemaker/) and combines data from these two open datasets:
- [OpenAQ physical air quality data](https://registry.opendata.aws/openaq/)
- [NOAA Global Surface Summary of Day](https://registry.opendata.aws/noaa-gsod/)
**Note: This notebook was written for Spark3 (running on EMR6+)**
Before we get started - we need to upgrade the version of pandas we use as there is a [minor version conflict with numpy](https://github.com/numpy/numpy/issues/18355). Run the cell below, restart the kernel, and run the next cell to validate the version of pandas is `1.0.5`.
```
%%local
%pip install pandas==1.0.5
%%local
import pandas as pd
print(pd.__version__)
```
Next, we use the `sagemaker_studio_analytics_extension` to connect to the EMR cluster that we created using the "Clusters" section under the "SageMaker resources" tab on the left.
```
# %load_ext sagemaker_studio_analytics_extension.magics
# %sm_analytics emr connect --cluster_id j-xxxxxxxxxxxx --auth-type None
```
When you first connect to the cluster, the extension prints out your YARN Application ID and a link you can use to start the Spark UI.
If you need to fetch the link again, you can always use the `%%info` magic.
```
%%info
```
## Part 1: Data Prep in Amazon EMR
In the cells below, we're going to perform the following operations:
- Use Spark on the EMR cluster to read our data from the OpenAQ S3 Bucket.
- Filter the available data to Seattle and NO2 readings (indicative of air quality).
- Group the readings by day.
- Export the aggregate dataset to a local Pandas dataframe in the notebook.
```
df = spark.read.json("s3://openaq-fetches/realtime-gzipped/2022-01-05/1641409725.ndjson.gz")
df2 = spark.read.schema(df.schema).json("s3://openaq-fetches/realtime-gzipped/20*")
df2.head()
from pyspark.sql.functions import split, lower
dfSea = df2.filter(lower((df2.city)).contains('seattle')).filter(df2.parameter == "no2").withColumn("year", split(df2.date.utc, "-", 0)[0]).cache()
dfSea.show(truncate=False)
from pyspark.sql.functions import to_date
dfNoAvg = dfSea.withColumn("ymd", to_date(dfSea.date.utc)).groupBy("ymd").avg("value").withColumnRenamed("avg(value)", "no2_avg")
dfNoAvg.show()
```
While this is running, you can click the Spark UI link mentioned above to debug your job while it's running. Some useful pages to check out:
- "Jobs" page shows you the current status of your job/task
- "Event Timeline" on the Jobs page shows Spark Executors starting up or shutting down
- The "Executors" tab shows you how many Executors are started, what the capacity is of each, and allows you to drill into logs
Here, you could also experiment with the `dfSea` dataframe, as it is cached. The command below should execute within a matter of seconds.
```
%%spark -o dfNoAvg
```
## Part 2: Bring Spark results into SageMaker Studio
With the `%%spark -o` command above, we took the `dfNoAvg` dataframe from Spark and made it available in the `%%local` Python context as a Pandas dataframe with the same name. Now we can use local libraries to explore the data as well.
```
%matplotlib inline
import pandas as pd
from datetime import datetime
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [15, 5]
dfNoAvg.plot(x='ymd')
plt.ylabel('NO2 Conc. ppm')
plt.xlabel('Daily Average')
plt.show()
%%local
# There are some gaps in 2017 and 2018 we need to fill
dfNoAvg = dfNoAvg.set_index('ymd')
dfNoAvg.loc['2018-10-11':'2018-11-21']['no2_avg'].head(50)
%%local
# Fill in the date index first
idx = pd.date_range(dfNoAvg.index.min(), dfNoAvg.index.max())
dfNoAvg = dfNoAvg.reindex(idx, fill_value=None)
dfNoAvg.loc['2018-10-11':'2018-10-25']['no2_avg'].head(10)
%%local
# Then interpolate the values that are missing
dfNoAvg = dfNoAvg.interpolate(method='time')
dfNoAvg.loc['2018-10-11':'2018-10-20']['no2_avg'].head(10)
%%local
year_min, year_max = [f"{dfNoAvg.index.min().year}", f"{dfNoAvg.index.max().year}"]
year_min, year_max
%%send_to_spark -i year_min
%%send_to_spark -i year_max
```
## Part 3: Data Prep in Amazon EMR with the second dataset
Now that our first dataset looks good, we used the `%%send_to_spark` magic above to send the start and stop years we want to read data for back to the Spark driver on EMR. We can use those variables to limit the data we want to read.
## And now the weather
```
from pyspark.sql.types import DoubleType
from pyspark.sql import functions as F
# Scope to Seattle, WA, USA
longLeft, latBottom, longRight, latTop = [-122.459696,47.481002,-122.224433,47.734136]
dfSchema = spark.read.csv("s3://noaa-gsod-pds/2022/32509099999.csv", header=True, inferSchema=True)
# We read our first year, then union the rest of the years :)
def read_year(year):
return spark.read.csv(f"s3://noaa-gsod-pds/{year}/", header=True, schema=dfSchema.schema)
year_range = range(int(year_min), int(year_max)+1)
df = read_year(year_range[0])
for year in year_range[1:]:
df = df.union(read_year(year))
df = df \
.withColumn('LATITUDE', df.LATITUDE.cast(DoubleType())) \
.withColumn('LONGITUDE', df.LONGITUDE.cast(DoubleType()))
seadf = df \
.filter(df.LATITUDE >= latBottom) \
.filter(df.LATITUDE <= latTop) \
.filter(df.LONGITUDE >= longLeft) \
.filter(df.LONGITUDE <= longRight)
# Rename columns so they're easier to read
seafeatures = seadf.selectExpr("Date as date", "MAX as temp_max", "MIN as temp_min", "WDSP as wind_avg", "SLP as pressure_sea_level", "STP as pressure_station", "VISIB as visibility")
# Remove invalid readings
no_data_mappings = [
["temp_max", 9999.9],
["temp_min", 9999.9],
["wind_avg", 999.9],
["pressure_sea_level", 9999.9],
["pressure_station", 9999.9],
["visibility", 999.9],
]
for [name, val] in no_data_mappings:
seafeatures = seafeatures.withColumn(name, F.when(F.col(name)==val, None).otherwise(F.col(name)))
# Now average each reading per day
seafeatures = seafeatures.groupBy("date").agg(*[F.mean(c).alias(c) for c in seafeatures.columns[1:]])
%%spark -o seafeatures
```
## Part 4: Data Analysis in SageMaker Studio
We again use the `%%spark -o` magic to send the aggregate back to SageMaker so we can do some exploration.
One thing to note is that you can certainly do some of this exploration with Spark as well. It just depends on the use case and the size of your data. Because we've aggregated our data down to a few thousand rows, it's relatively easy to manage in the notebook. But if you're unable to do this, you can still use Spark to split your training/test datasets or do other aggregations and write the results out to S3.
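For example, if the aggregate were too large to pull into the notebook, a Spark cell like the sketch below could persist it to S3 instead (the bucket and prefix are placeholders, not resources created by this notebook):
```
# Hypothetical example: write the aggregated weather features to S3 as Parquet
output_path = "s3://<your-bucket>/air-quality/seafeatures/"   # placeholder location

seafeatures.write.mode("overwrite").parquet(output_path)

# Later, any Spark session (or a training job) could read them back with:
# spark.read.parquet(output_path)
```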
```
%%local
seafeatures.plot(x='date', y=['temp_min', 'temp_max'])
plt.ylabel('Max Temp (F)')
plt.xlabel('Daily Average')
plt.show()
%%local
seafeatures.plot(x='date', y=['wind_avg'])
plt.ylabel('Average Wind (mph)')
plt.xlabel('Daily Average')
plt.show()
%%local
seafeatures.plot(x='date', y=['visibility'])
plt.ylabel('Visibility (miles)')
plt.xlabel('Daily Average')
plt.show()
```
## Part 5: Marry the data
Now that we've taken a quick look at our data and done some initial exploration, let's merge the two datasets.
```
%%local
print(dfNoAvg)
seafeaturesi = seafeatures.set_index('date').sort_index()
print(seafeaturesi)
%%local
# We need to make sure the data frames line up, so we'll create new
# dataframes from the min and max of the existing ones.
min_viable_date = max(dfNoAvg.index.min(), seafeaturesi.index.min())
max_viable_date = min(dfNoAvg.index.max(), seafeaturesi.index.max())
print(f"Merging dataframes between {min_viable_date} and {max_viable_date}")
comp_df = pd.merge(
seafeaturesi[min_viable_date:max_viable_date],
dfNoAvg[min_viable_date:max_viable_date][['no2_avg']],
left_index=True, right_index=True
)
print(comp_df.sort_index().head(20))
%%local
# Check some data we looked into previously
print(comp_df.loc['2018-10-11':'2018-10-20'].sort_index())
comp_df = comp_df.sort_index()
```
Now that we've merged them, we can do some quick correlation tests to see what the impact is of different weather events on NO2 readings.
Please see the [aforementioned blog post](https://aws.amazon.com/blogs/machine-learning/build-a-model-to-predict-the-impact-of-weather-on-urban-air-quality-using-amazon-sagemaker/) for more in-depth explanations of these different charts.
```
%%local
mydata = comp_df[['wind_avg','no2_avg']]
x = mydata['wind_avg']
y = mydata['no2_avg']
plt.scatter(x, y)
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x,p(x),"r--")
plt.ylabel('NO2 Conc. ppm')
plt.xlabel('Wind Speed (mph)')
plt.show()
%%local
mydata = comp_df[['visibility','no2_avg']].dropna()
x = mydata['visibility']
y = mydata['no2_avg']
plt.scatter(x, y)
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x,p(x),"r--")
plt.ylabel('NO2 Conc. ppm')
plt.xlabel('Visibility (miles)')
plt.show()
plt.show()
%%local
from datetime import timedelta
comp_df = comp_df.sort_index()
comp_df['no2_avg_prev'] = comp_df["no2_avg"].shift(1)
mydata = comp_df[['no2_avg_prev','no2_avg']]
start_date = comp_df.index.min() + timedelta(days=1)
end_date = comp_df.index.max() + timedelta(days=-1)
mydata = mydata[start_date:end_date]
x = mydata['no2_avg_prev']
y = mydata['no2_avg']
plt.scatter(x, y)
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x,p(x),"r--")
plt.ylabel('no2_avg')
plt.xlabel('no2_avg_prev')
plt.show()
%%local
cor_cols = ['temp_max', 'temp_min', 'wind_avg','pressure_sea_level','visibility','no2_avg_prev', 'no2_avg']
fig = plt.figure(figsize=(10,10))
im = plt.matshow(comp_df.loc[:, cor_cols].corr(), fignum=0)
fig.colorbar(im)
plt.xticks(range(len(cor_cols)), cor_cols)
plt.yticks(range(len(cor_cols)), cor_cols)
plt.show()
%%local
# Drop the 1st row as NaN
aq_df = comp_df.iloc[1:].copy()
# Drop visibility as it didn't seem to correlate much and has NaNs that break the training
aq_df = aq_df.drop('visibility', 1)
# Use the data from years 2016 up to 2020 as training, and the year 2021 as our candidate year for testing and validating our model.
aq_train_df = aq_df[aq_df.index.year < 2021]
aq_test_df = aq_df[aq_df.index.year == 2021]
x_train = aq_train_df.drop('no2_avg',1)
x_test = aq_test_df.drop('no2_avg',1)
y_train = aq_train_df[["no2_avg"]]
y_test = aq_test_df[["no2_avg"]]
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
print(x_train.head())
%%local
from math import sqrt
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score
# sMAPE is used in KDD Air Quality challenge: https://biendata.com/competition/kdd_2018/evaluation/
def smape(actual, predicted):
dividend= np.abs(np.array(actual) - np.array(predicted))
denominator = np.array(actual) + np.array(predicted)
return 2 * np.mean(np.divide(dividend, denominator, out=np.zeros_like(dividend), where=denominator!=0, casting='unsafe'))
def print_metrics(y_test, y_pred):
print("RMSE: %.4f" % sqrt(mean_squared_error(y_test, y_pred)))
print('Variance score: %.4f' % r2_score(y_test, y_pred))
print('Explained variance score: %.4f' % explained_variance_score(y_test, y_pred))
forecast_err = np.array(y_test) - np.array(y_pred)
print('Forecast bias: %.4f' % (np.sum(forecast_err) * 1.0/len(y_pred) ))
print('sMAPE: %.4f' % smape(y_test, y_pred))
%%local
import boto3
from sagemaker import get_execution_role, session
sess = session.Session()
bucket = sess.default_bucket()
# This is used to run the LinearLearner training job
role = get_execution_role()
```
## Part 6: Train and Deploy a Machine Learning Model
In the section below, we create a new training job using the Linear Learner algorithm. Once that job completes, we deploy an endpoint and run some validation tests against it.
💁
**NOTE**: You only need to create this training job and deploy it once. You can use the same endpoint, even in future runs of this notebook, without re-training or re-deploying.
💁
```
%%local
from sagemaker import LinearLearner
data_location = f's3://{bucket}/aq-linearlearner/data/train'
output_location = f's3://{bucket}/aq-linearlearner/output'
llearner = LinearLearner(role=role,
predictor_type='regressor',
normalize_data=True,
normalize_label=True,
instance_count=1,
instance_type='ml.c5.xlarge',
output_path=output_location,
data_location=data_location)
%%local
llearner.fit([
llearner.record_set(x_train.values.astype('float32'), y_train.values[:, 0].astype('float32'), channel='train'),
llearner.record_set(x_test.values.astype('float32'), y_test.values[:, 0].astype('float32'), channel='test')
])
```
### Create our estimator
```
%%local
llearner_predictor = llearner.deploy(initial_instance_count=1,
instance_type='ml.t2.medium')
%%local
result = llearner_predictor.predict(x_test.values.astype('float32'))
y_sm_pred = [r.label["score"].float32_tensor.values[0] for r in result]
y_sm_test = y_test.values[:, 0].astype('float32')
print_metrics(y_sm_test, y_sm_pred)
%%local
y_sm_pred_df = pd.DataFrame(y_sm_pred, columns=y_train.columns).set_index(y_test.index).sort_index()
y_sm_test_df = pd.DataFrame(y_sm_test, columns=y_train.columns).set_index(y_test.index).sort_index()
plt.plot(y_sm_test_df, label='actual')
plt.plot(y_sm_pred_df, label='forecast')
plt.legend()
plt.show()
%%local
endpoint_name = llearner_predictor.endpoint_name
```
### Reuse an existing estimator
```
%%local
# The endpoint can take a while to create, so we'll use a previously created one.
# Can specify if there is an existing endpoint
# endpoint_name = ""
from sagemaker import LinearLearnerPredictor
llearner_predictor = LinearLearnerPredictor(endpoint_name)
result = llearner_predictor.predict(x_test.values.astype('float32'))
y_sm_pred = [r.label["score"].float32_tensor.values[0] for r in result]
y_sm_test = y_test.values[:, 0].astype('float32')
print_metrics(y_sm_test, y_sm_pred)
%%local
y_sm_pred_df = pd.DataFrame(y_sm_pred, columns=y_train.columns).set_index(y_test.index).sort_index()
y_sm_test_df = pd.DataFrame(y_sm_test, columns=y_train.columns).set_index(y_test.index).sort_index()
plt.plot(y_sm_test_df, label='actual')
plt.plot(y_sm_pred_df, label='forecast')
plt.legend()
plt.show()
```
## Clean Up
```
%%cleanup -f
%%local
llearner_predictor.delete_endpoint()
```
# Hypothesis Testing of Human Height Data
In this lab, you will learn how to use Python 3 to perform and understand the basics of hypothesis testing. Hypothesis testing is widely used: anytime you are trying to determine if a parameter or relationship is statistically significant, you can perform a hypothesis test.
In this lab you will explore and perform hypothesis tests on a famous data set collected by Francis Galton, who invented the regression method. Galton collected these data from families living in late 19th century London. Galton published his famous paper in 1885, showing that the heights of adult children regressed to the mean of the population, regardless of the heights of the parents. From this seminal study we get the term regression in statistics.
## Exercise 1. Explore the data
In this first exercise you will load the Galton data set. You will then explore differences between some of the variables in these data using some simple visualization techniques.
****
**Note:** Data visualization is covered in subsequent modules of this course.
### Load and examine the data set
Execute the code in the cell below to load the Galton data set.
```
from azureml import Workspace
ws = Workspace()
ds = ws.datasets['GaltonFamilies.csv']
galton = ds.to_dataframe()
```
With the data loaded, you can examine the first few rows by executing the code in the cell below:
```
galton.head()
```
This data set has 9 features:
1. A case or row number.
2. A unique code for each family in the sample.
3. The height of the father in inches.
4. The height of the mother in inches.
5. The average height of the parents.
6. The number of children in the family.
7. A code for each unique child in the family.
8. The gender of the child.
9. The height of the adult child in inches.
Execute the code in the cell below to determine the number of cases in this data set.
```
galton.shape
```
There are a total of 934 cases, or children, in the sample comprising this data set.
### Visualizing some relationships in these data
To develop a better understanding of some of the relationships in these data you will create and compare some histograms of some of the variables.
The code in the cell below creates a pair of histograms to compare the distributions of two variables. The histograms are plotted on the same horizontal scale to aid in comparison. A red line is plotted at the mean value of each variable.
Execute the code in the cell below to plot a pair of histograms comparing the height of mothers to the height of their sons. You can safely ignore any warnings about building a font cache.
```
%matplotlib inline
def hist_family(df, col1, col2, num_bins = 30):
import matplotlib.pyplot as plt
## Setup for ploting two charts one over the other
fig, ax = plt.subplots(2, 1, figsize = (12,8))
mins = min([df[col1].min(), df[col2].min()])
maxs = max([df[col1].max(), df[col2].max()])
mean1 = df[col1].mean()
mean2 = df[col2].mean()
## Plot the histogram
temp = df[col1].as_matrix()
ax[1].hist(temp, bins = 30, alpha = 0.7)
ax[1].set_xlim([mins, maxs])
ax[1].axvline(x=mean1, color = 'red', linewidth = 4)
ax[1].set_ylabel('Count')
ax[1].set_xlabel(col1)
## Plot the histogram
temp = df[col2].as_matrix()
ax[0].hist(temp, bins = 30, alpha = 0.7)
ax[0].set_xlim([mins, maxs])
ax[0].axvline(x=mean2, color = 'red', linewidth = 4)
ax[0].set_ylabel('Count')
ax[0].set_xlabel(col2)
return [col1, col2]
sons = galton[galton.gender == 'male']
hist_family(sons, 'childHeight', 'mother')
```
Examine these histograms and note the following:
- The distributions of the height of the mothers and their sons have a fair degree of overlap.
- The mean height of the sons is noticeably greater than the mothers.
Next you will compare the heights of mothers to the heights of their daughters.
```
daughters = galton[galton.gender == 'female']
hist_family(daughters, 'childHeight', 'mother')
```
Examine these histograms and note the following:
- The distributions of the height of the mothers and their daughters overlap almost entirely.
- The mean height of the daughters is nearly the same as the mothers.
In summary, it appears that sons are usually taller than their mothers, whereas the height of daughters does not appear to be much different from their mothers. But how valid is this conclusion statistically?
## Apply t test
Now that you have examined some of the relationships between the variables in these data, you will apply formal hypothesis testing. In hypothesis testing, a null hypothesis is tested against a statistic. The null hypothesis is simply that the difference is not significant. Depending on the value of the test statistic, you can reject or fail to reject the null hypothesis.
In this case, you will use the two-sided t-test to determine if the means of two variables are significantly different. The null hypothesis is that there is no significant difference between the means. There are multiple criteria used to interpret the test results. You will determine if you can reject the null hypothesis based on the following criteria:
- Selecting a **confidence level** of **5%** or **0.05**.
- Determine if the t-statistic for the degrees of freedom is greater than the **critical value**. The difference in means of Normally distributed variables follows a t-distribution. A large t-statistic indicates that a difference in means this large is unlikely to have occurred by chance alone.
- Determine if the P-value is less than the **confidence level**. A small P-value indicates that the probability of the difference of the means being this extreme by chance alone is small.
- The **confidence interval** around the difference of the means does not overlap with **0**. If the **confidence interval** is far from **0** this indicates that the difference in means is unlikely to include **0**.
Based on these criteria you will either reject or fail to reject the null hypothesis. However, rejecting the null hypothesis should not be confused with accepting the alternative; it simply means the null is not a good hypothesis.
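For intuition, the **critical value** and P-value for a given number of degrees of freedom can be computed directly with scipy (a standalone sketch; the degrees of freedom and t-statistic here match the mothers vs. sons comparison further below):
```
from scipy import stats

alpha = 0.05        # two-sided test at the 5% level
deg_free = 945      # degrees of freedom for the mothers vs. sons test

# Critical value: |t| must exceed this to reject the null hypothesis
critical_value = stats.t.ppf(1 - alpha / 2, deg_free)
print(critical_value)   # approximately 1.96 for large degrees of freedom

# P-value for an observed t-statistic of -39.5
t_observed = -39.5
p_value = 2 * stats.t.cdf(-abs(t_observed), deg_free)
print(p_value)          # effectively 0
```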
The **family_test** function in the cell below uses the **CompareMeans** class from the statsmodels **weightstats** module to compute the two-sided t statistics. The **hist_family_conf** function calls the **family_test** function and plots the results. Execute this code to compute and display the results.
```
def family_test(df, col1, col2, alpha):
from scipy import stats
import scipy.stats as ss
import pandas as pd
import statsmodels.stats.weightstats as ws
n, _, diff, var, _, _ = stats.describe(df[col1] - df[col2])
degfree = n - 1
temp1 = df[col1].as_matrix()
temp2 = df[col2].as_matrix()
res = ss.ttest_rel(temp1, temp2)
means = ws.CompareMeans(ws.DescrStatsW(temp1), ws.DescrStatsW(temp2))
confint = means.tconfint_diff(alpha=alpha, alternative='two-sided', usevar='unequal')
degfree = means.dof_satt()
index = ['DegFreedom', 'Difference', 'Statistic', 'PValue', 'Low95CI', 'High95CI']
return pd.Series([degfree, diff, res[0], res[1], confint[0], confint[1]], index = index)
def hist_family_conf(df, col1, col2, num_bins = 30, alpha =0.05):
import matplotlib.pyplot as plt
## Setup for ploting two charts one over the other
fig, ax = plt.subplots(2, 1, figsize = (12,8))
mins = min([df[col1].min(), df[col2].min()])
maxs = max([df[col1].max(), df[col2].max()])
mean1 = df[col1].mean()
mean2 = df[col2].mean()
tStat = family_test(df, col1, col2, alpha)
pv1 = mean2 + tStat[4]
pv2 = mean2 + tStat[5]
## Plot the histogram
temp = df[col1].as_matrix()
ax[1].hist(temp, bins = 30, alpha = 0.7)
ax[1].set_xlim([mins, maxs])
ax[1].axvline(x=mean1, color = 'red', linewidth = 4)
ax[1].axvline(x=pv1, color = 'red', linestyle='--', linewidth = 4)
ax[1].axvline(x=pv2, color = 'red', linestyle='--', linewidth = 4)
ax[1].set_ylabel('Count')
ax[1].set_xlabel(col1)
## Plot the histogram
temp = df[col2].as_matrix()
ax[0].hist(temp, bins = 30, alpha = 0.7)
ax[0].set_xlim([mins, maxs])
ax[0].axvline(x=mean2, color = 'red', linewidth = 4)
ax[0].set_ylabel('Count')
ax[0].set_xlabel(col2)
return tStat
hist_family_conf(sons, 'mother', 'childHeight')
```
##### Examine the printed table of results and the charts noting the following:
- The difference of the means is 5.2 inches. You can see this difference graphically by comparing the positions of the solid red lines showing the means of the two distributions.
- The **critical value** of the two-sided t-statistic at 945 degrees of freedom is **1.96**. The t-statistic of -39.5 is larger than this **critical value**.
- The P-value is effectively 0, which is smaller than the **confidence level** of 0.05.
- The 95% **confidence interval** of the difference in means is from -4.9 to -5.5, which does not overlap 0. You can see the confidence interval plotted as the two dashed red lines in the lower chart shown above. This **confidence interval** around the mean of the mother's heights does not overlap with the mean of the son's height.
Overall, these statistics indicate you can reject the null hypothesis; that is, the difference in the means is not **0**.
```
hist_family_conf(daughters, 'mother', 'childHeight')
```
Examine the printed table of results, which is quite different from the test of the heights of mothers vs. sons. Examine the statistics and charts, noting the following:
- The difference of the means is only 0.04 inches. You can see this small difference graphically by comparing the positions of the solid red lines showing the means of the two distributions.
- The **critical value** of the two-sided t-statistic at 480 degrees of freedom is **1.96**. The t-statistic of 0.35 is smaller than this **critical value**.
- The P-value is 0.73, which is larger than the **confidence level** of 0.05.
- The 95% **confidence interval** of the difference is from -0.26 to 0.35, which overlaps 0. You can see the confidence interval plotted as the two dashed red lines in the lower chart shown above. This **confidence interval** around the mean of the mothers' heights does overlap the mean of the daughters' height.
Overall, these statistics indicate you cannot reject the null hypothesis that there is no significant difference in the means.
**Evaluation question**
You have found that you could not reject the null hypothesis that there was no significant difference between the heights of mothers and their adult daughters. But what about the difference in height between fathers and their adult daughters? Perform the t-test on the Galton data set to answer the question below:
- Can you reject the null hypothesis that there is no significant difference in the heights of fathers and their adult daughters?
```
hist_family_conf(daughters, 'father', 'childHeight')
```
# Cloud APIs for Computer Vision: Up and Running in 15 Minutes
This code is part of [Chapter 8- Cloud APIs for Computer Vision: Up and Running in 15 Minutes ](https://learning.oreilly.com/library/view/practical-deep-learning/9781492034858/ch08.html).
## Compile Results for Image Tagging
In this file we will compile the results using the ground truth and the collected data for all the test images. You will need to edit the following:
1. Please edit `data_path` with the path to the test images that have been used for the experiments.
2. If you used different filenames for the prediction filenames, please edit the filenames accordingly.
3. Please download the pretrained Word2Vec vectors (used via Gensim), which we will be using for comparing word similarity between the ground truth and the predicted classes. Unzip and place the `GoogleNews-vectors-negative300.bin` within `data_path`. Download at: https://github.com/mmihaltz/word2vec-GoogleNews-vectors
Let's start by loading the ground truth JSON file.
```
data_path = "/home/deepvision/production/code/chapter-8/image-tagging/data-may-2020"
validation_images_path = data_path + "/val2017"
import json
with open(data_path + "/final-ground-truth-tags.json") as json_file:
ground_truth = json.load(json_file)
# helper functions to get image name from image id and converse.
def get_id_from_name(name):
return int(name.split("/")[-1].split(".jpg")[0])
def get_name_from_id(image_id):
filename = validation_images_path + \
"/000000" + str(image_id) + ".jpg"
return filename
# Class ids to their string equivalent
with open(data_path + '/class-id-to-name.json') as f:
class_id_to_name = json.load(f)
```
## Helper functions
```
def convert_class_id_to_string(l):
result = []
for class_id in l:
result.append(class_id_to_name[str(class_id)])
return result
def parse(l):
l1 = []
for each in l:
if len(each) >= 2:
l1.append(each.lower())
return l1
def get_class_from_prediction(l):
return list([item[0] for item in l])
```
We now load the pretrained Word2Vec vectors with Gensim, which we will use to compare word similarity between the ground truth and the predicted classes.
```
import gensim
from gensim.models import Word2Vec
model = gensim.models.KeyedVectors.load_word2vec_format(data_path +
'/GoogleNews-vectors-negative300.bin', binary=True)
def check_gensim(word, pred):
# get similarity between word and all predicted words in returned predictions
similarity = 0
for each_pred in pred:
# check if returned prediction exists in the Word2Vec model
if each_pred not in model:
continue
current_similarity = model.similarity(word, each_pred)
#print("Word=\t", word, "\tPred=\t", each_pred, "\tSim=\t", current_similarity)
if current_similarity > similarity:
similarity = current_similarity
return similarity
```
### Parsing
Each cloud provider returns its results in a slightly different format, and we need to parse each of them correctly. So, we will write a parsing function specific to each cloud provider.
#### Microsoft Specific Parsing
```
def microsoft_name(image_id):
return "000000" + str(image_id) + ".jpg"
def parse_microsoft_inner(word):
b = word.replace("_", " ")
c = b.lower().strip().split()
return c
def parse_microsoft_response_v1(l):
result = []
b = ""
for each in l["categories"]:
a = each["name"]
result.extend(parse_microsoft_inner(a))
for each in l["tags"]:
a = each["name"]
result.extend(parse_microsoft_inner(a))
if "hint" in each:
a = each["hint"]
result.extend(parse_microsoft_inner(a))
return list(set(result))
def parse_microsoft_response(l):
result = []
b = ""
for each in l:
result.extend(parse_microsoft_inner(each[0]))
return list(set(result))
```
#### Amazon Specific Parsing
```
def parse_amazon_response(l):
result = []
for each in l:
result.append(each.lower())
return list(set(result))
```
#### Google specific parsing
```
def parse_google_response(l):
l1 = []
for each in l:
l1.append(each[0].lower())
if len(each[0].split()) > 1:
l1.extend(each[0].split())
return l1
```
The `threshold` defines how similar two words (the ground truth and a predicted category name) need to be according to Word2Vec for the prediction to count as a correct prediction. You can play around with the `threshold`.
```
threshold = .3
def calculate_score(ground_truth, predictions, arg):
total = 0
correct = 0
avg_ground_truth_length = 0
avg_amazon_length = 0
avg_microsoft_length = 0
avg_google_length = 0
for each in ground_truth.keys():
pred = []
gt = list(set(convert_class_id_to_string(ground_truth[each])))
if gt == None or len(gt) < 1:
continue
total += len(gt)
avg_ground_truth_length += len(gt)
if arg == "google" and get_name_from_id(each) in predictions:
pred = predictions[get_name_from_id(each)]
if pred == None or len(pred) <= 0:
continue
pred = parse_google_response(predictions[get_name_from_id(each)])
avg_google_length += len(pred)
elif arg == "microsoft" and microsoft_name(each) in predictions:
pred = predictions[microsoft_name(each)]
if pred == None or len(pred) <= 0:
continue
pred = parse_microsoft_response(predictions[microsoft_name(each)])
avg_microsoft_length += len(pred)
elif arg == "amazon" and get_name_from_id(each) in predictions:
pred = predictions[get_name_from_id(each)]
if pred == None or len(pred) <= 0:
continue
pred = parse_amazon_response(predictions[get_name_from_id(each)])
avg_amazon_length += len(pred)
match = 0
match_word = []
for each_word in gt:
# Check if ground truth exists "as is" in the entire list of predictions
if each_word in pred:
correct += 1
match += 1
match_word.append(each_word)
# Also, ensure that ground truth exists in the Word2Vec model
elif each_word not in model:
continue
# Otherwise, check for similarity between the ground truth and the predictions
elif check_gensim(each_word, pred) >= threshold:
correct += 1
match += 1
match_word.append(each_word)
if arg == "google":
print("Google's Stats\nTotal number of tags returned = ", avg_google_length,
"\nAverage number of tags returned per image = ",
avg_google_length * 1.0 / len(ground_truth.keys()))
elif arg == "amazon":
print("Amazon's Stats\nTotal number of tags returned = ", avg_amazon_length,
"\nAverage number of tags returned per image = ",
avg_amazon_length * 1.0 / len(ground_truth.keys()))
elif arg == "microsoft":
print("Microsoft's Stats\nTotal number of tags returned = ",
avg_microsoft_length, "\nAverage number of tags returned per image = ",
avg_microsoft_length * 1.0 / len(ground_truth.keys()))
print("\nGround Truth Stats\nTotal number of Ground Truth tags = ", total,
"\nTotal number of correct tags predicted = ", correct)
print("\nScore = ", float(correct) / float(total))
```
Now, we are ready to load the predictions that we obtained by using APIs!
```
# Google
with open(data_path + '/google-tags.json') as f:
google = json.load(f)
# Get Google Score
calculate_score(ground_truth, google, "google")
```
**Note**: Microsoft's API for object classification has two versions, and the two versions return different results.
If you want to check out Microsoft's outdated (v1) API, use the `microsoft_tags.json` file. We will be using the latest version (i.e., `microsoft_tags_DESCRIPTION.json`) for our November 2019 experiments.
```
# Microsoft
with open(data_path + '/microsoft-tags.json') as f:
microsoft = json.load(f)
# Get Microsoft score
calculate_score(ground_truth, microsoft, "microsoft")
# Amazon
with open(data_path + '/amazon-tags.json') as f:
amazon = json.load(f)
# Get Amazon score
calculate_score(ground_truth, amazon, "amazon")
```
```
# HIDDEN
# The standard set of libraries we need
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Make plots look a little bit more fancy
plt.style.use('fivethirtyeight')
# The standard library for data in tables
import pandas as pd
# A tiny function to read a file directly from a URL
from urllib.request import urlopen
def read_url(url):
return urlopen(url).read().decode()
# HIDDEN
# Read the text of Pride and Prejudice, split into chapters.
book_url = 'http://www.gutenberg.org/ebooks/42671.txt.utf-8'
book_text = read_url(book_url)
# Break the text into Chapters
book_chapters = book_text.split('CHAPTER ')
# Drop the first "Chapter" - it's the Project Gutenberg header
book_chapters = book_chapters[1:]
```
[Pride and Prejudice](https://en.wikipedia.org/wiki/Pride_and_Prejudice) is
the story of five sisters: Jane, Elizabeth, Mary, Kitty and Lydia, and their
journey through the social life of the early 19th century. You may remember
that Elizabeth ends up marrying the dashing and aloof Mr Darcy, but along the
way, the feckless Lydia runs off with the equally feckless Mr Wickham, and the
slightly useless Mr Bingley wants to marry Jane, the most beautiful of the
sisters.
We can see when these characters appear in the book, by counting how many
times their names are mentioned in each chapter.
```
# Count how many times the characters appear in each chapter.
counts = pd.DataFrame.from_dict({
'Elizabeth': np.char.count(book_chapters, 'Elizabeth'),
'Darcy': np.char.count(book_chapters, 'Darcy'),
'Lydia': np.char.count(book_chapters, 'Lydia'),
'Wickham': np.char.count(book_chapters, 'Wickham'),
'Bingley': np.char.count(book_chapters, 'Bingley'),
'Jane': np.char.count(book_chapters, 'Jane')},
)
# The cumulative counts:
# how many times in Chapter 1, how many times in Chapters 1 and 2, and so on.
cum_counts = counts.cumsum()
# Add the chapter numbers
number_of_chapters = len(book_chapters)
cum_counts['Chapter'] = np.arange(number_of_chapters)
# Do the plot
cum_counts.plot(x='Chapter')
plt.title('Cumulative Number of Times Each Name Appears');
```
In the plot above, the horizontal axis shows chapter numbers and the vertical
axis shows how many times each character has been mentioned up to and
including that chapter.
Notice first that Elizabeth and Darcy are the main characters. Around chapter
13 we see Wickham and Lydia spike up, as they run away together, and mentions
of Darcy flatten off, when he goes to look for them. Around chapter 50 we see
Jane and Bingley being mentioned at a very similar rate, as Bingley proposes,
and Jane accepts.
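As a quick numerical check on the plot, we can also look at the total number of mentions for each character, re-using the `counts` data frame built above:
```
# Total mentions of each character across the whole book
counts.sum().sort_values(ascending=False)
```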
{% data8page Literary_Characters %}
<a href="https://colab.research.google.com/github/papagorgio23/Python101/blob/master/Py_202_F%2B_Model_Answers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Installing Library
!pip install pydata_google_auth
# import base packages into the namespace for this program
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn import model_selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from yellowbrick.classifier import ROCAUC
from yellowbrick.classifier import ClassificationReport
```
# F+ Lead Scoring Model

## Get Data
```
# Using GBQ shout Out to Hughes
import pandas_gbq
import pydata_google_auth
SCOPES = [
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/drive',
]
# get credentials
credentials = pydata_google_auth.get_user_credentials(
SCOPES,
auth_local_webserver=False)
# GBQ
sql = """
SELECT
id
, co_app_verifiable_annual_income__c
, loan_use__c
, employment_status__c
, amount_of_loan_requested__c
, fico__c
, lti__c
, bcc0300__c
, ndi_ratio__c
, utm_source__c
, CASE WHEN date_funded__c IS NOT NULL THEN 1 ELSE 0 END AS Fund
FROM `freedom-dw.salesforce_ffam.application__c` a
WHERE createddate >= '2019-07-01'
AND a.loan_officer__c IS NOT NULL
"""
# run query
fplus_df = pandas_gbq.read_gbq(sql, project_id='ffn-dw-bigquery-prd', credentials=credentials, dialect='standard')
```
## View Data
```
# view top 5 observations
fplus_df.head()
# view bottom 5 observations
fplus_df.tail()
# view columns, data types
fplus_df.info()
# get summary statistics
fplus_df.describe()
# check NA
fplus_df.isna().sum()
```
# Investigate Variables
## 1) - Co App
We only care whether an application has a Co-App on it. We use `co_app_verifiable_annual_income__c` to tell us whether one is present.
<br>
* If NA then No Co App
* If Not NA then Co App
<br>
### View data
```
# drop na for co app and view distribution
co_app = fplus_df.dropna(subset=['co_app_verifiable_annual_income__c'])
co_app.info()
co_app.describe()
```
### Plot
```
# Cut the window in 2 parts
f, (ax_hist, ax_box) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (.85, .25)})
# Add a graph in each part
sns.boxplot(co_app['co_app_verifiable_annual_income__c'], ax=ax_box)
sns.distplot(co_app['co_app_verifiable_annual_income__c'], ax=ax_hist)
# Remove x axis name for the boxplot
ax_box.set(xlabel='')
```
### Remove outliers
```
## remove outlier...
co_app = co_app[co_app['co_app_verifiable_annual_income__c'] < 200000]
# Cut the window in 2 parts
f, (ax_hist, ax_box) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (.85, .25)})
# Add a graph in each part
sns.boxplot(co_app['co_app_verifiable_annual_income__c'], ax=ax_box)
sns.distplot(co_app['co_app_verifiable_annual_income__c'], ax=ax_hist)
# Remove x axis name for the boxplot
ax_box.set(xlabel='')
def get_co_app_cat(co_app_income):
"""This function creates a Co-App Flag"""
if pd.isnull(co_app_income):
return 0
return 1
# apply function to dataset
fplus_df['co_app'] = fplus_df['co_app_verifiable_annual_income__c'].apply(get_co_app_cat)
# view counts now
fplus_df['co_app'].value_counts()
# view percentage
fplus_df['co_app'].value_counts()/len(fplus_df)
```
### How does Co-App affect Funding?
```
# plot 2 barplots, 1 showing total counts, 1 showing co app percent by fund
fig, (axis1,axis2) = plt.subplots(1,2, sharex=True, figsize=(10,5))
# barplot
sns.countplot(x='co_app', data=fplus_df, order=[1,0], ax=axis1)
# Get the co-app rate for funded vs. not-funded applications
fund_perc = fplus_df[["Fund", "co_app"]].groupby(['Fund'], as_index=False).mean()
sns.barplot(x='Fund', y='co_app', data=fund_perc, order=[1,0], ax=axis2)
```
## 2) - Loan to Income (LTI)
### View Data
```
# drop na for lti and view distribution
loan_income = fplus_df.dropna(subset=['lti__c'])
loan_income['lti__c'].describe()
# Cut the window in 2 parts
f, (ax_hist, ax_box) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (.85, .25)})
# Add a graph in each part
sns.boxplot(loan_income['lti__c'], ax=ax_box)
sns.distplot(loan_income['lti__c'], ax=ax_hist)
# Remove x axis name for the boxplot
ax_box.set(xlabel='')
```
### Remove Outliers
```
744/139230
```
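The bare division above presumably reports the share of outlier rows. Below is a minimal sketch of actually filtering them out, mirroring the co-app income step; the cutoff of 35 is an assumption chosen to match the LTI bucketing used later in this notebook:
```
## remove LTI outliers (35 is an assumed cutoff)
loan_income = loan_income[loan_income['lti__c'] < 35]
loan_income['lti__c'].describe()
```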
## 3) - Marketing Channel (utm_source)
```
# Cross tab
cm = sns.light_palette("green", as_cmap=True)
pd.crosstab(fplus_df['utm_source__c'], fplus_df['Fund'], values=fplus_df['Fund'], aggfunc=[len, np.mean], margins=True, margins_name="Total").style.background_gradient(cmap = cm)
# pivot table
def fund_rate(x):
'''This function is used within pivot_tables to calculate ratios'''
return np.sum(x) / np.size(x)
pivoting = pd.pivot_table(fplus_df, values='Fund', index='utm_source__c',
aggfunc={'Fund': [np.sum, np.size, fund_rate]})
print(pivoting.sort_values('size', ascending=False).to_string())
```
## 4) - FICO
## 5) - Employment
## 6) - Debt to Income (NDI)
## 7) - Debt to Income Squared
## 8) - Loan Use
## 9) - Bank Card Trades (bcc0300)
## 10) - Loan Amount
# Prediction Model
```
model_data = fplus_df.dropna()
def get_co_app_cat(co_app_income):
if pd.isnull(co_app_income):
return 0
return 1
model_data['co_app_verifiable_annual_income__c'] = model_data['co_app_verifiable_annual_income__c'].apply(get_co_app_cat)
# create dummies
cat_vars = ['loan_use__c','employment_status__c','utm_source__c']
for var in cat_vars:
cat_list = pd.get_dummies(model_data[var], prefix=var)
temp = model_data.join(cat_list)
model_data = temp
data_vars = model_data.columns.values.tolist()
to_keep = [i for i in data_vars if i not in cat_vars]
model_data = model_data[to_keep]
model_data.info()
model_data = model_data.drop(['id'], axis = 1)
# segment out the variable we are predicting from the rest of the data
y = model_data['Fund']
X = model_data.drop(['Fund'], axis = 1)
# Split the data into a train and test dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
# 10 Fold Cross Validation.
num_instances = len(X)
seed = 7
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)  # shuffle is required when setting random_state in newer sklearn
model = LogisticRegression()
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print("Accuracy: %.3f%% (%.3f%%)" % (results.mean()*100.0, results.std()*100.0))
# Set classes for all plots
classes = ['Not Fund', 'Fund']
# Instantiate the visualizer with the classification model
visualizer = ROCAUC(model, classes=classes)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
g = visualizer.poof()
# Instantiate the classification model and visualizer
visualizer = ClassificationReport(model, classes=classes, support=True)
visualizer.fit(X_train, y_train) # Fit the visualizer and the model
visualizer.score(X_test, y_test) # Evaluate the model on the test data
g = visualizer.poof() # Draw/show/poof the data
```
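As an optional follow-up that is not part of the original workflow, we can fit the logistic regression on the training split and rank its coefficients to see which features drive the funding prediction; this is only a sketch and assumes the `model`, `X_train`, and `y_train` objects from the cell above:
```
# Fit on the training split and rank coefficients by absolute size (illustrative only)
model.fit(X_train, y_train)
coefs = pd.Series(model.coef_[0], index=X_train.columns)
print(coefs.reindex(coefs.abs().sort_values(ascending=False).index).head(15))
```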
## Data Transformation Functions
```
import numpy as np
import pandas as pd
def get_co_app_cat(co_app_income):
if pd.isnull(co_app_income):
return 0
return 1
def get_loan_use_cat(loan_use):
if pd.isnull(loan_use):
return 3
loan_use = loan_use.strip()
if (loan_use == 'Credit Card Refinancing'):
return 4
if (loan_use in ['Major Purchase','Other']):
return 2
if (loan_use == 'Auto Purchase'):
return 1
return 3
def get_employment_cat(employment_status):
if pd.isnull(employment_status):
employment_status = ''
employment_status = employment_status.strip()
if (employment_status == 'Retired'):
return 4
if (employment_status in ['Self-employed']):
return 2
if (employment_status in ['Other', '']):
return 1
return 3
def get_loan_amount_cat(loan_amount):
if pd.isnull(loan_amount):
return 1
loan_amount = float(loan_amount)
if (loan_amount < 15000):
return 4
if (loan_amount >= 15000) and (loan_amount < 20000):
return 3
if (loan_amount >= 20000) and (loan_amount < 25000):
return 2
return 1
def get_mkt_chan_cat(utm_source):
if pd.isnull(utm_source):
return 3
utm_source = utm_source.strip()
if (utm_source in ['creditkarma','nerdwallet']):
return 7
if (utm_source in ['credible','experian']):
return 6
if (utm_source in ['website', 'google','msn','ck','nerd',
'115','save','dm','SLH','201']):
return 5
if (utm_source in ['facebook', 'even','uplift','Quinstreet',
'Personalloanpro','113']):
return 2
if (utm_source in ['LendEDU', 'monevo','247','sfl']):
return 1
return 3
def get_fico(fico):
if pd.isnull(fico):
return 990
fico = int(fico)
if (fico >= 9000):
return 990
if fico < 600:
return 990
return fico
def get_lti(lti):
if pd.isnull(lti):
return 36
lti = float(lti)
if (lti > 35) or (lti < 1):
return 36
if (lti >= 1) and (lti < 2):
return 35
if (lti >= 2) and (lti < 3):
return 34
return np.floor(lti)
def get_bcc0300(bcc0300):
if pd.isnull(bcc0300):
return 99
bcc0300 = int(bcc0300)
if (bcc0300 >= 25):
return 30
return bcc0300
def get_ndi_ratio(ndi_ratio):
if pd.isnull(ndi_ratio):
return 5
ndi_ratio = float(ndi_ratio)
ndi_ratio = np.floor(ndi_ratio)
if (ndi_ratio < 10):
return 5
if (ndi_ratio > 75):
return 80
return ndi_ratio
# Compute the correlation matrix
corr = model_data.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
# select only numeric data
num_data = fplus_df.select_dtypes(include='number')
# Compute the correlation matrix
corr = num_data.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
```
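The bucketing functions above are defined but never applied in this notebook. A minimal sketch of how they might be used to build a bucketed feature table is shown below; the source column names follow the query at the top, while the new column names are made up for illustration:
```
# Illustrative application of the bucketing functions (new column names are hypothetical)
bucketed = pd.DataFrame({
    'co_app_cat': fplus_df['co_app_verifiable_annual_income__c'].apply(get_co_app_cat),
    'loan_use_cat': fplus_df['loan_use__c'].apply(get_loan_use_cat),
    'employment_cat': fplus_df['employment_status__c'].apply(get_employment_cat),
    'loan_amount_cat': fplus_df['amount_of_loan_requested__c'].apply(get_loan_amount_cat),
    'mkt_chan_cat': fplus_df['utm_source__c'].apply(get_mkt_chan_cat),
    'fico_cat': fplus_df['fico__c'].apply(get_fico),
    'lti_cat': fplus_df['lti__c'].apply(get_lti),
    'bcc0300_cat': fplus_df['bcc0300__c'].apply(get_bcc0300),
    'ndi_cat': fplus_df['ndi_ratio__c'].apply(get_ndi_ratio),
})
bucketed.head()
```
Note that the prediction model above one-hot encodes the raw categories with `get_dummies` instead, so this bucketed table is purely illustrative.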
[](https://www.pythonista.io)
# *URL* rules.
## Preliminaries.
```
from flask import Flask
app = Flask(__name__)
```
## Extracting values from a *URL*.
*URL* rules not only allow you to define static routes that point to a function; they can also define dynamic routes that extract information from the route itself and use it as an argument for the view function.
``
'{texto 1}<{nombre 1}>{texto 2}<{nombre 2}> ...'
``
Where:
* ```{texto i}``` is a fixed text segment of the *URL*.
* ```{nombre i}``` is the name assigned to the text captured in that segment of the *URL*.
**Example:**
* The following cell defines a rule for ```app.route()``` that captures the text at the end of the ```/saluda/``` route and stores it under the name ```usuario```, which is then used as the argument of the ```saluda()``` view function. The application returns an *HTML* document with text built from the contents of ```usuario```.
```
@app.route('/saluda/<usuario>')
def saluda(usuario):
return f'<p>Hola, {usuario}.</p>'
```
## Route rules with type indicators.
It is possible to tell the application server the type of data expected in the route.
```
<{tipo}:{nombre}>
```
Where:
* ```{tipo}``` can be one of:
* ```string```, which corresponds to a string of characters.
* ```int```, which corresponds to an integer number.
* ```float```, which corresponds to a floating-point number (a decimal point is mandatory).
* ```path```, which corresponds to a string of characters representing a path.
* ```uuid```, which corresponds to a universally unique identifier, as defined in [*RFC 4122*](https://datatracker.ietf.org/doc/html/rfc4122).
If the *URL* segment does not match the indicated type, *Flask* will return a ```404``` status.
**Examples:**
The following cell defines a *URL* rule for the ```mitad()``` view function, which requires a value of type ```float``` after the text ```/operacion/``` and before the text ```/mitad```.
* The route ```/operacion/5.0/mitad``` will capture the value ```5.0``` in the ```numero``` parameter and return an *HTML* document displaying operations with that value.
* The route ```/operacion/5/mitad``` will return a ```404``` status.
* The route ```/operacion/Juan/mitad``` will return a ```404``` status.
```
@app.route('/operacion/<float:numero>/mitad')
def mitad(numero):
return f'La mitad de {numero} es {numero / 2}.'
@app.route('/operacion/<int:a>_suma_<int:b>')
def suma(a, b):
return f'La suma de {a} + {b} es {a + b}.'
```
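A small additional sketch (not in the original notebook) showing the ```path``` indicator from the list above; the route and function names here are made up for illustration:
```
@app.route('/files/<path:subpath>')
def show_subpath(subpath):
    return f'<p>The requested path is {subpath}.</p>'
```
A request such as http://localhost:5000/files/docs/intro.txt would capture ```docs/intro.txt``` in the ```subpath``` parameter.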
**Warning:** Once the following cell has been executed, you will need to interrupt the *Jupyter* kernel in order to run the remaining cells of the notebook.
```
app.run(host="0.0.0.0", port=5000)
```
* http://localhost:5000/saluda/Juan
* http://localhost:5000/operacion/5.0/mitad
* http://localhost:5000/operacion/5/mitad
* http://localhost:5000/operacion/Juan/mitad
* http://localhost:5000/operacion/5.0
* http://localhost:5000/operacion/2_suma_3
<p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p>
<p style="text-align: center">© José Luis Chiquete Valdivieso. 2022.</p>
**Important: This notebook will only work with fastai-0.7.x. Do not try to run any fastai-1.x code from this path in the repository because it will load fastai-0.7.x**
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.learner import *
import torchtext
from torchtext import vocab, data
from torchtext.datasets import language_modeling
from fastai.rnn_reg import *
from fastai.rnn_train import *
from fastai.nlp import *
from fastai.lm_rnn import *
import dill as pickle
import spacy
```
## Language modeling
### Data
The [large movie review dataset](http://ai.stanford.edu/~amaas/data/sentiment/) contains a collection of 50,000 reviews from IMDB. The dataset contains an even number of positive and negative reviews. The authors considered only highly polarized reviews. A negative review has a score ≤ 4 out of 10, and a positive review has a score ≥ 7 out of 10. Neutral reviews are not included in the dataset. The dataset is divided into training and test sets of 25,000 labeled reviews each.
The **sentiment classification task** consists of predicting the polarity (positive or negative) of a given text.
However, before we try to classify *sentiment*, we will simply try to create a *language model*; that is, a model that can predict the next word in a sentence. Why? Because our model first needs to understand the structure of English, before we can expect it to recognize positive vs negative sentiment.
So our plan of attack is the same as we used for Dogs v Cats: pretrain a model to do one thing (predict the next word), and fine tune it to do something else (classify sentiment).
Unfortunately, there are no good pretrained language models available to download, so we need to create our own. To follow along with this notebook, we suggest downloading the dataset from [this location](http://files.fast.ai/data/aclImdb.tgz) on files.fast.ai.
```
PATH='data/aclImdb/'
TRN_PATH = 'train/all/'
VAL_PATH = 'test/all/'
TRN = f'{PATH}{TRN_PATH}'
VAL = f'{PATH}{VAL_PATH}'
%ls {PATH}
```
Let's look inside the training folder...
```
trn_files = !ls {TRN}
trn_files[:10]
```
...and at an example review.
```
review = !cat {TRN}{trn_files[6]}
review[0]
```
Sounds like I'd really enjoy *Zombiegeddon*...
Now we'll check how many words are in the dataset.
```
!find {TRN} -name '*.txt' | xargs cat | wc -w
!find {VAL} -name '*.txt' | xargs cat | wc -w
```
Before we can analyze text, we must first *tokenize* it. This refers to the process of splitting a sentence into an array of words (or more generally, into an array of *tokens*).
*Note:* If you get an error like:
Can't find model 'en'. It doesn't seem to be a shortcut link, a Python package or a valid path to a data directory.
then you need to install the Spacy language model by running this command on the command-line:
$ python -m spacy download en
```
spacy_tok = spacy.load('en')
' '.join([sent.string.strip() for sent in spacy_tok(review[0])])
```
We use Pytorch's [torchtext](https://github.com/pytorch/text) library to preprocess our data, telling it to use the wonderful [spacy](https://spacy.io/) library to handle tokenization.
First, we create a torchtext *field*, which describes how to preprocess a piece of text - in this case, we tell torchtext to make everything lowercase, and tokenize it with spacy.
```
TEXT = data.Field(lower=True, tokenize="spacy")
```
fastai works closely with torchtext. We create a ModelData object for language modeling by taking advantage of `LanguageModelData`, passing it our torchtext field object, and the paths to our training, test, and validation sets. In this case, we don't have a separate test set, so we'll just use `VAL_PATH` for that too.
As well as the usual `bs` (batch size) parameter, we also now have `bptt`; this defines how many words are processed at a time in each row of the mini-batch. More importantly, it defines how many 'layers' we will backprop through. Making this number higher will increase time and memory requirements, but will improve the model's ability to handle long sentences.
```
bs=64; bptt=70
FILES = dict(train=TRN_PATH, validation=VAL_PATH, test=VAL_PATH)
md = LanguageModelData.from_text_files(PATH, TEXT, **FILES, bs=bs, bptt=bptt, min_freq=10)
```
After building our `ModelData` object, it automatically fills the `TEXT` object with a very important attribute: `TEXT.vocab`. This is a *vocabulary*, which stores which words (or *tokens*) have been seen in the text, and how each word will be mapped to a unique integer id. We'll need to use this information again later, so we save it.
*(Technical note: python's standard `Pickle` library can't handle this correctly, so at the top of this notebook we used the `dill` library instead and imported it as `pickle`)*.
```
pickle.dump(TEXT, open(f'{PATH}models/TEXT.pkl','wb'))
```
Here are the: # batches; # unique tokens in the vocab; # tokens in the training set; # sentences
```
len(md.trn_dl), md.nt, len(md.trn_ds), len(md.trn_ds[0].text)
```
This is the start of the mapping from integer IDs to unique tokens.
```
# 'itos': 'int-to-string'
TEXT.vocab.itos[:12]
# 'stoi': 'string to int'
TEXT.vocab.stoi['the']
```
Note that in a `LanguageModelData` object there is only one item in each dataset: all the words of the text joined together.
```
md.trn_ds[0].text[:12]
```
torchtext will handle turning these words into integer IDs for us automatically.
```
TEXT.numericalize([md.trn_ds[0].text[:12]])
```
Our `LanguageModelData` object will create batches with 64 columns (that's our batch size), and varying sequence lengths of around 80 tokens (that's our `bptt` parameter - *backprop through time*).
Each batch also contains the exact same data as labels, but one word later in the text - since we're trying to always predict the next word. The labels are flattened into a 1d array.
```
next(iter(md.trn_dl))
```
### Train
We have a number of parameters to set - we'll learn more about these later, but you should find these values suitable for many problems.
```
em_sz = 200 # size of each embedding vector
nh = 500 # number of hidden activations per layer
nl = 3 # number of layers
```
Researchers have found that large amounts of *momentum* (which we'll learn about later) don't work well with these kinds of *RNN* models, so we create a version of the *Adam* optimizer with less momentum than its default of `0.9`.
```
opt_fn = partial(optim.Adam, betas=(0.7, 0.99))
```
fastai uses a variant of the state of the art [AWD LSTM Language Model](https://arxiv.org/abs/1708.02182) developed by Stephen Merity. A key feature of this model is that it provides excellent regularization through [Dropout](https://en.wikipedia.org/wiki/Convolutional_neural_network#Dropout). There is no simple way known (yet!) to find the best values of the dropout parameters below - you just have to experiment...
However, the other parameters (`alpha`, `beta`, and `clip`) shouldn't generally need tuning.
```
learner = md.get_model(opt_fn, em_sz, nh, nl,
dropouti=0.05, dropout=0.05, wdrop=0.1, dropoute=0.02, dropouth=0.05)
learner.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)
learner.clip=0.3
```
As you can see below, I gradually tuned the language model in a few stages. I possibly could have trained it further (it wasn't yet overfitting), but I didn't have time to experiment more. Maybe you can see if you can train it to a better accuracy! (I used `lr_find` to find a good learning rate, but didn't save the output in this notebook. Feel free to try running it yourself now.)
```
learner.fit(3e-3, 4, wds=1e-6, cycle_len=1, cycle_mult=2)
learner.save_encoder('adam1_enc')
learner.load_encoder('adam1_enc')
learner.fit(3e-3, 1, wds=1e-6, cycle_len=10)
```
In the sentiment analysis section, we'll just need half of the language model - the *encoder*, so we save that part.
```
learner.save_encoder('adam3_10_enc')
learner.load_encoder('adam3_10_enc')
```
Language modeling accuracy is generally measured using the metric *perplexity*, which is simply `exp()` of the loss function we used.
```
math.exp(4.165)
pickle.dump(TEXT, open(f'{PATH}models/TEXT.pkl','wb'))
```
### Test
We can play around with our language model a bit to check it seems to be working OK. First, let's create a short bit of text to 'prime' a set of predictions. We'll use our torchtext field to numericalize it so we can feed it to our language model.
```
m=learner.model
ss=""". So, it wasn't quite was I was expecting, but I really liked it anyway! The best"""
s = [TEXT.preprocess(ss)]
t=TEXT.numericalize(s)
' '.join(s[0])
```
We haven't yet added methods to make it easy to test a language model, so we'll need to manually go through the steps.
```
# Set batch size to 1
m[0].bs=1
# Turn off dropout
m.eval()
# Reset hidden state
m.reset()
# Get predictions from model
res,*_ = m(t)
# Put the batch size back to what it was
m[0].bs=bs
```
Let's see what the top 10 predictions were for the next word after our short text:
```
nexts = torch.topk(res[-1], 10)[1]
[TEXT.vocab.itos[o] for o in to_np(nexts)]
```
...and let's see if our model can generate a bit more text all by itself!
```
print(ss,"\n")
for i in range(50):
n=res[-1].topk(2)[1]
n = n[1] if n.data[0]==0 else n[0]
print(TEXT.vocab.itos[n.data[0]], end=' ')
res,*_ = m(n[0].unsqueeze(0))
print('...')
```
### Sentiment
We'll need the saved vocab from the language model, since we need to ensure the same words map to the same IDs.
```
TEXT = pickle.load(open(f'{PATH}models/TEXT.pkl','rb'))
```
`sequential=False` tells torchtext that this field should not be tokenized (in this case, we just want to store the 'positive' or 'negative' single label).
`splits` is a torchtext method that creates train, test, and validation sets. The IMDB dataset is built into torchtext, so we can take advantage of that. Take a look at `lang_model-arxiv.ipynb` to see how to define your own fastai/torchtext datasets.
```
IMDB_LABEL = data.Field(sequential=False)
splits = torchtext.datasets.IMDB.splits(TEXT, IMDB_LABEL, 'data/')
t = splits[0].examples[0]
t.label, ' '.join(t.text[:16])
```
fastai can create a ModelData object directly from torchtext splits.
```
md2 = TextData.from_splits(PATH, splits, bs)
m3 = md2.get_model(opt_fn, 1500, bptt, emb_sz=em_sz, n_hid=nh, n_layers=nl,
dropout=0.1, dropouti=0.4, wdrop=0.5, dropoute=0.05, dropouth=0.3)
m3.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)
m3.load_encoder(f'adam3_10_enc')
```
Because we're fine-tuning a pretrained model, we'll use differential learning rates, and also increase the max gradient for clipping, to allow the SGDR to work better.
```
m3.clip=25.
lrs=np.array([1e-4,1e-4,1e-4,1e-3,1e-2])
m3.freeze_to(-1)
m3.fit(lrs/2, 1, metrics=[accuracy])
m3.unfreeze()
m3.fit(lrs, 1, metrics=[accuracy], cycle_len=1)
m3.fit(lrs, 7, metrics=[accuracy], cycle_len=2, cycle_save_name='imdb2')
m3.load_cycle('imdb2', 4)
accuracy_np(*m3.predict_with_targs())
```
A recent paper from Bradbury et al, [Learned in translation: contextualized word vectors](https://einstein.ai/research/learned-in-translation-contextualized-word-vectors), has a handy summary of the latest academic research in solving this IMDB sentiment analysis problem. Many of the latest algorithms shown are tuned for this specific problem.

As you see, we just got a new state of the art result in sentiment analysis, decreasing the error from 5.9% to 5.5%! You should be able to get similarly world-class results on other NLP classification problems using the same basic steps.
There are many opportunities to further improve this, although we won't be able to get to them until part 2 of this course...
### End
# Encoders and decoders
> In this post, we will implement simple autoencoder architecture. This is the summary of lecture "Probabilistic Deep Learning with Tensorflow 2" from Imperial College London.
- toc: true
- badges: true
- comments: true
- author: Chanseok Kang
- categories: [Python, Coursera, Tensorflow_probability, ICL]
- image: images/mnist_reconstruction.png
## Packages
```
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import matplotlib.pyplot as plt
tfd = tfp.distributions
tfpl = tfp.layers
tfb = tfp.bijectors
plt.rcParams['figure.figsize'] = (10, 6)
print("Tensorflow Version: ", tf.__version__)
print("Tensorflow Probability Version: ", tfp.__version__)
```
## Overview
### AutoEncoder architecture with tensorflow
An autoencoder has a bottleneck architecture. The width of the dense layers decreases at first, all the way down to the middle bottleneck layer. The network then widens out again until the final layer has the same shape as the input layer.
```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Reshape
autoencoder = Sequential([
Flatten(input_shape=(28, 28)),
Dense(256, activation='sigmoid'),
Dense(64, activation='sigmoid'),
Dense(2, activation='sigmoid'),
Dense(64, activation='sigmoid'),
Dense(256, activation='sigmoid'),
Dense(784, activation='sigmoid'),
Reshape((28, 28))
])
optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.0005)
autoencoder.compile(loss='mse', optimizer=optimizer)
autoencoder.fit(X_train, X_train, epochs=20)
```
You can see that this network is trained with X_train as both input and target, rather than with labels, so this is a form of unsupervised learning.
### Encoder and decoder
We can split the autoencoder architecture into an encoder and a decoder.
```python
from tensorflow.keras.models import Model
encoder = Sequential([
Flatten(input_shape=(28, 28)),
Dense(256, activation='sigmoid'),
Dense(64, activation='sigmoid'),
Dense(2, activation='sigmoid')
])
decoder = Sequential([
Dense(64, activation='sigmoid', input_shape=(2, )),
Dense(256, activation='sigmoid'),
Dense(784, activation='sigmoid'),
Reshape((28, 28))
])
autoencoder = Model(inputs=encoder.input, outputs=decoder(encoder.output))
autoencoder.compile(loss='mse', optimizer='sgd')
autoencoder.fit(X_train, X_train, epochs=20)
```
### Test for reconstruction
```python
# X_test: (1, 28, 28)
reconstruction = autoencoder(X_test)
X_encoded = encoder(X_test)
z = tf.random.normal([1, 2])
z_decoded = decoder(z) # (1, 28, 28)
```
## Tutorial
We'll use Fashion MNIST for the tutorial.
```
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Flatten, Reshape
import seaborn as sns
# Load Fashion MNIST
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
class_names = np.array(['T-shirt/top', 'Trouser/pants', 'Pullover shirt', 'Dress',
'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag','Ankle boot'])
# Display a few examples
n_examples = 1000
example_images = X_test[0:n_examples]
example_labels = y_test[0:n_examples]
f, axs = plt.subplots(1, 5, figsize=(15, 4))
for j in range(len(axs)):
axs[j].imshow(example_images[j], cmap='binary')
axs[j].axis('off')
plt.show()
# Define the encoder
encoded_dim = 2
encoder = Sequential([
Flatten(input_shape=(28, 28)),
Dense(256, activation='sigmoid'),
Dense(64, activation='sigmoid'),
Dense(encoded_dim)
])
# Encode examples before training
pretrain_example_encodings = encoder(example_images).numpy()
# Plot encoded examples before training
f, ax = plt.subplots(1, 1, figsize=(7, 7))
sns.scatterplot(x=pretrain_example_encodings[:, 0],
y=pretrain_example_encodings[:, 1],
hue=class_names[example_labels], ax=ax,
palette=sns.color_palette("colorblind", 10));
ax.set_xlabel('Encoding dimension 1'); ax.set_ylabel('Encoding dimension 2')
ax.set_title('Encodings of example images before training');
plt.show()
# Define the decoder
decoder = Sequential([
Dense(64, activation='sigmoid', input_shape=(encoded_dim,)),
Dense(256, activation='sigmoid'),
Dense(28*28, activation='sigmoid'),
Reshape((28, 28))
])
# Compile and fit the model
autoencoder = Model(inputs=encoder.inputs, outputs=decoder(encoder.output))
# Specify loss - input and output is in [0., 1.], so we can use a binary cross-entropy loss
autoencoder.compile(loss='binary_crossentropy')
# Fit the model - highlight that labels and input are the same
autoencoder.fit(X_train, X_train, epochs=10, batch_size=32)
# Compute example encodings after training
posttrain_example_encodings = encoder(example_images).numpy()
# Compare the example encodings before and after training
f, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 7))
sns.scatterplot(x=pretrain_example_encodings[:, 0],
y=pretrain_example_encodings[:, 1],
hue=class_names[example_labels], ax=axs[0],
palette=sns.color_palette("colorblind", 10));
sns.scatterplot(x=posttrain_example_encodings[:, 0],
y=posttrain_example_encodings[:, 1],
hue=class_names[example_labels], ax=axs[1],
palette=sns.color_palette("colorblind", 10));
axs[0].set_title('Encodings of example images before training');
axs[1].set_title('Encodings of example images after training');
for ax in axs:
ax.set_xlabel('Encoding dimension 1')
ax.set_ylabel('Encoding dimension 2')
ax.legend(loc='lower right')
plt.show()
# Compute the autoencoder's reconstructions
reconstructed_example_images = autoencoder(example_images)
# Evaluate the autoencoder's reconstructions
f, axs = plt.subplots(2, 5, figsize=(15, 4))
for j in range(5):
axs[0, j].imshow(example_images[j], cmap='binary')
axs[1, j].imshow(reconstructed_example_images[j].numpy().squeeze(), cmap='binary')
axs[0, j].axis('off')
axs[1, j].axis('off')
plt.show()
```
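As a final optional check, we can also decode a few random points from the 2-D latent space, as sketched in the overview section; this assumes the trained `decoder` and `encoded_dim` defined above:
```python
# Decode a handful of random latent vectors and display the generated images
z = tf.random.normal([5, encoded_dim])
generated = decoder(z)
f, axs = plt.subplots(1, 5, figsize=(15, 4))
for j in range(5):
    axs[j].imshow(generated[j].numpy().squeeze(), cmap='binary')
    axs[j].axis('off')
plt.show()
```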
# Change Detection - Image Ratio U-Net Classifier
### Summary
This notebook trains a Convolutional Neural Network (CNN) to identify building change from the pixel ratios between before/after [Sentinel-2](https://sentinel.esa.int/web/sentinel/user-guides/sentinel-2-msi/overview) imagery. For a better understanding of the ratio method begin with `change_detection.ipynb`. The model is trained on the pixel ratios of pre- & post-disaster imagery for events in the Caribbean. Ground truth building damage data is gathered from [Copernicus EMS](https://emergency.copernicus.eu/mapping/map-of-activations-rapid#zoom=3&lat=29.18235&lon=-70.57787&layers=BT00).
If you already have a trained model and simply wish to evaluate its output at a new location, skip to section 5 after completing section 1.
### Contents
- 1 - [Visualise Ratio & Damage Labels](#trLabels)
- 2 - [Training Images](#trImages)
- 3 - [Build Model](#buildModel)
- 4 - [Train Model](#trainModel)
- 5 - [Predictions](#Predictions)
- 6 - [Evaluate Prediction Accuracy](#PredictionAccuracy)
- 7 - [Test Location](#TestLocation)
### Requirements
- Lat/Long of desired location
- Before and after dates for change detection
- Output of damages at location if evaluating model
__________________________
### Initialisation steps - Define variables & import packages
```
## Define location, dates and satellite
location = 'Roseau' # Name for saved .pngs
lat, lon = 15.3031, -61.3834 # Center of Area of Interest
zoom = 15 # Map tile zoom, default 16
st_date, end_date = ['2017-08-15', '2017-10-01'], ['2017-09-15', '2017-12-01'] # Timeframes for before-after imagery: start 1, start 2; end 1 ,end 2
satellite = "sentinel-2:L1C" # Descartes product name
bands = ['red','green','blue'] # Bands used for visualisation
cloudFraction = 0.05 # May need adjusted to get images from appropriate dates for Sentinel
## Testing
preModel = "models/optimalModel" # Use a pre-trained model - if training leave as ""
deployed = False # Run model for area without damage assessment
# If a damage geojson already exists for location - else leave as ""
dmgJsons = "" # Damage file name qualifying location and area size if already exists
# Form new damage assessment json from Copernicus EMS database
dmgAssess = "gradings/EMSR246_04ROSEAU_02GRADING_v1_5500_settlements_point_grading.dbf" # Copernicus EMS damage assessment database location (.dbf file needs .prj,.shp,.shx in same directory)
grades = ['Completely Destroyed','Highly Damaged'] # Copernicus EMS labels included, options: 'Not Applicable','Negligible to slight damage', 'Moderately Damaged', 'Highly Damaged'
area = 0.0004 # Building footprint diameter in lat/long degrees (0.0001~10m at equator)
newDmgLocation = 'geojsons/'+location+'Damage'+str(area)[2:]+'g'+str(len(grades))+'.geojson' # Location for newly created damage .json
## Training - Model training input
resolution = 10 # Resolution of satellite imagery -> 10 if Sentinel
tilesize, pad, trainArea = 16, 0, 0.0003 # Tilesize for rastering -> 32 as default, tile padding
records = "records/"+location+str(trainArea)[2:]+"g"+str(len(grades))+"x"+str(tilesize)+"p"+str(pad)+".tfrecords" # Name of file for training labels
learning_rate, epochs, batch_size, n_samples = 1e-3, 50, 8, 2000 # Model training parameters
modelName = "models/"+location+"g"+str(len(grades))+"ts"+str(tilesize)+"pd"+str(pad)+"lr"+str(learning_rate)[2:]+"e"+str(epochs)+"bs"+str(batch_size)+"a"+str(trainArea)[2:]+"n"+str(n_samples) if preModel is "" else preModel # Define output model name
# Import packages
# Python libraries
import IPython
import ipywidgets
import ipyleaflet
import json
import random
import os
import geojson
import numpy as np
import pandas as pd
import geopandas as gpd
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
# Library functions
from tqdm import tqdm
from ipyleaflet import Map, GeoJSON, GeoData, LegendControl
from shapely.geometry import Polygon, Point
from tensorflow.keras.models import load_model
# Descartes Labs
import descarteslabs as dl
import descarteslabs.workflows as wf
# Custom functions
from utils import make_ground_dataset_from_ratio_polygons, get_center_location
from unet import UNet
```
______________________
<a id='trLabels'></a>
## 1. Visualise Ratio & Damage Labels
First, let's extract the training labels from the Copernicus EMS data and visualise them. Use the magic markers below the map to scale the imagery properly.
```
# Function to create damage json from EMS Copernicus database
if not deployed:
def createDmgJson(dmgAssess, grades,area,dmgJsons):
settlements = gpd.read_file(dmgAssess).to_crs({'init': 'epsg:4326'}) # Read from file
color_dict = {'Not Applicable':'green','Negligible to slight damage':'blue', 'Moderately Damaged':'yellow', 'Highly Damaged':'orange', 'Completely Destroyed':'red'}
damage = settlements[settlements.grading.isin(grades)] # Filter settlements to be within specified damage grade and location polygon
if damage.geometry[damage.index[0]].type != 'Polygon': # Gets point assessment damages into geojson file
features = []
for i in tqdm(damage.index):
poly = Polygon([[damage.geometry.x[i], damage.geometry.y[i]], [damage.geometry.x[i]+area, damage.geometry.y[i]], [damage.geometry.x[i]+area, damage.geometry.y[i]+area], [damage.geometry.x[i], damage.geometry.y[i]+area], [damage.geometry.x[i], damage.geometry.y[i]]])
features.append(geojson.Feature(properties={"Damage": damage.grading[i]}, geometry=poly))
fc = geojson.FeatureCollection(features)
with open(dmgJsons, 'w') as f: geojson.dump(fc, f)
else:
with open(dmgJsons, 'w') as f: geojson.dump(damage, f) # Puts polygon assessments into geojson file
# If geojson of damage from EMS Copernicus does not exist - create one
if not os.path.exists(dmgJsons) and not os.path.exists(newDmgLocation): createDmgJson(dmgAssess,grades,area,newDmgLocation)
try: fc = gpd.read_file(dmgJsons) # Read training label data from damage file
except: fc, dmgJsons = gpd.read_file(newDmgLocation), newDmgLocation
# Initialise map
m1 = wf.interactive.MapApp()
m1.center, m1.zoom = (lat, lon), zoom
# Define function which displays satellite imagery on map
def getImage(time,bands,opacity,mapNum):
img = wf.ImageCollection.from_id(satellite,start_datetime=st_date[time], end_datetime=end_date[time])
if 'sentinel' in satellite: # Use sentinel cloud-mask band if available
img = img.filter(lambda img: img.properties["cloud_fraction"] <= cloudFraction)
img = img.map(lambda img: img.mask(img.pick_bands('cloud-mask')==1))
mos = (img.mosaic().pick_bands(bands))
globals()['mos_'+str(time+1)+str(bands)] = mos
display = mos.visualize('Image '+str(time+1)+' '+str(bands), map=mapNum)
display.opacity = opacity
# Display before and after images for selected bands - needs to be RGB for training this model
for i in range(len(st_date)): getImage(i,bands,0.7,m1)
# Calculate logarithmic ratio for RGB images and display
ratio = wf.log10(globals()['mos_1'+str(bands)] / globals()['mos_2'+str(bands)])
rdisplay = ratio.visualize('Ratio' ,map=m1)
rdisplay.opacity = 0
# Plot damage assessment data
if not deployed:
geo_data = GeoData(geo_dataframe = fc, style={"color": "red", "fillOpacity": 0.4}, hover_style={"fillOpacity": 0.5})
m1.add_layer(geo_data)
# Legend
m1.add_control(LegendControl({"Recorded Damage":"#FF0000"}))
m1 # Display map
```
Sections 2-4 are for training a new model. If assessing performance on a new location with damage assessments, jump to [section 5](#Predictions). If evaluating change over a new area without ground data (i.e. deployed = True), jump to [section 7](#TestLocation)
______________________
<a id='trImages'></a>
## 2. Training images
Next, let's make an image dataset for training. The training data for this segmentation model consist of RGB image tiles with corresponding target rasters of the same size. Targets are binary rasters where 1 indicates the presence of a damaged building and 0 indicates its absence. The function below tiles the region covering the labels, extracts the corresponding tile of the ratio image displayed above, and builds the matching target raster. The training tile size (in pixels) can be varied in the variables section. These training data are saved as TFRecords for efficient model training.
This step will take 5-10 minutes. The dataset only has to be created once; if the notebook is re-run with the same parameters as a previous run, this cell will be skipped.
```
if not os.path.exists(records): # If records have not already been created
if not os.path.exists("records"): os.mkdir("records") # Create directory for record output if not existing
trainJsons = 'geojsons/'+location+'Damage'+str(trainArea)[2:]+'g'+str(len(grades))+'.geojson'
if not os.path.exists(trainJsons): createDmgJson(dmgAssess,grades,trainArea,trainJsons)
n_samples = make_ground_dataset_from_ratio_polygons(
ratio,
trainJsons,
products=satellite,
bands=bands,
resolution=resolution,
tilesize=tilesize,
pad=pad,
start_datetime=st_date[0],
end_datetime=end_date[0],
out_file=records,
)
```
To read the TFRecords, the data structure and a parsing function are defined next.
```
# Define the features in the TFRecords file
features = {
"image/image_data": tf.io.FixedLenSequenceFeature([], dtype=tf.float32, allow_missing=True),
"image/height": tf.io.FixedLenFeature([], tf.int64),
"image/width": tf.io.FixedLenFeature([], tf.int64),
"image/channels": tf.io.FixedLenFeature([], tf.int64),
"target/target_data": tf.io.FixedLenSequenceFeature([], dtype=tf.float32, allow_missing=True),
"target/height": tf.io.FixedLenFeature([], tf.int64),
"target/width": tf.io.FixedLenFeature([], tf.int64),
"target/channels": tf.io.FixedLenFeature([], tf.int64),
"dltile": tf.io.FixedLenFeature([], tf.string),
}
def parse_example(example_proto):
image_features = tf.io.parse_single_example(example_proto, features)
img_height = tf.cast(image_features["image/height"], tf.int32)
img_width = tf.cast(image_features["image/width"], tf.int32)
img_channels = tf.cast(image_features["image/channels"], tf.int32)
target_height = tf.cast(image_features["target/height"], tf.int32)
target_width = tf.cast(image_features["target/width"], tf.int32)
target_channels = tf.cast(image_features["target/channels"], tf.int32)
image_raw = tf.reshape(
tf.squeeze(image_features["image/image_data"]),
tf.stack([img_height, img_width, img_channels]),
)
target_raw = tf.reshape(
tf.squeeze(image_features["target/target_data"]),
tf.stack([target_height, target_width, target_channels]),
)
return image_raw, target_raw
```
Let's create a simple data pipeline to visualize some samples from the dataset.
```
# Create a TFRecordDataset to read images from these TFRecords
data = tf.data.TFRecordDataset(records).map(parse_example, num_parallel_calls=4)
data_viz = iter(data.batch(1))
# Visualize samples. You can re-run this cell to iterate through the dataset.
img, trg = next(data_viz)
fig, ax = plt.subplots(1, 2, figsize=(8, 5))
rat = ax[0].imshow(np.exp(img.numpy()).astype(np.float)[0])
lab = ax[1].imshow(trg.numpy().astype(np.uint8)[0].squeeze())
rat_title = ax[0].set_title("Ratio pixels")
lab_title = ax[1].set_title('Building damages (yellow)')
```
The display above shows the first training sample: the image and its corresponding target. Each image can contain one or more damaged buildings or can be a negative image without any. You can iterate through the training images by re-running the cell above multiple times.
______________________
<a id='buildModel'></a>
## 3. Build Model
The model architecture is a [UNet classifier](https://arxiv.org/abs/1505.04597). We'll use a pre-built implementation in [TensorFlow](https://www.tensorflow.org/) 2 / Keras.
```
# Ensure tensorflow is version 2
assert int(tf.__version__[0]) > 1, "Please install Tensorflow 2"
learning_rate, epochs, batch_size, n_samples = 1e-4, 10, 2, 300
modelName = "models/"+location+"g"+str(len(grades))+"ts"+str(tilesize)+"pd"+str(pad)+"lr"+str(learning_rate)[2:]+"e"+str(epochs)+"bs"+str(batch_size)+"a"+str(trainArea)[2:]+"n"+str(n_samples) if preModel is "" else preModel # Define output model name
# Build the model. We could just use the base_model but then the input size would be fixed once we load a saved model.
# In order to be able to predict on larger tiles we create an input layer with no fixed size
base_model = UNet()
inputs = tf.keras.layers.Input(shape=(None, None, 3))
model = tf.keras.Model(inputs=inputs, outputs=base_model(inputs))
```
We will now compile the model and output a summary. Compilation takes three arguments:
- Optimizer: A reasonable choice of optimizer is [Adam](https://arxiv.org/abs/1412.6980v8) - it performs well in most real-world scenarios.
- Loss function: We will use [binary crossentropy](https://www.tensorflow.org/api_docs/python/tf/keras/losses/BinaryCrossentropy) as a loss as it is suitable for binary classification problems.
- Metric: From our experience using the simple ratio method, 0.7 precision should be achievable but a big problem is increasing recall. Therefore we will focus on this metric for training.
```
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
loss="binary_crossentropy",
metrics=["binary_accuracy","Precision","Recall"]#,"Precision","Recall"]#tfa.metrics.F1Score(num_classes=2, threshold=0.5)#tf.keras.metrics.RecallAtPrecision(precision=0.7) #["accuracy"]
)
model.summary()
```
______________________
<a id='trainModel'></a>
## 4. Train Model
We will now train the model using Stochastic Gradient Descent (SGD) with data batches. Before training, the data is shuffled and then split into training and validation sets.
```
# Get train/validation set sizes
n_train_samples = int(0.8 * n_samples)
n_val_samples = n_samples - n_train_samples
# Get data and apply transform
data = tf.data.TFRecordDataset("records/HaitiAbricots0002g3x16p0.tfrecords").map(parse_example, num_parallel_calls=4)
def type_transform(feature, target):
return tf.cast(feature, tf.float32), tf.cast(target, tf.float32)
data = data.map(type_transform, num_parallel_calls=4)
# # Concatenate second training location records if wanted
# data2 = tf.data.TFRecordDataset("records/HaitiAbricots0002g3x32.tfrecords").map(parse_example, num_parallel_calls=4)
# data2 = data2.map(type_transform, num_parallel_calls=4)
# data = data.concatenate(data2)
# # Concatenate third training location records if wanted
# data3 = tf.data.TFRecordDataset("records/HaitiLesCayes0002g2x32.tfrecords").map(parse_example, num_parallel_calls=4)
# data3 = data3.map(type_transform, num_parallel_calls=4)
# data = data.concatenate(data3)
# Shuffle the data and split into train and validation set
data = data.shuffle(buffer_size=300, seed=1)
data_train = data.take(n_train_samples).repeat().batch(batch_size)
data_val = data.skip(n_train_samples).repeat().batch(batch_size)
```
Let's train the model! This will take a while depending on training set size and number of epochs requested.
```
with tf.device('/gpu:0'):
history = model.fit(
data_train,
steps_per_epoch=n_train_samples // batch_size,
validation_data=data_val,
validation_steps=n_val_samples // batch_size,
epochs=epochs,
)
# Save the model to folder
if not os.path.exists("models"): os.mkdir("models")
tf.saved_model.save(model, modelName)
# Save to Descartes Labs storage if you so desire
print(modelName) # You'll have to copy this into both parts of the !zip command below
!zip -r copyHere.zip copyHere
print('Upload model to Storage')
storage = dl.Storage()
storage.set_file(modelName, modelName+".zip")
os.remove(modelName+".zip")
```
Let's plot the training history. Our model's loss should go down smoothly while the accuracy should go up.
```
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
ax[0].plot(history.history["loss"], label="train")
ax[0].plot(history.history["val_loss"], label="val")
ax[1].plot(history.history[list(history.history.items())[1][0]], label="train")
ax[1].plot(history.history[list(history.history.items())[3][0]], label="val")
ax[0].set_title("model loss")
ax[1].set_title("model accuracy")
ax[0].set_xlabel("epoch")
ax[0].set_ylabel("loss")
ax[1].set_xlabel("epoch")
ax[1].set_ylabel(list(history.history.items())[1][0])
ax[0].legend(loc="upper right")
p, r = history.history["precision"][epochs-1], history.history["recall"][epochs-1]
f1 = 2*(p*r)/(p+r)
print("Training metrics - Precision: ",p,", Recall: ",r,", F1 Score: ",f1)
plt.show()
n_test_samples = 500
data_test = tf.data.TFRecordDataset("records/HaitiPortSalut0004g3x32.tfrecords").map(parse_example, num_parallel_calls=4)
data_test = data_test.map(type_transform, num_parallel_calls=4)
data_test = data_test.shuffle(buffer_size=1000, seed=1)
data_test = data_test.take(n_test_samples).repeat().batch(batch_size)
results = model.evaluate(data_test,
batch_size=8,
steps=n_test_samples//batch_size)
p, r = results[2], results[3]
f1 = 2*(p*r)/(p+r)
print("F1: ",f1)
```
________________
<a id='Predictions'></a>
## 5. Predictions
Let's take a look at some predictions made by our model. We will retrieve a tile of the ratio image from our specified test location and get the predicted change from our model. If you want to use a model you've saved to Descartes Storage, set `dl_storage` to True in the next cell.
```
# Function for loading model from Descartes Labs storage
dl_storage = False
def load_model_from_storage(storage_key):
"""Load TF model from DL.Storage"""
import tempfile
model_zip = tempfile.NamedTemporaryFile()
model_dir = tempfile.TemporaryDirectory()
dl.Storage().get_file(storage_key, model_zip.name)
os.system("unzip {} -d {}".format(model_zip.name, model_dir.name))
model = load_model(os.path.join(model_dir.name, "saved_model"))  # load the unzipped model (the "saved_model" subdirectory is assumed from the zip layout)
model_zip.close()
model_dir.cleanup()
return model
# Function retrieving appropriate tile of the ratio
def get_ratio_image(dltile_key,ratio,tilesize,bands):
tile = dl.scenes.DLTile.from_key(dltile_key)
sc, ctx = dl.scenes.search(aoi=tile, products=satellite, start_datetime=st_date[0], end_datetime=end_date[0])
return ratio.compute(ctx).ndarray.reshape(tilesize,tilesize,len(bands))
# Function retrieving desired tile from Sentinel imagery for display
def get_sentinel_image(dltile_key, bands):
tile = dl.scenes.DLTile.from_key(dltile_key)
sc, ctx = dl.scenes.search(aoi=tile, products=satellite, start_datetime=st_date[0], end_datetime=end_date[0])
im = sc.mosaic(bands=bands, ctx=ctx, bands_axis=-1)
return im, ctx
```
For simplicity, we'll put all of the necessary steps into a single function that loads the model, retrieves the ratio tile for a location specified by `dltile_key`, pre-processes the tile and performs model prediction.
```
def predict_image(dltile_key,ratio,tilesize,bands):
print("Predict on image for dltile {}".format(dltile_key))
# load model
model = load_model_from_storage(modelName) if dl_storage else load_model(modelName)
# get imagery
im = get_ratio_image(dltile_key,ratio,tilesize,bands)
# add batch dimension
im = np.expand_dims(im, axis=0).astype(np.float32)
# predict
pred = model.predict(im)
return im, pred
# Type in here if you would like to change the coordinates from the map center defined in variables section.
lat, lon, tilesize = lat, lon, tilesize
tile = dl.scenes.DLTile.from_latlon(lat, lon, resolution=resolution, tilesize=tilesize, pad=pad) # Convert coordinates to nearest descartes labs tile with size of our choosing
im, pred = predict_image(tile.key,ratio,tilesize,bands) # Run prediction function for tile
sent, ctx = get_sentinel_image(tile.key,bands) # Get Sentinel imagery for tile
# Simple plot of predictions
fig, ax = plt.subplots(1, 3, figsize=(18, 6))
visBand = 0 # Choose band to visualise ratio of
# Plot ratio of chosen band
a = ax[0].imshow((im.data[0,:,:,visBand].squeeze()).astype("float"), cmap ='magma')
fig.colorbar(a, ax = ax[0])
a_tit = ax[0].set_title("Ratio for "+bands[visBand]+" band")
# Plot identified change
disting = pred > 0.2
b = ax[1].imshow(disting[0].squeeze().astype("float"))
fig.colorbar(b, ax = ax[1])
b_tit = ax[1].set_title("Building change classification")
# Plot confidence in prediction
c = ax[2].imshow(pred[0].squeeze().astype("float"))
fig.colorbar(c, ax = ax[2])
c_tit = ax[2].set_title("Damage probability")
# Extract latitude & longitude of each pixel in prediction (whether true or false)
bounds, disting = ctx.bounds, disting[0,:,:,0] if len(disting.shape) == 4 else disting # Get bounds from tile and reduce extra dimensionality of classification matrix
lats, longs = np.linspace(bounds[3],bounds[1],disting.shape[0]), np.linspace(bounds[0],bounds[2],disting.shape[1]) # Vector of lat, longs
# Create matrix of coordinates for pixels with change detected
xm, ym = np.meshgrid(longs,lats)
xc, yc = xm*(disting), ym*(disting)
# Get geodataframe for pixel points
df = pd.DataFrame(columns=['Northing', 'Easting'])
for i,j in zip(np.nonzero(xc)[0], np.nonzero(xc)[1]):
df = df.append({'Northing': yc[i][j],'Easting': xc[i][j]}, ignore_index=True)
det = gpd.GeoDataFrame(df, crs={'init':ctx.bounds_crs}, geometry=gpd.points_from_xy(df.Easting, df.Northing)).to_crs({'init': 'epsg:4326'})
# Initialise map
m3 = wf.interactive.MapApp()
m3.center, m3.zoom = (lat, lon), zoom
getImage(1,bands,0.7,m3) # Display sentinel imagery using function from map 1
# Add layer for predicted building damages
geo_data = GeoData(geo_dataframe = det, style={'color': 'yellow', 'radius':2, 'fillColor': 'yellow', 'opacity':1, 'weight':1.9, 'dashArray':'2', 'fillOpacity':1},
hover_style={'fillColor': 'red' , 'fillOpacity': 1},
point_style={'radius': 3, 'color': 'yellow', 'fillOpacity': 0.7, 'fillColor': 'yellow', 'weight': 3},
name = 'Damages')
m3.add_layer(geo_data)
# Plot bounding box for damage search
poly = gpd.GeoSeries(Polygon.from_bounds(ctx.bounds[0],ctx.bounds[1],ctx.bounds[2],ctx.bounds[3]), crs={'init':ctx.bounds_crs}).to_crs(epsg=4326)
box = GeoData(geo_dataframe = gpd.GeoDataFrame(geometry = poly.envelope), style={'color':'black','fillOpacity':0, 'opacity':0.9})
m3.add_layer(box)
# Legend
m3.add_control(LegendControl({"Detected Change":"#FFFF00","Search Area":"#000000"}))
m3
```
____________
<a id='PredictionAccuracy'></a>
## 6. Evaluate Prediction Accuracy
Finally, let's compare the prediction to known damages from Copernicus EMS assessments and evaluate the effectiveness of our learnt model.
As in the ratio method notebook (`change_detection.ipynb`) we determine the accuracy by evaluating the correspondence of detected change pixels to building footprints. The metrics are as follows:
- Precision (proportion of detections corresponding to damage): $P = \frac{True Positives}{True Positives + False Positives}$
- Recall (proportion of damage detected): $R = \frac{True Positives}{True Positives + False Negatives}$
- F1 Score: $F1 = 2\times\frac{P \times R}{P + R}$
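As a quick illustration (made-up counts, not outputs of this notebook), the three metrics reduce to a few lines of Python:
```
# hypothetical counts, purely to illustrate the formulas above
tp, fp, fn = 80, 20, 10
precision = tp / (tp + fp)                          # 0.80
recall = tp / (tp + fn)                             # ~0.89
f1 = 2 * precision * recall / (precision + recall)
print(precision, recall, f1)
```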
```
# Load building damages and filter for within detection area
dmg = gpd.read_file(dmgJsons)
filtered = gpd.GeoDataFrame(crs={'init': 'epsg:4326'})
tilePoly = gpd.GeoSeries(Polygon.from_bounds(ctx.bounds[0],ctx.bounds[1],ctx.bounds[2],ctx.bounds[3]), crs={'init':ctx.bounds_crs}).to_crs(epsg=4326).geometry[0]
for i in dmg.index:
if dmg.geometry[i].centroid.within(tilePoly):
filtered = filtered.append(dmg.loc[i])
print('Changed pixels:',len(det), '\nDamaged buildings:',len(dmg))
# Initialise accuracy and recall vectors
acc, rec = np.zeros([max(filtered.index)+1,1]), np.zeros([max(det.index)+1,1]) # Initialise accuracy, recall arrays
# Loop through pixels to determine recall (if pixel corresponds to damaged building)
for i in tqdm(det.index):
# Loop through building to determine accuracy (damaged building has been detected)
for j in filtered.index:
if det.geometry[i].within(filtered.geometry[j]):
rec[i,0], acc[j,0] = True, True
# Calculate metrics from vector outputs
a = sum(acc)/len(filtered)
r = sum(rec)/len(det)
f1 = 2*(a*r)/(a+r)
print('Accuracy:',a[0],'\nRecall:',r[0],'\nF1 score:',f1[0])
## Plot success of change detection in matplotlib and save figure
# Damage detected true/false
filtered['found'] = pd.Series(acc[filtered.index,0], index=filtered.index)
filtPlot = filtered.plot(figsize=(12,8), column='found',legend=True,cmap='RdYlGn',alpha = 0.7)
# False detection points
points = np.vstack([rec[i] for i in det.index])
x1, y1 = np.array(det.geometry.x)*(1-points).transpose(), np.array(det.geometry.y)*(1-points).transpose()
x1, y1 = x1[x1 != 0], y1[y1 != 0]
filtPlot.scatter(x1,y1,s=0.05,color='b', label='False detections')
filtPlot.set_xlim([tilePoly.bounds[0], tilePoly.bounds[2]])
filtPlot.set_ylim([tilePoly.bounds[1], tilePoly.bounds[3]])
# # Set titles and save
# plt.set_title('Threshold:'+str(threshold)+', Area:'+str(area)+', Kernel:'+str(kSize)+' - Acc:'+str(a[0])[:6]+', Re:'+str(r[0])[:6])
# plt.legend()
# plt.figure.savefig('results/'+location+'_t'+str(threshold)[2:]+'a'+str(area)[2:]+'g'+str(len(grades))+str(bands)+'.png')
## Display on interactive map
# Initialise map
m4 = wf.interactive.MapApp()
m4.center, m4.zoom = (lat, lon), zoom
# Plot background imagery as image 2 using function from map 1
getImage(1,bands,0.7,m4)
det_data = GeoData(geo_dataframe = det, style={'color': 'blue', 'radius':2, 'fillColor': 'blue', 'opacity':0.7, 'weight':1.9, 'dashArray':'2', 'fillOpacity':0.7},
point_style={'radius': 3, 'color': 'blue', 'fillOpacity': 0.7, 'fillColor': 'blue', 'weight': 3}, name = 'Damages')
m4.add_layer(det_data)
# Add layers for building polygons whether red for not found, green for found
not_found = GeoData(geo_dataframe = filtered.loc[filtered['found']==0], style={'color': 'red', 'radius':2, 'fillColor': 'red', 'opacity':0.7, 'weight':1.9, 'dashArray':'2', 'fillOpacity':0.7},
hover_style={'fillColor': 'red' , 'fillOpacity': 0.5},
name = 'Damages')
found = GeoData(geo_dataframe = filtered.loc[filtered['found']==1], style={'color': 'green', 'radius':2, 'fillColor': 'green', 'opacity':0.7, 'weight':1.9, 'dashArray':'2', 'fillOpacity':0.7},
hover_style={'fillColor': 'green' , 'fillOpacity': 0.5},
name = 'Damages')
m4.add_layer(not_found)
m4.add_layer(found)
# Plot bounding box for damage search
poly = gpd.GeoSeries(Polygon.from_bounds(ctx.bounds[0],ctx.bounds[1],ctx.bounds[2],ctx.bounds[3]), crs={'init':'EPSG:32618'}).to_crs(epsg=4326)
box = GeoData(geo_dataframe = gpd.GeoDataFrame(geometry = poly.envelope), style={'color':'yellow','fillOpacity':0, 'opacity':0.9})
m4.add_layer(box)
# Legend
m4.add_control(LegendControl({"Damage Identified":"#008000", "Damage Not Identified":"#FF0000", "Detected Change":"#0000FF", "Search Area":"#FFFF00"}))
m4
```
_________________
<a id='TestLocation'></a>
## 7. Test Location
Beyond predicting for a single tile, we would like to evaluate the model's performance over an arbitrary wider area. For this let's draw a polygon over the desired area. Then, each corresponding tile will individually be fed in to the model for assessing change detection. If over a location with ground data, accuracy will then be evaluated for the combined output for all tiles.
> One could question the decision not to simply increase the tile size. However, not only does this method make the evaluation area more flexible, the model also does not handle tile sizes larger than the one it was trained on, because of its fixed input layer structure.
```
# Display map upon which to draw Polygon for analysis
r = 10*area
testPoly = ipyleaflet.Polygon(locations=[(lat-r, lon-r), (lat-r, lon+r), (lat+r, lon+r),(lat+r, lon-r)], color="yellow", fill_color="yellow", transform=True)
pos = Map(center=(lat, lon), zoom=zoom)
if not deployed:
pos.add_layer(geo_data)
pos.add_control(LegendControl({"Recorded Damage":"#FF0000"}))
pos.add_layer(testPoly)
pos
# Define all functions required for obtaining detections
if deployed: # Define functions if not defined in section 5
# Function retrieving appropriate tile of the ratio
def get_ratio_image(dltile_key,ratio,tilesize,bands):
tile = dl.scenes.DLTile.from_key(dltile_key)
sc, ctx = dl.scenes.search(aoi=tile, products=satellite, start_datetime=st_date[0], end_datetime=end_date[0])
return ratio.compute(ctx).ndarray.reshape(tilesize,tilesize,len(bands))
# Function retrieving desired tile from Sentinel imagery for display
def get_sentinel_image(dltile_key, bands):
tile = dl.scenes.DLTile.from_key(dltile_key)
sc, ctx = dl.scenes.search(aoi=tile, products=satellite, start_datetime=st_date[0], end_datetime=end_date[0])
im = sc.mosaic(bands=bands, ctx=ctx, bands_axis=-1)
return im, ctx
# Function running predict image for each tile
def predict_image(dltile_key,ratio,tilesize,bands):
print("Predict on image for dltile {}".format(dltile_key))
# load model
model = load_model(modelName)
# get imagery
im = get_ratio_image(dltile_key,ratio,tilesize,bands)
# add batch dimension
im = np.expand_dims(im, axis=0).astype(np.float32)
# predict
pred = model.predict(im)
return im, pred
## Function to get detections for each tile
def testTile(lat,lon,tilesize,threshold):
tile = dl.scenes.DLTile.from_latlon(lat, lon, resolution=resolution, tilesize=tilesize, pad=pad) # Convert coordinates to nearest descartes labs tile with size of our choosing
im, pred = predict_image(tile.key,ratio,tilesize,bands) # Run prediction function for tile
sent, ctx = get_sentinel_image(tile.key,bands) # Get Sentinel imagery for tile
disting = pred > threshold # Get damaged predictions
# Extract latitude & longitude of each pixel in prediction (whether true or false)
bounds, disting = ctx.bounds, disting[0,:,:,0] if len(disting.shape) == 4 else disting # Get bounds from tile and reduce extra dimensionality of classification matrix
lats, longs = np.linspace(bounds[3],bounds[1],disting.shape[0]), np.linspace(bounds[0],bounds[2],disting.shape[1]) # Vector of lat, longs
# Create matrix of coordinates for pixels with change detected
xm, ym = np.meshgrid(longs,lats)
xc, yc = xm*(disting), ym*(disting)
# Get geodataframe for pixel points
df = pd.DataFrame(columns=['Northing', 'Easting'])
for i,j in zip(np.nonzero(xc)[0], np.nonzero(xc)[1]):
df = df.append({'Northing': yc[i][j],'Easting': xc[i][j]}, ignore_index=True)
det = gpd.GeoDataFrame(df, crs={'init':ctx.bounds_crs}, geometry=gpd.points_from_xy(df.Easting, df.Northing)).to_crs({'init': 'epsg:4326'})
return det, ctx
```
Looping through the tiles may take a while depending on the polygon and tile sizes: roughly 8 seconds per requested tile on 16 GB RAM.
```
## Loop through tiles to get all detections
# Get latitudes and longitudes for tiles according to polygon drawn and tilesize
tileLats = np.arange(testPoly.locations[0][0]['lat'],testPoly.locations[0][2]['lat'],resolution*1E-5*tilesize)
tileLons = np.arange(testPoly.locations[0][0]['lng'],testPoly.locations[0][2]['lng'],resolution*1E-5*tilesize)
print("Number of tiles requested:",len(tileLats)*len(tileLons),". Approximately",8*len(tileLats)*len(tileLons),"seconds on 16GB RAM.")
threshold = 0.5
allDet = gpd.GeoDataFrame(crs={'init': 'epsg:4326'})
allCtx = np.array([])
for lat in tqdm(tileLats):
for lon in tqdm(tileLons):
newDet, newCtx = testTile(lat,lon,tilesize,threshold)
newDet.index = newDet.index + len(allDet.index)
allDet = allDet.append(newDet)
allCtx = np.append([allCtx], [np.array(newCtx.bounds)])
## Evaluate against damages
if not deployed:
# Load building damages and filter for within detection area
dmg = gpd.read_file(dmgJsons)
filtered = gpd.GeoDataFrame(crs={'init': 'epsg:4326'})
tilePoly = gpd.GeoSeries(Polygon.from_bounds(min(allCtx[0::4]),min(allCtx[1::4]),max(allCtx[2::4]),max(allCtx[3::4])), crs={'init':ctx.bounds_crs}).to_crs(epsg=4326).geometry[0]
for i in dmg.index:
if dmg.geometry[i].centroid.within(tilePoly):
filtered = filtered.append(dmg.loc[i])
print('Changed pixels:',len(allDet), '\nDamaged buildings:',len(filtered))
# Initialise accuracy and recall vectors
acc, rec = np.zeros([max(filtered.index)+1,1]), np.zeros([max(allDet.index)+1,1]) # Initialise accuracy, recall arrays
# Loop through pixels to determine recall (if pixel corresponds to damaged building)
for i in tqdm(allDet.index):
# Loop through building to determine accuracy (damaged building has been detected)
for j in filtered.index:
if allDet.geometry[i].within(filtered.geometry[j]):
rec[i,0], acc[j,0] = True, True
# Calculate metrics from vector outputs
a = sum(acc)/len(filtered)
r = sum(rec)/len(allDet)
f1 = 2*(a*r)/(a+r)
print('Accuracy:',a[0],'\nRecall:',r[0],'\nF1 score:',f1[0])
# Initialise map
m5 = wf.interactive.MapApp()
m5.center, m5.zoom = (lat, lon), zoom
getImage(1,bands,0.7,m5) # Display sentinel imagery using function from map 1
# Add layer for detections from model
allDet_data = GeoData(geo_dataframe = allDet, style={'color': 'yellow', 'radius':2, 'fillColor': 'yellow', 'opacity':0.7, 'weight':1.9, 'dashArray':'2', 'fillOpacity':0.7},
point_style={'radius': 2, 'color': 'yellow', 'fillOpacity': 0.7, 'fillColor': 'blue', 'weight': 3}, name = 'Damages')
m5.add_layer(allDet_data)
# Add layers for building polygons whether red for not found, green for found
if not deployed:
filtered['found'] = pd.Series(acc[filtered.index,0], index=filtered.index)
all_not_found = GeoData(geo_dataframe = filtered.loc[filtered['found']==0], style={'color': 'red', 'radius':2, 'fillColor': 'red', 'opacity':0.7, 'weight':1.9, 'dashArray':'2', 'fillOpacity':0.7},
hover_style={'fillColor': 'red' , 'fillOpacity': 0.5},
name = 'Damages')
all_found = GeoData(geo_dataframe = filtered.loc[filtered['found']==1], style={'color': 'green', 'radius':2, 'fillColor': 'green', 'opacity':0.7, 'weight':1.9, 'dashArray':'2', 'fillOpacity':0.7},
hover_style={'fillColor': 'green' , 'fillOpacity': 0.5},
name = 'Damages')
m5.add_layer(all_not_found)
m5.add_layer(all_found)
# Legend
m5.add_control(LegendControl({"Damage Identified":"#008000", "Damage Not Identified":"#FF0000", "Detected Change":"#0000FF", "Search Area":"#FFFF00"}))
else:
m5.add_control(LegendControl({"Detected Change":"#FFFF00", "Search Area":"#0000FF"}))
# Plot bounding box for damage search
testPoly.color, testPoly.fill_opacity = 'blue', 0
m5.add_layer(testPoly)
m5
```
## --------- END ----------
# Sample grouping
We are going to take a closer look at the concept of sample groups. As in the previous
section, we will give an example to highlight some surprising results. This
time, we will use the handwritten digits dataset.
```
from sklearn.datasets import load_digits
digits = load_digits()
data, target = digits.data, digits.target
```
We will recreate the same model used in the previous exercise:
a logistic regression classifier with a preprocessor to scale the data.
```
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
model = make_pipeline(StandardScaler(), LogisticRegression())
```
We will use the same baseline model, starting with a `KFold` cross-validation
without shuffling the data.
```
from sklearn.model_selection import cross_val_score, KFold
cv = KFold(shuffle=False)
test_score_no_shuffling = cross_val_score(model, data, target, cv=cv,
n_jobs=-1)
print(f"The average accuracy is "
f"{test_score_no_shuffling.mean():.3f} +/- "
f"{test_score_no_shuffling.std():.3f}")
```
Now, let's repeat the experiment by shuffling the data within the
cross-validation.
```
cv = KFold(shuffle=True)
test_score_with_shuffling = cross_val_score(model, data, target, cv=cv,
n_jobs=-1)
print(f"The average accuracy is "
f"{test_score_with_shuffling.mean():.3f} +/- "
f"{test_score_with_shuffling.std():.3f}")
```
We observe that shuffling the data improves the mean accuracy.
We could go a little further and plot the distribution of the testing
score. We can first concatenate the test scores.
```
import pandas as pd
all_scores = pd.DataFrame(
[test_score_no_shuffling, test_score_with_shuffling],
index=["KFold without shuffling", "KFold with shuffling"],
).T
```
Let's plot the distribution now.
```
import matplotlib.pyplot as plt
import seaborn as sns
all_scores.plot.hist(bins=10, edgecolor="black", density=True, alpha=0.7)
plt.xlim([0.8, 1.0])
plt.xlabel("Accuracy score")
plt.legend(bbox_to_anchor=(1.05, 0.8), loc="upper left")
_ = plt.title("Distribution of the test scores")
```
The cross-validation testing error obtained with shuffling has less variance than
the one obtained without shuffling. This means that, without shuffling, some
specific folds lead to a low score.
```
print(test_score_no_shuffling)
```
Thus, there is an underlying structure in the data that shuffling breaks, yielding
better results. To get a better understanding, we should read the documentation
shipped with the dataset.
```
print(digits.DESCR)
```
Reading carefully, we learn that 13 writers wrote the digits in our dataset, for a
total of 1797 samples. Thus, each writer wrote the same numbers several times.
Let's suppose that the samples are ordered by writer. In that case, not shuffling
the data keeps all of a writer's samples together in either the training or the
testing set. Shuffling the data breaks this structure, so digits written by the
same writer end up in both the training and testing sets.
Besides, a writer will usually tend to write digits in the same manner. Thus,
our model may learn to identify a writer's pattern for each digit instead of
recognizing the digit itself.
We can solve this problem by ensuring that the data associated with a given writer
belongs entirely to either the training or the testing set. In other words, we want
to group the samples by writer.
Here, we will manually define the group for the 13 writers.
```
from itertools import count
import numpy as np
# defines the lower and upper bounds of sample indices
# for each writer
writer_boundaries = [0, 130, 256, 386, 516, 646, 776, 915, 1029,
1157, 1287, 1415, 1545, 1667, 1797]
groups = np.zeros_like(target)
lower_bounds = writer_boundaries[:-1]
upper_bounds = writer_boundaries[1:]
for group_id, lb, up in zip(count(), lower_bounds, upper_bounds):
groups[lb:up] = group_id
```
We can check the grouping by plotting the indices linked to writer ids.
```
plt.plot(groups)
plt.yticks(np.unique(groups))
plt.xticks(writer_boundaries, rotation=90)
plt.xlabel("Target index")
plt.ylabel("Writer index")
_ = plt.title("Underlying writer groups existing in the target")
```
Once we group the digits by writer, we can use cross-validation to take this
information into account: one of the cross-validation classes whose name contains `Group` should be used.
```
from sklearn.model_selection import GroupKFold
cv = GroupKFold()
test_score = cross_val_score(model, data, target, groups=groups, cv=cv,
n_jobs=-1)
print(f"The average accuracy is "
f"{test_score.mean():.3f} +/- "
f"{test_score.std():.3f}")
```
We see that this strategy is less optimistic regarding the model's statistical
performance. However, it is the most reliable estimate if our goal is to make
handwritten digit recognition independent of the writer. Besides, we can also
see that the standard deviation was reduced.
```
all_scores = pd.DataFrame(
[test_score_no_shuffling, test_score_with_shuffling, test_score],
index=["KFold without shuffling", "KFold with shuffling",
"KFold with groups"],
).T
all_scores.plot.hist(bins=10, edgecolor="black", density=True, alpha=0.7)
plt.xlim([0.8, 1.0])
plt.xlabel("Accuracy score")
plt.legend(bbox_to_anchor=(1.05, 0.8), loc="upper left")
_ = plt.title("Distribution of the test scores")
```
In conclusion, it is really important to take any sample grouping pattern
into account when evaluating a model. Otherwise, the results obtained will
be over-optimistic with regard to reality.
# DefinedAEpTandZ0 media example
```
%load_ext autoreload
%autoreload 2
import skrf as rf
import skrf.mathFunctions as mf
import numpy as np
from numpy import real, log, log10, sum, absolute, pi, sqrt
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from scipy.optimize import minimize
rf.stylely()
```
## Measurement of two CPWG lines with different lengths
The measurements were performed on 21 March 2017 on an Anritsu MS46524B 20 GHz Vector Network Analyser. The setup is a linear frequency sweep from 1 MHz to 10 GHz with 10'000 points. Output power is 0 dBm, IF bandwidth is 1 kHz, and neither averaging nor smoothing is used.
CPWGxxx is a coplanar waveguide on ground, L long and W wide, with a G wide gap to the top ground and T thick copper, on a substrate of height H with top and bottom ground planes. A closely spaced via wall is placed on both sides of the line, and the top and bottom ground planes are connected by many vias.
| Name | L (mm) | W (mm) | G (mm) | H (mm) | T (um) | Substrate |
| :--- | ---: | ---: | ---: | ---: | ---: | :--- |
| CPWG100 | 100 | 1.70 | 0.50 | 1.55 | 50 | FR-4 |
| CPWG200 | 200 | 1.70 | 0.50 | 1.55 | 50 | FR-4 |
The milling of the artwork is performed mechanically with a lateral wall of 45°.
The relative permittivity of the dielectric was assumed to be approximately 4.5 for design purposes.

```
# Load raw measurements
TL100 = rf.Network('CPWG100.s2p')
TL200 = rf.Network('CPWG200.s2p')
TL100_dc = TL100.extrapolate_to_dc(kind='linear')
TL200_dc = TL200.extrapolate_to_dc(kind='linear')
plt.figure()
plt.suptitle('Raw measurement')
TL100.plot_s_db()
TL200.plot_s_db()
plt.figure()
t0 = -2
t1 = 4
plt.suptitle('Time domain reflection step response (DC extrapolation)')
ax = plt.subplot(1, 1, 1)
TL100_dc.s11.plot_z_time_step(pad=2000, window='hamming', z0=50, label='TL100', ax=ax, color='0.0')
TL200_dc.s11.plot_z_time_step(pad=2000, window='hamming', z0=50, label='TL200', ax=ax, color='0.2')
ax.set_xlim(t0, t1)
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(5))
ax.patch.set_facecolor('1.0')
ax.grid(True, color='0.8', which='minor')
ax.grid(True, color='0.4', which='major')
plt.show()
```
The impedance of the line and of the connector sections can be estimated from the step response.
The line section is not flat; there is some variation in the impedance, which may be caused by manufacturing tolerances and dielectric inhomogeneity.
Note that the delays on the reflection plot are twice the effective section delays, because the wave travels back and forth along the line.
The connector discontinuity is about 50 ps long. The TL100 line plateau (flat impedance part) is about 450 ps long.
```
Z_conn = 53.2 # ohm, connector impedance
Z_line = 51.4 # ohm, line plateau impedance
d_conn = 0.05e-9 # s, connector discontinuity delay
d_line = 0.45e-9 # s, line plateau delay, without connectors
```
## Dielectric effective relative permittivity extraction by multiline method
```
# Make the missing reflect measurement
# The reflect standard only affects the sign of the corrected results
reflect = TL100.copy()
reflect.s[:,0,0] = 1
reflect.s[:,1,1] = 1
reflect.s[:,1,0] = 0
reflect.s[:,0,1] = 0
# Perform NISTMultilineTRL algorithm
cal = rf.NISTMultilineTRL([TL100, reflect, TL200], [1], [100e-3, 200e-3], er_est=3.0, refl_offset=[0])
plt.figure()
plt.title('Corrected lines')
cal.apply_cal(TL100).plot_s_db()
cal.apply_cal(TL200).plot_s_db()
plt.show()
```
Calibration results show a very low residual noise floor. The error model is well fitted.
```
freq = TL100.frequency
f = TL100.frequency.f
f_ghz = TL100.frequency.f/1e9
L = 0.1
A = 0.0
f_A = 1e9
ep_r0 = 2.0
tanD0 = 0.001
f_ep = 1e9
x0 = [ep_r0, tanD0]
ep_r_mea = cal.er_eff.real
A_mea = 20/log(10)*cal.gamma.real
def model(x, freq, ep_r_mea, A_mea, f_ep):
ep_r, tanD = x[0], x[1]
m = rf.media.DefinedAEpTandZ0(frequency=freq, ep_r=ep_r, tanD=tanD, Z0=50,
f_low=1e3, f_high=1e18, f_ep=f_ep, model='djordjevicsvensson')
ep_r_mod = m.ep_r_f.real
A_mod = m.alpha * log(10)/20
return sum((ep_r_mod - ep_r_mea)**2) + 0.001*sum((20/log(10)*A_mod - A_mea)**2)
res = minimize(model, x0, args=(TL100.frequency, ep_r_mea, A_mea, f_ep),
bounds=[(2, 4), (0.001, 0.013)])
ep_r, tanD = res.x[0], res.x[1]
print('epr={:.3f}, tand={:.4f} at {:.1f} GHz.'.format(ep_r, tanD, f_ep * 1e-9))
m = rf.media.DefinedAEpTandZ0(frequency=freq, ep_r=ep_r, tanD=tanD, Z0=50,
f_low=1e3, f_high=1e18, f_ep=f_ep, model='djordjevicsvensson')
plt.figure()
plt.suptitle('Effective relative permittivity and attenuation')
plt.subplot(2,1,1)
plt.ylabel('$\epsilon_{r,eff}$')
plt.plot(f_ghz, ep_r_mea, label='measured')
plt.plot(f_ghz, m.ep_r_f.real, label='model')
plt.legend()
plt.subplot(2,1,2)
plt.xlabel('Frequency [GHz]')
plt.ylabel('A (dB/m)')
plt.plot(f_ghz, A_mea, label='measured')
plt.plot(f_ghz, 20/log(10)*m.alpha, label='model')
plt.legend()
plt.show()
```
The relative permittivity $\epsilon_{r,eff}$ and the attenuation $A$ show a reasonable agreement.
A better agreement could be achieved by implementing the Kirschning and Jansen microstripline dispersion model or by using a linear correction.
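As a rough illustration of the latter idea (a sketch only, not part of the original analysis; it assumes `ep_r_mea`, `m` and `f_ghz` from the cells above), a first-order fit of the residual between measurement and model can act as a linear correction:
```
# fit a straight line to the residual and add it back to the model (illustrative only)
residual = ep_r_mea - m.ep_r_f.real
slope, offset = np.polyfit(f_ghz, residual, 1)
ep_r_corrected = m.ep_r_f.real + slope * f_ghz + offset
plt.figure()
plt.plot(f_ghz, ep_r_mea, label='measured')
plt.plot(f_ghz, ep_r_corrected, label='model + linear correction')
plt.xlabel('Frequency [GHz]')
plt.ylabel('$\epsilon_{r,eff}$')
plt.legend()
plt.show()
```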
## Connectors effects estimation
```
# note: a half line is embedded in connector network
coefs = cal.coefs
r = mf.sqrt_phase_unwrap(coefs['forward reflection tracking'])
s1 = np.array([[coefs['forward directivity'],r],
[r, coefs['forward source match']]]).transpose()
conn = TL100.copy()
conn.name = 'Connector'
conn.s = s1
# delay estimation,
phi_conn = (np.angle(conn.s[:500,1,0]))
z = np.polyfit(f[:500], phi_conn, 1)
p = np.poly1d(z)
delay = -z[0]/(2*np.pi)
print('Connector + half thru delay: {:.0f} ps'.format(delay * 1e12))
print('TDR read half thru delay: {:.0f} ps'.format(d_line/2 * 1e12))
d_conn_p = delay - d_line/2
print('Connector delay: {:.0f} ps'.format(d_conn_p * 1e12))
# connector model with guessed loss
half = m.line(d_line/2, 's', z0=Z_line)
mc = rf.media.DefinedAEpTandZ0(m.frequency, ep_r=1, tanD=0.025, Z0=50,
f_low=1e3, f_high=1e18, f_ep=f_ep, model='djordjevicsvensson')
left = mc.line(d_conn_p, 's', z0=Z_conn)
right = left.flipped()
check = mc.thru() ** left ** half ** mc.thru()
plt.figure()
plt.suptitle('Connector + half thru comparison')
plt.subplot(2,1,1)
conn.plot_s_deg(1, 0, label='measured')
check.plot_s_deg(1, 0, label='model')
plt.ylabel('phase (rad)')
plt.legend()
plt.subplot(2,1,2)
conn.plot_s_db(1, 0, label='Measured')
check.plot_s_db(1, 0, label='Model')
plt.xlabel('Frequency (GHz)')
plt.ylabel('Insertion Loss (dB)')
plt.legend()
plt.show()
```
The connector + half-thru plots show a reasonable agreement between the calibration results and the model. There is a phase jump in the calibration results.
## Final check
```
DUT = m.line(d_line, 's', Z_line)
DUT.name = 'model'
Check = m.thru() ** left ** DUT ** right ** m.thru()
Check.name = 'model with connectors'
plt.figure()
TL100.plot_s_db()
Check.plot_s_db(1,0, color='k')
Check.plot_s_db(0,0, color='k')
plt.show()
Check_dc = Check.extrapolate_to_dc(kind='linear')
plt.figure()
plt.suptitle('Time domain step-response')
ax = plt.subplot(1,1,1)
TL100_dc.s11.plot_z_time_step(pad=2000, window='hamming', label='Measured', ax=ax, color='k')
Check_dc.s11.plot_z_time_step(pad=2000, window='hamming', label='Model', ax=ax, color='b')
t0 = -2
t1 = 4
ax.set_xlim(t0, t1)
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(5))
ax.patch.set_facecolor('1.0')
ax.grid(True, color='0.8', which='minor')
ax.grid(True, color='0.5', which='major')
```
The plots show a reasonable agreement between model and measurement up to 4 GHz.
Further work may include implementing a CPWG medium or modelling the line with more sections to account for the impedance variation vs. position.
# QCodes example with Mercury iPS
## Initial instantiation/connection
```
from qcodes.instrument_drivers.oxford.MercuryiPS_VISA import MercuryiPS
from time import sleep
# Note that the MercuryiPS_VISA is a VISA instrument using
# a socket connection. The VISA resource name therefore
# contains the port number and the word 'SOCKET'
mips = MercuryiPS('mips', 'TCPIP0::192.168.15.106::7020::SOCKET')
```
## Basic driver idea
The driver mainly deals with **field values** in Tesla. The driver is aware of the field values in three coordinate systems, cartesian, spherical, and cylindrical. The driver thus exposes the field coordinates x, y, z, phi, theta, rho, and r. Each coordinate comes in two versions: **target** and **measured**.
The idea is that the magnetic field is always changed in two steps; first a target is set, then the magnet is asked to ramp to said target.
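For example (a minimal sketch, assuming the instrument instantiated above), a field change is always a target followed by a ramp:
```
# set a cartesian target (in Tesla), then ask the magnet to go there
mips.x_target(0.1)
mips.y_target(0)
mips.z_target(0)
mips.ramp(mode='safe')       # blocking ramp, returns when done
print(mips.x_measured())     # read back the measured field
```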
## Safe regions
In addition to the safety limits baked into the physical instrument, the driver can accept a safety limit function provided by the user. The function checks - upon receiving a new field target - whether the target is inside an allowed region.
The limit function must take input arguments Bx, By, Bz (in Tesla) and return a boolean that tells us whether that field value is safe.
```
# example: the safe region is a sphere
import numpy as np
def spherical_limit(x, y, z):
"""
Safe region is a sphere of radius 1 T
"""
return np.sqrt(x**2 + y**2 + z**2) <= 1
# assign the limit function (this can also be done at init)
mips.set_new_field_limits(spherical_limit)
```
## Two different ramps
The driver can perform the ramp in two different ways: *simultaneous* ramping or *safe* ramping.
When simultaneously ramping, all three field components are ramped at the same time.
This method is non-blocking, and it is thus possible to query the field while it is ramping. The method does, however, **not** guarantee that the field stays inside the allowed region during the ramp. If the different axes have different ramp speeds, this is a real risk.
When safely ramping, all field components that are ramped *towards* the origin are ramped before those that are ramped *away from* the origin. The ramp is thus sequential and blocking, but if the safe region is convex (and contains the origin), you are guaranteed that the field never leaves the safe region.
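A minimal sketch of the difference between the two modes (both are demonstrated in full in the ramp examples below):
```
# simultaneous ramp: non-blocking, so we can poll while it runs
mips.ramp(mode='simul')
while mips.is_ramping():
    sleep(1)                 # query measured values here if desired
# safe ramp: sequential and blocking, returns once the targets are reached
mips.ramp(mode='safe')
```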
## Parameter overview
```
mips.print_readable_snapshot(update=True)
```
## Ramp examples
### First example: invalid targets
```
mips.x_target(1) # so far, so good
try:
mips.y_target(0.5) # this takes us out of the unit sphere
except ValueError as e:
print("Can not set that")
# reset and try in a different coordinate system
mips.x_target(0)
try:
mips.r_target(1.1)
except ValueError as e:
print("Can not set that")
```
### Second example: simul ramps to the origin
First we ramp the field to Bx = 0.1 T, By = 0, Bz = 0, then rotate out to theta=45, phi=30, and finally ramp it down to zero while measuring r, theta, and phi.
#### STEP A
```
mips.GRPX.field_ramp_rate(0.01)
mips.GRPY.field_ramp_rate(0.01)
mips.GRPZ.field_ramp_rate(0.01)
mips.x_target(0.1)
mips.y_target(0)
mips.z_target(0)
mips.ramp(mode='simul')
# since simul mode is non-blocking,
# we can read out during the ramp
while mips.is_ramping():
print(f'Ramping X to {mips.x_target()} T, now at {mips.x_measured()} T')
sleep(1)
sleep(1)
print(f'Done ramping, now at {mips.x_measured()} T')
```
#### STEP B
Note that since the magnet itself has no notion of any coordinate system other than cartesian coordinates, it does **NOT** follow a path where r is constant. The user must **MANUALLY** break up a ramp where r is meant to be constant into sufficiently many small steps.
```
mips.theta_target(45)
mips.phi_target(30)
mips.r_target(0.1)
mips.ramp(mode='simul')
while mips.is_ramping():
print(f"Ramping... r: {mips.r_measured():.6f} T, "
f"theta: {mips.theta_measured():.2f}, "
f"phi: {mips.phi_measured():.2f}")
sleep(1)
print(f"Done... r: {mips.r_measured():.6f} T, "
f"theta: {mips.theta_measured():.2f}, "
f"phi: {mips.phi_measured():.2f}")
```
#### STEP C
```
mips.theta_target(45)
mips.phi_target(30)
mips.r_target(0)
mips.ramp(mode='simul')
# since simul mode is non-blocking,
# we can read out during the ramp
while mips.is_ramping():
print(f"Ramping... r: {mips.r_measured():.6f} T, "
f"theta: {mips.theta_measured():.2f}, "
f"phi: {mips.phi_measured():.2f}")
sleep(1)
print(f"Done... r: {mips.r_measured():.6f} T, "
f"theta: {mips.theta_measured():.2f}, "
f"phi: {mips.phi_measured():.2f}")
```
### Third example: safe ramp away from the origin
At the origin, we can not meaningfully **measure** what theta and phi are, but the target values are persistent.
If we ramp up again and measure, we should thus get back to our target values. We use a blocking safe ramp for this (just to also test/show a blocking ramp).
```
mips.r_target(0.05)
mips.ramp(mode='safe')
print('Ramped back out again.')
print(f'Field values are: theta: {mips.theta_measured()}, phi: {mips.phi_measured()}')
```
### That's it for now! Happy sweeping.
```
# sweep back down for good measure
mips.x_target(0)
mips.y_target(0)
mips.z_target(0)
mips.ramp(mode='safe')
mips.close()
```
#### Define your project and region below. If you are not authenticated to GCP, do it by uncommenting the line below the definitions.
```
PROJECT_ID = "SOME_PROJECT"
REGION = "YOUR_REGION" #though us-central is cheaper
PIPELINE_ROOT = "gs://SOME_BUCKET/SOME_FOLDER"
#!gcloud auth login
```
#### Imports
Our imports:
* Artifact,
* Dataset,
* Input,
* Model,
* Output,
* Metrics,
* ClassificationMetrics,
These are powerful, metadata-rich handles for "Artifact" objects, or their inherited classes. By using them, as shown below, we can manage paths and save and download the objects. The paths used are actually system paths, since the artifacts are saved and shared between components via [GCS Fuse](https://cloud.google.com/storage/docs/gcs-fuse).
`component` is a decorator used for transforming a function into a KFP component. It allows us, for example, to set dependencies and base images for each of our components, with an easy-to-use and simple API.
```
from typing import NamedTuple
from kfp.v2 import dsl
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics,
component)
from kfp.v2 import compiler
```
As described in the [GCP AI Platform official GitHub](https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/unofficial/pipelines/lightweight_functions_component_io_kfp.ipynb), accessed on 2021-05-19:
KFP Python function-based components
A Kubeflow pipeline component is a self-contained set of code that performs one step in your ML workflow. A pipeline component is composed of:
The component code, which implements the logic needed to perform a step in your ML workflow.
A component specification, which defines the following:
* The component’s metadata, its name and description.
* The component’s interface, the component’s inputs and outputs.
* The component’s implementation, the Docker container image to run, how to pass inputs to your component code, and how to get the component’s outputs.
Lightweight Python function-based components make it easier to iterate quickly by letting you build your component code as a Python function and generating the component specification for you. This notebook shows how to create Python function-based components for use in Vertex AI Pipelines.
Python function-based components use the Kubeflow Pipelines SDK to handle the complexity of passing inputs into your component and passing your function’s outputs back to your pipeline.
There are two categories of inputs/outputs supported in Python function-based components: artifacts and parameters.
* Parameters are passed to your component by value and typically contain int, float, bool, or small string values.
* Artifacts are passed to your component as a reference to a path, to which you can write a file or a subdirectory structure. In addition to the artifact’s data, you can also read and write the artifact’s metadata. This lets you record arbitrary key-value pairs for an artifact such as the accuracy of a trained model, and use metadata in downstream components – for example, you could use metadata to decide if a model is accurate enough to deploy for predictions.
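For instance (a hypothetical sketch, not one of the three components built below), a downstream component could read the metadata written by an upstream component and decide whether the model is worth deploying:
```
@component
def maybe_deploy(
    xgb_model: Input[Model],
    threshold: float = 0.9
) -> str:
    # hypothetical gate: read metadata written by the producer of this artifact
    score = float(xgb_model.metadata.get("train_score", 0.0))
    return "deploy" if score >= threshold else "skip"
```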
#### Our use case
We will create three components that let us create and save a dataset, train a model, and evaluate it, saving beautiful, meaningful classification plots along the way.
As you will see, our components have dependencies on `pandas`, `sklearn`, and `xgboost`.
We use `Output[Dataset]` (or Model, or ClassificationMetrics) objects to create unique filepaths to save objects during the component's execution. We can then access them as:
```python
some_op = component()
some_output_object = some_op.outputs["some_object_name"]
```
Below we create two `Output[Dataset]` objects to save the train and test split of our model. The next operators will receive some inputs and handle the previously saved files on their processing steps.
```
@component(
packages_to_install = [
"pandas",
"sklearn"
],
)
def get_data(
dataset_train: Output[Dataset],
dataset_test: Output[Dataset]
):
from sklearn import datasets
from sklearn.model_selection import train_test_split as tts
import pandas as pd
# import some data to play with
data_raw = datasets.load_breast_cancer()
data = pd.DataFrame(data_raw.data, columns=data_raw.feature_names)
data["target"] = data_raw.target
train, test = tts(data, test_size=0.3)
train.to_csv(dataset_train.path)
test.to_csv(dataset_test.path)
```
#### The training component
This component will receive an `Input[Dataset]` object, fed from the outputs of the `get_data()` operator. It outputs an `Output[Model]` object, which will have some metadata written to it.
We will use the `Output[Model]` from the training component and the `Output[Dataset]` (the test split) from `get_data()` to evaluate the model.
```
@component(
packages_to_install = [
"pandas",
"sklearn",
"xgboost"
],
)
def train_xgb_model(
dataset: Input[Dataset],
model_artifact: Output[Model]
):
from xgboost import XGBClassifier
import pandas as pd
data = pd.read_csv(dataset.path)
model = XGBClassifier(
objective="binary:logistic"
)
model.fit(
data.drop(columns=["target"]),
data.target,
)
score = model.score(
data.drop(columns=["target"]),
data.target,
)
model_artifact.metadata["train_score"] = float(score)
model_artifact.metadata["framework"] = "XGBoost"
model.save_model(model_artifact.path)
```
#### To evaluate the model
We will receive the inputs and create some specific outputs. `Output[ClassificationMetrics]` lets us create beautiful plots in the UI, and `Output[Metrics]` lets us log arbitrary metrics. We will use `sklearn` to gather the metrics and then convert everything to lists so that the Vertex AI runner can plot them.
```
@component(
packages_to_install = [
"pandas",
"sklearn",
"xgboost"
],
)
def eval_model(
test_set: Input[Dataset],
xgb_model: Input[Model],
metrics: Output[ClassificationMetrics],
smetrics: Output[Metrics]
):
from xgboost import XGBClassifier
import pandas as pd
data = pd.read_csv(test_set.path)
model = XGBClassifier()
model.load_model(xgb_model.path)
score = model.score(
data.drop(columns=["target"]),
data.target,
)
from sklearn.metrics import roc_curve
y_scores = model.predict_proba(data.drop(columns=["target"]))[:, 1]
fpr, tpr, thresholds = roc_curve(
y_true=data.target.to_numpy(), y_score=y_scores, pos_label=True
)
metrics.log_roc_curve(fpr.tolist(), tpr.tolist(), thresholds.tolist())
from sklearn.metrics import confusion_matrix
y_pred = model.predict(data.drop(columns=["target"]))
metrics.log_confusion_matrix(
["False", "True"],
confusion_matrix(
data.target, y_pred
).tolist(), # .tolist() to convert np array to list.
)
xgb_model.metadata["test_score"] = float(score)
smetrics.log_metric("score", float(score))
```
#### The final step is to create the Pipeline
Notice that we get outputs from previous steps here. We then compile it into a `.json` file.
```
@dsl.pipeline(
# Default pipeline root. You can override it when submitting the pipeline.
pipeline_root=PIPELINE_ROOT,
# A name for the pipeline. Use to determine the pipeline Context.
name="pipeline-test-1",
)
def pipeline():
dataset_op = get_data()
train_op = train_xgb_model(dataset_op.outputs["dataset_train"])
eval_op = eval_model(
test_set=dataset_op.outputs["dataset_test"],
xgb_model=train_op.outputs["model_artifact"]
)
compiler.Compiler().compile(pipeline_func=pipeline,
package_path='xgb_pipe.json')
```
#### If you are authenticated to GCP and set everything up there
This snippet should create the run and a link for you to get to it.
Also, be sure the Vertex AI API is enabled.
```
from kfp.v2.google.client import AIPlatformClient
api_client = AIPlatformClient(
project_id=PROJECT_ID,
region=REGION
)
response = api_client.create_run_from_job_spec(
'xgb_pipe.json',
)
```
# Knowledge Graph Embeddings
Word embeddings aim at capturing the meaning of words based on very large corpora; however, there are decades of experience and approaches that have tried to capture this meaning by structuring knowledge into semantic nets, ontologies and graphs.
| | Neural | Symbolic |
| ------------- |-------------| -----|
| **representation** | vectors | symbols (URIs) |
| **input** | large corpora | human editors (Knowledge engineers) |
| **interpretability** | linked to model and training dataset | requires understanding of schema |
| **alignability** | parallel (annotated) corpora | heuristics + manual |
| **composability** | combine vectors | merge graphs |
| **extensibility** | fixed vocabulary | need to know how to link new nodes |
| **certainty** | fuzzy | exact |
| **debugability** | 'fix' training data? | edit graph |
In recent years, many new approaches have been proposed to derive 'neural' representations for existing knowledge graphs. Think of this as trying to capture the knowledge encoded in the KG to make it easier to use this in deep learning models.
- [TransE (2013)](http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-relational-data.pdf): tries to assign an embedding to nodes and relations, so that $h + r$ is close to $t$, where $h$ and $t$ are nodes in the graph and $r$ is an edge. In the RDF world, this is simply an RDF triple where $h$ is the subject, $r$ is the property and $t$ is the object of the triple.
- [HolE (2016)](http://arxiv.org/abs/1510.04935): Variant of TransE, but uses a different operator (circular correlation) to represent pairs of entities.
- [RDF2Vec(2016)](https://ub-madoc.bib.uni-mannheim.de/41307/1/Ristoski_RDF2Vec.pdf): applies word2vec to random walks on an RDF graph (essentially paths or sequences of nodes in the graph).
- [Graph convolutions(2018)](http://arxiv.org/abs/1703.06103): apply convolutional operations on graphs to learn the embeddings.
- [Neural message passing(2018)](https://arxiv.org/abs/1704.01212): merges two strands of research on KG embeddings: recurrent and convolutional approaches.
For more background: [Nickel, M., Murphy, K., Tresp, V., & Gabrilovich, E. (2016). A review of relational machine learning for knowledge graphs. Proceedings of the IEEE, 104(1), 11–33. https://doi.org/10.1109/JPROC.2015.2483592](http://www.dbs.ifi.lmu.de/~tresp/papers/1503.00759v3.pdf) provides a good overview (up to 2016).
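To make the intuition behind TransE concrete, here is a tiny illustrative sketch (toy vectors, not a real training procedure):
```
import numpy as np
# toy 3-dimensional embeddings for two entities and one relation
h = np.array([0.9, 0.1, 0.0])   # head entity
r = np.array([0.1, 0.4, 0.0])   # relation
t = np.array([1.0, 0.5, 0.0])   # tail entity
# TransE scores a triple by how close h + r is to t (lower is better)
print(np.linalg.norm(h + r - t))   # 0.0 for this hand-crafted example
```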
# Creating embeddings for WordNet
In this section, we go through the steps of generating word and concept embeddings using WordNet, a lexico-semantic knowledge graph.
0. Choose (or implement) a KG embedding algorithm
1. Convert the KG into format required by embedding algorithm
2. Execute the training
3. Evaluate/inspect results
## Choose embedding algorithm: HolE
We will use an [existing implementation of the `HolE` algorithm available on GitHub](https://github.com/mnick/holographic-embeddings).
### Install `scikit-kge`
The `holographic-embeddings` repo is actually just a wrapper around `scikit-kge` or [SKGE](https://github.com/mnick/scikit-kge), a library that implements a few KG embedding algorithms. First, we need to install `scikit-kge` as a library in our environment. Execute the following cells to clone the repository and install the library.
```
# make sure we are in the right folder to perform the git clone
%cd /content/
!git clone https://github.com/hybridNLP2018/scikit-kge
%cd scikit-kge
# install a dependency of scikit-kge on the colaboratory environment, needed to correctly build scikit-kge
!pip install nose
# now build a source distribution for the project
!python setup.py sdist
```
Executing the previous cell should produce a lot of output as the project is built. Towards the end you should see something like:
```
Writing scikit-kge-0.1/setup.cfg
creating dist
Creating tar archive
```
This should have created a `tar.gz` file in the `dist` subfolder:
```
!ls dist/
```
which we can install on the local environment by using `pip`, the python package manager.
```
!pip install dist/scikit-kge-0.1.tar.gz
%cd /content
```
### Install and inspect `holographic_embeddings` repo
Now that `skge` is installed on this environment, we are ready to clone the [holographic-embeddings](https://github.com/mnick/holographic-embeddings) repository, which will enable us to train `HolE` embeddings.
```
# let's go back to the main /content folder and clone the holE repo
%cd /content/
!git clone https://github.com/mnick/holographic-embeddings
```
If you want, you can browse the contents of this repo on github, or execute the following to see how you can start training embeddings for the WordNet 1.8 knowledge graph. In the following sections we'll go into more detail about how to train embeddings, so there is no need to actually execute this training just yet.
```
%less holographic-embeddings/run_hole_wn18.sh
```
You should see a section on the bottom of the screen with the contents of the `run_hole_wn18.sh` file. The main execution is:
```
python kg/run_hole.py --fin data/wn18.bin \
--test-all 50 --nb 100 --me 500 \
--margin 0.2 --lr 0.1 --ncomp 150
```
which is just executing the `kg/run_hole.py` script on the input data `data/wn18.bin` and passing various arguments to control how to train and produce the embeddings:
* `me`: states the number of epochs to train for (i.e. number of times to go through the input dataset)
* `ncomp`: specifies the dimension of the embeddings, each embedding will be a vector of 150 dimensions
* `nb`: number of batches
* `test-all`: specifies how often to run validation of the intermediate embeddings. In this case, every 50 epochs.
## Convert WordNet KG to required input
### KG Input format required by SKGE
SKGE requires a graph to be represented as a serialized python dictionary with the following structure:
* `relations`: a list of relation names (the named edges in the graph)
* `entities`: a list of entity names (the nodes in the graph),
* `train_subs`: a list of triples of the form `(head_id, tail_id, rel_id)`, where `head_id` and `tail_id` refer to the index in the `entities` list and `rel_id` refers to the index in the `relations` list. This is the list of triples that will be used to train the embeddings.
* `valid_subs`: a list of triples of the same form as `train_subs`. These are used to validate the embeddings during training (and thus to tune hyperparameters).
* `test_subs`: a list of triples of the same form as `train_subs`. These are used to test the learned embeddings.
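As a toy illustration of this structure (a hypothetical four-node graph, not the WordNet data used below), such a dictionary could be built and serialised like this:
```
import pickle
# toy graph: indices into 'entities' and 'relations' form the triples
toy_graph = {
    'entities': ['canine.n.02', 'dog.n.01', 'feline.n.01', 'cat.n.01'],
    'relations': ['hyponym'],
    'train_subs': [(0, 1, 0)],   # (head_id, tail_id, rel_id): canine.n.02 -hyponym-> dog.n.01
    'valid_subs': [(2, 3, 0)],   # feline.n.01 -hyponym-> cat.n.01
    'test_subs': [(2, 3, 0)],
}
with open('toy.bin', 'wb') as handle:
    pickle.dump(toy_graph, handle, protocol=pickle.HIGHEST_PROTOCOL)
```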
The `holographic-embeddings` GitHub repo comes with an example input file: `data/wn18.bin` for WordNet 1.8. In the following executable cell, we show how to read and inspect data:
```
import pickle
import os
with open('holographic-embeddings/data/wn18.bin', 'rb') as fin:
wn18_data = pickle.load(fin)
for k in wn18_data:
print(k, type(wn18_data[k]), len(wn18_data[k]), wn18_data[k][-3:])
```
The expected output should be similar to:
```
relations <class 'list'> 18 ['_synset_domain_region_of', '_verb_group', '_similar_to']
train_subs <class 'list'> 141442 [(5395, 37068, 9), (5439, 35322, 11), (28914, 1188, 10)]
entities <class 'list'> 40943 ['01164618', '02371344', '03788703']
test_subs <class 'list'> 5000 [(17206, 33576, 0), (1179, 11861, 0), (30287, 1443, 1)]
valid_subs <class 'list'> 5000 [(351, 25434, 0), (3951, 2114, 7), (756, 14490, 0)]
```
This shows that WordNet 1.8 has been represented as a graph of 40943 nodes (which we assume correspond to the synsets) interlinked using 18 relation types. The full set of relations has been split into 141K triples for training, and 5K triples each for testing and validation.
### Converting WordNet 3.0 into the required input format
WordNet 1.8 is a bit dated and it will be useful to have experience converting your KG into the required input format. Hence, rather than simply reusing the `wn18.bin` input file, we will generate our own directly from the [NLTK WordNet API](http://www.nltk.org/howto/wordnet.html).
First we need to download WordNet:
```
import nltk
nltk.download('wordnet')
```
#### Explore WordNet API
Now that we have the KG, we can use the WordNet API to explore the graph. Refer to the [howto doc](http://www.nltk.org/howto/wordnet.html) for a more in depth overview, here we only show a few methods that will be needed to generate our input file.
```
from nltk.corpus import wordnet as wn
```
The main nodes in WordNet are called synsets (synonym sets). These correspond roughly to *concepts*. You can find all the synsets related to a word like this:
```
wn.synsets('dog')
```
The output from the cell above shows how synsets are identified by the NLTK WordNet API. They have the form `<main-lemma>.<POS-code>.<sense-number>`. As far as we are aware, this is a format chosen by the implementors of the NLTK WordNet API and other APIs may choose diverging ways to refer to synsets.
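For example (standard NLTK WordNet API calls), a synset can be looked up directly by such a name and inspected:
```
dog = wn.synset('dog.n.01')
print(dog.pos())          # part of speech: 'n'
print(dog.definition())   # the synset's gloss
```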
You can get a list of all the synsets as follows (we only show the first 5):
```
for synset in list(wn.all_synsets())[:5]:
print(synset.name())
```
Similarly, you can also get a list of all the lemma names (again we only show 5):
```
for lemma in list(wn.all_lemma_names())[5000:5005]:
print(lemma)
```
For a given synset, you can find related synsets or lemmas by calling the functions for each relation type. Below we provide a couple of examples for the first sense of the adjective *adaxial*. In the first example, we see that this synset belongs to the `topic domain` `biology.n.01`, which is again a synset. In the second example, we see that it has two lemmas, which are relative to the synset. In the third example, we retrieve the lemmas in a form that is not relative to the synset, which is the one we will use later on.
```
wn.synset('adaxial.a.01').topic_domains()
wn.synset('adaxial.a.01').lemmas()
wn.synset('adaxial.a.01').lemma_names()
```
#### Entities and relations to include
The main nodes in WordNet are the synsets; however, lemmas can also be considered nodes in the graph. Hence, you need to decide which nodes to include. Since we are interested in capturing as much information as can be provided by WordNet, we will include both synsets and lemmas.
WordNet defines a large number of relations between synsets and lemmas. Again, you can decide to include all or just some of these. One particularity of WordNet is that many relations are defined twice: e.g. hypernym and hyponym are the exact same relation, but in reverse order. Since this is not really providing additional information, we only include such relations once. The following cell defines all the relations we will be taking into account. We represent these as python dictionaries, where the keys are the name of the relation and the values are functions that accept a `head` entity and produce a list of `tail` entities for that specific relation:
```
syn_relations = {
'hyponym': lambda syn: syn.hyponyms(),
'instance_hyponym': lambda syn: syn.instance_hyponyms(),
'member_meronym': lambda syn: syn.member_meronyms(),
'has_part': lambda syn: syn.part_meronyms(),
'topic_domain': lambda syn: syn.topic_domains(),
'usage_domain': lambda syn: syn.usage_domains(),
'_member_of_domain_region': lambda syn: syn.region_domains(),
'attribute': lambda syn: syn.attributes(),
'entailment': lambda syn: syn.entailments(),
'cause': lambda syn: syn.causes(),
'also_see': lambda syn: syn.also_sees(),
'verb_group': lambda syn: syn.verb_groups(),
'similar_to': lambda syn: syn.similar_tos()
}
lem_relations = {
'antonym': lambda lem: lem.antonyms(),
'derivationally_related_form': lambda lem: lem.derivationally_related_forms(),
'pertainym': lambda lem: lem.pertainyms()
}
syn2lem_relations = {
'lemma': lambda syn: syn.lemma_names()
}
```
#### Triple generation
We are now ready to generate triples by using the WordNet API. Recall that `skge` requires triples of the form `(head_id, tail_id, rel_id)`, hence we will need some way of mapping entity (synset and lemma) names and relation types to unique ids. We therefore assume we will have an `entity_id_map` and a `rel_id_map`, which will map the entity name (or relation type) to an id. The following two cells implement functions which will iterate through the synsets and relations to generate the triples:
```
def generate_syn_triples(entity_id_map, rel_id_map):
result = []
for synset in list(wn.all_synsets()):
h_id = entity_id_map.get(synset.name())
if h_id is None:
print('No entity id for ', synset)
continue
for synrel, srfn in syn_relations.items():
r_id = rel_id_map.get(synrel)
if r_id is None:
print('No rel id for', synrel)
continue
for obj in srfn(synset):
t_id = entity_id_map.get(obj.name())
if t_id is None:
print('No entity id for object', obj)
continue
result.append((h_id, t_id, r_id))
for rel, fn in syn2lem_relations.items():
r_id = rel_id_map.get(rel)
if r_id is None:
print('No rel id for', rel)
continue
for obj in fn(synset):
lem = obj.lower()
t_id = entity_id_map.get(lem)
if t_id is None:
print('No entity id for object', obj, 'lowercased:', lem)
continue
result.append((h_id, t_id, r_id))
return result
def generate_lem_triples(entity_id_map, rel_id_map):
result = []
for lemma in list(wn.all_lemma_names()):
h_id = entity_id_map.get(lemma)
if h_id is None:
print('No entity id for lemma', lemma)
continue
_lems = wn.lemmas(lemma)
for lemrel, lrfn in lem_relations.items():
r_id = rel_id_map.get(lemrel)
if r_id is None:
print('No rel id for ', lemrel)
continue
for _lem in _lems:
for obj in lrfn(_lem):
t_id = entity_id_map.get(obj.name().lower())
if t_id is None:
print('No entity id for obj lemma', obj, obj.name())
continue
result.append((h_id, t_id, r_id))
return result
```
#### Putting it all together
Now that we have methods for generating lists of triples, we can generate the input dictionary and serialise it. We need to:
* create our lists of entities and relations,
* derive a map from entity and relation names to ids
* generate the triples
* split the triples into training, validation and test subsets
* write the python dict to a serialised file
We implement this in the following method:
```
import random # for shuffling list of triples
def wnet30_holE_bin(out):
"""Creates a skge-compatible bin file for training HolE embeddings based on WordNet31"""
synsets = [synset.name() for synset in wn.all_synsets()]
lemmas = [lemma for lemma in wn.all_lemma_names()]
entities = list(synsets + list(set(lemmas)))
print('Found %s synsets, %s lemmas, hence %s entities' % (len(synsets), len(lemmas), len(entities)))
entity_id_map = {ent_name: id for id, ent_name in enumerate(entities)}
n_entity = len(entity_id_map)
print("N_ENTITY: %d" % n_entity)
relations = list( list(syn_relations.keys()) + list(lem_relations.keys()) + list(syn2lem_relations.keys()))
relation_id_map = {rel_name: id for id, rel_name in enumerate(relations)}
n_rel = len(relation_id_map)
print("N_REL: %d" % n_rel)
print('relations', relation_id_map)
syn_triples = generate_syn_triples(entity_id_map, relation_id_map)
print("Syn2syn relations", len(syn_triples))
lem_triples = generate_lem_triples(entity_id_map, relation_id_map)
print("Lem2lem relations", len(lem_triples))
all_triples = syn_triples + lem_triples
print("All triples", len(all_triples))
random.shuffle(all_triples)
test_triple = all_triples[:500]
valid_triple = all_triples[500:1000]
train_triple = all_triples[1000:]
to_pickle = {
"entities": entities,
"relations": relations,
"train_subs": train_triple,
"test_subs": test_triple,
"valid_subs": valid_triple
}
with open(out, 'wb') as handle:
pickle.dump(to_pickle, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("wrote to %s" % out)
```
#### Generate `wn30.bin`
Now we are ready to generate the `wn30.bin` file which we can feed to the `HolE` algorithm implementation.
```
out_bin='/content/holographic-embeddings/data/wn30.bin'
wnet30_holE_bin(out_bin)
```
Notice that the resulting dataset now contains 265K entities, compared to 41K in WordNet 1.8 (to be fair, only 118K of the entities are synsets).
## Learn the embeddings
Now, we will use the WordNet 3.0 dataset to learn embeddings for both synsets and lemmas. Since this is fairly slow, we only train for 2 epochs, which can take up to 10 minutes (in the exercises at the end of this notebook, we provide a link to download pre-computed embeddings that have been trained for 500 epochs).
```
wn30_holE_out='/content/wn30_holE_2e.bin'
holE_dim=150
num_epochs=2
!python /content/holographic-embeddings/kg/run_hole.py --fin {out_bin} --fout {wn30_holE_out} \
--nb 100 --me {num_epochs} --margin 0.2 --lr 0.1 --ncomp {holE_dim}
```
The output should look similar to:
```
INFO:EX-KG:Fitting model HolE with trainer PairwiseStochasticTrainer and parameters Namespace(afs='sigmoid', fin='/content/holographic-embeddings/data/wn30.bin', fout='/content/wn30_holE_2e.bin', init='nunif', lr=0.1, margin=0.2, me=2, mode='rank', nb=100, ncomp=150, ne=1, no_pairwise=False, rparam=0, sampler='random-mode', test_all=10)
INFO:EX-KG:[ 1] time = 120s, violations = 773683
INFO:EX-KG:[ 2] time = 73s, violations = 334894
INFO:EX-KG:[ 2] time = 73s, violations = 334894
INFO:EX-KG:[ 2] VALID: MRR = 0.11/0.12, Mean Rank = 90012.28/90006.14, Hits@10 = 15.02/15.12
DEBUG:EX-KG:FMRR valid = 0.122450, best = -1.000000
INFO:EX-KG:[ 2] TEST: MRR = 0.11/0.12, Mean Rank = 95344.42/95335.96, Hits@10 = 15.74/15.74
```
## Inspect resulting embeddings
Now that we have trained the model, we can retrieve the embeddings for the entities and inspect them.
### `skge` output file format
The output file is again a pickled serialisation of a python dictionary. It contains the `model` itself, and results for the test and validation runs as well as execution times.
```
with open(wn30_holE_out, 'rb') as fin:
hole_model = pickle.load(fin)
print(type(hole_model), len(hole_model))
for k in hole_model:
print(k, type(hole_model[k]))
```
We are interested in the model itself, which is an instance of a `skge.hole.HolE` class and has various parameters. The entity embeddings are stored in parameter `E`, which is essentially a matrix of $n_e \times d$, where $n_e$ is the number of entities and $d$ is the dimension of each vector.
```
model = hole_model['model']
E = model.params['E']
print(type(E), E.shape)
```
### Converting embeddings to more inspectable format
Unfortunately, `skge` does not provide methods for exploring the embedding space (KG embedding libraries are more geared towards predicting relations), so we will convert the embeddings into an easier-to-explore format. We first convert them into a pair of files for the vectors and the vocabulary, and we will then use the `swivel` library to explore the results.
We first read the list of entities; this is our **vocabulary** (i.e. the names of the synsets and lemmas for which we have embeddings).
```
with open('/content/holographic-embeddings/data/wn30.bin', 'rb') as fin:
wn30_data = pickle.load(fin)
entities = wn30_data['entities']
len(entities)
```
Next, we generate a vocab file and a `tsv` file where each line contains the word and a list of $d$ numbers.
```
vec_file = '/content/wn30_holE_2e.tsv'
vocab_file = '/content/wn30_holE_2e.vocab.txt'
with open(vocab_file, 'w', encoding='utf_8') as f:
for i, w in enumerate(entities):
word = w.strip()
print(word, file=f)
with open(vec_file, 'w', encoding='utf_8') as f:
for i, w in enumerate(entities):
word = w.strip()
embedding = E[i]
print('\t'.join([word] + [str(x) for x in embedding]), file=f)
!wc -l {vec_file}
```
Now that we have these files, we can use `swivel`, which we used in the first notebook, to inspect the embeddings.
#### Download tutorial materials and `swivel` (if necessary)
Download `swivel`; you may already have it in your environment if you executed the first notebook of this tutorial.
```
%cd /content
!git clone https://github.com/HybridNLP2018/tutorial
```
Use the `swivel/text2bin` script to convert the `tsv` embeddings into `swivel`'s binary format.
```
vecbin = '/content/wn30_holE_2e.tsv.bin'
!python /content/tutorial/scripts/swivel/text2bin.py --vocab={vocab_file} --output={vecbin} \
{vec_file}
```
Next, we can load the vectors using `swivel`'s `Vecs` class, which provides easy inspection of neighbors.
```
from tutorial.scripts.swivel import vecs
vectors = vecs.Vecs(vocab_file, vecbin)
```
#### Inspect a few example lemmas and synsets
```
import pandas as pd
pd.DataFrame(vectors.k_neighbors('california'))
wn.synsets('california')
pd.DataFrame(vectors.k_neighbors('california.n.01'))
pd.DataFrame(vectors.k_neighbors('conference'))
pd.DataFrame(vectors.k_neighbors('semantic'))
pd.DataFrame(vectors.k_neighbors('semantic.a.01'))
```
As you can see, the embeddings do not look very good at the moment. In part this is because we only trained the model for 2 epochs. We have pre-calculated a set of HolE embeddings trained for 500 epochs, which you can download and inspect as part of an optional exercise below. Results for these are much better:
| cosine sim | entity |
| ------------- |-------------|
| 1.0000 | lem_california |
| 0.4676 | lem_golden_state |
| 0.4327 | lem_ca |
| 0.4004 | lem_californian |
| 0.3838 | lem_calif. |
| 0.3500 | lem_fade |
| 0.3419 | lem_keystone_state |
| 0.3375 | wn31_antilles.n.01 |
| 0.3356 | wn31_austronesia.n.01 |
| 0.3340 | wn31_overbalance.v.02 |
For the `california.n.01` synset, we also see 'sensible' results:
| cosine sim | entity |
| ------------- |-------------|
| 1.0000 | wn31_california.n.01 |
| 0.4909 | wn31_nevada.n.01 |
| 0.4673 | wn31_arizona.n.01 |
| 0.4593 | wn31_tennessee.n.01 |
| 0.4587 | wn31_new_hampshire.n.01 |
| 0.4555 | wn31_sierra_nevada.n.02 |
| 0.4073 | wn31_georgia.n.01 |
| 0.4048 | wn31_west_virginia.n.01 |
| 0.3991| wn31_north_carolina.n.01 |
| 0.3977 | wn31_virginia.n.01 |
One thing to notice here is that all of the top 10 closely related entities for `california.n.01` are also synsets. Similarly for lemma `california`, the most closely related entities are also lemmas, although some synsets also made it into the top 10 neighbours. This may indicate a tendency of `HolE` to keep lemmas close to other lemmas and synsets close to other synsets. In general, choices about how nodes in the KG are related will affect how their embeddings are interrelated.
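We can check this tendency directly in the embeddings we trained above. The following is a minimal sketch (not part of `skge` or `swivel`): it reuses the `E` matrix and the `entities` list loaded in the earlier cells, and relies on the fact that the locally built dataset uses plain entity names without the `lem_`/`wn31_` prefixes.
```
import numpy as np

def cosine(u, v):
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

ent_idx = {name: i for i, name in enumerate(entities)}
# lemma vs. its first noun synset
print(cosine(E[ent_idx['california']], E[ent_idx['california.n.01']]))
# lemma vs. another lemma of the same synset
print(cosine(E[ent_idx['california']], E[ent_idx['golden_state']]))
```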
# Conclusion and exercises
In this notebook we provided an overview of recent knowledge graph embedding approaches and showed how to use existing implementations to generate word and concept embeddings for WordNet 3.0.
## Exercise: train embeddings on your own KG
If you have a KG of your own, you can adapt the code shown above to generate a graph representation as expected by `skge` and train your embeddings in the same way. Popular public KGs are Freebase and DBpedia.
## Exercise: inspect pre-calculated WordNet 3.0 embeddings
We have used code similar to that shown above to train embeddings for 500 epochs using HolE. You can execute the following cells to download and explore these embeddings. The embeddings are about 142MB, so downloading them may take a few minutes.
```
!mkdir /content/vec/
%cd /content/vec/
!wget https://zenodo.org/record/1446214/files/wn-en-3.0-HolE-500e-150d.tar.gz
!tar -xzf wn-en-3.0-HolE-500e-150d.tar.gz
%ls /content/vec
```
The downloaded tar contains a `tsv.bin` and a `vocab` file like the one we created above. We can use it to load the vectors using `swivel`'s `Vecs`:
```
vocab_file = '/content/vec/wn-en-3.1-HolE-500e.vocab.txt'
vecbin = '/content/vec/wn-en-3.1-HolE-500e.tsv.bin'
wnHolE = vecs.Vecs(vocab_file, vecbin)
```
Now you are ready to start exploring. The only thing to note is that we have added a prefix `lem_` to all lemmas and `wn31_` to all synsets, as shown in the following examples:
```
pd.DataFrame(wnHolE.k_neighbors('lem_california'))
pd.DataFrame(wnHolE.k_neighbors('wn31_california.n.01'))
```
## Repair Operator
The repair operator is mostly problem-dependent. Most commonly, it is used to make sure the algorithm only searches the feasible space. It is applied after the offspring have been reproduced. In the following, we use the knapsack problem to demonstrate the repair operator in *pymoo*.
In the well-known **Knapsack Problem**, a knapsack has to be filled with items without violating the maximum weight constraint. Each item $j$ has a value $b_j \geq 0$ and a weight $w_j \geq 0$ where $j \in \{1, .., m\}$. The binary decision vector $z = (z_1, .., z_m)$ defines whether an item is picked or not. The aim is to maximize the profit $g(z)$:
\begin{eqnarray}
max & & g(z) \\[2mm] \notag
\text{s.t.} & & \sum_{j=1}^m z_j \, w_j \leq Q \\[1mm] \notag
& & z = (z_1, .., z_m) \in \mathbb{B}^m \\[1mm] \notag
g(z) & = & \sum_{j=1}^{m} z_j \, b_j \\[2mm] \notag
\end{eqnarray}
A simple GA will have some infeasible evaluations in the beginning and then tend to concentrate on the feasible space.
```
from pymoo.factory import get_algorithm, get_crossover, get_mutation, get_sampling
from pymoo.optimize import minimize
from pymoo.problems.single.knapsack import create_random_knapsack_problem
problem = create_random_knapsack_problem(30)
algorithm = get_algorithm("ga",
pop_size=200,
sampling=get_sampling("bin_random"),
crossover=get_crossover("bin_hux"),
mutation=get_mutation("bin_bitflip"),
eliminate_duplicates=True)
res = minimize(problem,
algorithm,
termination=('n_gen', 10),
verbose=True)
```
The constraint $\sum_{j=1}^m z_j \, w_j \leq Q$ is fairly easy to satisfy. Therefore, before evaluating the objective function, we can make sure this constraint is not violated by repairing each individual.
A repair class has to be defined, and the population is given as input. The repaired population has to be returned.
```
import numpy as np
from pymoo.model.repair import Repair
class ConsiderMaximumWeightRepair(Repair):
def _do(self, problem, pop, **kwargs):
# maximum capacity for the problem
Q = problem.C
# the packing plan for the whole population (each row one individual)
Z = pop.get("X")
# the corresponding weight of each individual
weights = (Z * problem.W).sum(axis=1)
# now repair each individual i
for i in range(len(Z)):
# the packing plan for i
z = Z[i]
# while the maximum capacity violation holds
while weights[i] > Q:
# randomly select an item currently picked
item_to_remove = np.random.choice(np.where(z)[0])
# and remove it
z[item_to_remove] = False
# adjust the weight
weights[i] -= problem.W[item_to_remove]
# set the design variables for the population
pop.set("X", Z)
return pop
algorithm.repair = ConsiderMaximumWeightRepair()
res = minimize(problem,
algorithm,
termination=('n_gen', 10),
verbose=True)
```
As can be seen, the repair operator makes sure no infeasible solution is evaluated. Even though this example is quite simple, the repair operator is especially useful for more complex constraints where domain-specific knowledge is available.
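As a further illustration (a sketch, not part of *pymoo*), a value-aware repair could remove the picked item with the worst profit-to-weight ratio instead of a random one. This assumes the knapsack problem object exposes the item profits; the attribute name `problem.P` used below is an assumption and may need to be adapted to your pymoo version.
```
import numpy as np
from pymoo.model.repair import Repair

class GreedyWeightRepair(Repair):
    def _do(self, problem, pop, **kwargs):
        Q = problem.C
        Z = pop.get("X")
        # value per unit of weight for each item (problem.P assumed to hold profits)
        ratio = problem.P / problem.W
        for i in range(len(Z)):
            z = Z[i]
            # while the capacity is violated, drop the least valuable picked item
            while (z * problem.W).sum() > Q:
                picked = np.where(z)[0]
                worst = picked[np.argmin(ratio[picked])]
                z[worst] = False
        pop.set("X", Z)
        return pop
```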
# Creation of the Alternative Classification for Modeling
In this notebook, we create a csv file containing the alternative classification of crimes, in 7 categories.
<br>
We also clean and segment the data according to time, location, and neighborhood.
# Cleaning of the Data from data_clean.csv
```
# imports needed by the cells below
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold, validation_curve
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

data = pd.read_csv('data_clean.csv')
data.columns
data
df_data_cat = data[['Incident Datetime', 'Incident Date', 'Incident Time', 'Incident Year',
'Incident Day of Week', 'Report Datetime', 'Row ID', 'Incident ID',
'Incident Number', 'CAD Number', 'Report Type Code',
'Report Type Description', 'Incident Code',
'Incident Category', 'Incident Subcategory', 'Incident Description',
'Resolution', 'Intersection', 'CNN',
'Analysis Neighborhood', 'Latitude', 'Longitude']]
def clean_incident_category(df):
df['Incident Category'].replace('Offence','Offense',regex=True, inplace = True)
df['Incident Category'].replace('Offenses','Offense',regex=True, inplace = True)
#df['Incident Category'].replace('Offense Against The Family And Children', 'Family Offense', regex=False, inplace = True)
df['Incident Category'].replace('Human Trafficking (A), Commercial Sex Acts', 'Human Trafficking', regex=False, inplace = True)
df['Incident Category'].replace('Human Trafficking, Commercial Sex Acts', 'Human Trafficking', regex=False, inplace = True)
df['Incident Category'].replace('Human Trafficking (B), Involuntary Servitude', 'Human Trafficking', regex=False, inplace = True)
df['Incident Category'].replace('Motor Vehicle Theft?', 'Motor Vehicle Theft', regex=False, inplace = True)
df['Incident Category'].replace('Suspicious Occ', 'Suspicious', regex=False, inplace = True)
return
clean_incident_category(df_data_cat)
df_data_cat['Incident Category'].value_counts()
```
# Categorize to 4 groups
Crimes can be classified into 4 legal categories:
* Felonies
  - Murder (PC 187)
  - Homicide
  - Manslaughter
  - Rape (PC 261)
  - Assault with a deadly weapon (PC 245(a)(1))
  - Voluntary Manslaughter (PC 192(a))
  - Involuntary Manslaughter (PC 192(b))
  - Aggravated Battery (PC 243(d))
  - Gross Manslaughter while Intoxicated (PC 191.5(a))
  - Negligent Manslaughter while Intoxicated (PC 191.5(b))
  - Sexual battery (PC 243.4)
  - Kidnapping (PC 207)
  - False Imprisonment (PC 236)
  - Hate Crimes
  - Torture (PC 206)
  - Mayhem (PC 203)
  - Aggravated Mayhem (PC 205)
  - Child Pornography (PC 311.11)
  - Fraud
  - Internet Crimes
  - Drug Possession
  - Drug Distribution
  - Three strikes cases
  - Gang Cases
  - Burglary (PC 459)
  - Robbery (PC 211)
  - Carjacking (PC 215)
  - Grand Theft (PC 487)
  - Auto Theft
  - Domestic violence
  - DUI
  - Obstructing justice
  - Perjury (PC 118)
  - Criminal Threats (PC 422)
* Misdemeanors
  - DUI—VC 23152(a)
  - Driving on a suspended license—VC 14601.1(a)
  - Disorderly conduct—PC 415
  - Public drunkenness—647(f) pc m
  - Petty theft—PC 484/488
  - Shoplifting—PC 459.5
  - Soliciting for an act of prostitution PC 647(b)
  - Probation violations—PC code 1203
  - Domestic violence—PC 273.5
  - Reckless driving California —VC 23103 b
* Felony-misdemeanors
* Infractions
However, some records, namely 'fire report', 'stolen property', 'warrant', etc., do not fit into any of these categories. We thus add three more categories:
* `risk_non_criminal`, comprising records that still pose a risk to users.
* `no_risk`, comprising records that pose no risk.
* `unsure`, comprising records that lack precision.
These last three categories will be used during modeling but dropped for the visualization.
```
felony = ['Arson','Burglary', 'Motor Vehicle Theft', 'Robbery', 'Sex Offense', 'Offense Against The Family And Children', 'Family Offense','Weapons Offense', 'Fraud', 'Homicide', 'Human Trafficking']
#felony = ['Burglary', 'Motor Vehicle Theft', 'Robbery', 'Offense Against The Family And Children', 'Suspicious', 'Rape', 'Human Trafficking', 'Homicide', 'Family Offense (?)']
felony_misdemeanor = ['Weapons Carrying', 'Forgery and Counterfeiting', 'Embezzlement', 'Drug Violation', 'Non-Criminal']
#felony_misdemeanor = ['Larceny Theft', 'Assault', 'Fraud', 'Juvenile Offense (?)']
misdemeanor = ['Disorderly Conduct', 'Liquor Laws', 'Assault', 'Civil Sidewalks', 'Prostitution', 'Gambling', 'Vandalism']
#misdemeanor = ['Gambling', 'Prostitution' ]
infractions = ['Traffic Violation Arrest', 'Malicious Mischief', 'Suspicious', 'Other Offense', 'Stolen Property', 'Forgery and Counterfeiting', 'Traffic Collision', 'Juvenile Offense']
#infractions = ['Liquor Laws', 'Drug Violation', 'Drug Offense', 'Embezzlement', 'Vandalism']
risk_non_criminal = ['Fire Report', 'Stolen Property']
no_risk = ['Warrant', 'Recovered Vehicle', 'Lost Property','Vehicle Misplaced', 'Suicide', 'Vehicle Impounded', 'Case Closure', 'Courtesy Report']
unsure = ['Missing Person', 'Other Miscellaneous', 'Miscellaneous Investigation', 'Other']
groups = [felony, felony_misdemeanor, misdemeanor, infractions, risk_non_criminal, no_risk, unsure]
def categorize_incident(x, groups):
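# note: because of the i-1 index, groups[-1] ('unsure') maps to level 0,
# felony to 1, felony_misdemeanor to 2, misdemeanor to 3, infractions to 4,
# risk_non_criminal to 5 and no_risk to 6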
for i in range(len(groups)):
if x in groups[i-1]:
return i
df_data_cat.loc[:, ['Incident Level']] = df_data_cat['Incident Category'].apply(lambda x: categorize_incident(x, groups))
df_data_cat['Incident Level'].value_counts(normalize=True)
```
# Localize
```
def round_nearest(x):
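# snap a coordinate to the nearest multiple of 0.0025 degrees (one grid cell)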
a=0.0025
return round(x / a) * a
df_data_cat['NewLat'] = round_nearest(df_data_cat['Latitude'])
df_data_cat['NewLon'] = round_nearest(df_data_cat['Longitude'])
df_data_cat.head()
```
# Add time segments
```
# visualize counts of crimes in 24 hr
df_data_cat['Whole Time'] = df_data_cat['Incident Time'].apply(lambda x: x[:2])
plt.plot(df_data_cat['Whole Time'].value_counts().sort_index())
df_clean = df_data_cat
# visualize counts of crimes in 24 hr, ordered by counts
plt.plot(df_clean['Whole Time'].value_counts())
# 00 seems a little inconsistent because both 20-23 and 01-02 have much lower counts
# visualize 00 to see that almost all the crimes "happened" at 00:00
# guess it might be reported like this for simplicity
plt.figure(figsize=(20,5))
plt.plot(df_clean[df_clean['Whole Time']=='00']['Incident Time'].value_counts())
plt.xticks(rotation = 90)
plt.show()
# based on the visualization, categorize times into 4 equal-length periods
morning = ['08','09','10','11','12','13'] # med
afternoon = ['14','15','16','17','18','19'] # high
evening = ['20','21','22','23','00','01'] # med
night = ['02','03','04','05','06','07'] # low
times = [morning, afternoon, evening, night]
def categorize_time(x, times):
if x in times[0]:
return 'Morning'
if x in times[1]:
return 'Afternoon'
if x in times[2]:
return 'Evening'
if x in times[3]:
return 'Night'
df_clean.loc[:, ['Time Seg']] = df_clean['Whole Time'].apply(lambda x: categorize_time(x, times))
df_clean.head()
```
# Add neighborhood
```
def find_neighborhood(x):
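# return the most frequent 'Analysis Neighborhood' among the records in this
# (NewLat, NewLon) grid cell, or NaN if the cell has no labelled records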
l = x.value_counts(normalize=True).index.values
if len(l)==0:
return np.nan
else:
return l[0]
df_nb = df_data_cat.groupby(['NewLat','NewLon'])['Analysis Neighborhood'].apply(lambda x: find_neighborhood(x)).reset_index()
df_nb
```
# Add together
```
df_final = df_data_cat.groupby(['NewLat','NewLon','Time Seg','Incident Level']
).count().sort_values('Incident ID', ascending=False)['Incident ID'].reset_index()
df_final = df_final.pivot(index=['NewLat','NewLon','Time Seg'], columns='Incident Level', values='Incident ID')
df_final = pd.DataFrame(df_final.to_records()).fillna(0)
#df_final['Total'] = df_final.iloc[:, 3:].sum(axis=1)
#df_final['Weighted'] = df_final['0.0']*16 + df_final['1.0']*8 + df_final['2.0']*4 + df_final['3.0']*2 + df_final['4.0']*1 + df_final['5#.0']*0
df_final = df_final.merge(df_nb, how='left', on=['NewLat','NewLon'])
df_final
df_final[df_final['Analysis Neighborhood'].isna()]
df_final['Analysis Neighborhood'].fillna('Oceanview/Merced/Ingleside', inplace=True)
df_final.to_csv('alternative_classification_data_localized.csv')
from google.colab import files
files.download('alternative_classification_data_localized.csv')
file = '/content/drive/MyDrive/NavSafe/Copy of data_localized.csv'
df_data = pd.read_csv(file, index_col=0).drop(['Total','Weighted'],axis=1)
df_data.head()
df_data.sample(n=5, random_state=10)
neighborhood_file = '/content/drive/MyDrive/NavSafe/Copy of data_neighborhood_safety.csv'
neighborhood = pd.read_csv(neighborhood_file)
neighborhood.head()
df_all = df_data.merge(neighborhood, how='left', left_on='Analysis Neighborhood', right_on='Neighborhood').drop(['Analysis Neighborhood','Neighborhood'],axis=1)
df_all.head()
def safety_calc(row):
if row['Time Seg'] == 'Morning':
return row['Average of safe_day']
elif row['Time Seg'] == 'Afternoon':
return row['Average of safe_rate']
else:
return row['Average of safe_night']
df_all['Safe'] = df_all.apply(lambda row: safety_calc(row), axis=1)
df_all = df_all.drop(['Average of safe_day','Average of safe_night','Average of safe_rate'],axis=1)
df_all.head()
group_data = df_all[['1.0','2.0','3.0','4.0','5.0','6.0']]
group_data.describe()
# df_all.loc[(df_all['Average of safe_rate']<3.67) & (df_all['1.0']>10)]
df_all.loc[(df_all['1.0']>50) | (df_all['2.0']>100) | (df_all['3.0']>150)]
df_all['Avoid'] = 0
# df_all.loc[(df_all['Average of safe_rate']<3.67) & (df_all['1.0']>10), 'Avoid'] = 1
df_all.loc[(df_all['1.0']>75) | (df_all['2.0']>100) | (df_all['3.0']>200), 'Avoid'] = 1
df_all.head()
time = pd.get_dummies(df_all['Time Seg'],drop_first=True)
df_train = pd.concat([time, df_all.drop(['NewLat','NewLon','Time Seg'],axis=1)], axis=1)
# df_train[['NewLat','NewLon','Evening','Morning','Night','1.0','2.0','3.0','4.0','5.0','6.0','Safe','Avoid']].head()
df_train.head()
x_train, x_test, y_train, y_test = train_test_split(df_train.drop('Avoid',axis=1), df_train['Avoid'], test_size=0.3, random_state=10)
```
## Logistic Regression with Cross Validation
```
def plot_cv_curve(hyperparm_grid,train_scores,val_scores):
ax = plt.subplot(111)
ax.errorbar(hyperparm_grid,np.mean(train_scores,axis=1),yerr=np.std(train_scores,axis=1),label="train")
ax.errorbar(hyperparm_grid,np.mean(val_scores,axis=1),yerr=np.std(val_scores,axis=1),label="validation")
ax.set_xlabel('Hyperparameter')
ax.set_ylabel('Score')
ax.legend()
ax.grid()
return ax
kf = KFold(5, shuffle=True, random_state=10)
C_grid = np.logspace(-2,2,10)
features = ['1.0','2.0','3.0','4.0','5.0','6.0']
logit_pipe = Pipeline([('columns', ColumnTransformer([('keep', StandardScaler(with_mean=False), features)],
remainder='passthrough')),
('logit', LogisticRegression(max_iter=5000, solver='newton-cg'))])
train_scores, val_scores = validation_curve(logit_pipe, x_train, y_train,
param_name='logit__C', param_range=C_grid, cv=kf)
ax = plot_cv_curve(C_grid,train_scores,val_scores)
ax.set_xlabel('C')
ax.set_ylabel('Accuracy')
ax.set_xscale('log')
logit_final = Pipeline([('columns', ColumnTransformer([('keep', StandardScaler(with_mean=False), features)], remainder='passthrough')),
('logit', LogisticRegression(max_iter=5000, solver='newton-cg', C=10))])
logit_final.fit(x_train, y_train)
pred = logit_final.predict_proba(x_test)[:,1]
y_pred = [1 if i >=0.5 else 0 for i in pred]
cm = confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = cm.ravel()
cm
print ("\nPrecision:", tp/(tp+fp))
print ("\nRecall:", tp/(tp+fn))
!pip install gmaps
!pip install ipywidgets
!pip install widgetsnbextension
import gmaps
import ipywidgets as widgets
from ipywidgets.embed import embed_minimal_html
import IPython
gmaps.configure(api_key='YOUR_API_KEY')  # replace with your own Google Maps API key
```
## Map Visualization
```
# x = df_group1[['Latitude', 'Longitude']].copy()
# kmeans = KMeans(n_clusters=5, random_state=0).fit(x)
# kmeans.cluster_centers_
# x['label'] = kmeans.labels_
# x
# #filter rows of original data
# filtered_label1 = x[x['label'] == 1]
# filtered_label2 = x[x['label'] == 2]
# filtered_label3 = x[x['label'] == 3]
# filtered_label4 = x[x['label'] == 4]
# filtered_label5 = x[x['label'] == 5]
# #Plotting the results
# plt.scatter(filtered_label1['Latitude'] , filtered_label1['Longitude'] , color = 'red')
# plt.scatter(filtered_label2['Latitude'] , filtered_label2['Longitude'] , color = 'black')
# plt.scatter(filtered_label3['Latitude'] , filtered_label3['Longitude'] , color = 'green')
# plt.scatter(filtered_label4['Latitude'] , filtered_label4['Longitude'] , color = 'yellow')
# plt.scatter(filtered_label5['Latitude'] , filtered_label5['Longitude'] , color = 'blue')
# plt.show()
centers = df_data.groupby(['NewLat','NewLon','Incident Level']).count().sort_values('Incident ID', ascending=False)['Incident ID'].reset_index()
centers['Level Weight'] = 7 - centers['Incident Level']
centers['Weight'] = centers['Level Weight'] * centers['Incident ID']
centers = centers.groupby(['NewLat','NewLon']).sum().reset_index()[['NewLat','NewLon','Weight']]
centers.sort_values('Weight', ascending=False).head(20)
locations = centers[['NewLat', 'NewLon']]
weights = centers['Weight']
fig = gmaps.figure()
heatmap_layer = gmaps.heatmap_layer(locations, weights=weights)
fig.add_layer(gmaps.heatmap_layer(locations, weights=weights))
embed_minimal_html('export.html', views=[fig])
IPython.display.HTML(filename="export.html")
```
## Areas to Avoid
```
centers.head()
centers = df_all[(df_all['Avoid']==1) & ((df_all['Time Seg']=='Afternoon'))][['NewLat','NewLon']].drop_duplicates()
centers['Weight'] = 100
locations = centers[['NewLat', 'NewLon']]
weights = centers['Weight']
fig = gmaps.figure()
heatmap_layer = gmaps.heatmap_layer(locations, weights=weights)
fig.add_layer(gmaps.heatmap_layer(locations, weights=weights))
embed_minimal_html('export.html', views=[fig])
IPython.display.HTML(filename="export.html")
# plot time segment points
```
Training and Testing Data
=====================================
To evaluate how well our supervised models generalize, we can split our data into a training and a test set:
<img src="../images/train_test_split.svg" width="80%">
```
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
iris = load_iris()
X, y = iris.data, iris.target
classifier = KNeighborsClassifier()
```
* Thinking about how machine learning is normally performed, the idea of a train/test split makes sense.
* Real world systems train on the data they have, and as other data comes in (from customers, sensors, or other sources) the classifier that was trained must predict on fundamentally *new* data.
* We can simulate this during training using a train/test split - the test data is a simulation of "future data" which will come into the system during production.
Specifically for iris, the labels are sorted, which means that if we split the data without shuffling, the first part would contain only certain labels (0 and 1) and very little of another (2). We want to split as illustrated above, but *after* the data has been randomly shuffled.
```
y
```
To get an accurate simulation of the real world, we will shuffle our data then split.
```
import numpy as np
rng = np.random.RandomState(0)
permutation = rng.permutation(len(X))
X, y = X[permutation], y[permutation]
print(y)
```
* Now we need to split the data into training and testing.
* Luckily, this is a common pattern in machine learning and scikit-learn has a prebuilt function to split data into training and testing for you.
* Here we use 50% of the data as training, and 50% testing. 80% and 20% is another common split, but there are no hard and fast rules.
* The most important thing is to fairly evaluate your system on data it *has not* seen during training!
```
from sklearn.model_selection import train_test_split
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.5, random_state=1999)
print("Labels for training and testing data")
print(train_y)
print(test_y)
```
By evaluating our classifier performance on data that has been seen during training, we could get false confidence in the power of our system.
This might lead to putting a system into production which *fails* at predicting new data! It is much better to use a train/test split in order to properly see how your trained model is doing on new data.
```
classifier.fit(train_X, train_y)
pred_y = classifier.predict(test_X)
print("Fraction Correct")
print(np.sum(pred_y == test_y) / float(len(test_y)))
```
We can also visualize the correct and failed predictions
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
correct_idx = np.where(pred_y == test_y)[0]
print(correct_idx)
incorrect_idx = np.where(pred_y != test_y)[0]
print(incorrect_idx)
# Plot all pairwise combinations of the four feature dimensions (overlaid)
from itertools import combinations
colors = ["darkblue", "darkgreen", "gray"]
for x, y in combinations(range(4), 2):
for n, color in enumerate(colors):
idx = np.where(test_y == n)[0]
plt.scatter(test_X[idx, x], test_X[idx, y], color=color,
label="Class %s" % str(n))
plt.scatter(test_X[incorrect_idx, x], test_X[incorrect_idx, y],
color="darkred")
# Make xlim larger to accommodate legend
plt.xlim(3, 9)
plt.legend(loc='best')
plt.title("Iris Classification results")
plt.show()
```
We can see that the errors occur in the area where green (class 1) and gray (class 2) overlap. This gives us insight about what features to add - any feature which helps separate class 1 and class 2 should improve classifier performance.
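As a quick, illustrative check of that idea (a sketch using the variables already defined above), we can retrain the same classifier on feature subsets and compare test accuracy:
```
# Compare accuracy using only the sepal features (columns 0-1)
# versus only the petal features (columns 2-3) versus all four.
for name, cols in [("sepal only", [0, 1]), ("petal only", [2, 3]), ("all four", [0, 1, 2, 3])]:
    clf = KNeighborsClassifier()
    clf.fit(train_X[:, cols], train_y)
    acc = np.mean(clf.predict(test_X[:, cols]) == test_y)
    print("%s: %.3f" % (name, acc))
```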
# SVM classification/SMOTE oversampling for an imbalanced data set
Date created: Oct 14, 2016
Last modified: Nov 16, 2016
Tags: SVM, SMOTE, ROC/AUC, oversampling, imbalanced data set, semiconductor data
About: Rebalance an imbalanced semiconductor manufacturing dataset by oversampling the minority class using SMOTE. Classify using SVM. Assess the value of oversampling using ROC/AUC.
<h3>I. Introduction</h3>
The [SECOM dataset](http://archive.ics.uci.edu/ml/datasets/SECOM) in the [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml) is semiconductor manufacturing data. There are 1567 records, 590 anonymized features and 104 fails. This makes it an imbalanced dataset with a 14:1 ratio of pass to fails. The process yield has a simple pass/fail response (encoded -1/1).
<h4>Objective</h4>
We consider some of the different approaches to classify imbalanced data. In the [previous example](https://github.com/Meena-Mani/SECOM_class_imbalance/blob/master/secomdata_ocsvm.ipynb) we looked at one-class SVM.
Another strategy is to rebalance the dataset by oversampling the minority class and/or undersampling the majority class. This is done to improve the sensitivity (i.e. the true positive rate) of the minority class. For this exercise, we will look at:
- rebalancing the dataset using SMOTE (which oversamples the minority class)
- ROC curves for different oversampling ratios
<h4>Methodology</h4>
The scikit-learn-contrib [imbalanced-learn (*imblearn*) toolbox](http://contrib.scikit-learn.org/imbalanced-learn/index.html) has many methods for oversampling/undersampling. We will use the SMOTE (Synthetic Minority Over-sampling Technique) method introduced in 2002 by Chawla et al. <a href="#ref1">[1]</a>, <a href="#ref2">[2]</a>. With SMOTE, synthetic examples are interpolated along the line segments joining each minority-class point to some/all of its <i>k</i> nearest minority-class neighbors.
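As a rough illustration of that interpolation step (a minimal sketch, not the *imblearn* implementation), synthetic minority points can be generated as follows:
```
import numpy as np
from sklearn.neighbors import NearestNeighbors

def smote_sketch(X_min, n_synthetic, k=5, seed=0):
    # Toy SMOTE: interpolate between minority points and their k nearest
    # minority-class neighbors. X_min is an (n_minority, n_features) array.
    rng = np.random.RandomState(seed)
    nn = NearestNeighbors(n_neighbors=k + 1).fit(X_min)
    _, idx = nn.kneighbors(X_min)            # idx[:, 0] is the point itself
    synthetic = []
    for _ in range(n_synthetic):
        i = rng.randint(len(X_min))          # pick a minority sample
        j = idx[i, rng.randint(1, k + 1)]    # pick one of its k neighbors
        gap = rng.rand()                     # random position on the segment
        synthetic.append(X_min[i] + gap * (X_min[j] - X_min[i]))
    return np.array(synthetic)
```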
In the experiment, the oversampling rate is varied between 10% and 70%, in 10% increments. The percentage represents the size of the minority class relative to the majority class after oversampling: if the majority class has 1000 data points (and the minority class 50), at 10% the minority class will have 100 data points after oversampling (not 5, or 50+5 = 55).
The rebalanced data is classified using an SVM. The *imblearn* toolbox has a *pipeline* method which will be used to chain all the steps. The SMOTE+SVM method is evaluated by the area under the Receiver Operating Characteristic curve (AUC).
<h4>Preprocessing</h4>
The data represents measurements from a large number of processes or sensors and many of the records are missing. In addition some measurements are identical/constant and so not useful for prediction. We will remove those columns with high missing count or constant values.
The Random Forest variable importance is used to rank the variables in terms of their importance. For the random forest, we will impute the remaining missing values with the median for the column.
We will additionally scale the data that is applied to the SVM. We will use the <i>sklearn preprocessing</i> module for both imputing and scaling.
These are the same steps used for the [one-class SVM](https://github.com/Meena-Mani/SECOM_class_imbalance/blob/master/secomdata_ocsvm.ipynb) and a more detailed explanation can be seen there.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split as tts
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from sklearn.metrics import roc_curve, auc
from __future__ import division
import warnings
warnings.filterwarnings("ignore")
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom.data"
secom = pd.read_table(url, header=None, delim_whitespace=True)
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom_labels.data"
y = pd.read_table(url, header=None, usecols=[0], squeeze=True, delim_whitespace=True)
print 'The dataset has {} observations/rows and {} variables/columns.' \
.format(secom.shape[0], secom.shape[1])
print 'The ratio of majority class to minority class is {}:1.' \
.format(int(y[y == -1].size/y[y == 1].size))
```
<h3>II. Preprocessing </h3>
We process the missing values first, dropping columns which have a large number of missing values and imputing values for those that have only a few missing values.
The Random Forest variable importance is used to rank the variables in terms of their importance. The [one-class SVM](https://github.com/Meena-Mani/SECOM_class_imbalance/blob/master/secomdata_ocsvm.ipynb) exercise has a more detailed version of these steps.
```
# dropping columns which have large number of missing entries
m = map(lambda x: sum(secom[x].isnull()), xrange(secom.shape[1]))
m_200thresh = filter(lambda i: (m[i] > 200), xrange(secom.shape[1]))
secom_drop_200thresh = secom.dropna(subset=[m_200thresh], axis=1)
dropthese = [x for x in secom_drop_200thresh.columns.values if \
secom_drop_200thresh[x].std() == 0]
secom_drop_200thresh.drop(dropthese, axis=1, inplace=True)
print 'The SECOM data set now has {} variables.'\
.format(secom_drop_200thresh.shape[1])
# imputing missing values for the random forest
imp = Imputer(missing_values='NaN', strategy='median', axis=0)
secom_imp = pd.DataFrame(imp.fit_transform(secom_drop_200thresh))
# use Random Forest to assess variable importance
rf = RandomForestClassifier(n_estimators=100, random_state=7)
rf.fit(secom_imp, y)
# sorting features according to their rank
importance = rf.feature_importances_
ranked_indices = np.argsort(importance)[::-1]
```
<h3>III. SVM Classification </h3>
<h4> Preprocessing </h4>
The SVM is sensitive to feature scale, so the first step is to center and normalize the data. Both the train and holdout sets are scaled using the mean and variance computed from the training data only. This is done to get an honest estimate of the ability of the model to generalize.
```
# split data into train and holdout sets
# stratify the sample used for modeling to preserve the class proportions
X_train, X_holdout, y_train, y_holdout = tts(secom_imp[ranked_indices[:40]], y, \
test_size=0.2, stratify=y, random_state=5)
print 'Train data: The majority/minority class have {} and {} elements respectively.'\
.format(y_train[y_train == -1].size, y_train[y_train == 1].size)
print 'The maj/min class ratio is: {0:2.0f}' \
.format(round(y_train[y_train == -1].size/y_train[y_train == 1].size))
print 'Holdout data: The majority/minority class have {} and {} elements respectively.'\
.format(y_holdout[y_holdout == -1].size, y_holdout[y_holdout == 1].size)
print 'The maj/min class ratio for the holdout set is: {0:2.0f}' \
.format(round(y_holdout[y_holdout == -1].size/y_holdout[y_holdout == 1].size))
# scaling the split data. The holdout data uses scaling parameters
# computed from the training data
standard_scaler = StandardScaler()
X_train_scaled = pd.DataFrame(standard_scaler.fit_transform(X_train), \
index=X_train.index)
X_holdout_scaled = pd.DataFrame(standard_scaler.transform(X_holdout))
# Note: we convert to a DataFrame because the plot functions
# we will use need DataFrame inputs.
```
<h4> Finding parameters </h4>
The usual way to select parameters is via grid search and cross-validation (CV), with scoring based on accuracy. When the classes are imbalanced, the true positives of the majority class dominate. Often, there is a high cost associated with misclassifying the minority class, and in those cases alternative [scoring measures](http://scikit-learn.org/stable/modules/model_evaluation.html) such as the [F1](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html) and [$F_{\beta}$](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.fbeta_score.html) scores or the [Matthews Correlation Coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html) (which uses all four values of the confusion matrix) are used.
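For example (a sketch, not part of the analysis below), such a metric can be plugged into a grid search as the CV scoring function:
```
# Sketch: grid-search C and gamma with an imbalance-aware scorer
# (Matthews correlation coefficient) instead of plain accuracy.
from sklearn.metrics import make_scorer, matthews_corrcoef
try:
    from sklearn.model_selection import GridSearchCV   # newer sklearn
except ImportError:
    from sklearn.grid_search import GridSearchCV       # older sklearn

mcc_scorer = make_scorer(matthews_corrcoef)
param_grid = {'C': [1, 2, 5], 'gamma': [0.0005, 0.001, 0.005]}
grid = GridSearchCV(SVC(), param_grid, scoring=mcc_scorer, cv=5)
# grid.fit(X_train_scaled, y_train)   # uncomment to run (can be slow)
```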
In CV experiments on this data, the majority class still dominates so that for the best CV F1-scores, the True Negative Rate (TNR - the rate at which the minority class is correctly classified) is zero.
Instead of automating the selection of hyperparameters, I have manually selected <i>C</i> and $\gamma$ values for which the precision/recall/F1 values as well as the TNR are high.
An example is shown below.
```
# oversampling
ratio = 0.5
smote = SMOTE(ratio = ratio, kind='regular')
smox, smoy = smote.fit_sample(X_train_scaled, y_train)
print 'Before resampling: \n\
The majority/minority class have {} and {} elements respectively.'\
.format(y_train[y_train == -1].size, y_train[y_train == 1].size)
print 'After oversampling at {}%: \n\
The majority/minority class have {} and {} elements respectively.'\
.format(ratio, smoy[smoy == -1].size, smoy[smoy == 1].size)
# plotting minority class distribution after SMOTE
# column 4 displayed
from IPython.html.widgets import interact
@interact(ratio=[0.1,1.0])
def plot_dist(ratio):
sns.set(style="white", font_scale=1.3)
fig, ax = plt.subplots(figsize=(7,5))
smote = SMOTE(ratio = ratio, kind='regular')
smox, smoy = smote.fit_sample(X_train_scaled, y_train)
smox_df = pd.DataFrame(smox)
ax = sns.distplot(smox_df[4][smoy == 1], color='b', \
kde=False, label='after')
ax = sns.distplot(X_train_scaled[4][y_train == 1], color='r', \
kde=False, label='before')
ax.set_ylim([0, 130])
ax.set(xlabel='')
ax.legend(title='Ratio = {}'.format(ratio))
plt.title('Minority class distribution before and after oversampling')
plt.show()
# classification results
from sklearn.metrics import confusion_matrix, matthews_corrcoef,\
classification_report, roc_auc_score, accuracy_score
# manually selected parameters
clf = SVC(C = 2, gamma = .0008)
clf.fit(smox, smoy)
y_predicted = clf.predict(X_holdout_scaled)
print 'The accuracy is: {0:4.2} \n' \
.format(accuracy_score(y_holdout, y_predicted))
print 'The confusion matrix: '
cm = confusion_matrix(y_holdout, y_predicted)
print cm
print '\nThe True Negative rate is: {0:4.2}' \
.format(float(cm[1][1])/np.sum(cm[1]))
print '\nThe Matthews correlation coefficient: {0:4.2f} \n' \
.format(matthews_corrcoef(y_holdout, y_predicted))
print(classification_report(y_holdout, y_predicted))
print 'The AUC is: {0:4.2}'\
.format(roc_auc_score(y_holdout, y_predicted))
```
For these manually selected parameters, the TNR is 0.38, the Matthews correlation coefficient is 0.21 and the precision/recall/F1 is in the 0.86 - 0.90 range. Selecting the best CV score (usually in the 0.90 range), on the other hand, would have given a TNR of 0 for all the scoring metrics I looked at.
<h4>The Pipeline -- Oversampling, classification and ROC computations </h4>
The *imblearn* package includes a [pipeline](http://contrib.scikit-learn.org/imbalanced-learn/generated/imblearn.pipeline.Pipeline.html#imblearn.pipeline.Pipeline) module which allows one to chain transformers, resamplers and estimators. We compute the ROC curves for each of the oversampling ratios and the corresponding hyperparameters C and gamma; for this, we use the pipeline to oversample with SMOTE and classify with the SVM.
```
# oversampling, classification and computing ROC values
fpr = dict()
tpr = dict()
roc_auc = dict()
ratio = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
C = [3, 3, 3, 2, 2, 2, 2]
gamma = [.02, .009, .009, .005, .0008, .0009, .0007]
estimators = [('smt', SMOTE(random_state=42)),
('clf', SVC(probability=True, random_state=42))]
pipe = Pipeline(estimators)
print pipe
for i, ratio, C, gamma in zip(range(7), ratio, C, gamma):
pipe.set_params(smt__ratio = ratio, clf__C = C, clf__gamma = gamma)
probas_ = pipe.fit(X_train_scaled, y_train).predict_proba(X_holdout_scaled)
fpr[i], tpr[i], _ = roc_curve(y_holdout, probas_[:,1])
roc_auc[i] = auc(fpr[i], tpr[i])
# plotting the ROC curves
def plot_roc(fpr, tpr, roc_auc):
colors = ['darkorange', 'deeppink', 'red', 'aqua', 'cornflowerblue','navy', 'blue']
plt.figure(figsize=(10,8.5))
for i, color in zip(range(7), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2, linestyle=':',
label='{0} (area = {1:0.2f})'
''.format((i+1)/10, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=1)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curves: SMOTE oversampled minority class', fontsize=14)
plt.legend(title='Class ratio after oversampling', loc="lower right")
plt.show()
plt.savefig('ROC_oversampling.png')
plot_roc(fpr, tpr, roc_auc)
```
<h3>IV. Discussion</h3>
There is a trend in the ROC curves (the ROC convex hull) in the figure above, with the higher oversampling ratios (0.7 vs. 0.1) having a higher AUC. An obvious question, then, is whether increasing the oversampling ratio to get a balanced data set would give the best results.
In the experiment, no significant improvements were seen in the 0.5 - 0.8 (0.8 not plotted) regime. Oversampling with SMOTE broadens the decision region around the minority points (so we would expect better results) but the coverage may exceed the decision surface<sup>**</sup>. The level of oversampling therefore needs to be experimentally determined.
Another strategy to balance the classes is to combine oversampling (the minority class) with undersampling (the majority class). Chawla et al. had reported <a href="#ref1">[1]</a> that a combination of oversampling and undersampling gave the best results. We will experiment with this combination in a future exercise.
<sup>**</sup>It should also be noted that oversampling results in a significant increase (and bias) in the minority class. For instance, for a 0.5 ratio, the minority class is increased seven-fold (from 83 to 585). A completely balanced data set would involve a fourteen-fold increase in the minority class and this would alter the decision surface.
<h3>V. References and Further Reading </h3>
<a name="ref1"></a>[1] [Nitesh V. Chawla, Kevin W. Bowyer, Lawrence O. Hall, and W. Philip Kegelmeyer. SMOTE: synthetic minority over-sampling technique. J. Artif. Int. Res. 16, 1 (June 2002), 321-357. ](https://www.cs.cmu.edu/afs/cs/project/jair/pub/volume16/chawla02a-html/chawla2002.html)
<a name="ref2"></a>[2] [Chawla, Nitesh V. Data Mining for Imbalanced Datasets: An Overview. In: Maimon, Oded; Rokach, Lior (Eds) Data Mining and Knowledge Discovery Handbook, Springer, (2010), 875-886.](http://www3.nd.edu/~dial/publications/chawla2005data.pdf)
<a name="ref3"></a>[3] [Altini, Marco. "Dealing with Imbalanced Data: Undersampling, Oversampling and Proper Cross-validation." Web log post. Marco Altini Blog. N.p., 17 Aug. 2015. Web.](http://www.marcoaltini.com/blog/dealing-with-imbalanced-data-undersampling-oversampling-and-proper-cross-validation)
<div style="background-color: #FAAC58; margin-left: 0px; margin-right: 20px; padding-bottom: 8px; padding-left: 8px; padding-right: 8px; padding-top: 8px;">
Author: Meena Mani <br>
email: meenas.mailbag@gmail.com <br>
twitter: @meena_uvaca <br>
</div>
# Morisita-Horn similarity calculation
```
from __future__ import print_function
from collections import Counter
from datetime import datetime
import itertools
import multiprocessing as mp
import os
import subprocess as sp
import sys
import tempfile
import time
import numpy as np
import pandas as pd
from abutils.utils.jobs import monitor_mp_jobs
from abutils.utils.pipeline import list_files, make_dir
from abutils.utils.progbar import progress_bar
```
### User-defined options
By default, the size of the bootstrap samples will increase exponentially, as the similarity plots will be drawn with a logarithmic x-axis. However, the option is also given to draw bootstrap samples that increase linearly in size rather than exponentially. The following options may be adjusted depending on the desired output:
* `iterations` is the number of replicate samplings for each subsample size. Default is `10`.
* `max_power_of_10` is the highest exponent of 10 for which subsamples will be drawn. For example, the default value of `7` means that the largest bootstrap sample will be `10^7`, or 10 million, sequences. The lowest acceptable value is `2`, as subsampling fewer than 100 sequences is not especially useful.
* `subsample_fraction` is the fraction of each `power_of_10` multiple at which the bootstrap sample size increases. For example, the default value of `0.3` results in the following multipliers: `[1.0, 1.3, 1.6, 1.9]`. For a `power_of_10` of 10^6, for example, a `subsample_fraction` of `0.3` would result in the following bootstrap sample sizes: `1.0x10^6, 1.3x10^6, 1.6x10^6, and 1.9x10^6`.
* `subsample_size` is the size multiple for each subsample. By default (if `subsample_fraction` is provided), this will not be used. This option is only provided in case you would prefer the subsample size pools to increase in linear fashion, rather than exponentially.
Note that the data directory (`'./data/techrep-merged_vj-cdr3len_no-header/'`) is not present in this Github repo, as the size of the files far exceeds what is allowed by Github. You can download a compressed archive containing the appropriate data files [**here**](http://burtonlab.s3.amazonaws.com/GRP_github_data/techrep-merged_vj-cdr3len_no-header.tar.gz). Decompressing the archive inside of `./data` (the "data" directory found in the same parent directory as this notebook) should allow you to run the following code without alteration.
```
iterations = 10
max_power_of_10 = 7
subsample_fraction = 0.3
subsample_size = 25000
data_dir = './data/techrep-merged_vj-cdr3len_no-header/'
temp_dir = './data/temp/'
output_dir = './data/user-calculated_mh_similarity/'
```
### Subjects and directories
```
individual_files_dir = os.path.join(output_dir, 'individual_comparisons')
make_dir(temp_dir)
make_dir(output_dir)
make_dir(individual_files_dir)
with open('../data_processing/data/subjects.txt') as f:
subjects = sorted(f.read().split())
```
### Morisita-Horn similarity
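For reference, the quantity computed below is the Morisita-Horn index. For two samples with counts $x_i$ and $y_i$ in each VJ-CDR3len category, and totals $X = \sum_i x_i$ and $Y = \sum_i y_i$, it is

$$C_{MH} = \frac{2\sum_i x_i y_i}{\left(\frac{\sum_i x_i^2}{X^2} + \frac{\sum_i y_i^2}{Y^2}\right) X Y},$$

which ranges from 0 (no shared categories) to 1 (identical relative frequencies).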
```
def mh_similarity(sample1, sample2):
'''
Calculates the Morisita-Horn similarity for two samples.
.. note:
sample1 and sample2 should be the same length, and
the sum of each sample should be greater than 0.
Args:
sample1 (list): list of frequencies for sample 1
sample2 (list): list of frequencies for sample 2
Returns:
float: Morisita-Horn similarity (between 0 and 1)
'''
X = sum(sample1)
Y = sum(sample2)
XY = X * Y
sumXiYi = 0
sumXiSq = 0
sumYiSq = 0
for x, y in zip(sample1, sample2):
sumXiYi += x * y
sumXiSq += x * x
sumYiSq += y * y
num = 2 * sumXiYi
denom = (float(sumXiSq) / (X * X) + float(sumYiSq) / (Y * Y)) * XY
return 1. * num / denom
def load_data(files):
'''
Loads VJ-CDR3len data from a list of files corresponding to a single subject.
Args:
files (list): a list of files containing data to load. Data will be
loaded for all files and returned as a single list.
Returns:
list: a combined list of data from all of the files
'''
data = []
for f in files:
with open(f) as of:
for line in of:
_d = '_'.join(line.strip().split())
if _d:
data.append(_d)
return data
def get_subsample_sizes():
'''
Returns a list of subsample sizes, based on user-defined subsampling options.
'''
if subsample_fraction is not None:
sizes = []
for mpt in range(2, max_power_of_10 + 1):
start = 10**(mpt - 1)
end = 10**mpt
step = int(10**mpt * float(subsample_fraction))
sizes += list(range(start, end, step))
sizes.append(10**mpt)
else:
sizes = range(subsample_size, 10 ** max_power_of_10, subsample_size)
return sizes
def compute_frequencies(data, iterations, size):
'''
Subsamples a dataset (with replacement) and computes the VJ-CDR3len
frequency for each bootstrap sample.
Args:
data (list): a list of antibody sequences collapsed to just VJ-CDR3len
iterations (int): the number of bootstrap samplings to be performed
size (int): the size (in sequences) of each bootstrap sample
Returns:
list(Counter): a list of VJ-CDR3len frequencies (as Counter objects)
'''
subsamples = np.random.choice(data, size=(iterations, size), replace=True)
freqs = []
for subsample in subsamples:
freqs.append(Counter(subsample))
return freqs
def compute_similarity_for_single_size(sub1_data, sub2_data, iterations, size):
'''
For a single bootstrap sampling size, computes Morisita-Horn similarity of
two datasets.
Args:
sub1_data (list): a list of VJ-CDR3len values
sub2_data (list): a list of VJ-CDR3len values
iterations (int): the number of iterations to be performed
size (int): size (in sequences) of each bootstrap sampling
Returns:
int: the size of each bootstrap sampling
list: a list of Morisita-Horn similarities, of length `iterations`
'''
similarities = []
sub1_freqs = compute_frequencies(sub1_data, iterations, size)
sub2_freqs = compute_frequencies(sub2_data, iterations, size)
for s1, s2 in zip(sub1_freqs, sub2_freqs):
freq_df = pd.DataFrame({'sub1': s1, 'sub2': s2}).fillna(0)
similarities.append(mh_similarity(freq_df['sub1'], freq_df['sub2']))
return size, similarities
def calculate_similarities(subject1, subject2, iterations, sizes):
'''
Performs Morisita-Horn similarity calculations on VJ-CDR3len data for two subjects.
Args:
subject1 (str): name of subject 1
subject2 (str): name of subject 2
iterations (int): number of iterations to be performed for each bootstrap sample size
sizes (list(int)): a list of bootstrap sample sizes
Returns:
sub_header (str): a header line containing subject information
similarities (dict): similarity scores, with the dict keys being sample sizes
and values being lists of similarity scores of length `iterations`
'''
sub_header = '#{} {}'.format(subject1, subject2)
sub1_dir = os.path.join(data_dir, subject1)
sub2_dir = os.path.join(data_dir, subject2)
sub1_files = list_files(sub1_dir)
sub2_files = list_files(sub2_dir)
similarities = {}
output_data = [sub_header, ]
output_file = os.path.join(individual_files_dir, '{}-{}'.format(subject1, subject2))
# load all of the files into memory
sub1 = os.path.basename(os.path.dirname(sub1_files[0]))
sub1_data = load_data(sub1_files)
sub2 = os.path.basename(os.path.dirname(sub2_files[0]))
sub2_data = load_data(sub2_files)
for size in sizes:
similarities[size] = []
sub1_freqs = compute_frequencies(sub1_data, iterations, size)
sub2_freqs = compute_frequencies(sub2_data, iterations, size)
for s1, s2 in zip(sub1_freqs, sub2_freqs):
freq_df = pd.DataFrame({'sub1': s1, 'sub2': s2}).fillna(0)
similarities[size].append(mh_similarity(freq_df['sub1'], freq_df['sub2']))
output_data.append(' '.join([str(v) for v in [size] + similarities[size]]))
with open(output_file, 'w') as f:
f.write('\n'.join(output_data))
return sub_header, similarities
```
### Calculate similarity
Morisita-Horn similarity will be calculated for each pairwise combination of subjects (including self-comparisons). The work is parallelized with one pairwise comparison per worker process; with 10 subjects there are a total of 55 comparisons, and the pool size set below (7 worker processes) controls how many of them run concurrently.
```
# get a list of subsample sizes, based on user-defined options
sizes = get_subsample_sizes()
# get a list of all pairwise combinations of subjects (including self-comparison)
combinations = list(itertools.combinations_with_replacement(subjects, 2))
p = mp.Pool(processes=7, maxtasksperchild=1)
start = datetime.now()
async_results = []
# initialize the progress bar
jobs = len(combinations)
progress_bar(0, jobs, start_time=start)
# calculate the similarity score for each pairwise combination of subjects
for subject1, subject2 in combinations:
async_results.append(p.apply_async(calculate_similarities, args=(subject1, subject2, iterations, sizes)))
monitor_mp_jobs(async_results, start_time=start)
results = [ar.get() for ar in async_results]
p.close()
p.join()
```
### Combine similarity files
Each pairwise comparison resulted in a separate output file. Here we combine them into a single similarities file.
```
combined_output_file = os.path.join(output_dir, 'mh-similarities_combined.txt')
individual_files = list_files(individual_files_dir)
with open(combined_output_file, 'w') as f:
f.write('')
cat_cmd = 'for f in {}/*; do (cat "${{f}}"; echo) >> {}; done'.format(individual_files_dir.rstrip('/'), combined_output_file)
p = sp.Popen(cat_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
stdout, stderr = p.communicate()
for s in subjects:
print(s)
```
```
%matplotlib inline
import sys
import numpy as np
import numpy.random as rnd
import time
import GPflow
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
M = 50
```
# Create a dataset and initialise model
```
def func(x):
return np.sin(x * 3*3.14) + 0.3*np.cos(x * 9*3.14) + 0.5 * np.sin(x * 7*3.14)
X = rnd.rand(10000, 1) * 2 - 1
Y = func(X) + rnd.randn(10000, 1) * 0.2
plt.plot(X, Y, 'x')
D = X.shape[1]
Xt = np.linspace(-1.1, 1.1, 100)[:, None]
Yt = func(Xt)
def init():
kern = GPflow.kernels.RBF(D, 1)
Z = X[:M, :].copy()
m = GPflow.svgp.SVGP(X, Y, kern, GPflow.likelihoods.Gaussian(), Z, minibatch_size=len(X))
return m
m = init()
```
# Stochastically calculate bound and show noise
The minibatch estimate should be an unbiased estimator of the `ground_truth`. Here we show a histogram of the value from different evaluations, together with its mean and the ground truth. The small difference between the mean of the minibatch estimations and the ground truth shows that the minibatch estimator is working as expected.
```
ground_truth = m.compute_log_likelihood()
m.X.minibatch_size = 100
m.Y.minibatch_size = 100
evals = [m.compute_log_likelihood() for _ in range(100)]
plt.hist(evals)
plt.axvline(ground_truth)
```
# Show that minibatches speed up computation
The point of using minibatches is that it decreases the time needed to make an optimisation step, since estimating the objective is cheaper. Here we plot how the time required changes with the size of the minibatch. We see that smaller minibatches result in a cheaper estimate of the objective.
```
mbps = np.logspace(-2, 0, 10)
times = []
objs = []
for mbp in mbps:
m.X.minibatch_size = m.Y.minibatch_size = int(len(X) * mbp)
start_time = time.time()
objs.append([m.compute_log_likelihood() for _ in range(20)])
# plt.hist(objs, bins = 100)
# plt.axvline(ground_truth, color='r')
times.append(time.time() - start_time)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
ax1.plot(mbps, times, 'x-')
ax1.set_xlabel("Minibatch proportion")
ax1.set_ylabel("Time taken")
ax2.plot(mbps, np.array(objs), 'kx')
ax2.set_xlabel("Minibatch proportion")
ax2.set_ylabel("ELBO estimates")
```
# Show actual stochastic optimization
```
def plot():
pX = np.linspace(-1, 1, 100)[:, None]
pY, pYv = m.predict_y(pX)
plt.plot(X, Y, 'x')
line, = plt.plot(pX, pY, lw=1.5)
col = line.get_color()
plt.plot(pX, pY+2*pYv**0.5, col, lw=1.5)
plt.plot(pX, pY-2*pYv**0.5, col, lw=1.5)
plt.plot(m.Z.value, np.zeros(m.Z.value.shape), 'k|', mew=2)
plot()
plt.title("Predictions before training")
st = time.time()
logt = []
logx = []
logf = []
def logger(x):
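# record the current objective value and elapsed time on every 10th call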
if (logger.i % 10) == 0:
logx.append(x)
logf.append(m._objective(x)[0])
logt.append(time.time() - st)
logger.i+=1
logger.i = 1
m.X.minibatch_size = 100
m.Y.minibatch_size = 100
m.Z.fixed = True
m.optimize(method=tf.train.AdamOptimizer(), max_iters=np.inf, callback=logger)
plt.plot(-np.array(logf))
plt.xlabel('iteration')
plt.ylabel('ELBO')
plot()
plt.title("Predictions after training")
```
# Train and deploy on Kubeflow from Notebooks
This notebook introduces you to using Kubeflow Fairing to train and deploy a model to Kubeflow on Google Kubernetes Engine (GKE), and Google Cloud ML Engine. This notebook demonstrates how to:
* Train an XGBoost model in a local notebook,
* Use Kubeflow Fairing to train an XGBoost model remotely on Kubeflow,
* Data is read from a PVC
* The append builder is used to rapidly build a docker image
* Use Kubeflow Fairing to deploy a trained model to Kubeflow, and
* Call the deployed endpoint for predictions.
To learn more about how to run this notebook locally, see the guide to [training and deploying on GCP from a local notebook][gcp-local-notebook].
[gcp-local-notebook]: https://kubeflow.org/docs/fairing/gcp-local-notebook/
## Set up your notebook for training an XGBoost model
Import the libraries required to train this model.
```
import demo_util
from pathlib import Path
import os
fairing_code = os.path.join(Path.home(), "git_jlewi-kubecon-demo", "fairing")
demo_util.notebook_setup(fairing_code)
# fairing:include-cell
import ames
import fire
import joblib
import logging
import nbconvert
import os
import pathlib
import sys
from pathlib import Path
import pandas as pd
import pprint
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from xgboost import XGBRegressor
from importlib import reload
# Imports not to be included in the built docker image
import kfp
import kfp.components as comp
import kfp.gcp as gcp
import kfp.dsl as dsl
import kfp.compiler as compiler
from kubernetes import client as k8s_client
import fairing
from fairing.builders import append
from fairing.deployers import job
import fairing_util
```
Define various constants
```
nfs_path = os.path.join("/mnt/kubeflow-gcfs/data/ames_dataset")
model_dir = os.path.join("/mnt/kubeflow-gcfs/models")
train_data = "/mnt/kubeflow-gcfs/data/ames_dataset/train.csv"
model_file = os.path.join(model_dir, "trained_ames_model.dat")
# Base image is built from the Dockerfile in the repo
# Can be the same image as your notebook
base_image = "gcr.io/code-search-demo/kubecon-demo/notebook:v20190518-2d04328-dirty-a8c2a5"
# Copy data to nfs
demo_util.copy_data_to_nfs(nfs_path, model_dir)
```
## Define Train and Predict functions
```
# fairing:include-cell
class HousingServe(object):
    def __init__(self, model_file=None):
        self.n_estimators = 50
        self.learning_rate = 0.1
        if not model_file:
            print("model_file not supplied; checking environment variable")
            model_file = os.getenv("MODEL_FILE")
        self.model_file = model_file
        print("model_file={0}".format(self.model_file))
        self.model = None

    def train(self, train_input, model_file):
        (train_X, train_y), (test_X, test_y) = ames.read_input(train_input)
        model = ames.train_model(train_X,
                                 train_y,
                                 test_X,
                                 test_y,
                                 self.n_estimators,
                                 self.learning_rate)
        ames.eval_model(model, test_X, test_y)
        ames.save_model(model, model_file)

    def predict(self, X, feature_names):
        """Predict using the model for given ndarray."""
        if not self.model:
            print("Loading model {0}".format(self.model_file))
            self.model = ames.load_model(self.model_file)
        # Do any preprocessing
        prediction = self.model.predict(data=X)
        # Do any postprocessing
        return [[prediction.item(0), prediction.item(1)]]

    def create_pr_to_update_model(self, job_spec_file, new_model):
        ames.create_pr_to_update_model(job_spec_file, new_model)

    def deploy_model(self, model_file):
        ames.deploy_model(model_file)

    def validate_model(self, endpoint):
        ames.validate_model(endpoint)
```
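The class above delegates most of the work to helper functions in the repo's `ames.py` module, which is not included in this notebook. As a rough, hypothetical sketch of what those helpers might look like (based only on the imports above, not on the actual module):
```
# Hypothetical sketch of the ames.py helpers used by HousingServe above.
# The real module is in the demo repo; names, defaults, and behaviour here are
# guesses based on the notebook's imports, not the actual implementation.
import joblib
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

def read_input(file_name, test_size=0.25):
    """Read the Ames CSV and return imputed train/test splits."""
    data = pd.read_csv(file_name)
    data.dropna(axis=0, subset=["SalePrice"], inplace=True)
    y = data.SalePrice
    X = data.drop(["SalePrice"], axis=1).select_dtypes(exclude=["object"])
    train_X, test_X, train_y, test_y = train_test_split(
        X.values, y.values, test_size=test_size, shuffle=False)
    imputer = SimpleImputer()
    return (imputer.fit_transform(train_X), train_y), (imputer.transform(test_X), test_y)

def train_model(train_X, train_y, test_X, test_y, n_estimators, learning_rate):
    """Fit an XGBoost regressor, using the test split for early stopping
    (fit-time early_stopping_rounds matches the older xgboost sklearn API)."""
    model = XGBRegressor(n_estimators=n_estimators, learning_rate=learning_rate)
    model.fit(train_X, train_y,
              early_stopping_rounds=40,
              eval_set=[(test_X, test_y)])
    return model

def eval_model(model, test_X, test_y):
    """Report mean absolute error on the held-out split."""
    predictions = model.predict(test_X)
    print("mean_absolute_error={:.2f}".format(mean_absolute_error(predictions, test_y)))

def save_model(model, model_file):
    """Persist the trained model to disk."""
    joblib.dump(model, model_file)

def load_model(model_file):
    """Load a previously saved model."""
    return joblib.load(model_file)
```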
## Train your Model Locally
* Train your model locally inside your notebook
```
local_model_file = "/tmp/trained_model.dat"
housing = HousingServe(local_model_file)
housing.train(train_data, local_model_file)
```
## Predict locally
* Run prediction inside the notebook using the newly trained model
```
(train_X, train_y), (test_X, test_y) = ames.read_input("ames_dataset/train.csv")
housing.predict(test_X, None)
```
## Use Fairing to Launch a K8s Job to train your model
### Set up Kubeflow Fairing for training and predictions
Import the `fairing` library and configure the environment that your training or prediction job will run in.
```
# Set up Google Container Registry (GCR) for storing output containers
# You can use any docker container registry instead of GCR
GCP_PROJECT = fairing.cloud.gcp.guess_project_name()
DOCKER_REGISTRY = 'gcr.io/{}/fairing-job'.format(GCP_PROJECT)
PY_VERSION = ".".join([str(x) for x in sys.version_info[0:3]])
BASE_IMAGE = 'python:{}'.format(PY_VERSION)
```
## Use Fairing to build the docker image
* This uses the append builder, which packages the notebook code as a single new layer on top of the base image and pushes the result straight to the registry, so images build quickly without a full docker build
```
preprocessor = fairing_util.ConvertNotebookPreprocessorWithFire("HousingServe")
if not preprocessor.input_files:
    preprocessor.input_files = set()
input_files = ["ames.py", "deployment/update_model_job.yaml", "update_model.py"]
preprocessor.input_files = set([os.path.normpath(f) for f in input_files])
preprocessor.preprocess()
builder = append.append.AppendBuilder(registry=DOCKER_REGISTRY,
                                      base_image=base_image,
                                      preprocessor=preprocessor)
builder.build()
```
## Launch the K8s Job
* Use pod mutators to attach a PVC and credentials to the pod (a sketch of such a mutator follows below)
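`fairing_util.add_pvc_mutator` comes from the demo repo and is not shown here. A pod spec mutator is simply a callable that Fairing invokes with the generated pod spec so it can be patched before submission; a hypothetical sketch of a PVC mutator (the signature and behaviour are assumptions, not the repo's implementation) might look like this:
```
# Hypothetical sketch of a PVC pod-spec mutator in the spirit of
# fairing_util.add_pvc_mutator. The real helper lives in the demo repo; the
# (kube_manager, pod_spec, namespace) signature is assumed from how fairing
# calls its built-in mutators.
from kubernetes import client as k8s_client

def add_pvc_mutator(pvc_name, mount_path):
    def mutator(kube_manager, pod_spec, namespace):
        volume = k8s_client.V1Volume(
            name=pvc_name,
            persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
                claim_name=pvc_name))
        mount = k8s_client.V1VolumeMount(name=pvc_name, mount_path=mount_path)
        pod_spec.volumes = (pod_spec.volumes or []) + [volume]
        for container in pod_spec.containers:
            container.volume_mounts = (container.volume_mounts or []) + [mount]
    return mutator
```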
```
pod_spec = builder.generate_pod_spec()
pvc_mutator = fairing_util.add_pvc_mutator("kubeflow-gcfs", "/mnt/kubeflow-gcfs")
train_deployer = job.job.Job(namespace="kubeflow",
                             cleanup=False,
                             pod_spec_mutators=[
                                 fairing.cloud.gcp.add_gcp_credentials_if_exists,
                                 pvc_mutator])
# Add command line arguments
pod_spec.containers[0].command.extend(["train", train_data, model_file])
result = train_deployer.deploy(pod_spec)
!kubectl get jobs -l fairing-id={train_deployer.job_id} -o yaml
```
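While the job runs, you can also tail the training logs directly (a hypothetical convenience, reusing the same `fairing-id` label as the `kubectl get jobs` call above):
```
!kubectl logs -l fairing-id={train_deployer.job_id} --tail=100
```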
## Deploy the trained model to Kubeflow for predictions
```
from fairing.deployers import serving
import fairing_util
pod_spec = builder.generate_pod_spec()
pvc_mutator = fairing_util.add_pvc_mutator("kubeflow-gcfs", "/mnt/kubeflow-gcfs")
module_name = os.path.splitext(preprocessor.executable.name)[0]
deployer = serving.serving.Serving(module_name + ".HousingServe",
                                   service_type="ClusterIP",
                                   labels={"app": "ames"})
pvc_mutator(None, pod_spec, deployer.namespace)
pod_spec.containers[0].env.append({"name": "MODEL_FILE", "value": model_file})
url = deployer.deploy(pod_spec)
!kubectl get deploy -o yaml {deployer.deployment.metadata.name}
```
## Call the prediction endpoint
Create a test dataset, then call the endpoint on Kubeflow for predictions.
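`fairing_util.predict_nparray` is another repo helper that is not shown in this notebook. A plausible sketch, assuming it POSTs the features to the endpoint as a Seldon-style JSON payload (the exact payload shape is an assumption):
```
# Hypothetical sketch of fairing_util.predict_nparray: serialize the ndarray
# into a Seldon-style payload and POST it to the serving endpoint. The real
# helper is in the demo repo; the payload shape here is an assumption.
import requests

def predict_nparray(url, data, feature_names=None):
    payload = {
        "data": {
            "names": feature_names,
            "tensor": {
                "shape": list(data.shape),
                "values": data.flatten().tolist(),
            },
        }
    }
    return requests.post(url, json=payload)
```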
```
(train_X, train_y), (test_X, test_y) = ames.read_input("ames_dataset/train.csv")
full_url = url + ":5000/predict"
result = fairing_util.predict_nparray(full_url, test_X)
pprint.pprint(result.content)
```
## Clean up the prediction endpoint
Delete the prediction endpoint created by this notebook.
```
# !kubectl delete service -l app=ames
# !kubectl delete deploy -l app=ames
```
## Build a simple 1 step pipeline
```
EXPERIMENT_NAME = 'Ames'
```
#### Define the pipeline
The pipeline function has to be decorated with the `@dsl.pipeline` decorator.
```
@dsl.pipeline(
    name='Training pipeline',
    description='A pipeline that trains an xgboost model for the Ames dataset.'
)
def train_pipeline(
    train_data="gs://code-search-demo_ames/data/ames_dataset/train.csv",
    model_file="gs://code-search-demo_ames/output/hello-world1.txt",
):
    command = ["python", preprocessor.executable.name, "train", train_data, model_file]
    train_op = dsl.ContainerOp(
        name="train",
        image=builder.image_tag,
        command=command,
    ).apply(
        gcp.use_gcp_secret('user-gcp-sa'),
    )
    train_op.container.working_dir = "/app"
```
#### Compile the pipeline
```
pipeline_func = train_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.zip'
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
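The compiler serializes the pipeline into an archive containing the generated workflow spec. If you want to peek at what was produced (assuming the `.zip` extension really does yield a zip archive), something like this works:
```
# Optional: inspect the compiled package. This assumes the .zip extension
# produces a zip archive containing the generated workflow definition.
import zipfile

with zipfile.ZipFile(pipeline_filename) as archive:
    print(archive.namelist())
```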
#### Submit the pipeline for execution
```
#Specify pipeline argument values
arguments = {"train_data": "gs://code-search-demo_ames/data/ames_dataset/train.csv",
             "model_file": "gs://code-search-demo_ames/output/hello-world1.txt"}
# Get or create an experiment and submit a pipeline run
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
# run_result displays a link to the run information page. (Note: a JupyterLab bug may modify the URL and break the link.)
```
## Define a pipeline for CI/CD
* Define a pipeline that trains the model
* Then deploy the model and verify it is working
* If the model is good, create a PR updating the model in the deployment
```
CICD_EXPERIMENT_NAME = 'Ames CICD'
@dsl.pipeline(
    name='Ames CICD pipeline',
    description='A pipeline that trains an xgboost model for the Ames dataset and updates it.'
)
def cicd_pipeline(
    train_data="gs://code-search-demo_ames/data/ames_dataset/train.csv",
    model_file="gs://code-search-demo_ames/output/default.txt",
):
    command = ["python", preprocessor.executable.name, "train", train_data, model_file]
    train_op = dsl.ContainerOp(
        name="train",
        image=builder.image_tag,
        command=command,
    ).apply(
        gcp.use_gcp_secret('user-gcp-sa'),
    )
    train_op.container.working_dir = "/app"

    command = ["python3", preprocessor.executable.name, "deploy-model", model_file]
    deploy_op = dsl.ContainerOp(
        name="deploy-model",
        image=builder.image_tag,
        command=command,
    )
    deploy_op.container.working_dir = "/app"
    deploy_op.after(train_op)

    command = ["python3", preprocessor.executable.name, "validate-model", model_file]
    validate_op = dsl.ContainerOp(
        name="validate-model",
        image=builder.image_tag,
        command=command,
    )
    validate_op.container.working_dir = "/app"
    validate_op.after(deploy_op)

    command = ["python3", preprocessor.executable.name, "create-pr-to-update-model",
               "deployment/update_model_job.yaml", model_file]
    pr_op = dsl.ContainerOp(
        name="create-pr",
        image=builder.image_tag,
        command=command,
    )
    pr_op.container.working_dir = "/app"
    pr_op.after(validate_op)
pipeline_func = cicd_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.zip'
compiler.Compiler().compile(pipeline_func, pipeline_filename)
import datetime
gcs_model_file = "gs://code-search-demo_ames/models/" + datetime.datetime.now().strftime("%y%m%d_%H%M%S")
#Specify pipeline argument values
arguments = {"train_data": "gs://code-search-demo_ames/data/ames_dataset/train.csv",
             "model_file": gcs_model_file}
# Get or create an experiment and submit a pipeline run
client = kfp.Client()
experiment = client.create_experiment(CICD_EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```